Lines Matching +full:wait +full:-free +full:-us
1 // SPDX-License-Identifier: GPL-2.0
5 #include "space-info.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
11 #include "block-group.h"
15 #include "extent-tree.h"
29 * reservations we care about total_bytes - SUM(space_info->bytes_) when
35 * code on the rules for each type, but generally block_rsv->reserved is how
36 * much space is accounted for in space_info->bytes_may_use.
50 * ->reserve
51 * space_info->bytes_may_use += num_bytes
53 * ->extent allocation
55 * space_info->bytes_may_use -= num_bytes
56 * space_info->bytes_reserved += extent_bytes
58 * ->insert reference
60 * space_info->bytes_reserved -= extent_bytes
61 * space_info->bytes_used += extent_bytes
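Taken together, the three steps above move a reservation through bytes_may_use -> bytes_reserved -> bytes_used. The following is a minimal userspace sketch of those counter transitions; the struct and helper names are invented for illustration, and the locking the kernel does around space_info is omitted.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the three space_info counters described above. */
struct model_space_info {
	uint64_t bytes_may_use;    /* speculative reservations */
	uint64_t bytes_reserved;   /* allocated extents, reference not yet inserted */
	uint64_t bytes_used;       /* extents referenced on disk */
};

static void model_reserve(struct model_space_info *si, uint64_t num_bytes)
{
	si->bytes_may_use += num_bytes;             /* ->reserve */
}

static void model_alloc_extent(struct model_space_info *si,
			       uint64_t num_bytes, uint64_t extent_bytes)
{
	si->bytes_may_use -= num_bytes;             /* ->extent allocation */
	si->bytes_reserved += extent_bytes;
}

static void model_insert_ref(struct model_space_info *si, uint64_t extent_bytes)
{
	si->bytes_reserved -= extent_bytes;         /* ->insert reference */
	si->bytes_used += extent_bytes;
}

int main(void)
{
	struct model_space_info si = {0};

	model_reserve(&si, 1 << 20);                 /* worst-case 1MiB reservation */
	model_alloc_extent(&si, 1 << 20, 64 << 10);  /* only 64KiB actually allocated */
	model_insert_ref(&si, 64 << 10);
	printf("may_use=%llu reserved=%llu used=%llu\n",
	       (unsigned long long)si.bytes_may_use,
	       (unsigned long long)si.bytes_reserved,
	       (unsigned long long)si.bytes_used);
	return 0;
}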
63 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
68 * -> __reserve_bytes
69 * create a reserve_ticket with ->bytes set to our reservation, add it to
70 * the tail of space_info->tickets, kick async flush thread
72 * ->handle_reserve_ticket
73 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
76 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
77 * Flushes various things attempting to free up space.
79 * -> btrfs_try_granting_tickets()
81 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
82 * space_info->total_bytes. This loops through the ->priority_tickets and
83 * then the ->tickets list checking to see if the reservation can be
84 * completed. If it can the space is added to space_info->bytes_may_use and
87 * -> ticket wakeup
88 * Check if ->bytes == 0, if it does we got our reservation and we can carry
95 * space_info->priority_tickets, and we do not use ticket->wait, we simply
96 * call flush_space() ourselves for the states that are safe for us to call
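Both paths converge on the same ticket contract: a waiter is done when ticket->bytes reaches 0 or ticket->error is set; the only difference is who drives the flushing. Below is a compilable sketch of that contract with invented names, the waitqueue and locking replaced by a plain polling loop.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Invented, simplified ticket; the real one sits on space_info->tickets or
 * ->priority_tickets and is woken through ticket->wait. */
struct model_ticket {
	uint64_t bytes;   /* remaining bytes still to satisfy */
	int error;        /* set instead of bytes reaching 0 on failure */
};

/*
 * Normal tickets sleep here while the async flusher runs flush_space() and
 * btrfs_try_granting_tickets() zeroes ->bytes; priority tickets drive the
 * flush step themselves.  The exit condition is identical for both.
 */
static int model_handle_ticket(struct model_ticket *t,
			       bool (*make_progress)(struct model_ticket *t))
{
	while (t->bytes > 0 && t->error == 0) {
		if (!make_progress(t))
			t->error = -ENOSPC;   /* flushing exhausted */
	}
	return t->error;
}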
105 * things however hold reservations, and so letting them run allows us to
119 * running delalloc, but usually we need to wait for ordered extents to
142 * out of a pre-tickets era where we could end up committing the transaction
151 * reserve more space than is currently free in the currently allocated
158 * free space in the allocated metadata chunks.
169 return s_info->bytes_used + s_info->bytes_reserved + in btrfs_space_info_used()
170 s_info->bytes_pinned + s_info->bytes_readonly + in btrfs_space_info_used()
171 s_info->bytes_zone_unusable + in btrfs_space_info_used()
172 (may_use_included ? s_info->bytes_may_use : 0); in btrfs_space_info_used()
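This sum feeds the "free = total_bytes - used" arithmetic used by the dump and overcommit code further down. A worked example with invented numbers, showing how overcommitted bytes_may_use can push the printed free space negative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented example numbers, all in bytes. */
	uint64_t total_bytes    = 8ULL << 30;    /* 8GiB of metadata chunks */
	uint64_t bytes_used     = 5ULL << 30;
	uint64_t bytes_reserved = 512ULL << 20;
	uint64_t bytes_pinned   = 256ULL << 20;
	uint64_t bytes_readonly = 64ULL << 20;
	uint64_t zone_unusable  = 0;
	uint64_t bytes_may_use  = 3ULL << 30;    /* overcommitted reservations */

	/* Equivalent of btrfs_space_info_used(info, true). */
	uint64_t used = bytes_used + bytes_reserved + bytes_pinned +
			bytes_readonly + zone_unusable + bytes_may_use;

	/* The "free" value printed by the dump code; negative here. */
	printf("used=%llu free=%lld\n", (unsigned long long)used,
	       (long long)(total_bytes - used));
	return 0;
}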
181 struct list_head *head = &info->space_info; in btrfs_clear_space_info_full()
185 found->full = 0; in btrfs_clear_space_info_full()
200 return fs_info->zone_size; in calc_chunk_size()
210 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G) in calc_chunk_size()
222 WRITE_ONCE(space_info->chunk_size, chunk_size); in btrfs_update_space_info_chunk_size()
234 return -ENOMEM; in create_space_info()
237 INIT_LIST_HEAD(&space_info->block_groups[i]); in create_space_info()
238 init_rwsem(&space_info->groups_sem); in create_space_info()
239 spin_lock_init(&space_info->lock); in create_space_info()
240 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; in create_space_info()
241 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; in create_space_info()
242 INIT_LIST_HEAD(&space_info->ro_bgs); in create_space_info()
243 INIT_LIST_HEAD(&space_info->tickets); in create_space_info()
244 INIT_LIST_HEAD(&space_info->priority_tickets); in create_space_info()
245 space_info->clamp = 1; in create_space_info()
249 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; in create_space_info()
255 list_add(&space_info->list, &info->space_info); in create_space_info()
257 info->data_sinfo = space_info; in create_space_info()
270 disk_super = fs_info->super_copy; in btrfs_init_space_info()
272 return -EINVAL; in btrfs_init_space_info()
305 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_add_bg_to_space_info()
307 found = btrfs_find_space_info(info, block_group->flags); in btrfs_add_bg_to_space_info()
309 spin_lock(&found->lock); in btrfs_add_bg_to_space_info()
310 found->total_bytes += block_group->length; in btrfs_add_bg_to_space_info()
311 found->disk_total += block_group->length * factor; in btrfs_add_bg_to_space_info()
312 found->bytes_used += block_group->used; in btrfs_add_bg_to_space_info()
313 found->disk_used += block_group->used * factor; in btrfs_add_bg_to_space_info()
314 found->bytes_readonly += block_group->bytes_super; in btrfs_add_bg_to_space_info()
315 found->bytes_zone_unusable += block_group->zone_unusable; in btrfs_add_bg_to_space_info()
316 if (block_group->length > 0) in btrfs_add_bg_to_space_info()
317 found->full = 0; in btrfs_add_bg_to_space_info()
319 spin_unlock(&found->lock); in btrfs_add_bg_to_space_info()
321 block_group->space_info = found; in btrfs_add_bg_to_space_info()
323 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_add_bg_to_space_info()
324 down_write(&found->groups_sem); in btrfs_add_bg_to_space_info()
325 list_add_tail(&block_group->list, &found->block_groups[index]); in btrfs_add_bg_to_space_info()
326 up_write(&found->groups_sem); in btrfs_add_bg_to_space_info()
332 struct list_head *head = &info->space_info; in btrfs_find_space_info()
338 if (found->flags & flags) in btrfs_find_space_info()
352 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) in calc_available_free_space()
357 avail = atomic64_read(&fs_info->free_chunk_space); in calc_available_free_space()
360 * If we have dup, raid1 or raid10 then only half of the free in calc_available_free_space()
369 * If we aren't flushing all things, let us overcommit up to in calc_available_free_space()
370 * half of the space. If we can flush, don't let us overcommit in calc_available_free_space()
388 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) in btrfs_can_overcommit()
394 if (used + bytes < space_info->total_bytes + avail) in btrfs_can_overcommit()
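The overcommit test above boils down to comparing the request against total_bytes plus a scaled-down slice of the unallocated device space. A sketch of that decision with invented names; the exact divisors are illustrative only, since the real code derives them from the RAID profile and the flush level:

#include <stdbool.h>
#include <stdint.h>

static bool model_can_overcommit(uint64_t total_bytes, uint64_t used,
				 uint64_t bytes, uint64_t unallocated,
				 bool mirrored_profile, bool may_flush_all)
{
	uint64_t avail = unallocated;   /* fs_info->free_chunk_space analogue */

	if (mirrored_profile)           /* dup/raid1/raid10 store two copies */
		avail /= 2;
	if (may_flush_all)              /* flushing can still help, be stricter */
		avail /= 8;
	else
		avail /= 2;

	return used + bytes < total_bytes + avail;
}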
402 if (!list_empty(&ticket->list)) { in remove_ticket()
403 list_del_init(&ticket->list); in remove_ticket()
404 ASSERT(space_info->reclaim_size >= ticket->bytes); in remove_ticket()
405 space_info->reclaim_size -= ticket->bytes; in remove_ticket()
410 * This is for space we already have accounted in space_info->bytes_may_use, so
419 lockdep_assert_held(&space_info->lock); in btrfs_try_granting_tickets()
421 head = &space_info->priority_tickets; in btrfs_try_granting_tickets()
430 if ((used + ticket->bytes <= space_info->total_bytes) || in btrfs_try_granting_tickets()
431 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, in btrfs_try_granting_tickets()
435 ticket->bytes); in btrfs_try_granting_tickets()
437 ticket->bytes = 0; in btrfs_try_granting_tickets()
438 space_info->tickets_id++; in btrfs_try_granting_tickets()
439 wake_up(&ticket->wait); in btrfs_try_granting_tickets()
445 if (head == &space_info->priority_tickets) { in btrfs_try_granting_tickets()
446 head = &space_info->tickets; in btrfs_try_granting_tickets()
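A condensed model of that two-list walk, with the lists flattened into arrays and the overcommit fallback omitted; names are invented. The important property is that granting stops at the first ticket that does not fit, preserving FIFO fairness:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct model_ticket {
	uint64_t bytes;
	bool granted;
};

static void model_grant_tickets(uint64_t total_bytes, uint64_t *used,
				struct model_ticket *prio, size_t nprio,
				struct model_ticket *normal, size_t nnormal)
{
	struct model_ticket *lists[2] = { prio, normal };
	size_t counts[2] = { nprio, nnormal };

	/* Priority tickets first, then the normal list, strictly in order. */
	for (int l = 0; l < 2; l++) {
		for (size_t i = 0; i < counts[l]; i++) {
			struct model_ticket *t = &lists[l][i];

			if (*used + t->bytes > total_bytes)
				return;        /* first misfit stops the walk */
			*used += t->bytes;     /* becomes bytes_may_use */
			t->bytes = 0;
			t->granted = true;     /* wake_up(&ticket->wait) */
		}
	}
}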
454 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
455 spin_lock(&__rsv->lock); \
457 __rsv->size, __rsv->reserved); \
458 spin_unlock(&__rsv->lock); \
463 switch (space_info->flags) { in space_info_flag_to_str()
490 lockdep_assert_held(&info->lock); in __btrfs_dump_space_info()
492 /* The free space could be negative in case of overcommit */ in __btrfs_dump_space_info()
493 btrfs_info(fs_info, "space_info %s has %lld free, is %sfull", in __btrfs_dump_space_info()
495 (s64)(info->total_bytes - btrfs_space_info_used(info, true)), in __btrfs_dump_space_info()
496 info->full ? "" : "not "); in __btrfs_dump_space_info()
499 info->total_bytes, info->bytes_used, info->bytes_pinned, in __btrfs_dump_space_info()
500 info->bytes_reserved, info->bytes_may_use, in __btrfs_dump_space_info()
501 info->bytes_readonly, info->bytes_zone_unusable); in __btrfs_dump_space_info()
512 spin_lock(&info->lock); in btrfs_dump_space_info()
515 spin_unlock(&info->lock); in btrfs_dump_space_info()
520 down_read(&info->groups_sem); in btrfs_dump_space_info()
522 list_for_each_entry(cache, &info->block_groups[index], list) { in btrfs_dump_space_info()
525 spin_lock(&cache->lock); in btrfs_dump_space_info()
526 avail = cache->length - cache->used - cache->pinned - in btrfs_dump_space_info()
527 cache->reserved - cache->delalloc_bytes - in btrfs_dump_space_info()
528 cache->bytes_super - cache->zone_unusable; in btrfs_dump_space_info()
531 cache->start, cache->length, cache->used, cache->pinned, in btrfs_dump_space_info()
532 cache->reserved, cache->delalloc_bytes, in btrfs_dump_space_info()
533 cache->bytes_super, cache->zone_unusable, in btrfs_dump_space_info()
534 avail, cache->ro ? "[readonly]" : ""); in btrfs_dump_space_info()
535 spin_unlock(&cache->lock); in btrfs_dump_space_info()
541 up_read(&info->groups_sem); in btrfs_dump_space_info()
588 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); in shrink_delalloc()
589 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); in shrink_delalloc()
601 * worth of reservations, however that's not available to us in shrink_delalloc()
613 trans = current->journal_info; in shrink_delalloc()
616 * If we are doing more ordered than delalloc we need to just wait on in shrink_delalloc()
618 * that likely won't give us the space back we need. in shrink_delalloc()
641 * This exists because we do not want to wait for each in shrink_delalloc()
643 * start the IO on everybody, and then come back here and wait in shrink_delalloc()
646 * can decide if we wait for that or not. in shrink_delalloc()
652 async_pages = atomic_read(&fs_info->async_delalloc_pages); in shrink_delalloc()
657 * We don't want to wait forever, if we wrote less pages in this in shrink_delalloc()
658 * loop than we have outstanding, only wait for that number of in shrink_delalloc()
659 * pages, otherwise we can wait for all async pages to finish in shrink_delalloc()
663 async_pages -= nr_pages; in shrink_delalloc()
666 wait_event(fs_info->async_submit_wait, in shrink_delalloc()
667 atomic_read(&fs_info->async_delalloc_pages) <= in shrink_delalloc()
672 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); in shrink_delalloc()
680 * If we are flushing for preemption we just want a one-shot of delalloc in shrink_delalloc()
687 spin_lock(&space_info->lock); in shrink_delalloc()
688 if (list_empty(&space_info->tickets) && in shrink_delalloc()
689 list_empty(&space_info->priority_tickets)) { in shrink_delalloc()
690 spin_unlock(&space_info->lock); in shrink_delalloc()
693 spin_unlock(&space_info->lock); in shrink_delalloc()
696 &fs_info->delalloc_bytes); in shrink_delalloc()
698 &fs_info->ordered_bytes); in shrink_delalloc()
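The page-waiting arithmetic in the middle of this function deserves a worked example: if more async pages are outstanding than we just submitted, we only wait for the backlog to shrink by our own contribution; otherwise we wait for everything. A small standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Returns the outstanding-page count to wait down to. */
static uint64_t model_wait_target(uint64_t outstanding, uint64_t submitted)
{
	if (outstanding > submitted)
		return outstanding - submitted;   /* wait only for our pages */
	return 0;                                 /* little left: wait for all */
}

int main(void)
{
	/* 10000 pages in flight, we queued 256: wait until 9744 remain. */
	printf("%llu\n", (unsigned long long)model_wait_target(10000, 256));
	/* Only 100 in flight: just wait for all of them. */
	printf("%llu\n", (unsigned long long)model_wait_target(100, 256));
	return 0;
}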
711 struct btrfs_root *root = fs_info->tree_root; in flush_space()
722 nr = -1; in flush_space()
727 if (ret == -ENOENT) in flush_space()
747 if (ret == -ENOENT) in flush_space()
766 btrfs_get_alloc_profile(fs_info, space_info->flags), in flush_space()
771 if (ret > 0 || ret == -ENOSPC) in flush_space()
776 * If we have pending delayed iputs then we could free up a in flush_space()
784 ASSERT(current->journal_info == NULL); in flush_space()
787 * current one or wait it fully commits in case its commit is in flush_space()
789 * because that does not wait for a transaction to fully commit in flush_space()
795 if (ret == -ENOENT) in flush_space()
802 ret = -ENOSPC; in flush_space()
806 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, in flush_space()
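flush_space() is driven by its callers as a ladder of increasingly expensive states. The sketch below captures only that escalation shape; the state names are simplified placeholders rather than the kernel's enum btrfs_flush_state values, and each real state maps to the work seen above (delayed items, delalloc, delayed refs, chunk allocation, delayed iputs, transaction commit):

#include <stdbool.h>

enum model_flush_state {
	MODEL_FLUSH_DELAYED_ITEMS,
	MODEL_FLUSH_DELALLOC,
	MODEL_FLUSH_DELAYED_REFS,
	MODEL_ALLOC_CHUNK,
	MODEL_COMMIT_TRANS,
	MODEL_NR_STATES,
};

/* Escalate one state at a time until the reservation fits or we run out. */
static bool model_flush_until_satisfied(bool (*run_state)(enum model_flush_state),
					bool (*reservation_met)(void))
{
	for (int state = 0; state < MODEL_NR_STATES; state++) {
		if (reservation_met())
			return true;
		run_state((enum model_flush_state)state);
	}
	return reservation_met();
}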
817 u64 to_reclaim = space_info->reclaim_size; in btrfs_calc_reclaim_metadata_size()
819 lockdep_assert_held(&space_info->lock); in btrfs_calc_reclaim_metadata_size()
827 * before, and now we're well over-committed based on our current free in btrfs_calc_reclaim_metadata_size()
831 if (space_info->total_bytes + avail < used) in btrfs_calc_reclaim_metadata_size()
832 to_reclaim += used - (space_info->total_bytes + avail); in btrfs_calc_reclaim_metadata_size()
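In other words, the reclaim target is the queued ticket bytes plus however far the used total has grown past what the space_info plus its overcommit headroom can cover. A worked example with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t reclaim_size = 32ULL << 20;   /* sum of queued ticket bytes */
	uint64_t total_bytes  = 8ULL << 30;
	uint64_t avail        = 1ULL << 30;    /* calc_available_free_space() */
	uint64_t used         = 10ULL << 30;   /* btrfs_space_info_used() */
	uint64_t to_reclaim   = reclaim_size;

	if (total_bytes + avail < used)
		to_reclaim += used - (total_bytes + avail);

	/* 32MiB + (10GiB - 9GiB) = 1GiB + 32MiB */
	printf("to_reclaim=%llu\n", (unsigned long long)to_reclaim);
	return 0;
}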
840 const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); in need_preemptive_reclaim()
845 thresh = mult_perc(space_info->total_bytes, 90); in need_preemptive_reclaim()
847 lockdep_assert_held(&space_info->lock); in need_preemptive_reclaim()
849 /* If we're just plain full then async reclaim just slows us down. */ in need_preemptive_reclaim()
850 if ((space_info->bytes_used + space_info->bytes_reserved + in need_preemptive_reclaim()
854 used = space_info->bytes_may_use + space_info->bytes_pinned; in need_preemptive_reclaim()
865 if (used - global_rsv_size <= SZ_128M) in need_preemptive_reclaim()
872 if (space_info->reclaim_size) in need_preemptive_reclaim()
876 * If we have over half of the free space occupied by reservations or in need_preemptive_reclaim()
887 * if our reclaimable space exceeds our clamped free space. in need_preemptive_reclaim()
889 * Our clamping range is 2^1 -> 2^8. Practically speaking that means in need_preemptive_reclaim()
906 used = space_info->bytes_used + space_info->bytes_reserved + in need_preemptive_reclaim()
907 space_info->bytes_readonly + global_rsv_size; in need_preemptive_reclaim()
908 if (used < space_info->total_bytes) in need_preemptive_reclaim()
909 thresh += space_info->total_bytes - used; in need_preemptive_reclaim()
910 thresh >>= space_info->clamp; in need_preemptive_reclaim()
912 used = space_info->bytes_pinned; in need_preemptive_reclaim()
917 * around. Preemptive flushing is only useful in that it can free up in need_preemptive_reclaim()
918 * space before tickets need to wait for things to finish. In the case in need_preemptive_reclaim()
919 * of ordered extents, preemptively waiting on ordered extents gets us in need_preemptive_reclaim()
921 * simply have to slow down writers by forcing them to wait on ordered in need_preemptive_reclaim()
929 * waste time and cause us to slow down. in need_preemptive_reclaim()
937 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; in need_preemptive_reclaim()
938 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); in need_preemptive_reclaim()
940 used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) + in need_preemptive_reclaim()
941 btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv); in need_preemptive_reclaim()
943 used += space_info->bytes_may_use - global_rsv_size; in need_preemptive_reclaim()
946 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); in need_preemptive_reclaim()
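The heart of this heuristic is comparing the flushable reservations against the free space shifted right by the clamp (the 2^1..2^8 range mentioned above). A reduced model with invented names; the early-exit checks and the exact makeup of "flushable" are left out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool model_need_preemptive_reclaim(uint64_t total_bytes, uint64_t used_bytes,
					  uint64_t unallocated, unsigned int clamp,
					  uint64_t flushable)
{
	uint64_t thresh = unallocated;           /* calc_available_free_space() */

	if (used_bytes < total_bytes)
		thresh += total_bytes - used_bytes;  /* unused chunk space */
	thresh >>= clamp;                        /* clamp is 1..8 */

	return flushable > thresh;
}

int main(void)
{
	/* 2GiB of slack clamped by 3 gives a 256MiB threshold; 300MiB of
	 * flushable reservations exceeds it, so preemptive flushing kicks in. */
	printf("%d\n", model_need_preemptive_reclaim(8ULL << 30, 7ULL << 30,
						     1ULL << 30, 3, 300ULL << 20));
	return 0;
}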
953 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in steal_from_global_rsv()
956 if (!ticket->steal) in steal_from_global_rsv()
959 if (global_rsv->space_info != space_info) in steal_from_global_rsv()
962 spin_lock(&global_rsv->lock); in steal_from_global_rsv()
963 min_bytes = mult_perc(global_rsv->size, 10); in steal_from_global_rsv()
964 if (global_rsv->reserved < min_bytes + ticket->bytes) { in steal_from_global_rsv()
965 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
968 global_rsv->reserved -= ticket->bytes; in steal_from_global_rsv()
970 ticket->bytes = 0; in steal_from_global_rsv()
971 wake_up(&ticket->wait); in steal_from_global_rsv()
972 space_info->tickets_id++; in steal_from_global_rsv()
973 if (global_rsv->reserved < global_rsv->size) in steal_from_global_rsv()
974 global_rsv->full = 0; in steal_from_global_rsv()
975 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
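The steal path only dips into the global reserve when roughly 10% of it would still remain afterwards. A minimal model of that floor check (names invented):

#include <stdbool.h>
#include <stdint.h>

static bool model_steal_from_global(uint64_t rsv_size, uint64_t *rsv_reserved,
				    uint64_t ticket_bytes)
{
	uint64_t min_bytes = rsv_size / 10;   /* keep ~10% of the reserve */

	if (*rsv_reserved < min_bytes + ticket_bytes)
		return false;                 /* would cut too deep, refuse */
	*rsv_reserved -= ticket_bytes;
	return true;                          /* ticket->bytes = 0, wake waiter */
}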
981 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
982  * @fs_info: fs_info for this fs
983  * @space_info: the space info we were flushing
999 u64 tickets_id = space_info->tickets_id; in maybe_fail_all_tickets()
1009 while (!list_empty(&space_info->tickets) && in maybe_fail_all_tickets()
1010 tickets_id == space_info->tickets_id) { in maybe_fail_all_tickets()
1011 ticket = list_first_entry(&space_info->tickets, in maybe_fail_all_tickets()
1019 ticket->bytes); in maybe_fail_all_tickets()
1023 ticket->error = -EIO; in maybe_fail_all_tickets()
1025 ticket->error = -ENOSPC; in maybe_fail_all_tickets()
1026 wake_up(&ticket->wait); in maybe_fail_all_tickets()
1037 return (tickets_id != space_info->tickets_id); in maybe_fail_all_tickets()
1041 * This is for normal flushers; we can wait all goddamned day if we want to. We
1057 spin_lock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1060 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1061 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1064 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_metadata_space()
1065 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1070 spin_lock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1071 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_metadata_space()
1072 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1073 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1078 if (last_tickets_id == space_info->tickets_id) { in btrfs_async_reclaim_metadata_space()
1081 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_metadata_space()
1084 commit_cycles--; in btrfs_async_reclaim_metadata_space()
1113 commit_cycles--; in btrfs_async_reclaim_metadata_space()
1115 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1121 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1126 * This handles pre-flushing of metadata space before we get to the point that
1128 * from the other flush paths because it doesn't rely on tickets to tell us how
1129 * much we need to flush; instead it attempts to keep us below the 80% full
1146 delayed_block_rsv = &fs_info->delayed_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1147 delayed_refs_rsv = &fs_info->delayed_refs_rsv; in btrfs_preempt_reclaim_metadata_space()
1148 global_rsv = &fs_info->global_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1149 trans_rsv = &fs_info->trans_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1151 spin_lock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1171 if (block_rsv_size < space_info->bytes_may_use) in btrfs_preempt_reclaim_metadata_space()
1172 delalloc_size = space_info->bytes_may_use - block_rsv_size; in btrfs_preempt_reclaim_metadata_space()
1179 block_rsv_size -= global_rsv_size; in btrfs_preempt_reclaim_metadata_space()
1189 } else if (space_info->bytes_pinned > in btrfs_preempt_reclaim_metadata_space()
1192 to_reclaim = space_info->bytes_pinned; in btrfs_preempt_reclaim_metadata_space()
1203 spin_unlock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1207 * down the to_reclaim by 1/4. If it takes us down to 0, in btrfs_preempt_reclaim_metadata_space()
1215 spin_lock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1219 if (loops == 1 && !space_info->reclaim_size) in btrfs_preempt_reclaim_metadata_space()
1220 space_info->clamp = max(1, space_info->clamp - 1); in btrfs_preempt_reclaim_metadata_space()
1222 spin_unlock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
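Each preemptive pass picks whichever reservation pool dominates and only asks flush_space() for a quarter of it, so background reclaim nibbles at the backlog instead of draining it in one go. A small sketch of that portioning (names invented):

#include <stdint.h>

static uint64_t model_preempt_portion(uint64_t dominant_pool_bytes,
				      uint64_t one_item_bytes)
{
	uint64_t to_reclaim = dominant_pool_bytes >> 2;   /* quarter of the pool */

	if (!to_reclaim)          /* never ask for zero work */
		to_reclaim = one_item_bytes;
	return to_reclaim;
}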
1234 * length to ->bytes_reserved, and subtracts the reserved space from
1235 * ->bytes_may_use.
1246 * immediately re-usable; it comes in the form of a delayed ref, which must be
1273 space_info = fs_info->data_sinfo; in btrfs_async_reclaim_data_space()
1275 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1276 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1277 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1278 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1281 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1282 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1284 while (!space_info->full) { in btrfs_async_reclaim_data_space()
1286 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1287 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1288 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1289 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1296 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1297 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1303 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1304 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1305 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1306 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1310 if (last_tickets_id == space_info->tickets_id) { in btrfs_async_reclaim_data_space()
1313 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1318 if (space_info->full) { in btrfs_async_reclaim_data_space()
1322 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1332 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1338 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1339 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1344 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); in btrfs_init_async_reclaim_work()
1345 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); in btrfs_init_async_reclaim_work()
1346 INIT_WORK(&fs_info->preempt_reclaim_work, in btrfs_init_async_reclaim_work()
1377 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1383 * to_reclaim but ->bytes == 0. in priority_reclaim_metadata_space()
1385 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1386 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1391 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1395 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1396 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1397 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1406 * success to the caller if we can steal from the global rsv - this is in priority_reclaim_metadata_space()
1408 * modify the fs, making it easier to debug -ENOSPC problems. in priority_reclaim_metadata_space()
1411 ticket->error = BTRFS_FS_ERROR(fs_info); in priority_reclaim_metadata_space()
1414 ticket->error = -ENOSPC; in priority_reclaim_metadata_space()
1424 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1431 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1434 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1435 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1439 while (!space_info->full) { in priority_reclaim_data_space()
1440 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1442 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1443 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1444 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1449 ticket->error = -ENOSPC; in priority_reclaim_data_space()
1452 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1460 DEFINE_WAIT(wait); in wait_reserve_ticket()
1463 spin_lock(&space_info->lock); in wait_reserve_ticket()
1464 while (ticket->bytes > 0 && ticket->error == 0) { in wait_reserve_ticket()
1465 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); in wait_reserve_ticket()
1468 * Delete us from the list. After we unlock the space in wait_reserve_ticket()
1476 ticket->error = -EINTR; in wait_reserve_ticket()
1479 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1483 finish_wait(&ticket->wait, &wait); in wait_reserve_ticket()
1484 spin_lock(&space_info->lock); in wait_reserve_ticket()
1486 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1534 ret = ticket->error; in handle_reserve_ticket()
1535 ASSERT(list_empty(&ticket->list)); in handle_reserve_ticket()
1542 ASSERT(!(ticket->bytes == 0 && ticket->error)); in handle_reserve_ticket()
1543 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, in handle_reserve_ticket()
1544 start_ns, flush, ticket->error); in handle_reserve_ticket()
1561 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); in maybe_clamp_preempt()
1562 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); in maybe_clamp_preempt()
1565 * If we're heavy on ordered operations then clamping won't help us. We in maybe_clamp_preempt()
1573 space_info->clamp = min(space_info->clamp + 1, 8); in maybe_clamp_preempt()
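This pairs with the "thresh >>= clamp" in need_preemptive_reclaim(): every ticketed flush that starts while delalloc (rather than ordered extents) dominates tightens the preemptive threshold by another power of two, capped at 2^8. A compact model:

#include <stdint.h>

static unsigned int model_bump_clamp(unsigned int clamp,
				     uint64_t ordered, uint64_t delalloc)
{
	if (ordered >= delalloc)   /* mostly ordered extents: clamping won't help */
		return clamp;
	return clamp + 1 > 8 ? 8 : clamp + 1;
}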
1615 int ret = -ENOSPC; in __reserve_bytes()
1620 * If have a transaction handle (current->journal_info != NULL), then in __reserve_bytes()
1625 if (current->journal_info) { in __reserve_bytes()
1633 async_work = &fs_info->async_data_reclaim_work; in __reserve_bytes()
1635 async_work = &fs_info->async_reclaim_work; in __reserve_bytes()
1637 spin_lock(&space_info->lock); in __reserve_bytes()
1646 pending_tickets = !list_empty(&space_info->tickets) || in __reserve_bytes()
1647 !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1649 pending_tickets = !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1652 * Carry on if we have enough space (short-circuit) OR call in __reserve_bytes()
1656 ((used + orig_bytes <= space_info->total_bytes) || in __reserve_bytes()
1670 if (used + orig_bytes <= space_info->total_bytes) { in __reserve_bytes()
1687 space_info->reclaim_size += ticket.bytes; in __reserve_bytes()
1688 init_waitqueue_head(&ticket.wait); in __reserve_bytes()
1696 list_add_tail(&ticket.list, &space_info->tickets); in __reserve_bytes()
1697 if (!space_info->flush) { in __reserve_bytes()
1707 space_info->flush = 1; in __reserve_bytes()
1709 space_info->flags, in __reserve_bytes()
1716 &space_info->priority_tickets); in __reserve_bytes()
1718 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { in __reserve_bytes()
1721 * which means we won't have fs_info->fs_root set, so don't do in __reserve_bytes()
1724 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && in __reserve_bytes()
1725 !work_busy(&fs_info->preempt_reclaim_work) && in __reserve_bytes()
1727 trace_btrfs_trigger_flush(fs_info, space_info->flags, in __reserve_bytes()
1730 &fs_info->preempt_reclaim_work); in __reserve_bytes()
1733 spin_unlock(&space_info->lock); in __reserve_bytes()
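Stripped of the locking, tracepoints and flush-mode details, the reservation path above reduces to: grant immediately when nothing is queued ahead of us and the bytes fit (or overcommit allows them); otherwise queue a ticket and let the flushing machinery satisfy or fail it. A hedged condensation with invented names:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

static int model_reserve_bytes(uint64_t total_bytes, uint64_t *used,
			       uint64_t bytes, bool pending_tickets,
			       bool can_overcommit, bool can_flush,
			       int (*queue_ticket_and_wait)(uint64_t bytes))
{
	if (!pending_tickets &&
	    (*used + bytes <= total_bytes || can_overcommit)) {
		*used += bytes;              /* bytes_may_use += orig_bytes */
		return 0;
	}
	if (can_flush)
		return queue_ticket_and_wait(bytes);  /* handle_reserve_ticket() */
	return -ENOSPC;                      /* e.g. BTRFS_RESERVE_NO_FLUSH */
}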
1763 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); in btrfs_reserve_metadata_bytes()
1764 if (ret == -ENOSPC) { in btrfs_reserve_metadata_bytes()
1766 block_rsv->space_info->flags, in btrfs_reserve_metadata_bytes()
1770 btrfs_dump_space_info(fs_info, block_rsv->space_info, in btrfs_reserve_metadata_bytes()
1789 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; in btrfs_reserve_data_bytes()
1795 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); in btrfs_reserve_data_bytes()
1798 if (ret == -ENOSPC) { in btrfs_reserve_data_bytes()
1800 data_sinfo->flags, bytes, 1); in btrfs_reserve_data_bytes()
1813 list_for_each_entry(space_info, &fs_info->space_info, list) { in btrfs_dump_space_info_for_trans_abort()
1814 spin_lock(&space_info->lock); in btrfs_dump_space_info_for_trans_abort()
1816 spin_unlock(&space_info->lock); in btrfs_dump_space_info_for_trans_abort()
1832 if (list_empty(&sinfo->ro_bgs)) in btrfs_account_ro_block_groups_free_space()
1835 spin_lock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()
1836 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) { in btrfs_account_ro_block_groups_free_space()
1837 spin_lock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1839 if (!block_group->ro) { in btrfs_account_ro_block_groups_free_space()
1840 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1844 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_account_ro_block_groups_free_space()
1845 free_bytes += (block_group->length - in btrfs_account_ro_block_groups_free_space()
1846 block_group->used) * factor; in btrfs_account_ro_block_groups_free_space()
1848 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1850 spin_unlock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()