
Lines Matching +full:wait +full:- +full:free +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0
5 #include "space-info.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
11 #include "block-group.h"
25 * reservations we care about total_bytes - SUM(space_info->bytes_) when
31 * code on the rules for each type, but generally block_rsv->reserved is how
32 * much space is accounted for in space_info->bytes_may_use.
46 * ->reserve
47 * space_info->bytes_may_use += num_bytes
49 * ->extent allocation
51 * space_info->bytes_may_use -= num_bytes
52 * space_info->bytes_reserved += extent_bytes
54 * ->insert reference
56 * space_info->bytes_reserved -= extent_bytes
57 * space_info->bytes_used += extent_bytes
59 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
64 * -> __reserve_bytes
65 * create a reserve_ticket with ->bytes set to our reservation, add it to
66 * the tail of space_info->tickets, kick async flush thread
68 * ->handle_reserve_ticket
69 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
72 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
73 * Flushes various things attempting to free up space.
75 * -> btrfs_try_granting_tickets()
77 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
78 * space_info->total_bytes. This loops through the ->priority_tickets and
79 * then the ->tickets list checking to see if the reservation can be
80 * completed. If it can the space is added to space_info->bytes_may_use and
83 * -> ticket wakeup
84 * Check if ->bytes == 0, if it does we got our reservation and we can carry
91 * space_info->priority_tickets, and we do not use ticket->wait; we simply
92 * call flush_space() ourselves for the states that are safe for us to call
101 * things however hold reservations, and so letting them run allows us to
115 * running delalloc, but usually we need to wait for ordered extents to
146 * reserve more space than is currently free in the currently allocated
153 * free space in the allocated metadata chunks.
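/*
 * Illustrative sketch, not from the kernel tree: a minimal userspace model
 * of the reservation lifecycle described above. The struct and helpers are
 * simplified stand-ins for btrfs_space_info; only the counter transitions
 * are modeled.
 */
#include <stdio.h>

struct counters {
	unsigned long long bytes_may_use;  /* speculative reservations */
	unsigned long long bytes_reserved; /* allocated, not yet referenced */
	unsigned long long bytes_used;     /* referenced on disk */
};

static void reserve(struct counters *c, unsigned long long n)
{
	c->bytes_may_use += n;                    /* ->reserve */
}

static void allocate_extent(struct counters *c, unsigned long long reserved,
			    unsigned long long extent)
{
	c->bytes_may_use -= reserved;             /* ->extent allocation */
	c->bytes_reserved += extent;
}

static void insert_reference(struct counters *c, unsigned long long extent)
{
	c->bytes_reserved -= extent;              /* ->insert reference */
	c->bytes_used += extent;
}

int main(void)
{
	struct counters c = { 0 };

	reserve(&c, 1ULL << 20);
	allocate_extent(&c, 1ULL << 20, 16384);
	insert_reference(&c, 16384);
	printf("may_use=%llu reserved=%llu used=%llu\n",
	       c.bytes_may_use, c.bytes_reserved, c.bytes_used);
	return 0;
}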
164 return s_info->bytes_used + s_info->bytes_reserved + in btrfs_space_info_used()
165 s_info->bytes_pinned + s_info->bytes_readonly + in btrfs_space_info_used()
166 (may_use_included ? s_info->bytes_may_use : 0); in btrfs_space_info_used()
175 struct list_head *head = &info->space_info; in btrfs_clear_space_info_full()
179 found->full = 0; in btrfs_clear_space_info_full()
191 return -ENOMEM; in create_space_info()
193 ret = percpu_counter_init(&space_info->total_bytes_pinned, 0, in create_space_info()
201 INIT_LIST_HEAD(&space_info->block_groups[i]); in create_space_info()
202 init_rwsem(&space_info->groups_sem); in create_space_info()
203 spin_lock_init(&space_info->lock); in create_space_info()
204 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; in create_space_info()
205 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; in create_space_info()
206 INIT_LIST_HEAD(&space_info->ro_bgs); in create_space_info()
207 INIT_LIST_HEAD(&space_info->tickets); in create_space_info()
208 INIT_LIST_HEAD(&space_info->priority_tickets); in create_space_info()
214 list_add(&space_info->list, &info->space_info); in create_space_info()
216 info->data_sinfo = space_info; in create_space_info()
229 disk_super = fs_info->super_copy; in btrfs_init_space_info()
231 return -EINVAL; in btrfs_init_space_info()
270 spin_lock(&found->lock); in btrfs_update_space_info()
271 found->total_bytes += total_bytes; in btrfs_update_space_info()
272 found->disk_total += total_bytes * factor; in btrfs_update_space_info()
273 found->bytes_used += bytes_used; in btrfs_update_space_info()
274 found->disk_used += bytes_used * factor; in btrfs_update_space_info()
275 found->bytes_readonly += bytes_readonly; in btrfs_update_space_info()
277 found->full = 0; in btrfs_update_space_info()
279 spin_unlock(&found->lock); in btrfs_update_space_info()
286 struct list_head *head = &info->space_info; in btrfs_find_space_info()
292 if (found->flags & flags) in btrfs_find_space_info()
306 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) in calc_available_free_space()
311 avail = atomic64_read(&fs_info->free_chunk_space); in calc_available_free_space()
314 * If we have dup, raid1 or raid10 then only half of the free in calc_available_free_space()
323 * If we aren't flushing all things, let us overcommit up to in calc_available_free_space()
324 * half of the space. If we can flush, don't let us overcommit in calc_available_free_space()
342 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) in btrfs_can_overcommit()
348 if (used + bytes < space_info->total_bytes + avail) in btrfs_can_overcommit()
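/*
 * Sketch of the overcommit test the two functions above implement,
 * reduced to plain integers: metadata may be reserved beyond total_bytes
 * as long as enough unallocated disk remains. raid_factor models the
 * dup/raid1/raid10 halving; callers that cannot flush aggressively only
 * get half of the remaining slice. Names here are illustrative.
 */
#include <stdbool.h>

static bool can_overcommit(unsigned long long used, unsigned long long bytes,
			   unsigned long long total_bytes,
			   unsigned long long free_chunk_space,
			   unsigned int raid_factor, bool flush_all)
{
	unsigned long long avail = free_chunk_space / raid_factor;

	if (!flush_all)
		avail >>= 1;    /* be conservative when we can't flush hard */

	return used + bytes < total_bytes + avail;
}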
356 if (!list_empty(&ticket->list)) { in remove_ticket()
357 list_del_init(&ticket->list); in remove_ticket()
358 ASSERT(space_info->reclaim_size >= ticket->bytes); in remove_ticket()
359 space_info->reclaim_size -= ticket->bytes; in remove_ticket()
364 * This is for space we already have accounted in space_info->bytes_may_use, so
373 lockdep_assert_held(&space_info->lock); in btrfs_try_granting_tickets()
375 head = &space_info->priority_tickets; in btrfs_try_granting_tickets()
384 if ((used + ticket->bytes <= space_info->total_bytes) || in btrfs_try_granting_tickets()
385 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, in btrfs_try_granting_tickets()
389 ticket->bytes); in btrfs_try_granting_tickets()
391 ticket->bytes = 0; in btrfs_try_granting_tickets()
392 space_info->tickets_id++; in btrfs_try_granting_tickets()
393 wake_up(&ticket->wait); in btrfs_try_granting_tickets()
399 if (head == &space_info->priority_tickets) { in btrfs_try_granting_tickets()
400 head = &space_info->tickets; in btrfs_try_granting_tickets()
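/*
 * Sketch of the grant order enforced above, using a plain array in place
 * of the two ticket lists: priority tickets are walked first, then the
 * regular ones, and the walk stops at the first ticket that cannot be
 * satisfied, so reservations stay strictly FIFO within each list.
 */
static void try_granting(unsigned long long *avail,
			 unsigned long long *tickets, int nr)
{
	for (int i = 0; i < nr; i++) {
		if (tickets[i] > *avail)
			break;          /* don't let later tickets jump the queue */
		*avail -= tickets[i];
		tickets[i] = 0;         /* granted: the waiter sees bytes == 0 */
	}
}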
408 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
409 spin_lock(&__rsv->lock); \
411 __rsv->size, __rsv->reserved); \
412 spin_unlock(&__rsv->lock); \
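/*
 * Usage sketch: the macro takes the member name of one of the block
 * reserves embedded in fs_info, so dumping the global reserve looks like
 * this (call site hypothetical).
 */
DUMP_BLOCK_RSV(fs_info, global_block_rsv);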
418 lockdep_assert_held(&info->lock); in __btrfs_dump_space_info()
420 /* The free space could be negative in case of overcommit */ in __btrfs_dump_space_info()
421 btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull", in __btrfs_dump_space_info()
422 info->flags, in __btrfs_dump_space_info()
423 (s64)(info->total_bytes - btrfs_space_info_used(info, true)), in __btrfs_dump_space_info()
424 info->full ? "" : "not "); in __btrfs_dump_space_info()
427 info->total_bytes, info->bytes_used, info->bytes_pinned, in __btrfs_dump_space_info()
428 info->bytes_reserved, info->bytes_may_use, in __btrfs_dump_space_info()
429 info->bytes_readonly); in __btrfs_dump_space_info()
446 spin_lock(&info->lock); in btrfs_dump_space_info()
448 spin_unlock(&info->lock); in btrfs_dump_space_info()
453 down_read(&info->groups_sem); in btrfs_dump_space_info()
455 list_for_each_entry(cache, &info->block_groups[index], list) { in btrfs_dump_space_info()
456 spin_lock(&cache->lock); in btrfs_dump_space_info()
459 cache->start, cache->length, cache->used, cache->pinned, in btrfs_dump_space_info()
460 cache->reserved, cache->ro ? "[readonly]" : ""); in btrfs_dump_space_info()
461 spin_unlock(&cache->lock); in btrfs_dump_space_info()
466 up_read(&info->groups_sem); in btrfs_dump_space_info()
513 trans = (struct btrfs_trans_handle *)current->journal_info; in shrink_delalloc()
516 &fs_info->delalloc_bytes); in shrink_delalloc()
517 dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes); in shrink_delalloc()
522 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); in shrink_delalloc()
527 * If we are doing more ordered than delalloc we need to just wait on in shrink_delalloc()
529 * that likely won't give us the space back we need. in shrink_delalloc()
542 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); in shrink_delalloc()
549 spin_lock(&space_info->lock); in shrink_delalloc()
550 if (list_empty(&space_info->tickets) && in shrink_delalloc()
551 list_empty(&space_info->priority_tickets)) { in shrink_delalloc()
552 spin_unlock(&space_info->lock); in shrink_delalloc()
555 spin_unlock(&space_info->lock); in shrink_delalloc()
558 &fs_info->delalloc_bytes); in shrink_delalloc()
559 dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes); in shrink_delalloc()
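/*
 * The heuristic in the comment above, reduced to its comparison
 * (simplified model, plain integers): when outstanding direct IO bytes
 * exceed dirty delalloc bytes, flushing more delalloc won't return much,
 * so wait on ordered extents instead.
 */
#include <stdbool.h>

static bool should_wait_on_ordered(unsigned long long delalloc_bytes,
				   unsigned long long dio_bytes)
{
	return dio_bytes > delalloc_bytes;
}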
564 * may_commit_transaction - possibly commit the transaction if it's OK to
565 * @fs_info - the filesystem
566 * @space_info - the space_info we need to commit for
570 * get us somewhere and then commit the transaction if it does. Otherwise it
571 * will return -ENOSPC.
577 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; in may_commit_transaction()
578 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; in may_commit_transaction()
579 struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; in may_commit_transaction()
585 trans = (struct btrfs_trans_handle *)current->journal_info; in may_commit_transaction()
587 return -EAGAIN; in may_commit_transaction()
589 spin_lock(&space_info->lock); in may_commit_transaction()
591 if (cur_free_bytes < space_info->total_bytes) in may_commit_transaction()
592 cur_free_bytes = space_info->total_bytes - cur_free_bytes; in may_commit_transaction()
596 if (!list_empty(&space_info->priority_tickets)) in may_commit_transaction()
597 ticket = list_first_entry(&space_info->priority_tickets, in may_commit_transaction()
599 else if (!list_empty(&space_info->tickets)) in may_commit_transaction()
600 ticket = list_first_entry(&space_info->tickets, in may_commit_transaction()
603 bytes_needed = ticket->bytes; in may_commit_transaction()
606 bytes_needed -= cur_free_bytes; in may_commit_transaction()
609 spin_unlock(&space_info->lock); in may_commit_transaction()
614 trans = btrfs_join_transaction(fs_info->extent_root); in may_commit_transaction()
620 * we have block groups that are going to be freed, allowing us to in may_commit_transaction()
623 if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) || in may_commit_transaction()
624 __percpu_counter_compare(&space_info->total_bytes_pinned, in may_commit_transaction()
635 if (space_info != delayed_rsv->space_info) in may_commit_transaction()
638 spin_lock(&delayed_rsv->lock); in may_commit_transaction()
639 reclaim_bytes += delayed_rsv->reserved; in may_commit_transaction()
640 spin_unlock(&delayed_rsv->lock); in may_commit_transaction()
642 spin_lock(&delayed_refs_rsv->lock); in may_commit_transaction()
643 reclaim_bytes += delayed_refs_rsv->reserved; in may_commit_transaction()
644 spin_unlock(&delayed_refs_rsv->lock); in may_commit_transaction()
646 spin_lock(&trans_rsv->lock); in may_commit_transaction()
647 reclaim_bytes += trans_rsv->reserved; in may_commit_transaction()
648 spin_unlock(&trans_rsv->lock); in may_commit_transaction()
652 bytes_needed -= reclaim_bytes; in may_commit_transaction()
654 if (__percpu_counter_compare(&space_info->total_bytes_pinned, in may_commit_transaction()
663 return -ENOSPC; in may_commit_transaction()
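/*
 * Sketch of the final decision above, simplified: a commit is only worth
 * it if the bytes pinned for this transaction, plus what the various
 * block reserves will give back, cover the ticket that is waiting.
 */
#include <stdbool.h>

static bool commit_would_help(long long total_bytes_pinned,
			      unsigned long long reclaim_bytes,
			      unsigned long long bytes_needed)
{
	if (reclaim_bytes >= bytes_needed)
		return true;
	bytes_needed -= reclaim_bytes;
	return total_bytes_pinned >= (long long)bytes_needed;
}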
675 struct btrfs_root *root = fs_info->extent_root; in flush_space()
686 nr = -1; in flush_space()
723 btrfs_get_alloc_profile(fs_info, space_info->flags), in flush_space()
727 if (ret > 0 || ret == -ENOSPC) in flush_space()
732 * If we have pending delayed iputs then we could free up a in flush_space()
743 ret = -ENOSPC; in flush_space()
747 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, in flush_space()
759 u64 to_reclaim = space_info->reclaim_size; in btrfs_calc_reclaim_metadata_size()
761 lockdep_assert_held(&space_info->lock); in btrfs_calc_reclaim_metadata_size()
769 * before, and now we're well over-committed based on our current free in btrfs_calc_reclaim_metadata_size()
773 if (space_info->total_bytes + avail < used) in btrfs_calc_reclaim_metadata_size()
774 to_reclaim += used - (space_info->total_bytes + avail); in btrfs_calc_reclaim_metadata_size()
788 expected = div_factor_fine(space_info->total_bytes, 95); in btrfs_calc_reclaim_metadata_size()
790 expected = div_factor_fine(space_info->total_bytes, 90); in btrfs_calc_reclaim_metadata_size()
793 to_reclaim = used - expected; in btrfs_calc_reclaim_metadata_size()
796 to_reclaim = min(to_reclaim, space_info->bytes_may_use + in btrfs_calc_reclaim_metadata_size()
797 space_info->bytes_reserved); in btrfs_calc_reclaim_metadata_size()
805 u64 thresh = div_factor_fine(space_info->total_bytes, 98); in need_do_async_reclaim()
807 /* If we're just plain full then async reclaim just slows us down. */ in need_do_async_reclaim()
808 if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh) in need_do_async_reclaim()
815 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); in need_do_async_reclaim()
822 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in steal_from_global_rsv()
825 if (global_rsv->space_info != space_info) in steal_from_global_rsv()
828 spin_lock(&global_rsv->lock); in steal_from_global_rsv()
829 min_bytes = div_factor(global_rsv->size, 1); in steal_from_global_rsv()
830 if (global_rsv->reserved < min_bytes + ticket->bytes) { in steal_from_global_rsv()
831 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
834 global_rsv->reserved -= ticket->bytes; in steal_from_global_rsv()
836 ticket->bytes = 0; in steal_from_global_rsv()
837 wake_up(&ticket->wait); in steal_from_global_rsv()
838 space_info->tickets_id++; in steal_from_global_rsv()
839 if (global_rsv->reserved < global_rsv->size) in steal_from_global_rsv()
840 global_rsv->full = 0; in steal_from_global_rsv()
841 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
847 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
848 * @fs_info - fs_info for this fs
849 * @space_info - the space info we were flushing
865 u64 tickets_id = space_info->tickets_id; in maybe_fail_all_tickets()
873 while (!list_empty(&space_info->tickets) && in maybe_fail_all_tickets()
874 tickets_id == space_info->tickets_id) { in maybe_fail_all_tickets()
875 ticket = list_first_entry(&space_info->tickets, in maybe_fail_all_tickets()
878 if (ticket->steal && in maybe_fail_all_tickets()
889 * and send us back for another loop through the enospc flushing in maybe_fail_all_tickets()
893 first_ticket_bytes = ticket->bytes; in maybe_fail_all_tickets()
894 else if (first_ticket_bytes > ticket->bytes) in maybe_fail_all_tickets()
899 ticket->bytes); in maybe_fail_all_tickets()
902 ticket->error = -ENOSPC; in maybe_fail_all_tickets()
903 wake_up(&ticket->wait); in maybe_fail_all_tickets()
913 return (tickets_id != space_info->tickets_id); in maybe_fail_all_tickets()
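/*
 * Sketch of the bail-out rule in the loop above: tickets are failed in
 * order, but the walk stops as soon as a ticket smaller than the first
 * one shows up, since another flushing pass sized for the first ticket
 * may still satisfy it. Returns how many tickets were failed (simplified;
 * the real code also wakes waiters and sets -ENOSPC).
 */
static int fail_head_tickets(const unsigned long long *tickets, int nr)
{
	unsigned long long first_bytes = 0;
	int failed = 0;

	for (int i = 0; i < nr; i++) {
		if (first_bytes == 0)
			first_bytes = tickets[i];
		else if (first_bytes > tickets[i])
			break;  /* smaller request: keep flushing instead */
		failed++;
	}
	return failed;
}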
917 * This is for normal flushers, we can wait all goddamned day if we want to. We
933 spin_lock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
936 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
937 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
940 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_metadata_space()
941 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
946 spin_lock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
947 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_metadata_space()
948 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
949 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
954 if (last_tickets_id == space_info->tickets_id) { in btrfs_async_reclaim_metadata_space()
957 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_metadata_space()
960 commit_cycles--; in btrfs_async_reclaim_metadata_space()
981 commit_cycles--; in btrfs_async_reclaim_metadata_space()
983 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
989 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
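/*
 * Control-flow sketch of the flusher above, with the kernel machinery
 * stubbed out and the commit-cycle bookkeeping omitted: walk the flush
 * states in order, restart from the gentlest state whenever a pass
 * granted a ticket (visible as tickets_id moving), and give up once the
 * harshest state made no progress.
 */
struct reclaim_state {
	unsigned long long tickets_id; /* bumped whenever a ticket is granted */
	int pending;                   /* tickets still waiting */
};

static void do_flush(struct reclaim_state *s, int state)
{
	(void)s; (void)state;          /* stub: stands in for flush_space() */
}

static void fail_tickets(struct reclaim_state *s)
{
	s->pending = 0;                /* stub: maybe_fail_all_tickets() */
}

static void reclaim_loop(struct reclaim_state *s, int last_state)
{
	int flush_state = 1;
	unsigned long long last_id = s->tickets_id;

	while (s->pending) {
		do_flush(s, flush_state);
		if (last_id == s->tickets_id) {
			if (++flush_state > last_state) {
				fail_tickets(s); /* exhausted every state */
				break;
			}
		} else {
			last_id = s->tickets_id;
			flush_state = 1; /* progress: start over gently */
		}
	}
}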
1002 * length to ->bytes_reserved, and subtracts the reserved space from
1003 * ->bytes_may_use.
1014 * immediately re-usable, it comes in the form of a delayed ref, which must be
1019 * ->total_bytes_pinned. However this counter can be inconsistent with
1022 * any on-disk state which might include more refs. So for example, if we
1025 * will be freed, and thus increase ->total_bytes_pinned.
1027 * Running the delayed refs gives us the actual real view of what will be
1028 * freed at the transaction commit time. This stage will not actually free
1029 * space for us, it just makes sure that may_commit_transaction() has all of
1035 * likely to satisfy our request, which means if our current free space +
1038 * whether committing the transaction will allow us to make progress.
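/*
 * The ordering the explanation above justifies, written out (state names
 * as used elsewhere in this file; list simplified): delayed refs are run
 * before the commit decision so ->total_bytes_pinned reflects what a
 * commit would actually free.
 */
static const char *const data_flush_order[] = {
	"FLUSH_DELALLOC_WAIT",  /* write out and wait on dirty data */
	"RUN_DELAYED_IPUTS",    /* drop iputs that pin space */
	"FLUSH_DELAYED_REFS",   /* make ->total_bytes_pinned trustworthy */
	"COMMIT_TRANS",         /* may_commit_transaction() decides */
};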
1061 space_info = fs_info->data_sinfo; in btrfs_async_reclaim_data_space()
1063 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1064 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1065 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1066 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1069 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1070 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1072 while (!space_info->full) { in btrfs_async_reclaim_data_space()
1074 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1075 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1076 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1077 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1080 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1081 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1087 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1088 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1089 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1090 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1094 if (last_tickets_id == space_info->tickets_id) { in btrfs_async_reclaim_data_space()
1097 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1102 if (space_info->full) { in btrfs_async_reclaim_data_space()
1106 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1111 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1117 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); in btrfs_init_async_reclaim_work()
1118 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); in btrfs_init_async_reclaim_work()
1147 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1150 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1153 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1159 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1160 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1161 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1164 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1172 while (!space_info->full) { in priority_reclaim_data_space()
1174 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1175 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1176 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1179 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1188 DEFINE_WAIT(wait); in wait_reserve_ticket()
1191 spin_lock(&space_info->lock); in wait_reserve_ticket()
1192 while (ticket->bytes > 0 && ticket->error == 0) { in wait_reserve_ticket()
1193 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); in wait_reserve_ticket()
1196 * Delete us from the list. After we unlock the space in wait_reserve_ticket()
1204 ticket->error = -EINTR; in wait_reserve_ticket()
1207 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1211 finish_wait(&ticket->wait, &wait); in wait_reserve_ticket()
1212 spin_lock(&space_info->lock); in wait_reserve_ticket()
1214 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1218 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
1219 * @fs_info - the fs
1220 * @space_info - the space_info for the reservation
1221 * @ticket - the ticket for the reservation
1222 * @flush - how much we can flush
1258 spin_lock(&space_info->lock); in handle_reserve_ticket()
1259 ret = ticket->error; in handle_reserve_ticket()
1260 if (ticket->bytes || ticket->error) { in handle_reserve_ticket()
1264 * behind us that require less space, run in handle_reserve_ticket()
1268 if (!list_empty(&ticket->list)) { in handle_reserve_ticket()
1274 ret = -ENOSPC; in handle_reserve_ticket()
1276 spin_unlock(&space_info->lock); in handle_reserve_ticket()
1277 ASSERT(list_empty(&ticket->list)); in handle_reserve_ticket()
1284 ASSERT(!(ticket->bytes == 0 && ticket->error)); in handle_reserve_ticket()
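/*
 * Sketch of the settlement above with the locking stripped away: once the
 * wait finishes, a ticket either reached bytes == 0 (granted) or carries
 * an error; a ticket that still holds bytes is taken off the list so the
 * smaller tickets queued behind it can still be granted.
 */
#include <errno.h>
#include <stdbool.h>

struct ticket_model {
	unsigned long long bytes; /* remaining; 0 means fully granted */
	int error;                /* 0, or e.g. -EINTR from the waiter */
	bool queued;
};

static int settle_ticket(struct ticket_model *t)
{
	int ret = t->error;

	if (t->bytes || t->error) {
		t->queued = false;    /* remove_ticket() in the real code */
		if (!ret)
			ret = -ENOSPC;
	}
	return ret;
}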
1299 * __reserve_bytes - try to reserve bytes from the space_info's space
1300 * @fs_info - the filesystem
1301 * @space_info - the space info we want to allocate from
1302 * @orig_bytes - the number of bytes we want
1303 * @flush - whether or not we can flush to make our reservation
1323 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); in __reserve_bytes()
1326 async_work = &fs_info->async_data_reclaim_work; in __reserve_bytes()
1328 async_work = &fs_info->async_reclaim_work; in __reserve_bytes()
1330 spin_lock(&space_info->lock); in __reserve_bytes()
1331 ret = -ENOSPC; in __reserve_bytes()
1340 pending_tickets = !list_empty(&space_info->tickets) || in __reserve_bytes()
1341 !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1343 pending_tickets = !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1346 * Carry on if we have enough space (short-circuit) OR call in __reserve_bytes()
1350 ((used + orig_bytes <= space_info->total_bytes) || in __reserve_bytes()
1367 space_info->reclaim_size += ticket.bytes; in __reserve_bytes()
1368 init_waitqueue_head(&ticket.wait); in __reserve_bytes()
1373 list_add_tail(&ticket.list, &space_info->tickets); in __reserve_bytes()
1374 if (!space_info->flush) { in __reserve_bytes()
1375 space_info->flush = 1; in __reserve_bytes()
1377 space_info->flags, in __reserve_bytes()
1384 &space_info->priority_tickets); in __reserve_bytes()
1386 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { in __reserve_bytes()
1390 * which means we won't have fs_info->fs_root set, so don't do in __reserve_bytes()
1393 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && in __reserve_bytes()
1395 !work_busy(&fs_info->async_reclaim_work)) { in __reserve_bytes()
1396 trace_btrfs_trigger_flush(fs_info, space_info->flags, in __reserve_bytes()
1399 &fs_info->async_reclaim_work); in __reserve_bytes()
1402 spin_unlock(&space_info->lock); in __reserve_bytes()
1410 * btrfs_reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
1411 * @root - the root we're allocating for
1412 * @block_rsv - the block_rsv we're allocating for
1413 * @orig_bytes - the number of bytes we want
1414 * @flush - whether or not we can flush to make our reservation
1428 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_reserve_metadata_bytes()
1429 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in btrfs_reserve_metadata_bytes()
1432 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); in btrfs_reserve_metadata_bytes()
1433 if (ret == -ENOSPC && in btrfs_reserve_metadata_bytes()
1434 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { in btrfs_reserve_metadata_bytes()
1439 if (ret == -ENOSPC) { in btrfs_reserve_metadata_bytes()
1441 block_rsv->space_info->flags, in btrfs_reserve_metadata_bytes()
1445 btrfs_dump_space_info(fs_info, block_rsv->space_info, in btrfs_reserve_metadata_bytes()
1452 * btrfs_reserve_data_bytes - try to reserve data bytes for an allocation
1453 * @fs_info - the filesystem
1454 * @bytes - the number of bytes we need
1455 * @flush - how we are allowed to flush
1463 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; in btrfs_reserve_data_bytes()
1468 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); in btrfs_reserve_data_bytes()
1471 if (ret == -ENOSPC) { in btrfs_reserve_data_bytes()
1473 data_sinfo->flags, bytes, 1); in btrfs_reserve_data_bytes()
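/*
 * Hypothetical call site for the function above: reserve 1MiB of data
 * space, allowing the full data flushing state machine to run. SZ_1M is
 * the kernel's size constant; the error handling is illustrative.
 */
ret = btrfs_reserve_data_bytes(fs_info, SZ_1M, BTRFS_RESERVE_FLUSH_DATA);
if (ret)
	return ret;     /* -ENOSPC only after flushing could not help */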