Searched refs:delayed_refs (Results 1 – 10 of 10) sorted by relevance
/kernel/linux/linux-5.10/fs/btrfs/
delayed-ref.c
    56  atomic_read(&trans->transaction->delayed_refs.num_entries);  in btrfs_should_throttle_delayed_refs()
   393  int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,  in btrfs_delayed_ref_lock() argument
   396  lockdep_assert_held(&delayed_refs->lock);  in btrfs_delayed_ref_lock()
   401  spin_unlock(&delayed_refs->lock);  in btrfs_delayed_ref_lock()
   404  spin_lock(&delayed_refs->lock);  in btrfs_delayed_ref_lock()
   415  struct btrfs_delayed_ref_root *delayed_refs,  in drop_delayed_ref() argument
   426  atomic_dec(&delayed_refs->num_entries);  in drop_delayed_ref()
   430  struct btrfs_delayed_ref_root *delayed_refs,  in merge_ref() argument
   459  drop_delayed_ref(trans, delayed_refs, head, next);  in merge_ref()
   462  drop_delayed_ref(trans, delayed_refs, head, ref);  in merge_ref()
   [all …]
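
The delayed-ref.c hits at lines 393-404 outline the locking discipline of btrfs_delayed_ref_lock(): the caller holds the delayed_refs spinlock, but the per-head mutex can sleep, so the spinlock is dropped around mutex_lock() and re-taken afterwards. A minimal sketch of that pattern, assuming the head is pinned with its refcount and re-checked for removal once the spinlock is re-acquired (those details are not visible in the hits above):

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);	/* hit at line 396 */

	if (mutex_trylock(&head->mutex))
		return 0;

	/* Cannot sleep under the spinlock: pin the head and drop it. */
	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);		/* hit at line 401 */

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);			/* hit at line 404 */

	/* Re-check that the head was not removed while we were unlocked. */
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

Callers presumably treat -EAGAIN as "this head vanished, pick another one" and retry.
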
transaction.h
    86  struct btrfs_delayed_ref_root delayed_refs;  member
   185  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_set_skip_qgroup() local
   187  delayed_refs = &trans->transaction->delayed_refs;  in btrfs_set_skip_qgroup()
   188  WARN_ON(delayed_refs->qgroup_to_skip);  in btrfs_set_skip_qgroup()
   189  delayed_refs->qgroup_to_skip = qgroupid;  in btrfs_set_skip_qgroup()
   194  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_clear_skip_qgroup() local
   196  delayed_refs = &trans->transaction->delayed_refs;  in btrfs_clear_skip_qgroup()
   197  WARN_ON(!delayed_refs->qgroup_to_skip);  in btrfs_clear_skip_qgroup()
   198  delayed_refs->qgroup_to_skip = 0;  in btrfs_clear_skip_qgroup()
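
The transaction.h hits are the bodies of two small inline helpers that record a qgroup id which delayed-ref processing should skip. Reassembled from the lines above (the static inline wrappers and their signatures are assumptions; the statements are taken directly from the hits):

static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);		/* must not already be set */
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);		/* must currently be set */
	delayed_refs->qgroup_to_skip = 0;
}
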
delayed-ref.h
   355  struct btrfs_delayed_ref_root *delayed_refs,
   359  btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
   361  int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
   367  void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
   371  struct btrfs_delayed_ref_root *delayed_refs);
extent-tree.c
   118  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_lookup_extent_info() local
   199  delayed_refs = &trans->transaction->delayed_refs;  in btrfs_lookup_extent_info()
   200  spin_lock(&delayed_refs->lock);  in btrfs_lookup_extent_info()
   201  head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);  in btrfs_lookup_extent_info()
   205  spin_unlock(&delayed_refs->lock);  in btrfs_lookup_extent_info()
   228  spin_unlock(&delayed_refs->lock);  in btrfs_lookup_extent_info()
  1719  static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,  in unselect_delayed_ref_head() argument
  1722  spin_lock(&delayed_refs->lock);  in unselect_delayed_ref_head()
  1724  delayed_refs->num_heads_ready++;  in unselect_delayed_ref_head()
  1725  spin_unlock(&delayed_refs->lock);  in unselect_delayed_ref_head()
  [all …]
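
In btrfs_lookup_extent_info() (hits at lines 199-228) the reference count read from the extent tree has to be adjusted by any delayed refs still queued for that bytenr, so the head is looked up and examined under delayed_refs->lock. A sketch of that pattern as a stand-alone hypothetical helper (the name count_pending_refs, the -EAGAIN retry convention and the use of head->ref_mod are assumptions, not shown in the hits):

static int count_pending_refs(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 *num_refs)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return 0;
	}

	if (!mutex_trylock(&head->mutex)) {
		/* Head is being processed right now; let the caller drop
		 * its own locks and retry the lookup. */
		spin_unlock(&delayed_refs->lock);
		return -EAGAIN;
	}

	*num_refs += head->ref_mod;	/* net effect of the queued refs */
	mutex_unlock(&head->mutex);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

The unselect_delayed_ref_head() hits at lines 1719-1725 show the reverse bookkeeping: when a head that was picked for processing is put back, num_heads_ready is bumped again under the same spinlock.
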
transaction.c
   124  &transaction->delayed_refs.href_root.rb_root));  in btrfs_put_transaction()
   126  &transaction->delayed_refs.dirty_extent_root));  in btrfs_put_transaction()
   127  if (transaction->delayed_refs.pending_csums)  in btrfs_put_transaction()
   130  transaction->delayed_refs.pending_csums);  in btrfs_put_transaction()
   350  memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));  in join_transaction()
   352  cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;  in join_transaction()
   353  cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;  in join_transaction()
   354  atomic_set(&cur_trans->delayed_refs.num_entries, 0);  in join_transaction()
   367  spin_lock_init(&cur_trans->delayed_refs.lock);  in join_transaction()
   915  cur_trans->delayed_refs.flushing)  in btrfs_should_end_transaction()
   [all …]
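
The join_transaction() hits at lines 350-367 show how the per-transaction delayed-ref root is (re)initialised: zeroed, both rbtrees reset, the entry counter cleared and the spinlock initialised. Collected here into one hypothetical helper purely for readability (the helper name is made up; the statements come from the hits):

static void init_delayed_refs_root(struct btrfs_transaction *cur_trans)
{
	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;	/* ref heads, keyed by bytenr */
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;	/* qgroup dirty extent records */
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
	spin_lock_init(&cur_trans->delayed_refs.lock);
}

btrfs_put_transaction() (hits at lines 124-130) then checks at teardown that both trees are empty again and complains if pending_csums is still non-zero.
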
qgroup.c
  1729  struct btrfs_delayed_ref_root *delayed_refs,  in btrfs_qgroup_trace_extent_nolock() argument
  1732  struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;  in btrfs_qgroup_trace_extent_nolock()
  1737  lockdep_assert_held(&delayed_refs->lock);  in btrfs_qgroup_trace_extent_nolock()
  1759  rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);  in btrfs_qgroup_trace_extent_nolock()
  1795  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_qgroup_trace_extent() local
  1805  delayed_refs = &trans->transaction->delayed_refs;  in btrfs_qgroup_trace_extent()
  1810  spin_lock(&delayed_refs->lock);  in btrfs_qgroup_trace_extent()
  1811  ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);  in btrfs_qgroup_trace_extent()
  1812  spin_unlock(&delayed_refs->lock);  in btrfs_qgroup_trace_extent()
  2695  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_qgroup_account_extents() local
  [all …]
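
btrfs_qgroup_trace_extent() (hits at lines 1795-1812) queues a qgroup extent record on the same delayed_refs structure: the record goes into delayed_refs->dirty_extent_root via the _nolock variant, which asserts the spinlock is held (line 1737) and does the rbtree insert. A rough sketch as a stand-alone function (the function name, the record allocation, the bytenr/num_bytes fields and the "returns 1 if already present" convention are assumptions, not shown in the hits):

int trace_extent_sketch(struct btrfs_trans_handle *trans, u64 bytenr,
			u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	record->bytenr = bytenr;
	record->num_bytes = num_bytes;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(trans->fs_info, delayed_refs,
					       record);
	spin_unlock(&delayed_refs->lock);

	if (ret > 0)		/* a record for this bytenr was already queued */
		kfree(record);
	return 0;
}
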
disk-io.c
  4428  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_destroy_delayed_refs() local
  4432  delayed_refs = &trans->delayed_refs;  in btrfs_destroy_delayed_refs()
  4434  spin_lock(&delayed_refs->lock);  in btrfs_destroy_delayed_refs()
  4435  if (atomic_read(&delayed_refs->num_entries) == 0) {  in btrfs_destroy_delayed_refs()
  4436  spin_unlock(&delayed_refs->lock);  in btrfs_destroy_delayed_refs()
  4441  while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {  in btrfs_destroy_delayed_refs()
  4448  if (btrfs_delayed_ref_lock(delayed_refs, head))  in btrfs_destroy_delayed_refs()
  4460  atomic_dec(&delayed_refs->num_entries);  in btrfs_destroy_delayed_refs()
  4466  btrfs_delete_ref_head(delayed_refs, head);  in btrfs_destroy_delayed_refs()
  4468  spin_unlock(&delayed_refs->lock);  in btrfs_destroy_delayed_refs()
  [all …]
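
btrfs_destroy_delayed_refs() (hits at lines 4428-4468) is the abort path: every queued head is reached via rb_first_cached() on href_root, locked with btrfs_delayed_ref_lock(), emptied (each dropped ref decrements num_entries, line 4460) and finally removed with btrfs_delete_ref_head(). A stripped-down sketch of the loop, with the per-ref inner loop and the space/qgroup cleanup elided and the helper name invented:

static void destroy_delayed_refs_sketch(struct btrfs_transaction *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
	struct rb_node *node;

	spin_lock(&delayed_refs->lock);
	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		/* May drop and re-take delayed_refs->lock internally. */
		if (btrfs_delayed_ref_lock(delayed_refs, head))
			continue;	/* head vanished meanwhile; rescan */

		/* ... drop every ref queued on this head here ... */

		btrfs_delete_ref_head(delayed_refs, head);
		mutex_unlock(&head->mutex);
		/* reference on the head and space accounting dropped here */
	}
	spin_unlock(&delayed_refs->lock);
}
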
qgroup.h
   277  struct btrfs_delayed_ref_root *delayed_refs,
backref.c
  1210  struct btrfs_delayed_ref_root *delayed_refs = NULL;  local
  1269  delayed_refs = &trans->transaction->delayed_refs;
  1270  spin_lock(&delayed_refs->lock);
  1271  head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
  1275  spin_unlock(&delayed_refs->lock);
  1288  spin_unlock(&delayed_refs->lock);
  1295  spin_unlock(&delayed_refs->lock);
ctree.h
  2557  struct btrfs_delayed_ref_root *delayed_refs,