// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * Delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
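
/*
 * Rough lifecycle sketch for orientation (the run/cleanup side lives in
 * extent-tree.c):
 *
 *   btrfs_add_delayed_tree_ref() / btrfs_add_delayed_data_ref()
 *           queue a ref head plus a ref node under delayed_refs->lock
 *   btrfs_run_delayed_refs()
 *           picks heads via btrfs_select_ref_head() and applies the
 *           accumulated mods to the extent tree
 *   btrfs_delete_ref_head() / btrfs_put_delayed_ref_head()
 *           retire the head once its mods have been applied
 */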

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        bool ret = false;
        u64 reserved;

        spin_lock(&global_rsv->lock);
        reserved = global_rsv->reserved;
        spin_unlock(&global_rsv->lock);

        /*
         * Since the global reserve is just kind of magic we don't really want
         * to rely on it to save our bacon, so if our size is more than the
         * delayed_refs_rsv and the global rsv then it's time to think about
         * bailing.
         */
        spin_lock(&delayed_refs_rsv->lock);
        reserved += delayed_refs_rsv->reserved;
        if (delayed_refs_rsv->size >= reserved)
                ret = true;
        spin_unlock(&delayed_refs_rsv->lock);
        return ret;
}

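/*
 * Heuristic throttle check: returns 1 if the estimated time to run all
 * queued delayed refs (num_entries * average ref runtime) exceeds one
 * second, 2 if it exceeds half a second, and otherwise falls back to the
 * reservation check above.  Callers generally treat any non-zero value as
 * a hint to throttle or end the transaction.
 */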
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
        u64 num_entries =
                atomic_read(&trans->transaction->delayed_refs.num_entries);
        u64 avg_runtime;
        u64 val;

        smp_mb();
        avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
        val = num_entries * avg_runtime;
        if (val >= NSEC_PER_SEC)
                return 1;
        if (val >= NSEC_PER_SEC / 2)
                return 2;

        return btrfs_check_space_for_delayed_refs(trans->fs_info);
}

/**
 * btrfs_delayed_refs_rsv_release - release a ref head's reservation
 * @fs_info: the filesystem
 * @nr:      number of items to drop
 *
 * This drops the delayed ref head's count from the delayed refs rsv and frees
 * any excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
        u64 released = 0;

        /*
         * We have to check the mount option here because we could be enabling
         * the free space tree for the first time and don't have the compat_ro
         * option set yet.
         *
         * We need extra reservations if we have the free space tree because
         * we'll have to modify that tree as well.
         */
        if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
                num_bytes *= 2;

        released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
        if (released)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, released, 0);
}

/*
 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
 * @trans: the trans that may have generated delayed refs
 *
 * This is to be called any time we may have adjusted trans->delayed_ref_updates;
 * it calculates the additional size and adds it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes;

        if (!trans->delayed_ref_updates)
                return;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info,
                                                    trans->delayed_ref_updates);
        /*
         * We have to check the mount option here because we could be enabling
         * the free space tree for the first time and don't have the compat_ro
         * option set yet.
         *
         * We need extra reservations if we have the free space tree because
         * we'll have to modify that tree as well.
         */
        if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
                num_bytes *= 2;

        spin_lock(&delayed_rsv->lock);
        delayed_rsv->size += num_bytes;
        delayed_rsv->full = false;
        spin_unlock(&delayed_rsv->lock);
        trans->delayed_ref_updates = 0;
}

/**
 * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv
 * @fs_info:   the filesystem
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount, previously reserved, to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
                                       u64 num_bytes)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        u64 to_free = 0;

        spin_lock(&delayed_refs_rsv->lock);
        if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
                u64 delta = delayed_refs_rsv->size -
                            delayed_refs_rsv->reserved;
                if (num_bytes > delta) {
                        to_free = num_bytes - delta;
                        num_bytes = delta;
                }
        } else {
                to_free = num_bytes;
                num_bytes = 0;
        }

        if (num_bytes)
                delayed_refs_rsv->reserved += num_bytes;
        if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
                delayed_refs_rsv->full = true;
        spin_unlock(&delayed_refs_rsv->lock);

        if (num_bytes)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, num_bytes, 1);
        if (to_free)
                btrfs_space_info_free_bytes_may_use(fs_info,
                                delayed_refs_rsv->space_info, to_free);
}

/**
 * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to one item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
                                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved < block_rsv->size) {
                num_bytes = block_rsv->size - block_rsv->reserved;
                num_bytes = min(num_bytes, limit);
        }
        spin_unlock(&block_rsv->lock);

        if (!num_bytes)
                return 0;

        ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
        if (ret)
                return ret;
        btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
        trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                      0, num_bytes, 1);
        return 0;
}

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
                          struct btrfs_delayed_tree_ref *ref2)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
                          struct btrfs_delayed_data_ref *ref2)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

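/*
 * Total ordering used by the per-head ref rbtree: first by ref type, then
 * by the type-specific fields compared above, and finally (when check_seq
 * is set) by seq, so that multiple mods to the same logical ref sort
 * adjacently in tree mod log order.
 */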
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
                     struct btrfs_delayed_ref_node *ref2,
                     bool check_seq)
{
        int ret = 0;

        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
                                     btrfs_delayed_node_to_tree_ref(ref2));
        else
                ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
                                     btrfs_delayed_node_to_data_ref(ref2));
        if (ret)
                return ret;
        if (check_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        return 0;
}

/*
 * Insert a new head ref into the rbtree.  Returns the existing entry if one
 * is already present for the same bytenr, or NULL after a successful insert.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;
        bool leftmost = true;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->bytenr) {
                        p = &(*p)->rb_left;
                } else if (bytenr > entry->bytenr) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}
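/*
 * Insert a ref node into a head's ref_tree, ordered by comp_refs() with seq
 * included.  Returns the existing node on an exact match (so the caller can
 * merge ref_mod counts), or NULL after a successful insert.
 */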
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
                                                  struct btrfs_delayed_ref_node *ins)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *node = &ins->ref_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        bool leftmost = true;

        while (*p) {
                int comp;

                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 ref_node);
                comp = comp_refs(ins, entry, true);
                if (comp < 0) {
                        p = &(*p)->rb_left;
                } else if (comp > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}
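/* Return the ref head with the lowest bytenr, or NULL if the tree is empty. */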
static struct btrfs_delayed_ref_head *find_first_ref_head(
                struct btrfs_delayed_ref_root *dr)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = rb_first_cached(&dr->href_root);
        if (!n)
                return NULL;

        entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

        return entry;
}

/*
 * Find a head entry based on bytenr.  This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
                struct btrfs_delayed_ref_root *dr, u64 bytenr,
                bool return_bigger)
{
        struct rb_root *root = &dr->href_root.rb_root;
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                return NULL;
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                }
                return entry;
        }
        return NULL;
}
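/*
 * Lock @head's mutex without deadlocking against delayed_refs->lock.  If the
 * trylock fails we take a reference, drop delayed_refs->lock to sleep on the
 * mutex, then re-take the lock.  Returns -EAGAIN if the head was removed from
 * the tree in the meantime, in which case the caller must start over.
 */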
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (RB_EMPTY_NODE(&head->href_node)) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref_head(head);
        return 0;
}
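/*
 * Unlink @ref from its head's ref_tree and add_list and drop the reference
 * it held.  Caller must hold head->lock.
 */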
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        lockdep_assert_held(&head->lock);
        rb_erase_cached(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
}
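/*
 * Merge @ref with the refs that follow it in the tree and compare equal
 * modulo seq, stopping at the tree mod log barrier @seq.  Matching actions
 * accumulate into ref_mod, opposing actions cancel; a ref whose ref_mod
 * reaches zero is dropped entirely.  Returns true if @ref itself was
 * consumed, which forces the caller to restart its scan of the head.
 */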
static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        struct rb_node *node = rb_next(&ref->ref_node);
        bool done = false;

        while (!done && node) {
                int mod;

                next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                node = rb_next(node);
                if (seq && next->seq >= seq)
                        break;
                if (comp_refs(ref, next, false))
                        break;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        }

        return done;
}

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_ref_node *ref;
        struct rb_node *node;
        u64 seq = 0;

        lockdep_assert_held(&head->lock);

        if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
        for (node = rb_first_cached(&head->ref_tree); node;
             node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
                        continue;
                if (merge_ref(trans, delayed_refs, head, ref, seq))
                        goto again;
        }
}
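/*
 * Returns 1 if refs with the given seq must be held back because a tree mod
 * log user still needs the state from before that seq, 0 otherwise.
 */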
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
        int ret = 0;
        u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

        if (min_seq != 0 && seq >= min_seq) {
                btrfs_debug(fs_info,
                            "holding back delayed_ref %llu, lowest is %llu",
                            seq, min_seq);
                ret = 1;
        }

        return ret;
}
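/*
 * Pick the next head to process: the first unprocessed head at or after
 * run_delayed_start, wrapping around to the beginning of the tree once.
 * Marks the returned head as processing and advances run_delayed_start
 * past it.
 */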
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs)
{
        struct btrfs_delayed_ref_head *head;

again:
        head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
                             true);
        if (!head && delayed_refs->run_delayed_start != 0) {
                delayed_refs->run_delayed_start = 0;
                head = find_first_ref_head(delayed_refs);
        }
        if (!head)
                return NULL;

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (delayed_refs->run_delayed_start == 0)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr +
                head->num_bytes;
        return head;
}

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        lockdep_assert_held(&head->lock);

        rb_erase_cached(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
        atomic_dec(&delayed_refs->num_entries);
        delayed_refs->num_heads--;
        if (head->processing == 0)
                delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node into the head's ref tree, or merge it with an
 * existing node that compares equal.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *root,
                              struct btrfs_delayed_ref_head *href,
                              struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        exist = tree_insert(&href->ref_tree, ref);
        if (!exist)
                goto inserted;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove the existing ref if its ref_mod hits zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;
inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref;
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_delayed_ref_head *existing,
                         struct btrfs_delayed_ref_head *update)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int old_ref_mod;

        BUG_ON(existing->is_data != update->is_data);

        spin_lock(&existing->lock);
        if (update->must_insert_reserved) {
                /*
                 * If the extent was freed and then reallocated before the
                 * delayed ref entries were processed, we can end up with an
                 * existing head ref without the must_insert_reserved flag
                 * set.  Set it again here.
                 */
                existing->must_insert_reserved = update->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (update->extent_op) {
                if (!existing->extent_op) {
                        existing->extent_op = update->extent_op;
                } else {
                        if (update->extent_op->update_key) {
                                memcpy(&existing->extent_op->key,
                                       &update->extent_op->key,
                                       sizeof(update->extent_op->key));
                                existing->extent_op->update_key = true;
                        }
                        if (update->extent_op->update_flags) {
                                existing->extent_op->flags_to_set |=
                                        update->extent_op->flags_to_set;
                                existing->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(update->extent_op);
                }
        }
        /*
         * Update the reference mod on the head to reflect this new operation.
         * We only need the lock here because the head could be processed
         * concurrently; for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing->is_data) {
                u64 csum_leaves =
                        btrfs_csum_bytes_to_leaves(fs_info,
                                                   existing->num_bytes);

                if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
                        delayed_refs->pending_csums -= existing->num_bytes;
                        btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
                }
                if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
                        delayed_refs->pending_csums += existing->num_bytes;
                        trans->delayed_ref_updates += csum_leaves;
                }
        }

        spin_unlock(&existing->lock);
}

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
                                  struct btrfs_qgroup_extent_record *qrecord,
                                  u64 bytenr, u64 num_bytes, u64 ref_root,
                                  u64 reserved, int action, bool is_data,
                                  bool is_system)
{
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * The head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
         * accounting when the extent is finally added, or if a later
         * modification deletes the delayed ref without ever inserting the
         * extent into the extent allocation tree.  ref->must_insert_reserved
         * is the flag used to record that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        refcount_set(&head_ref->refs, 1);
        head_ref->bytenr = bytenr;
        head_ref->num_bytes = num_bytes;
        head_ref->ref_mod = count_mod;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->is_system = is_system;
        head_ref->ref_tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        if (qrecord) {
                if (ref_root && reserved) {
                        qrecord->data_rsv = reserved;
                        qrecord->data_rsv_refroot = ref_root;
                }
                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;
        }
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     int action, int *qrecord_inserted_ret)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_root *delayed_refs;
        int qrecord_inserted = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
                        qrecord_inserted = 1;
        }

        trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                update_existing_head_ref(trans, existing, head_ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (head_ref->is_data && head_ref->ref_mod < 0) {
                        delayed_refs->pending_csums += head_ref->num_bytes;
                        trans->delayed_ref_updates +=
                                btrfs_csum_bytes_to_leaves(trans->fs_info,
                                                           head_ref->num_bytes);
                }
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;

        return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *                           modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:        The structure which is going to be initialized.
 *
 * @bytenr:     The logical address of the extent for which a modification is
 *              going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:   The id of the root where this modification has originated, this
 *              can be either one of the well-known metadata trees or the
 *              subvolume id which references this extent.
 *
 * @action:     Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *              BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:   Holds the type of the extent which is being recorded, can be
 *              one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *              when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *              BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
                                    struct btrfs_delayed_ref_node *ref,
                                    u64 bytenr, u64 num_bytes, u64 ref_root,
                                    int action, u8 ref_type)
{
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        ref->type = ref_type;
        RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        bool is_system;
        int action = generic_ref->action;
        int level = generic_ref->tree_ref.level;
        int ret;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u8 ref_type;

        is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

        ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                        return -ENOMEM;
                }
        }

        if (parent)
                ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref_type = BTRFS_TREE_BLOCK_REF_KEY;

        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                generic_ref->tree_ref.owning_root, action,
                                ref_type);
        ref->root = generic_ref->tree_ref.owning_root;
        ref->parent = parent;
        ref->level = level;

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
                              generic_ref->tree_ref.owning_root, 0, action,
                              false, is_system);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);

        trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        if (qrecord_inserted)
                btrfs_qgroup_trace_extent_post(trans, record);

        return 0;
}
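
/*
 * Minimal usage sketch for callers.  The btrfs_init_generic_ref() and
 * btrfs_init_tree_ref() helpers live in delayed-ref.h and their argument
 * lists have varied across kernel versions, so treat this as illustrative
 * rather than exact:
 *
 *	struct btrfs_ref generic_ref = { 0 };
 *
 *	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF,
 *			       buf->start, buf->len, parent);
 *	btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
 *			    root->root_key.objectid, ...);
 *	ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
 */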

/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               u64 reserved)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        int action = generic_ref->action;
        int ret;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u64 ref_root = generic_ref->data_ref.owning_root;
        u64 owner = generic_ref->data_ref.ino;
        u64 offset = generic_ref->data_ref.offset;
        u8 ref_type;

        ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        if (parent)
                ref_type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref_type = BTRFS_EXTENT_DATA_REF_KEY;
        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                ref_root, action, ref_type);
        ref->root = ref_root;
        ref->parent = parent;
        ref->objectid = owner;
        ref->offset = offset;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
                              reserved, action, true, false);
        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);

        trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(trans, record);
        return 0;
}
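/*
 * Queue a delayed modification (an extent_op carrying key and/or flags
 * updates) against an existing extent, without changing its reference count.
 */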
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
                              BTRFS_UPDATE_DELAYED_HEAD, false, false);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
                             NULL);

        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);
        return 0;
}

/*
 * This does a simple search for the head node for a given extent.  Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
        lockdep_assert_held(&delayed_refs->lock);

        return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}