// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"

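/*
 * Radix-tree tag set on an fs root while it is part of the currently
 * running transaction; commit_fs_roots() walks the tagged roots to
 * update their root items, and btrfs_add_dropped_root() clears the tag
 * for roots that must not be updated at commit time.
 */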
#define BTRFS_ROOT_TRANS_TAG 0

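/*
 * For each transaction state, the bitmask of handle types (__TRANS_*)
 * that are not allowed to join a transaction while it is in that state;
 * join_transaction() returns -EBUSY for them so the caller waits.
 */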
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.dirty_extent_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group_cache *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group_cache,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_put_block_group_trimming(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}

static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		if (is_fstree(root->root_key.objectid))
			btrfs_unpin_free_ino(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);
	up_write(&fs_info->commit_root_sem);
}

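/*
 * These helpers count "external writers": handles of the types in
 * TRANS_EXTWRITERS.  The counter lets a committing transaction wait
 * for external writers to detach before it proceeds, so they cannot
 * add new work once a commit is underway.
 */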
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved);
	trans->chunk_bytes_reserved = 0;
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
	 * current transaction, and commit it. If there is no transaction, just
	 * return ENOENT.
	 */
	if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above
		 */
		kfree(cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(root == fs_info->extent_root);
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * see below for IN_TRANS_SETUP usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/* make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return 0;
}


void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	/*
	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return 0;
}

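/*
 * A transaction is "blocked" from COMMIT_START until it reaches
 * UNBLOCKED: during that window new TRANS_START handles have to wait,
 * unless the transaction has already aborted.
 */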
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!TRANS_ABORTED(trans));
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return 0;

	if (type == TRANS_START)
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

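/*
 * Common implementation behind all the btrfs_*_transaction() variants
 * below: @num_items is the number of tree items the caller expects to
 * modify (it sizes the metadata reservation), @type selects the join
 * semantics, @flush controls how aggressively we may flush space to
 * satisfy the reservation, and @enforce_qgroups controls whether qgroup
 * limits are enforced for the metadata reservation.
 */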
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	/* Send isn't supposed to start transactions. */
	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
		u64 delayed_refs_bytes = 0;

		qgroup_reserved = num_items * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
							 enforce_qgroups);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * We want to reserve all the bytes we may need all at once, so
		 * we only do 1 enospc flushing cycle per transaction start.  We
		 * accomplish this by simply assuming we'll do 2 x num_items
		 * worth of delayed refs updates in this trans handle, and
		 * refill that amount for whatever is missing in the reserve.
		 */
		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		    delayed_refs_rsv->full == 0) {
			delayed_refs_bytes = num_bytes;
			num_bytes <<= 1;
		}

		/*
		 * Do the reservation for the relocation root creation
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
		if (ret)
			goto reserve_fail;
		if (delayed_refs_bytes) {
			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
							  delayed_refs_bytes);
			num_bytes -= delayed_refs_bytes;
		}

		if (rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !delayed_refs_rsv->full) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space.  We still want these guys to refill the
		 * delayed block_rsv, so just add 1 item's worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	h->can_flush_pending_bgs = true;
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
	 * ALLOC_FORCE the first run through, and then we won't allocate for
	 * anybody else who races in later.  We don't care about the return
	 * value here.
	 */
	if (do_chunk_alloc && num_bytes) {
		u64 flags = h->block_rsv->space_info->flags;

		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
	 * call btrfs_join_transaction() while we're also starting a
	 * transaction.
	 *
	 * Thus it needs to be called after current->journal_info is
	 * initialized, or we can deadlock.
	 */
	btrfs_record_root_in_trans(h, root);

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or after waiting for the current one to finish.
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that the inactive transaction
 * is still in memory, not fully on disk.  If you want to make sure there
 * is no inactive transaction in the fs when -ENOENT is returned, invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, but this one also waits for any
 * inactive transaction to fully complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT)) {
		int ret;

		ret = btrfs_wait_for_commit(root->fs_info, 0);
		if (ret)
			return ERR_PTR(ret);
	}

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

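/*
 * Wait for the commit of the transaction with the given transid, or of
 * the newest committing transaction when @transid is 0.  Returns the
 * aborted status of the waited-on transaction, or -EINVAL if no such
 * transaction exists (yet).
 */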
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(cur_trans);
	ret = cur_trans->aborted;
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}

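/*
 * Heuristic used by btrfs_should_end_transaction(): end the handle
 * early when delayed refs need flushing or the global block reserve is
 * running low.
 */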
static int should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_check_space_for_delayed_refs(fs_info))
		return 1;

	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    cur_trans->delayed_refs.flushing)
		return 1;

	return should_end_transaction(trans);
}

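/*
 * Give the metadata bytes still reserved by this handle back to the
 * per-transaction block reserve.
 */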
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved)
		return;

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int err = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (TRANS_ABORTED(trans) ||
	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		if (TRANS_ABORTED(trans))
			err = trans->aborted;
		else
			err = -EROFS;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error.  So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leaves for which
		 * writeback hasn't finished yet (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree.  For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
		       struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int err;

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int err;

	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_NEW) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious.  Any of the
 * failures will cause the file system to go offline.  We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;
again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);

	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list))
		list_add_tail(&root->root_list, &fs_info->dead_roots);
	spin_unlock(&fs_info->trans_lock);
}

/*
 * update all the fs tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			int ret2;

			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			ret2 = btrfs_update_root(trans, fs_info->tree_root,
						 &root->root_key,
						 &root->root_item);
			if (ret2)
				return ret2;
			spin_lock(&fs_info->fs_roots_radix_lock);
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return 0;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(info);
		cond_resched();

		if (btrfs_fs_closing(info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(info)) {
			btrfs_debug(info, "defrag_root cancelled");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * Do all the special snapshot-related qgroup dirty hacks.
 *
 * Will do all needed qgroup inherit and dirty hacks, like switching
 * commit roots inside one transaction and writing all the btrees to
 * disk, to make qgroups work.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not
	 * enabled.  If this check races with the ioctl, rescan will
	 * kick in anyway.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/*
	 * Ensure dirty @src will be committed.  Otherwise, after the coming
	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
	 * recorded root will never be updated again, causing an outdated root
	 * item.
	 */
	record_root_in_trans(trans, src, 1);

	/*
	 * We are going to commit transaction, see btrfs_commit_transaction()
	 * comment for reason locking tree_log_mutex
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now qgroups are all updated, we can inherit them to the new qgroup */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all subvolume and extent trees
	 *    To ensure all subvolume and extent trees have a valid
	 *    commit_root for the later insert_dir_item() accounting
	 * 2) write all btree blocks onto disk
	 *    This is to make sure later btree modifications will be COWed;
	 *    otherwise commit_root can be populated and cause wrong qgroup
	 *    numbers
	 * In this simplified commit, we don't really care about other trees
	 * like chunk and root tree, as they won't affect qgroup.
	 * And we don't write super to avoid half committed status.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	mutex_unlock(&fs_info->tree_log_mutex);

	/*
	 * Force parent root to be updated, as we recorded it before so its
	 * last_trans == cur_transid.  Otherwise it won't be committed again
	 * onto disk after the later insert_dir_item()
	 */
	if (!ret)
		record_root_in_trans(trans, parent, 1);
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error that may affect the commitment of the current transaction
 * happens, we should return the error number.  If the error only affects
 * the creation of the pending snapshot, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec64 cur_time;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	ASSERT(pending->path);
	path = pending->path;

	ASSERT(pending->root_item);
	new_root_item = pending->root_item;

	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Make qgroup skip the new snapshot's qgroupid, as it is
	 * accounted for by the later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(root,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid,
				      trans->bytes_reserved, 1);
	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root, 0);

	cur_time = current_time(parent_inode);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(BTRFS_I(parent_inode)),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * Pull in the delayed directory update and the delayed inode
	 * item; otherwise we corrupt the FS during snapshot creation.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	record_root_in_trans(trans, root, 0);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_set_lock_blocking_write(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* see comments in should_cow_block() */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(BTRFS_I(parent_inode)), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Do special qgroup accounting for snapshot, as we do some qgroup
	 * snapshot hack to make fast snapshots possible.
	 * To cooperate with that hack, we do the hack again here; otherwise
	 * snapshot creation would be greatly slowed down by a subtree qgroup
	 * rescan.
	 */
	ret = qgroup_account_snapshot(trans, root, parent_root,
				      pending->inherit, objectid);
	if (ret < 0)
		goto fail;

	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
				    dentry->d_name.len, BTRFS_I(parent_inode),
				    &key, BTRFS_FT_DIR, index);
	/* We have checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime =
		current_time(parent_inode);
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, new_uuid.b, BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			goto fail;
		}
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;

	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, pending);
		if (ret)
			break;
	}
	return ret;
}

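/*
 * Copy the root pointers (bytenr, generation, level) of the chunk and
 * tree roots into the in-memory copy of the superblock before it is
 * written out.
 */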
static void update_super_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = fs_info->super_copy;

	root_item = &fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
		super->uuid_tree_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = (trans->state >= TRANS_STATE_COMMIT_START);
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
					    struct btrfs_transaction *trans)
{
	wait_event(fs_info->transaction_blocked_wait,
		   trans->state >= TRANS_STATE_COMMIT_START ||
		   TRANS_ABORTED(trans));
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(
					struct btrfs_fs_info *fs_info,
					struct btrfs_transaction *trans)
{
	wait_event(fs_info->transaction_wait,
		   trans->state >= TRANS_STATE_UNBLOCKED ||
		   TRANS_ABORTED(trans));
}

/*
 * commit transactions asynchronously.  once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->newtrans = btrfs_join_transaction(trans->root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);

		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
	else
		wait_current_trans_commit_start(fs_info, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
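
/*
 * Illustrative sketch (hypothetical helper, not called anywhere in this
 * file): a typical user of the async commit joins a transaction on some
 * root and hands it off, returning as soon as further joins are blocked
 * rather than waiting for the commit to finish.  The root pointer is
 * assumed to come from the caller.
 */
static int __maybe_unused example_async_commit(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * wait_for_unblock == 0: return once the commit has started and
	 * blocked new joins; the commit itself finishes in the scheduled
	 * work item.
	 */
	return btrfs_commit_transaction_async(trans, 0);
}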

static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(refcount_read(&trans->use_count) > 1);

	btrfs_abort_transaction(trans, err);

	spin_lock(&fs_info->trans_lock);

	/*
	 * If the transaction has already been removed from the list, it was
	 * committed successfully and must never reach this cleanup path.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, fs_info);

	spin_lock(&fs_info->trans_lock);
	if (cur_trans == fs_info->running_transaction)
		fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	/* Drop our reference plus the extra one taken at transaction start. */
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(trans->root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * Release reserved delayed ref space of all pending block groups of the
 * transaction and remove them from the list
 */
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group_cache *block_group, *tmp;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		list_del_init(&block_group->bg_list);
	}
}

static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	/*
	 * We use writeback_inodes_sb here because if we used
	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
	 * We are currently holding the fs freeze lock; if we do an async
	 * flush we'll do btrfs_join_transaction() and deadlock because we
	 * need to wait for the fs freeze lock.  Using the direct flushing
	 * we benefit from already being in a transaction and our
	 * join_transaction doesn't have to re-take the fs freeze lock.
	 */
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
		writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
	} else {
		struct btrfs_pending_snapshot *pending;
		struct list_head *head = &trans->transaction->pending_snapshots;

		/*
		 * Flush delalloc for any root that is going to be snapshotted.
		 * This is done to avoid a corrupted version of files, in the
		 * snapshots, that had both buffered and direct IO writes (even
		 * if they were done sequentially) due to an unordered update of
		 * the inode's size on disk.
		 */
		list_for_each_entry(pending, head, list) {
			int ret;

			ret = btrfs_start_delalloc_snapshot(pending->root);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
	} else {
		struct btrfs_pending_snapshot *pending;
		struct list_head *head = &trans->transaction->pending_snapshots;

		/*
		 * Wait for any delalloc that we started previously for the
		 * roots that are going to be snapshotted.  This is to avoid
		 * a corrupted version of files in the snapshots that had both
		 * buffered and direct IO writes (even if they were done
		 * sequentially).
		 */
		list_for_each_entry(pending, head, list)
			btrfs_wait_ordered_extents(pending->root,
						   U64_MAX, 0, U64_MAX);
	}
}

int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;

	/*
	 * Some places just start a transaction to commit it.  We need to
	 * make sure that if this commit fails, the abort code actually marks
	 * the transaction as failed, so set trans->dirty to make the abort
	 * code do the right thing.
	 */
	trans->dirty = true;

	/* Stop the commit early if ->aborted is set */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans);
		return ret;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	/*
	 * Make a pass through all the delayed refs we have so far; any
	 * running procs may add more while we are here.
	 */
	ret = btrfs_run_delayed_refs(trans, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	cur_trans = trans->transaction;

	/*
	 * Set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	btrfs_create_pending_block_groups(trans);

	ret = btrfs_run_delayed_refs(trans, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/*
		 * This mutex is also taken before trying to set block groups
		 * readonly.  We need to make sure that nobody has set a block
		 * group readonly after extents from that block group have
		 * been allocated for cache files.  btrfs_set_block_group_ro
		 * will wait for the transaction to commit if it finds
		 * BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO.  It wouldn't
		 * hurt to have more than one go through, but there's no
		 * real advantage to it either.
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&fs_info->ro_block_group_mutex);

		if (run_it) {
			ret = btrfs_start_dirty_block_groups(trans);
			if (ret) {
				btrfs_end_transaction(trans);
				return ret;
			}
		}
	}

	spin_lock(&fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&fs_info->trans_lock);
		refcount_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans);

		wait_for_commit(cur_trans);

		if (TRANS_ABORTED(cur_trans))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			refcount_inc(&prev_trans->use_count);
			spin_unlock(&fs_info->trans_lock);

			wait_for_commit(prev_trans);
			ret = READ_ONCE(prev_trans->aborted);

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
	} else {
		spin_unlock(&fs_info->trans_lock);
		/*
		 * The previous transaction was aborted and was already removed
		 * from the list of transactions at fs_info->trans_list.  So we
		 * abort to prevent writing a new superblock that reflects a
		 * corrupt state (pointing to trees with unwritten nodes and
		 * leaves).
		 */
		if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
			ret = -EROFS;
			goto cleanup_transaction;
		}
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(trans);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* Some pending items might have been added after the previous flush. */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(trans);

	btrfs_scrub_pause(fs_info);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}
	/*
	 * The reloc mutex makes sure that we stop the balancing code from
	 * coming in and moving extents around in the middle of the commit.
	 */
	mutex_lock(&fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Make sure none of the code above managed to slip in a
	 * delayed item.
	 */
	btrfs_assert_delayed_root_empty(fs_info);

	WARN_ON(cur_trans != trans->transaction);

	/*
	 * commit_cowonly_roots() is responsible for getting the various
	 * roots consistent with each other.  Every pointer in the tree of
	 * tree roots has to point to the most up to date root for every
	 * subvolume and other tree.  So, we have to keep the tree logging
	 * code from jumping in and changing any of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log writers,
	 * but a little lower down we drop the trans mutex and let new people
	 * in.  By holding the tree_log_mutex from now until after the super
	 * is written, we avoid races with the tree-log code.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we can apply the pending changes
	 * before the next transaction.
	 */
	btrfs_apply_pending_changes(fs_info);

	/*
	 * commit_fs_roots() gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots.
	 */
	btrfs_free_log_root_tree(trans, fs_info);

	/*
	 * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
	 * new delayed refs.  Must handle them or qgroup can be wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since fs roots are all committed, we can get a quite accurate
	 * new_roots.  So let's do quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = commit_cowonly_roots(trans);
	if (ret) {
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		mutex_unlock(&fs_info->tree_log_mutex);
		mutex_unlock(&fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(fs_info);

	cur_trans = fs_info->running_transaction;

	btrfs_set_root_node(&fs_info->tree_root->root_item,
			    fs_info->tree_root->node);
	list_add_tail(&fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&fs_info->chunk_root->root_item,
			    fs_info->chunk_root->node);
	list_add_tail(&fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(trans);

	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(fs_info);

	btrfs_set_super_log_root(fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_copy));

	btrfs_commit_device_sizes(cur_trans);

	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);

	btrfs_trans_release_chunk_metadata(trans);

	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);
	mutex_unlock(&fs_info->reloc_mutex);

	wake_up(&fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Error while writing out transaction");
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_all_supers(fs_info, 0);
	/*
	 * The super is written, we can safely allow the tree-loggers
	 * to go about their business.
	 */
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret)
		goto scrub_continue;

	btrfs_finish_extent_commit(trans);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(fs_info);

	fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&fs_info->trans_lock);

	/* Drop our reference plus the extra one taken at transaction start. */
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);

	trace_btrfs_transaction_commit(trans->root);

	btrfs_scrub_continue(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	return ret;

scrub_continue:
	btrfs_scrub_continue(fs_info);
cleanup_transaction:
	btrfs_trans_release_metadata(trans);
	btrfs_cleanup_pending_block_groups(trans);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, ret);

	return ret;
}
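
/*
 * Illustrative sketch (hypothetical helper, not used by this file): the
 * typical life cycle around btrfs_commit_transaction().  A caller reserves
 * space for a number of items when starting the handle, performs its tree
 * modifications, and then either ends the handle (letting the transaction
 * commit later) or commits synchronously as below.  The root and the
 * elided modification step are assumed to come from the caller.
 */
static int __maybe_unused example_modify_and_commit(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* Reserve metadata space for one item worth of modifications. */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify trees here; on error call btrfs_end_transaction() ... */

	/*
	 * Commits this transaction, or ends the handle and waits if another
	 * task has already started the commit.
	 */
	return btrfs_commit_transaction(trans);
}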

/*
 * Returns 1 if there are more dead roots to process (call me again), and 0
 * if there are none left at the time of the call or if dropping one failed.
 *
 * A return of 1 means there are certainly more snapshots to delete, but if
 * a new one arrives during processing this may return 0.  We don't mind,
 * because btrfs_commit_super will poke the cleaner thread and it will
 * process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
			BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	return (ret < 0) ? 0 : 1;
}
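
/*
 * Illustrative sketch (hypothetical loop, modeled on how a cleaner-style
 * caller might use the function above): keep draining dead roots while
 * more work is reported, rescheduling between iterations.  Any root of the
 * filesystem works; it is only used to reach fs_info.
 */
static void __maybe_unused example_drain_dead_roots(struct btrfs_root *root)
{
	while (btrfs_clean_one_deleted_snapshot(root) == 1)
		cond_resched();
}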

void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
{
	unsigned long prev;
	unsigned long bit;

	prev = xchg(&fs_info->pending_changes, 0);
	if (!prev)
		return;

	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
	if (prev & bit)
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
	prev &= ~bit;

	bit = 1 << BTRFS_PENDING_COMMIT;
	if (prev & bit)
		btrfs_debug(fs_info, "pending commit done");
	prev &= ~bit;

	if (prev)
		btrfs_warn(fs_info,
			"unknown pending changes left 0x%lx, ignoring", prev);
}
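
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * pending change is queued by setting its BTRFS_PENDING_* bit in
 * fs_info->pending_changes; btrfs_apply_pending_changes() then consumes
 * every queued bit atomically (via the xchg() above) at the end of the
 * next transaction commit.
 */
static void __maybe_unused example_queue_pending_change(
					struct btrfs_fs_info *fs_info)
{
	/* Ask the next commit to switch on the inode map cache. */
	set_bit(BTRFS_PENDING_SET_INODE_MAP_CACHE, &fs_info->pending_changes);
}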