// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
#include "misc.h"
#include "subpage.h"
#include "zoned.h"
#include "inode-item.h"

/*
 * Relocation overview
 *
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is used by resize (shrink only), profile conversion, space compaction,
 * and the balance routine to spread chunks over devices.
 *
 *		Before		|		After
 * ------------------------------------------------------------------
 * BG A: 10 data extents	| BG A: deleted
 * BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 * BG C:  1 data extent		| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in the target block group will get
 *      its reloc tree built.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after swap.
 *
 * 3.   Cleanup reloc trees and data reloc tree.
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is the relocate_block_group() function.
 */
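
/*
 * Illustrative note (not a verbatim call chain): for data block groups the
 * work in step 2 runs twice, once per stage defined below.  In the
 * MOVE_DATA_EXTENTS stage the data is copied into the data reloc inode, and
 * in the UPDATE_DATA_PTRS stage the file extent items in all affected trees
 * are rewritten to point at the new locations (see replace_file_extents()).
 */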

#define RELOCATION_RESERVED_NODES	256
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * Represents a tree block to process.
 */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */
	u64 owner;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void mark_block_processed(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_range(node->bytenr, rc->block_group->start,
		     rc->block_group->length)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		set_extent_bits(&rc->processed_blocks, node->bytenr,
				node->bytenr + blocksize - 1, EXTENT_DIRTY);
	}
	node->processed = 1;
}

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}
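
/*
 * A minimal usage sketch of the mapping tree (mirrors __add_reloc_root() and
 * find_reloc_root() below): insertion keys the node by the root's commit
 * root bytenr, lookup maps a bytenr back to the reloc tree, and both run
 * under mapping_tree::lock:
 *
 *	spin_lock(&tree->lock);
 *	rb_node = rb_simple_insert(&tree->rb_root, node->bytenr, &node->rb_node);
 *	...
 *	rb_node = rb_simple_search(&tree->rb_root, bytenr);
 *	spin_unlock(&tree->lock);
 */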

/*
 * walk up backref nodes until we reach the node that represents the tree root
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct btrfs_backref_node *walk_down_backref(
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
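
/*
 * Together these two helpers enumerate every reference path from a block up
 * to the tree roots, with @edges acting as an explicit DFS stack.  Callers
 * typically iterate along these lines (illustrative sketch only):
 *
 *	int index = 0;
 *
 *	next = walk_up_backref(node, edges, &index);
 *	while (next) {
 *		... process the root node at the top of this path ...
 *		next = walk_down_backref(edges, &index);
 *		if (next)
 *			next = walk_up_backref(next, edges, &index);
 *	}
 */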

static void update_backref_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;

	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * Detached nodes are used to avoid unnecessary backref lookup.
	 * A transaction commit changes the extent tree, so the detached
	 * nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct btrfs_backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * Some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
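
/*
 * Note: update_backref_cache() returns 0 when the cache is already in sync
 * with @trans and 1 when stale entries were dropped or re-keyed.  In the
 * latter case, previously looked-up nodes may have moved in cache->rb_root,
 * so callers holding raw rb_node results should search again.
 */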

static bool reloc_root_is_dead(struct btrfs_root *root)
{
	/*
	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
	 * btrfs_update_reloc_root. We need to see the updated bit before
	 * trying to access reloc_root.
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}

/*
 * Check if this subvolume tree has a valid reloc tree.
 *
 * A reloc tree after swap is considered dead, thus not considered valid.
 * This is enough for most callers, as they don't distinguish a dead reloc
 * root from no reloc root. But btrfs_should_ignore_reloc_root() below is a
 * special case.
 */
static bool have_reloc_root(struct btrfs_root *root)
{
	if (reloc_root_is_dead(root))
		return false;
	if (!root->reloc_root)
		return false;
	return true;
}

int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return 1;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return 0;
	/*
	 * If there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	ASSERT(rc);
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return btrfs_grab_root(root);
}

/*
 * For useless nodes, do two major clean ups:
 *
 * - Cleanup the children edges and nodes
 *   If a child node is also orphan (no parent) during cleanup, then the child
 *   node will also be cleaned up.
 *
 * - Freeing up leaves (level 0), keeping nodes detached
 *   For nodes, the node is still cached as "detached"
 *
 * Return false if @node is not in the @useless_nodes list.
 * Return true if @node is in the @useless_nodes list.
 */
static bool handle_useless_nodes(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct list_head *useless_node = &cache->useless_node;
	bool ret = false;

	while (!list_empty(useless_node)) {
		struct btrfs_backref_node *cur;

		cur = list_first_entry(useless_node, struct btrfs_backref_node,
				       list);
		list_del_init(&cur->list);

		/* Only tree root nodes can be added to @useless_nodes */
		ASSERT(list_empty(&cur->upper));

		if (cur == node)
			ret = true;

		/* The node is the lowest node */
		if (cur->lowest) {
			list_del_init(&cur->lower);
			cur->lowest = 0;
		}

		/* Cleanup the lower edges */
		while (!list_empty(&cur->lower)) {
			struct btrfs_backref_edge *edge;
			struct btrfs_backref_node *lower;

			edge = list_entry(cur->lower.next,
					  struct btrfs_backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			btrfs_backref_free_edge(cache, edge);

			/* Child node is also orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
		}
		/* Mark this block processed for relocation */
		mark_block_processed(rc, cur);

		/*
		 * Backref nodes for tree leaves are deleted from the cache.
		 * Backref nodes for upper level tree blocks are left in the
		 * cache to avoid unnecessary backref lookup.
		 */
		if (cur->level > 0) {
			list_add(&cur->list, &cache->detached);
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}

/*
 * Build backref tree for a given tree block. The root of the backref tree
 * corresponds to the tree block, and the leaves of the backref tree
 * correspond to the roots of b-trees that reference the tree block.
 *
 * The basic idea of this function is to check backrefs of a given block to
 * find upper level blocks that reference the block, and then check backrefs
 * of these upper level blocks recursively. The recursion stops when the tree
 * root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;
	int err = 0;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	if (err) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}
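
/*
 * A minimal caller-side sketch (illustrative; actual callers do more setup):
 * the return value is either a cache-owned backref node, NULL when the whole
 * subtree turned out to be useless, or an ERR_PTR on failure.
 *
 *	node = build_backref_tree(rc, &block->key, block->level, block->bytenr);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	if (node)
 *		... relocate the block via its backref paths ...
 */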

/*
 * Helper to add a backref node for the newly created snapshot. The backref
 * node is created by cloning the backref node that corresponds to the root
 * of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *new_node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = rb_simple_search(&cache->rb_root,
					   reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct btrfs_backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
					    node->level);
	if (!new_node)
		return -ENOMEM;

	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = btrfs_backref_alloc_edge(cache);
			if (!new_edge)
				goto fail;

			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
						new_node, LINK_UPPER);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
				   &new_node->rb_node);
	if (rb_node)
		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct btrfs_backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		btrfs_backref_free_edge(cache, new_edge);
	}
	btrfs_backref_free_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->commit_root->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_err(fs_info,
			  "Duplicate root found for start=%llu while inserting into relocation tree",
			  node->bytenr);
		/* The node was never inserted, free it before bailing out */
		kfree(node);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;
	bool put_ref = false;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
					   root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
			RB_CLEAR_NODE(&node->rb_node);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		ASSERT(!node || (struct btrfs_root *)node->data == root);
	}

	/*
	 * We only put the reloc root here if it's on the list. There's a lot
	 * of places where the pattern is to splice the rc->reloc_roots, process
	 * the reloc roots, and then add the reloc root back onto
	 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
	 * list we don't want the reference being dropped, because whoever is
	 * messing with the list is in charge of the reference.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&root->root_list)) {
		put_ref = true;
		list_del_init(&root->root_list);
	}
	spin_unlock(&fs_info->trans_lock);
	if (put_ref)
		btrfs_put_root(root);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
				   root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = root->node->start;
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
	return 0;
}
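
/*
 * Note: this re-keys the mapping from the old commit root bytenr to the
 * current root node bytenr, so find_reloc_root() keeps resolving the reloc
 * tree after its root has been CoWed.
 */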

static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret = 0;
	bool must_abort = false;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * Called by btrfs_reloc_post_snapshot_hook. The source tree is
		 * a reloc tree, and all tree blocks modified after it was
		 * created have the RELOC flag set in their headers, so it's OK
		 * to not update the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails.
	 */
	must_abort = true;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		btrfs_set_root_drop_level(root_item, 0);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto fail;

	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	reloc_root->last_trans = trans->transid;
	return reloc_root;
fail:
	kfree(root_item);
abort:
	if (must_abort)
		btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}

/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 *
 * The reloc_root comes out of here with two references, one for
 * root->reloc_root, and another for being on the rc->reloc_roots list.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (!rc)
		return 0;

	/*
	 * The subvolume has a reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (reloc_root_is_dead(root))
		return 0;

	/*
	 * This is subtle but important. We do not do
	 * record_root_in_transaction for reloc roots, instead we record their
	 * corresponding fs root, and then here we update the last trans for the
	 * reloc root. This means that we have to do this for the entire life
	 * of the reloc root, regardless of which stage of the relocation we are
	 * in.
	 */
	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	/*
	 * We are merging reloc roots, we do not need new reloc trees. Also
	 * reloc trees never need their own reloc tree.
	 */
	if (!rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	root->reloc_root = btrfs_grab_root(reloc_root);
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!have_reloc_root(root))
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/*
	 * We are probably ok here, but __del_reloc_root() will drop its ref of
	 * the root. We have the ref for root->reloc_root, but just in case
	 * hold it while we update the reloc root.
	 */
	btrfs_grab_root(reloc_root);

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		__update_reloc_root(reloc_root);
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	btrfs_put_root(reloc_root);
	return ret;
}

/*
 * helper to find the first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
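
/*
 * Note for callers of find_next_inode(): a non-NULL return carries the
 * reference taken by igrab(), so it must be dropped with iput() or
 * btrfs_add_delayed_iput() when done, as the users below do.
 */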

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
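
/*
 * The index_cnt subtraction above relies on how the data reloc inode is laid
 * out: its index_cnt is set to the start of the block group being relocated,
 * so the file offset of each extent equals "old bytenr - index_cnt", and the
 * file extent item found there records the new disk location of the data.
 */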

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_range(bytenr, rc->block_group->start,
			      rc->block_group->length))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * read_folio to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_map_range(BTRFS_I(inode),
							    key.offset, end, true);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, NULL);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;

	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * Try to replace tree blocks in the fs tree with the new blocks in the
 * reloc tree. Tree blocks that haven't been modified since the reloc tree
 * was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			return ret;
		}
	}

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		ASSERT(level >= lowest_level);

		ret = btrfs_bin_search(parent, &key, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = btrfs_read_node_slot(parent, slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb,
						      BTRFS_NESTING_COW);
				if (ret) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
					break;
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		path->lowest_level = 0;
		if (ret) {
			if (ret > 0)
				ret = -ENOENT;
			break;
		}

		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
				    true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
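
/*
 * Summary of the swap above: after the two blockptr/generation updates, the
 * old block (from the fs tree) hangs off the reloc tree node and the new
 * block (from the reloc tree) hangs off the fs tree node. The four delayed
 * ref updates mirror that exactly: add a full-backref ref for old_bytenr
 * under the reloc tree node, add a ref for new_bytenr owned by the fs tree,
 * then drop the two references each tree held before the swap.
 */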

/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}

/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		eb = btrfs_read_node_slot(eb, path->slots[i]);
		if (IS_ERR(eb))
			return PTR_ERR(eb);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
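
/*
 * Pointers with generation <= last_snapshot existed before the reloc tree
 * was created and so were never relocated; both walkers above skip them.
 * merge_reloc_root() alternates walk_down_reloc_tree() and
 * walk_up_reloc_tree() to visit exactly the blocks the reloc tree has CoWed.
 */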

/*
 * invalidate the extent cache for file extents whose keys are in the range
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for read_folio to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
		btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	int ret;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	btrfs_set_root_drop_level(reloc_root_item, 0);
	btrfs_set_root_refs(reloc_root_item, 0);
	ret = btrfs_update_reloc_root(trans, root);
	if (ret)
		return ret;

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}

	return 0;
}

static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL. Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			if (reloc_root) {
				/*
				 * btrfs_drop_snapshot drops the ref we hold
				 * for ->reloc_root. If it fails however we
				 * must drop the ref ourselves.
				 */
				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
				if (ret2 < 0) {
					btrfs_put_root(reloc_root);
					if (!ret)
						ret = ret2;
				}
			}
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, 0, 1);
			if (ret2 < 0) {
				btrfs_put_root(root);
				if (!ret)
					ret = ret2;
			}
		}
	}
	return ret;
}

/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int reserve_level;
	int level;
	int max_level;
	int replaced = 0;
	int ret = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		atomic_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	/*
	 * In merge_reloc_root(), we modify the upper level pointer to swap the
	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
	 * block COW, we COW at most from level 1 to root level for each tree.
	 *
	 * Thus the needed metadata size is at most root_level * nodesize,
	 * and * 2 since we have two trees to COW.
	 */
	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
	min_reserved = fs_info->nodesize * reserve_level * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     min_reserved,
					     BTRFS_RESERVE_FLUSH_LIMIT);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}

		/*
		 * At this point we no longer have a reloc_control, so we can't
		 * depend on btrfs_init_reloc_root to update our last_trans.
		 *
		 * But that's ok, we started the trans handle on our
		 * corresponding fs_root, which means it's been added to the
		 * dirty list. At commit time we'll still call
		 * btrfs_update_reloc_root() and update our root item
		 * appropriately.
		 */
		reloc_root->last_trans = trans->transid;
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0)
			goto out;
		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		btrfs_set_root_drop_level(root_item, level);

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * Handle the case where only one block in the fs tree needs to be
	 * relocated and that block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
			      BTRFS_NESTING_COW);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
out:
	btrfs_free_path(path);

	if (ret == 0) {
		ret = insert_dirty_subvol(trans, rc, root);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return ret;
}
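
/*
 * The drop_progress/drop_level pair saved in the loop above doubles as a
 * restart cookie: if relocation is interrupted, btrfs_recover_relocation()
 * resumes merging on the next mount and the drop_progress branch at the top
 * of this function picks the walk back up from the recorded key.
 */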
1842
1843 static noinline_for_stack
prepare_to_merge(struct reloc_control * rc,int err)1844 int prepare_to_merge(struct reloc_control *rc, int err)
1845 {
1846 struct btrfs_root *root = rc->extent_root;
1847 struct btrfs_fs_info *fs_info = root->fs_info;
1848 struct btrfs_root *reloc_root;
1849 struct btrfs_trans_handle *trans;
1850 LIST_HEAD(reloc_roots);
1851 u64 num_bytes = 0;
1852 int ret;
1853
1854 mutex_lock(&fs_info->reloc_mutex);
1855 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1856 rc->merging_rsv_size += rc->nodes_relocated * 2;
1857 mutex_unlock(&fs_info->reloc_mutex);
1858
1859 again:
1860 if (!err) {
1861 num_bytes = rc->merging_rsv_size;
1862 ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
1863 BTRFS_RESERVE_FLUSH_ALL);
1864 if (ret)
1865 err = ret;
1866 }
1867
1868 trans = btrfs_join_transaction(rc->extent_root);
1869 if (IS_ERR(trans)) {
1870 if (!err)
1871 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1872 num_bytes, NULL);
1873 return PTR_ERR(trans);
1874 }
1875
1876 if (!err) {
1877 if (num_bytes != rc->merging_rsv_size) {
1878 btrfs_end_transaction(trans);
1879 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1880 num_bytes, NULL);
1881 goto again;
1882 }
1883 }
1884
1885 rc->merge_reloc_tree = 1;
1886
1887 while (!list_empty(&rc->reloc_roots)) {
1888 reloc_root = list_entry(rc->reloc_roots.next,
1889 struct btrfs_root, root_list);
1890 list_del_init(&reloc_root->root_list);
1891
1892 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1893 false);
1894 if (IS_ERR(root)) {
1895 /*
1896 * Even if we have an error we need this reloc root
1897 * back on our list so we can clean up properly.
1898 */
1899 list_add(&reloc_root->root_list, &reloc_roots);
1900 btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1901 if (!err)
1902 err = PTR_ERR(root);
1903 break;
1904 }
1905
1906 if (unlikely(root->reloc_root != reloc_root)) {
1907 if (root->reloc_root) {
1908 btrfs_err(fs_info,
1909 "reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
1910 root->root_key.objectid,
1911 root->reloc_root->root_key.objectid,
1912 root->reloc_root->root_key.type,
1913 root->reloc_root->root_key.offset,
1914 btrfs_root_generation(
1915 &root->reloc_root->root_item),
1916 reloc_root->root_key.objectid,
1917 reloc_root->root_key.type,
1918 reloc_root->root_key.offset,
1919 btrfs_root_generation(
1920 &reloc_root->root_item));
1921 } else {
1922 btrfs_err(fs_info,
1923 "reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
1924 root->root_key.objectid,
1925 reloc_root->root_key.objectid,
1926 reloc_root->root_key.type,
1927 reloc_root->root_key.offset,
1928 btrfs_root_generation(
1929 &reloc_root->root_item));
1930 }
1931 list_add(&reloc_root->root_list, &reloc_roots);
1932 btrfs_put_root(root);
1933 btrfs_abort_transaction(trans, -EUCLEAN);
1934 if (!err)
1935 err = -EUCLEAN;
1936 break;
1937 }
1938
1939 /*
1940 * Set the reference count to 1, so btrfs_recover_relocation()
1941 * knows it should resume merging.
1942 */
1943 if (!err)
1944 btrfs_set_root_refs(&reloc_root->root_item, 1);
1945 ret = btrfs_update_reloc_root(trans, root);
1946
1947 /*
1948 * Even if we have an error we need this reloc root back on our
1949 * list so we can clean up properly.
1950 */
1951 list_add(&reloc_root->root_list, &reloc_roots);
1952 btrfs_put_root(root);
1953
1954 if (ret) {
1955 btrfs_abort_transaction(trans, ret);
1956 if (!err)
1957 err = ret;
1958 break;
1959 }
1960 }
1961
1962 list_splice(&reloc_roots, &rc->reloc_roots);
1963
1964 if (!err)
1965 err = btrfs_commit_transaction(trans);
1966 else
1967 btrfs_end_transaction(trans);
1968 return err;
1969 }
1970
1971 static noinline_for_stack
1972 void free_reloc_roots(struct list_head *list)
1973 {
1974 struct btrfs_root *reloc_root, *tmp;
1975
1976 list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1977 __del_reloc_root(reloc_root);
1978 }
1979
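/*
 * Merge all reloc roots back into their subvolume trees.  Roots whose
 * root_item still has refs > 0 are merged via merge_reloc_root();
 * zero-ref roots (orphans left over from an interrupted relocation)
 * are detached and queued on rc->dirty_subvol_roots for cleanup.
 */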
1980 static noinline_for_stack
1981 void merge_reloc_roots(struct reloc_control *rc)
1982 {
1983 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1984 struct btrfs_root *root;
1985 struct btrfs_root *reloc_root;
1986 LIST_HEAD(reloc_roots);
1987 int found = 0;
1988 int ret = 0;
1989 again:
1990 root = rc->extent_root;
1991
1992 /*
1993 * This serializes us with btrfs_record_root_in_transaction();
1994 * we have to make sure nobody is in the middle of
1995 * adding their roots to the list while we are
1996 * doing this splice.
1997 */
1998 mutex_lock(&fs_info->reloc_mutex);
1999 list_splice_init(&rc->reloc_roots, &reloc_roots);
2000 mutex_unlock(&fs_info->reloc_mutex);
2001
2002 while (!list_empty(&reloc_roots)) {
2003 found = 1;
2004 reloc_root = list_entry(reloc_roots.next,
2005 struct btrfs_root, root_list);
2006
2007 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
2008 false);
2009 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2010 if (WARN_ON(IS_ERR(root))) {
2011 /*
2012 * For recovery we read the fs roots on mount,
2013 * and if we didn't find the root then we marked
2014 * the reloc root as a garbage root. For normal
2015 * relocation obviously the root should exist in
2016 * memory. However there's no reason we can't
2017 * handle the error properly here just in case.
2018 */
2019 ret = PTR_ERR(root);
2020 goto out;
2021 }
2022 if (WARN_ON(root->reloc_root != reloc_root)) {
2023 /*
2024 * This can happen if on-disk metadata has some
2025 * corruption, e.g. bad reloc tree key offset.
2026 */
2027 ret = -EINVAL;
2028 goto out;
2029 }
2030 ret = merge_reloc_root(rc, root);
2031 btrfs_put_root(root);
2032 if (ret) {
2033 if (list_empty(&reloc_root->root_list))
2034 list_add_tail(&reloc_root->root_list,
2035 &reloc_roots);
2036 goto out;
2037 }
2038 } else {
2039 if (!IS_ERR(root)) {
2040 if (root->reloc_root == reloc_root) {
2041 root->reloc_root = NULL;
2042 btrfs_put_root(reloc_root);
2043 }
2044 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
2045 &root->state);
2046 btrfs_put_root(root);
2047 }
2048
2049 list_del_init(&reloc_root->root_list);
2050 /* Don't forget to queue this reloc root for cleanup */
2051 list_add_tail(&reloc_root->reloc_dirty_list,
2052 &rc->dirty_subvol_roots);
2053 }
2054 }
2055
2056 if (found) {
2057 found = 0;
2058 goto again;
2059 }
2060 out:
2061 if (ret) {
2062 btrfs_handle_fs_error(fs_info, ret, NULL);
2063 free_reloc_roots(&reloc_roots);
2064
2065 /* new reloc root may be added */
2066 mutex_lock(&fs_info->reloc_mutex);
2067 list_splice_init(&rc->reloc_roots, &reloc_roots);
2068 mutex_unlock(&fs_info->reloc_mutex);
2069 free_reloc_roots(&reloc_roots);
2070 }
2071
2072 /*
2073 * We used to have
2074 *
2075 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2076 *
2077 * here, but it's wrong. If we fail to start the transaction in
2078 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
2079 * have actually been removed from the reloc_root_tree rb tree. This is
2080 * fine because we're bailing here, and we hold a reference on the root
2081 * for the list that holds it, so these roots will be cleaned up when we
2082 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
2083 * will be cleaned up on unmount.
2084 *
2085 * The remaining nodes will be cleaned up by free_reloc_control.
2086 */
2087 }
2088
2089 static void free_block_list(struct rb_root *blocks)
2090 {
2091 struct tree_block *block;
2092 struct rb_node *rb_node;
2093 while ((rb_node = rb_first(blocks))) {
2094 block = rb_entry(rb_node, struct tree_block, rb_node);
2095 rb_erase(rb_node, blocks);
2096 kfree(block);
2097 }
2098 }
2099
2100 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2101 struct btrfs_root *reloc_root)
2102 {
2103 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2104 struct btrfs_root *root;
2105 int ret;
2106
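/* Nothing to do if this reloc root was already recorded in this transaction. */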
2107 if (reloc_root->last_trans == trans->transid)
2108 return 0;
2109
2110 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
2111
2112 /*
2113 * This should succeed, since we can't have a reloc root without having
2114 * already looked up the actual root and created the reloc root for this
2115 * root.
2116 *
2117 * However if there's some sort of corruption where we have a ref to a
2118 * reloc root without a corresponding root this could return ENOENT.
2119 */
2120 if (IS_ERR(root)) {
2121 ASSERT(0);
2122 return PTR_ERR(root);
2123 }
2124 if (root->reloc_root != reloc_root) {
2125 ASSERT(0);
2126 btrfs_err(fs_info,
2127 "root %llu has two reloc roots associated with it",
2128 reloc_root->root_key.offset);
2129 btrfs_put_root(root);
2130 return -EUCLEAN;
2131 }
2132 ret = btrfs_record_root_in_trans(trans, root);
2133 btrfs_put_root(root);
2134
2135 return ret;
2136 }
2137
2138 static noinline_for_stack
2139 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2140 struct reloc_control *rc,
2141 struct btrfs_backref_node *node,
2142 struct btrfs_backref_edge *edges[])
2143 {
2144 struct btrfs_backref_node *next;
2145 struct btrfs_root *root;
2146 int index = 0;
2147 int ret;
2148
2149 next = node;
2150 while (1) {
2151 cond_resched();
2152 next = walk_up_backref(next, edges, &index);
2153 root = next->root;
2154
2155 /*
2156 * If there is no root, then our references for this block are
2157 * incomplete, as we should be able to walk all the way up to a
2158 * block that is owned by a root.
2159 *
2160 * This path is only for SHAREABLE roots, so if we come upon a
2161 * non-SHAREABLE root then we have backrefs that resolve
2162 * improperly.
2163 *
2164 * Both of these cases indicate file system corruption, or a bug
2165 * in the backref walking code.
2166 */
2167 if (!root) {
2168 ASSERT(0);
2169 btrfs_err(trans->fs_info,
2170 "bytenr %llu doesn't have a backref path ending in a root",
2171 node->bytenr);
2172 return ERR_PTR(-EUCLEAN);
2173 }
2174 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2175 ASSERT(0);
2176 btrfs_err(trans->fs_info,
2177 "bytenr %llu has multiple refs with one ending in a non-shareable root",
2178 node->bytenr);
2179 return ERR_PTR(-EUCLEAN);
2180 }
2181
2182 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2183 ret = record_reloc_root_in_trans(trans, root);
2184 if (ret)
2185 return ERR_PTR(ret);
2186 break;
2187 }
2188
2189 ret = btrfs_record_root_in_trans(trans, root);
2190 if (ret)
2191 return ERR_PTR(ret);
2192 root = root->reloc_root;
2193
2194 /*
2195 * We could have raced with another thread which failed, so
2196 * root->reloc_root may not be set, return ENOENT in this case.
2197 */
2198 if (!root)
2199 return ERR_PTR(-ENOENT);
2200
2201 if (next->new_bytenr != root->node->start) {
2202 /*
2203 * We just created the reloc root, so we shouldn't have
2204 * ->new_bytenr set and this shouldn't be in the changed
2205 * list. If it is then we have multiple roots pointing
2206 * at the same bytenr which indicates corruption, or
2207 * we've made a mistake in the backref walking code.
2208 */
2209 ASSERT(next->new_bytenr == 0);
2210 ASSERT(list_empty(&next->list));
2211 if (next->new_bytenr || !list_empty(&next->list)) {
2212 btrfs_err(trans->fs_info,
2213 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2214 node->bytenr, next->bytenr);
2215 return ERR_PTR(-EUCLEAN);
2216 }
2217
2218 next->new_bytenr = root->node->start;
2219 btrfs_put_root(next->root);
2220 next->root = btrfs_grab_root(root);
2221 ASSERT(next->root);
2222 list_add_tail(&next->list,
2223 &rc->backref_cache.changed);
2224 mark_block_processed(rc, next);
2225 break;
2226 }
2227
2228 WARN_ON(1);
2229 root = NULL;
2230 next = walk_down_backref(edges, &index);
2231 if (!next || next->level <= node->level)
2232 break;
2233 }
2234 if (!root) {
2235 /*
2236 * This can happen if there's fs corruption or if there's a bug
2237 * in the backref lookup code.
2238 */
2239 ASSERT(0);
2240 return ERR_PTR(-ENOENT);
2241 }
2242
2243 next = node;
2244 /* setup backref node path for btrfs_reloc_cow_block */
2245 while (1) {
2246 rc->backref_cache.path[next->level] = next;
2247 if (--index < 0)
2248 break;
2249 next = edges[index]->node[UPPER];
2250 }
2251 return root;
2252 }
2253
2254 /*
2255 * Select a tree root for relocation.
2256 *
2257 * Return NULL if the block is not shareable. We should use do_relocation() in
2258 * this case.
2259 *
2260 * Return a tree root pointer if the block is shareable.
2261 * Return -ENOENT if the block is the root of a reloc tree.
2262 */
2263 static noinline_for_stack
2264 struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2265 {
2266 struct btrfs_backref_node *next;
2267 struct btrfs_root *root;
2268 struct btrfs_root *fs_root = NULL;
2269 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2270 int index = 0;
2271
2272 next = node;
2273 while (1) {
2274 cond_resched();
2275 next = walk_up_backref(next, edges, &index);
2276 root = next->root;
2277
2278 /*
2279 * This can occur if we have incomplete extent refs leading all
2280 * the way up a particular path; in this case return -EUCLEAN.
2281 */
2282 if (!root)
2283 return ERR_PTR(-EUCLEAN);
2284
2285 /* No other choice for non-shareable tree */
2286 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2287 return root;
2288
2289 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2290 fs_root = root;
2291
2292 if (next != node)
2293 return NULL;
2294
2295 next = walk_down_backref(edges, &index);
2296 if (!next || next->level <= node->level)
2297 break;
2298 }
2299
2300 if (!fs_root)
2301 return ERR_PTR(-ENOENT);
2302 return fs_root;
2303 }
2304
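/*
 * Estimate the metadata involved in relocating @node: walk every
 * backref path upwards from @node and count one nodesize for each
 * block that has not been processed yet.  Callers double the result
 * to cover CoW of both copies (see reserve_metadata_space()).
 */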
2305 static noinline_for_stack
2306 u64 calcu_metadata_size(struct reloc_control *rc,
2307 struct btrfs_backref_node *node, int reserve)
2308 {
2309 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2310 struct btrfs_backref_node *next = node;
2311 struct btrfs_backref_edge *edge;
2312 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2313 u64 num_bytes = 0;
2314 int index = 0;
2315
2316 BUG_ON(reserve && node->processed);
2317
2318 while (next) {
2319 cond_resched();
2320 while (1) {
2321 if (next->processed && (reserve || next != node))
2322 break;
2323
2324 num_bytes += fs_info->nodesize;
2325
2326 if (list_empty(&next->upper))
2327 break;
2328
2329 edge = list_entry(next->upper.next,
2330 struct btrfs_backref_edge, list[LOWER]);
2331 edges[index++] = edge;
2332 next = edge->node[UPPER];
2333 }
2334 next = walk_down_backref(edges, &index);
2335 }
2336 return num_bytes;
2337 }
2338
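/*
 * Reserve metadata space for relocating @node.  Only limited flushing
 * is allowed since we are inside a transaction; on failure the block
 * rsv size is grown and -EAGAIN is returned so the caller can drop the
 * transaction, flush, and retry.
 */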
2339 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2340 struct reloc_control *rc,
2341 struct btrfs_backref_node *node)
2342 {
2343 struct btrfs_root *root = rc->extent_root;
2344 struct btrfs_fs_info *fs_info = root->fs_info;
2345 u64 num_bytes;
2346 int ret;
2347 u64 tmp;
2348
2349 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2350
2351 trans->block_rsv = rc->block_rsv;
2352 rc->reserved_bytes += num_bytes;
2353
2354 /*
2355 * We are under a transaction here so we can only do limited flushing.
2356 * If we get an enospc just kick back -EAGAIN so we know to drop the
2357 * transaction and try to refill when we can flush all the things.
2358 */
2359 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2360 BTRFS_RESERVE_FLUSH_LIMIT);
2361 if (ret) {
2362 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2363 while (tmp <= rc->reserved_bytes)
2364 tmp <<= 1;
2365 /*
2366 * Only one thread can access block_rsv at this point,
2367 * so we don't need to hold a lock to protect it.
2368 * Expand the reservation size here to allow enough
2369 * space for relocation, and return early in the
2370 * enospc case.
2371 */
2372 rc->block_rsv->size = tmp + fs_info->nodesize *
2373 RELOCATION_RESERVED_NODES;
2374 return -EAGAIN;
2375 }
2376
2377 return 0;
2378 }
2379
2380 /*
2381 * relocate a block tree, and then update pointers in upper level
2382 * blocks that reference the block to point to the new location.
2383 *
2384 * if called by link_to_upper, the block has already been relocated.
2385 * in that case this function just updates pointers.
2386 */
2387 static int do_relocation(struct btrfs_trans_handle *trans,
2388 struct reloc_control *rc,
2389 struct btrfs_backref_node *node,
2390 struct btrfs_key *key,
2391 struct btrfs_path *path, int lowest)
2392 {
2393 struct btrfs_backref_node *upper;
2394 struct btrfs_backref_edge *edge;
2395 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2396 struct btrfs_root *root;
2397 struct extent_buffer *eb;
2398 u32 blocksize;
2399 u64 bytenr;
2400 int slot;
2401 int ret = 0;
2402
2403 /*
2404 * If we are lowest then this is the first time we're processing this
2405 * block, and thus shouldn't have an eb associated with it yet.
2406 */
2407 ASSERT(!lowest || !node->eb);
2408
2409 path->lowest_level = node->level + 1;
2410 rc->backref_cache.path[node->level] = node;
2411 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2412 struct btrfs_ref ref = { 0 };
2413
2414 cond_resched();
2415
2416 upper = edge->node[UPPER];
2417 root = select_reloc_root(trans, rc, upper, edges);
2418 if (IS_ERR(root)) {
2419 ret = PTR_ERR(root);
2420 goto next;
2421 }
2422
2423 if (upper->eb && !upper->locked) {
2424 if (!lowest) {
2425 ret = btrfs_bin_search(upper->eb, key, &slot);
2426 if (ret < 0)
2427 goto next;
2428 BUG_ON(ret);
2429 bytenr = btrfs_node_blockptr(upper->eb, slot);
2430 if (node->eb->start == bytenr)
2431 goto next;
2432 }
2433 btrfs_backref_drop_node_buffer(upper);
2434 }
2435
2436 if (!upper->eb) {
2437 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2438 if (ret) {
2439 if (ret > 0)
2440 ret = -ENOENT;
2441
2442 btrfs_release_path(path);
2443 break;
2444 }
2445
2446 if (!upper->eb) {
2447 upper->eb = path->nodes[upper->level];
2448 path->nodes[upper->level] = NULL;
2449 } else {
2450 BUG_ON(upper->eb != path->nodes[upper->level]);
2451 }
2452
2453 upper->locked = 1;
2454 path->locks[upper->level] = 0;
2455
2456 slot = path->slots[upper->level];
2457 btrfs_release_path(path);
2458 } else {
2459 ret = btrfs_bin_search(upper->eb, key, &slot);
2460 if (ret < 0)
2461 goto next;
2462 BUG_ON(ret);
2463 }
2464
2465 bytenr = btrfs_node_blockptr(upper->eb, slot);
2466 if (lowest) {
2467 if (bytenr != node->bytenr) {
2468 btrfs_err(root->fs_info,
2469 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2470 bytenr, node->bytenr, slot,
2471 upper->eb->start);
2472 ret = -EIO;
2473 goto next;
2474 }
2475 } else {
2476 if (node->eb->start == bytenr)
2477 goto next;
2478 }
2479
2480 blocksize = root->fs_info->nodesize;
2481 eb = btrfs_read_node_slot(upper->eb, slot);
2482 if (IS_ERR(eb)) {
2483 ret = PTR_ERR(eb);
2484 goto next;
2485 }
2486 btrfs_tree_lock(eb);
2487
2488 if (!node->eb) {
2489 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2490 slot, &eb, BTRFS_NESTING_COW);
2491 btrfs_tree_unlock(eb);
2492 free_extent_buffer(eb);
2493 if (ret < 0)
2494 goto next;
2495 /*
2496 * We've just COWed this block, it should have updated
2497 * the correct backref node entry.
2498 */
2499 ASSERT(node->eb == eb);
2500 } else {
2501 btrfs_set_node_blockptr(upper->eb, slot,
2502 node->eb->start);
2503 btrfs_set_node_ptr_generation(upper->eb, slot,
2504 trans->transid);
2505 btrfs_mark_buffer_dirty(upper->eb);
2506
2507 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2508 node->eb->start, blocksize,
2509 upper->eb->start);
2510 btrfs_init_tree_ref(&ref, node->level,
2511 btrfs_header_owner(upper->eb),
2512 root->root_key.objectid, false);
2513 ret = btrfs_inc_extent_ref(trans, &ref);
2514 if (!ret)
2515 ret = btrfs_drop_subtree(trans, root, eb,
2516 upper->eb);
2517 if (ret)
2518 btrfs_abort_transaction(trans, ret);
2519 }
2520 next:
2521 if (!upper->pending)
2522 btrfs_backref_drop_node_buffer(upper);
2523 else
2524 btrfs_backref_unlock_node_buffer(upper);
2525 if (ret)
2526 break;
2527 }
2528
2529 if (!ret && node->pending) {
2530 btrfs_backref_drop_node_buffer(node);
2531 list_move_tail(&node->list, &rc->backref_cache.changed);
2532 node->pending = 0;
2533 }
2534
2535 path->lowest_level = 0;
2536
2537 /*
2538 * We should have allocated all of our space in the block rsv and thus
2539 * shouldn't ENOSPC.
2540 */
2541 ASSERT(ret != -ENOSPC);
2542 return ret;
2543 }
2544
2545 static int link_to_upper(struct btrfs_trans_handle *trans,
2546 struct reloc_control *rc,
2547 struct btrfs_backref_node *node,
2548 struct btrfs_path *path)
2549 {
2550 struct btrfs_key key;
2551
2552 btrfs_node_key_to_cpu(node->eb, &key, 0);
2553 return do_relocation(trans, rc, node, &key, path, 0);
2554 }
2555
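/*
 * Drain the per-level pending lists: link every pending block to its
 * upper blocks (skipped once an error has been seen), then splice the
 * nodes back so they are released together with the backref cache.
 */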
2556 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2557 struct reloc_control *rc,
2558 struct btrfs_path *path, int err)
2559 {
2560 LIST_HEAD(list);
2561 struct btrfs_backref_cache *cache = &rc->backref_cache;
2562 struct btrfs_backref_node *node;
2563 int level;
2564 int ret;
2565
2566 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2567 while (!list_empty(&cache->pending[level])) {
2568 node = list_entry(cache->pending[level].next,
2569 struct btrfs_backref_node, list);
2570 list_move_tail(&node->list, &list);
2571 BUG_ON(!node->pending);
2572
2573 if (!err) {
2574 ret = link_to_upper(trans, rc, node, path);
2575 if (ret < 0)
2576 err = ret;
2577 }
2578 }
2579 list_splice_init(&list, &cache->pending[level]);
2580 }
2581 return err;
2582 }
2583
2584 /*
2585 * Mark a block, and all blocks that directly/indirectly reference it,
2586 * as processed.
2587 */
2588 static void update_processed_blocks(struct reloc_control *rc,
2589 struct btrfs_backref_node *node)
2590 {
2591 struct btrfs_backref_node *next = node;
2592 struct btrfs_backref_edge *edge;
2593 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2594 int index = 0;
2595
2596 while (next) {
2597 cond_resched();
2598 while (1) {
2599 if (next->processed)
2600 break;
2601
2602 mark_block_processed(rc, next);
2603
2604 if (list_empty(&next->upper))
2605 break;
2606
2607 edge = list_entry(next->upper.next,
2608 struct btrfs_backref_edge, list[LOWER]);
2609 edges[index++] = edge;
2610 next = edge->node[UPPER];
2611 }
2612 next = walk_down_backref(edges, &index);
2613 }
2614 }
2615
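/*
 * A tree block counts as processed once its range is marked
 * EXTENT_DIRTY in rc->processed_blocks (see mark_block_processed()).
 */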
2616 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2617 {
2618 u32 blocksize = rc->extent_root->fs_info->nodesize;
2619
2620 if (test_range_bit(&rc->processed_blocks, bytenr,
2621 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2622 return 1;
2623 return 0;
2624 }
2625
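/*
 * Read the tree block and cache its first key in @block; the key is
 * later used to search the owning tree during relocation.
 */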
2626 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2627 struct tree_block *block)
2628 {
2629 struct extent_buffer *eb;
2630
2631 eb = read_tree_block(fs_info, block->bytenr, block->owner,
2632 block->key.offset, block->level, NULL);
2633 if (IS_ERR(eb))
2634 return PTR_ERR(eb);
2635 if (!extent_buffer_uptodate(eb)) {
2636 free_extent_buffer(eb);
2637 return -EIO;
2638 }
2639 if (block->level == 0)
2640 btrfs_item_key_to_cpu(eb, &block->key, 0);
2641 else
2642 btrfs_node_key_to_cpu(eb, &block->key, 0);
2643 free_extent_buffer(eb);
2644 block->key_ready = 1;
2645 return 0;
2646 }
2647
2648 /*
2649 * helper function to relocate a tree block
2650 */
2651 static int relocate_tree_block(struct btrfs_trans_handle *trans,
2652 struct reloc_control *rc,
2653 struct btrfs_backref_node *node,
2654 struct btrfs_key *key,
2655 struct btrfs_path *path)
2656 {
2657 struct btrfs_root *root;
2658 int ret = 0;
2659
2660 if (!node)
2661 return 0;
2662
2663 /*
2664 * If we fail here we want to drop our backref_node because we are going
2665 * to start over and regenerate the tree for it.
2666 */
2667 ret = reserve_metadata_space(trans, rc, node);
2668 if (ret)
2669 goto out;
2670
2671 BUG_ON(node->processed);
2672 root = select_one_root(node);
2673 if (IS_ERR(root)) {
2674 ret = PTR_ERR(root);
2675
2676 /* See explanation in select_one_root for the -EUCLEAN case. */
2677 ASSERT(ret == -ENOENT);
2678 if (ret == -ENOENT) {
2679 ret = 0;
2680 update_processed_blocks(rc, node);
2681 }
2682 goto out;
2683 }
2684
2685 if (root) {
2686 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2687 /*
2688 * This block was the root block of a root, and this is
2689 * the first time we're processing the block and thus it
2690 * should not have had the ->new_bytenr modified and
2691 * should have not been included on the changed list.
2692 *
2693 * However in the case of corruption we could have
2694 * multiple refs pointing to the same block improperly,
2695 * and thus we would trip over these checks. ASSERT()
2696 * for the developer case, because it could indicate a
2697 * bug in the backref code, however error out for a
2698 * normal user in the case of corruption.
2699 */
2700 ASSERT(node->new_bytenr == 0);
2701 ASSERT(list_empty(&node->list));
2702 if (node->new_bytenr || !list_empty(&node->list)) {
2703 btrfs_err(root->fs_info,
2704 "bytenr %llu has improper references to it",
2705 node->bytenr);
2706 ret = -EUCLEAN;
2707 goto out;
2708 }
2709 ret = btrfs_record_root_in_trans(trans, root);
2710 if (ret)
2711 goto out;
2712 /*
2713 * Another thread could have failed, need to check if we
2714 * have reloc_root actually set.
2715 */
2716 if (!root->reloc_root) {
2717 ret = -ENOENT;
2718 goto out;
2719 }
2720 root = root->reloc_root;
2721 node->new_bytenr = root->node->start;
2722 btrfs_put_root(node->root);
2723 node->root = btrfs_grab_root(root);
2724 ASSERT(node->root);
2725 list_add_tail(&node->list, &rc->backref_cache.changed);
2726 } else {
2727 path->lowest_level = node->level;
2728 if (root == root->fs_info->chunk_root)
2729 btrfs_reserve_chunk_metadata(trans, false);
2730 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2731 btrfs_release_path(path);
2732 if (root == root->fs_info->chunk_root)
2733 btrfs_trans_release_chunk_metadata(trans);
2734 if (ret > 0)
2735 ret = 0;
2736 }
2737 if (!ret)
2738 update_processed_blocks(rc, node);
2739 } else {
2740 ret = do_relocation(trans, rc, node, key, path, 1);
2741 }
2742 out:
2743 if (ret || node->level == 0 || node->cowonly)
2744 btrfs_backref_cleanup_node(&rc->backref_cache, node);
2745 return ret;
2746 }
2747
2748 /*
2749 * relocate a list of blocks
2750 */
2751 static noinline_for_stack
2752 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2753 struct reloc_control *rc, struct rb_root *blocks)
2754 {
2755 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2756 struct btrfs_backref_node *node;
2757 struct btrfs_path *path;
2758 struct tree_block *block;
2759 struct tree_block *next;
2760 int ret;
2761 int err = 0;
2762
2763 path = btrfs_alloc_path();
2764 if (!path) {
2765 err = -ENOMEM;
2766 goto out_free_blocks;
2767 }
2768
2769 /* Kick in readahead for tree blocks with missing keys */
2770 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2771 if (!block->key_ready)
2772 btrfs_readahead_tree_block(fs_info, block->bytenr,
2773 block->owner, 0,
2774 block->level);
2775 }
2776
2777 /* Get first keys */
2778 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2779 if (!block->key_ready) {
2780 err = get_tree_block_key(fs_info, block);
2781 if (err)
2782 goto out_free_path;
2783 }
2784 }
2785
2786 /* Do tree relocation */
2787 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2788 node = build_backref_tree(rc, &block->key,
2789 block->level, block->bytenr);
2790 if (IS_ERR(node)) {
2791 err = PTR_ERR(node);
2792 goto out;
2793 }
2794
2795 ret = relocate_tree_block(trans, rc, node, &block->key,
2796 path);
2797 if (ret < 0) {
2798 err = ret;
2799 break;
2800 }
2801 }
2802 out:
2803 err = finish_pending_nodes(trans, rc, path, err);
2804
2805 out_free_path:
2806 btrfs_free_path(path);
2807 out_free_blocks:
2808 free_block_list(blocks);
2809 return err;
2810 }
2811
2812 static noinline_for_stack int prealloc_file_extent_cluster(
2813 struct btrfs_inode *inode,
2814 struct file_extent_cluster *cluster)
2815 {
2816 u64 alloc_hint = 0;
2817 u64 start;
2818 u64 end;
2819 u64 offset = inode->index_cnt;
2820 u64 num_bytes;
2821 int nr;
2822 int ret = 0;
2823 u64 i_size = i_size_read(&inode->vfs_inode);
2824 u64 prealloc_start = cluster->start - offset;
2825 u64 prealloc_end = cluster->end - offset;
2826 u64 cur_offset = prealloc_start;
2827
2828 /*
2829 * For subpage case, previous i_size may not be aligned to PAGE_SIZE.
2830 * This means the range [i_size, PAGE_END + 1) is filled with zeros by
2831 * the btrfs_do_readpage() call for the previously relocated file cluster.
2832 *
2833 * If the current cluster starts in the above range, btrfs_do_readpage()
2834 * will skip the read, and relocate_one_page() will later write back
2835 * the padding zeros as new data, causing data corruption.
2836 *
2837 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
2838 */
2839 if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
2840 struct address_space *mapping = inode->vfs_inode.i_mapping;
2841 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2842 const u32 sectorsize = fs_info->sectorsize;
2843 struct page *page;
2844
2845 ASSERT(sectorsize < PAGE_SIZE);
2846 ASSERT(IS_ALIGNED(i_size, sectorsize));
2847
2848 /*
2849 * Subpage can't handle page with DIRTY but without UPTODATE
2850 * bit as it can lead to the following deadlock:
2851 *
2852 * btrfs_read_folio()
2853 * | Page already *locked*
2854 * |- btrfs_lock_and_flush_ordered_range()
2855 * |- btrfs_start_ordered_extent()
2856 * |- extent_write_cache_pages()
2857 * |- lock_page()
2858 * We try to lock the page we already hold.
2859 *
2860 * Here we just write back the whole data reloc inode, so that
2861 * we are guaranteed to have no dirty range in the page, and
2862 * are safe to clear the uptodate bits.
2863 *
2864 * This shouldn't cause too much overhead, as we need to write
2865 * the data back anyway.
2866 */
2867 ret = filemap_write_and_wait(mapping);
2868 if (ret < 0)
2869 return ret;
2870
2871 clear_extent_bits(&inode->io_tree, i_size,
2872 round_up(i_size, PAGE_SIZE) - 1,
2873 EXTENT_UPTODATE);
2874 page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
2875 /*
2876 * If the page has been freed we don't need to do anything, as we
2877 * will re-read the whole page anyway.
2878 */
2879 if (page) {
2880 btrfs_subpage_clear_uptodate(fs_info, page, i_size,
2881 round_up(i_size, PAGE_SIZE) - i_size);
2882 unlock_page(page);
2883 put_page(page);
2884 }
2885 }
2886
2887 BUG_ON(cluster->start != cluster->boundary[0]);
2888 ret = btrfs_alloc_data_chunk_ondemand(inode,
2889 prealloc_end + 1 - prealloc_start);
2890 if (ret)
2891 return ret;
2892
2893 btrfs_inode_lock(&inode->vfs_inode, 0);
2894 for (nr = 0; nr < cluster->nr; nr++) {
2895 start = cluster->boundary[nr] - offset;
2896 if (nr + 1 < cluster->nr)
2897 end = cluster->boundary[nr + 1] - 1 - offset;
2898 else
2899 end = cluster->end - offset;
2900
2901 lock_extent(&inode->io_tree, start, end, NULL);
2902 num_bytes = end + 1 - start;
2903 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2904 num_bytes, num_bytes,
2905 end + 1, &alloc_hint);
2906 cur_offset = end + 1;
2907 unlock_extent(&inode->io_tree, start, end, NULL);
2908 if (ret)
2909 break;
2910 }
2911 btrfs_inode_unlock(&inode->vfs_inode, 0);
2912
2913 if (cur_offset < prealloc_end)
2914 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2915 prealloc_end + 1 - cur_offset);
2916 return ret;
2917 }
2918
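/*
 * Insert a pinned extent map that maps the cluster's range in the data
 * reloc inode to the extent's current logical address, so page reads
 * fetch the old data before it is dirtied and written back into the
 * preallocated new location.
 */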
2919 static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
2920 u64 start, u64 end, u64 block_start)
2921 {
2922 struct extent_map *em;
2923 int ret = 0;
2924
2925 em = alloc_extent_map();
2926 if (!em)
2927 return -ENOMEM;
2928
2929 em->start = start;
2930 em->len = end + 1 - start;
2931 em->block_len = em->len;
2932 em->block_start = block_start;
2933 set_bit(EXTENT_FLAG_PINNED, &em->flags);
2934
2935 lock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
2936 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
2937 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
2938 free_extent_map(em);
2939
2940 return ret;
2941 }
2942
2943 /*
2944 * Allow error injection to test balance/relocation cancellation
2945 */
2946 noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
2947 {
2948 return atomic_read(&fs_info->balance_cancel_req) ||
2949 atomic_read(&fs_info->reloc_cancel_req) ||
2950 fatal_signal_pending(current);
2951 }
2952 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2953
2954 static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
2955 int cluster_nr)
2956 {
2957 /* Last extent, use cluster end directly */
2958 if (cluster_nr >= cluster->nr - 1)
2959 return cluster->end;
2960
2961 /* Otherwise end one byte before the next boundary's start */
2962 return cluster->boundary[cluster_nr + 1] - 1;
2963 }
2964
2965 static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
2966 struct file_extent_cluster *cluster,
2967 int *cluster_nr, unsigned long page_index)
2968 {
2969 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2970 u64 offset = BTRFS_I(inode)->index_cnt;
2971 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
2972 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2973 struct page *page;
2974 u64 page_start;
2975 u64 page_end;
2976 u64 cur;
2977 int ret;
2978
2979 ASSERT(page_index <= last_index);
2980 page = find_lock_page(inode->i_mapping, page_index);
2981 if (!page) {
2982 page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2983 page_index, last_index + 1 - page_index);
2984 page = find_or_create_page(inode->i_mapping, page_index, mask);
2985 if (!page)
2986 return -ENOMEM;
2987 }
2988
2989 if (PageReadahead(page))
2990 page_cache_async_readahead(inode->i_mapping, ra, NULL,
2991 page_folio(page), page_index,
2992 last_index + 1 - page_index);
2993
2994 if (!PageUptodate(page)) {
2995 btrfs_read_folio(NULL, page_folio(page));
2996 lock_page(page);
2997 if (!PageUptodate(page)) {
2998 ret = -EIO;
2999 goto release_page;
3000 }
3001 }
3002
3003 /*
3004 * We could have lost page private when we dropped the lock to read the
3005 * page above, make sure we set_page_extent_mapped here so we have any
3006 * of the subpage blocksize stuff we need in place.
3007 */
3008 ret = set_page_extent_mapped(page);
3009 if (ret < 0)
3010 goto release_page;
3011
3012 page_start = page_offset(page);
3013 page_end = page_start + PAGE_SIZE - 1;
3014
3015 /*
3016 * Start from the cluster boundary, as in the subpage case the cluster
3017 * can start inside the page.
3018 */
3019 cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
3020 while (cur <= page_end) {
3021 u64 extent_start = cluster->boundary[*cluster_nr] - offset;
3022 u64 extent_end = get_cluster_boundary_end(cluster,
3023 *cluster_nr) - offset;
3024 u64 clamped_start = max(page_start, extent_start);
3025 u64 clamped_end = min(page_end, extent_end);
3026 u32 clamped_len = clamped_end + 1 - clamped_start;
3027
3028 /* Reserve metadata for this range */
3029 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
3030 clamped_len, clamped_len,
3031 false);
3032 if (ret)
3033 goto release_page;
3034
3035 /* Mark the range delalloc and dirty for later writeback */
3036 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, NULL);
3037 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
3038 clamped_end, 0, NULL);
3039 if (ret) {
3040 clear_extent_bits(&BTRFS_I(inode)->io_tree,
3041 clamped_start, clamped_end,
3042 EXTENT_LOCKED | EXTENT_BOUNDARY);
3043 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3044 clamped_len, true);
3045 btrfs_delalloc_release_extents(BTRFS_I(inode),
3046 clamped_len);
3047 goto release_page;
3048 }
3049 btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
3050
3051 /*
3052 * Set the boundary if it's inside the page.
3053 * Data relocation requires the destination extents to have the
3054 * same size as the source.
3055 * EXTENT_BOUNDARY bit prevents current extent from being merged
3056 * with previous extent.
3057 */
3058 if (in_range(cluster->boundary[*cluster_nr] - offset,
3059 page_start, PAGE_SIZE)) {
3060 u64 boundary_start = cluster->boundary[*cluster_nr] -
3061 offset;
3062 u64 boundary_end = boundary_start +
3063 fs_info->sectorsize - 1;
3064
3065 set_extent_bits(&BTRFS_I(inode)->io_tree,
3066 boundary_start, boundary_end,
3067 EXTENT_BOUNDARY);
3068 }
3069 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, NULL);
3070 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
3071 cur += clamped_len;
3072
3073 /* Crossed extent end, go to next extent */
3074 if (cur >= extent_end) {
3075 (*cluster_nr)++;
3076 /* Just finished the last extent of the cluster, exit. */
3077 if (*cluster_nr >= cluster->nr)
3078 break;
3079 }
3080 }
3081 unlock_page(page);
3082 put_page(page);
3083
3084 balance_dirty_pages_ratelimited(inode->i_mapping);
3085 btrfs_throttle(fs_info);
3086 if (btrfs_should_cancel_balance(fs_info))
3087 ret = -ECANCELED;
3088 return ret;
3089
3090 release_page:
3091 unlock_page(page);
3092 put_page(page);
3093 return ret;
3094 }
3095
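/*
 * Relocate a whole cluster: preallocate the destination space, map the
 * file range to the old data location, then read and dirty every page
 * so writeback copies the data into the new location.
 */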
3096 static int relocate_file_extent_cluster(struct inode *inode,
3097 struct file_extent_cluster *cluster)
3098 {
3099 u64 offset = BTRFS_I(inode)->index_cnt;
3100 unsigned long index;
3101 unsigned long last_index;
3102 struct file_ra_state *ra;
3103 int cluster_nr = 0;
3104 int ret = 0;
3105
3106 if (!cluster->nr)
3107 return 0;
3108
3109 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3110 if (!ra)
3111 return -ENOMEM;
3112
3113 ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
3114 if (ret)
3115 goto out;
3116
3117 file_ra_state_init(ra, inode->i_mapping);
3118
3119 ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
3120 cluster->end - offset, cluster->start);
3121 if (ret)
3122 goto out;
3123
3124 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3125 for (index = (cluster->start - offset) >> PAGE_SHIFT;
3126 index <= last_index && !ret; index++)
3127 ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
3128 if (ret == 0)
3129 WARN_ON(cluster_nr != cluster->nr);
3130 out:
3131 kfree(ra);
3132 return ret;
3133 }
3134
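/*
 * Queue a data extent for relocation, batching contiguous extents into
 * a cluster of at most MAX_EXTENTS members before copying them in one
 * go:
 *
 *  boundary[0]      boundary[1]        boundary[nr-1]
 *  |<- extent 0 ->||<- extent 1 ->|...|<- extent nr-1 ->|
 *  ^cluster->start                      cluster->end^
 *
 * The cluster is flushed once it is full or the next extent does not
 * start at cluster->end + 1.
 */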
3135 static noinline_for_stack
3136 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3137 struct file_extent_cluster *cluster)
3138 {
3139 int ret;
3140
3141 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3142 ret = relocate_file_extent_cluster(inode, cluster);
3143 if (ret)
3144 return ret;
3145 cluster->nr = 0;
3146 }
3147
3148 if (!cluster->nr)
3149 cluster->start = extent_key->objectid;
3150 else
3151 BUG_ON(cluster->nr >= MAX_EXTENTS);
3152 cluster->end = extent_key->objectid + extent_key->offset - 1;
3153 cluster->boundary[cluster->nr] = extent_key->objectid;
3154 cluster->nr++;
3155
3156 if (cluster->nr >= MAX_EXTENTS) {
3157 ret = relocate_file_extent_cluster(inode, cluster);
3158 if (ret)
3159 return ret;
3160 cluster->nr = 0;
3161 }
3162 return 0;
3163 }
3164
3165 /*
3166 * helper to add a tree block to the list.
3167 * the major work is getting the generation and level of the block
3168 */
3169 static int add_tree_block(struct reloc_control *rc,
3170 struct btrfs_key *extent_key,
3171 struct btrfs_path *path,
3172 struct rb_root *blocks)
3173 {
3174 struct extent_buffer *eb;
3175 struct btrfs_extent_item *ei;
3176 struct btrfs_tree_block_info *bi;
3177 struct tree_block *block;
3178 struct rb_node *rb_node;
3179 u32 item_size;
3180 int level = -1;
3181 u64 generation;
3182 u64 owner = 0;
3183
3184 eb = path->nodes[0];
3185 item_size = btrfs_item_size(eb, path->slots[0]);
3186
3187 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3188 item_size >= sizeof(*ei) + sizeof(*bi)) {
3189 unsigned long ptr = 0, end;
3190
3191 ei = btrfs_item_ptr(eb, path->slots[0],
3192 struct btrfs_extent_item);
3193 end = (unsigned long)ei + item_size;
3194 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3195 bi = (struct btrfs_tree_block_info *)(ei + 1);
3196 level = btrfs_tree_block_level(eb, bi);
3197 ptr = (unsigned long)(bi + 1);
3198 } else {
3199 level = (int)extent_key->offset;
3200 ptr = (unsigned long)(ei + 1);
3201 }
3202 generation = btrfs_extent_generation(eb, ei);
3203
3204 /*
3205 * We're reading random blocks without knowing their owner ahead
3206 * of time. This is ok most of the time, as all reloc roots and
3207 * fs roots have the same lock type. However normal trees do
3208 * not, and the only way to know ahead of time is to read the
3209 * inline ref offset. We know it's an fs root if
3210 *
3211 * 1. There's more than one ref.
3212 * 2. There's a SHARED_DATA_REF_KEY set.
3213 * 3. FULL_BACKREF is set on the flags.
3214 *
3215 * Otherwise it's safe to assume that the ref offset == the
3216 * owner of this block, so we can use that when calling
3217 * read_tree_block.
3218 */
3219 if (btrfs_extent_refs(eb, ei) == 1 &&
3220 !(btrfs_extent_flags(eb, ei) &
3221 BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3222 ptr < end) {
3223 struct btrfs_extent_inline_ref *iref;
3224 int type;
3225
3226 iref = (struct btrfs_extent_inline_ref *)ptr;
3227 type = btrfs_get_extent_inline_ref_type(eb, iref,
3228 BTRFS_REF_TYPE_BLOCK);
3229 if (type == BTRFS_REF_TYPE_INVALID)
3230 return -EINVAL;
3231 if (type == BTRFS_TREE_BLOCK_REF_KEY)
3232 owner = btrfs_extent_inline_ref_offset(eb, iref);
3233 }
3234 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3235 btrfs_print_v0_err(eb->fs_info);
3236 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3237 return -EINVAL;
3238 } else {
3239 BUG();
3240 }
3241
3242 btrfs_release_path(path);
3243
3244 BUG_ON(level == -1);
3245
3246 block = kmalloc(sizeof(*block), GFP_NOFS);
3247 if (!block)
3248 return -ENOMEM;
3249
3250 block->bytenr = extent_key->objectid;
3251 block->key.objectid = rc->extent_root->fs_info->nodesize;
3252 block->key.offset = generation;
3253 block->level = level;
3254 block->key_ready = 0;
3255 block->owner = owner;
3256
3257 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3258 if (rb_node)
3259 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3260 -EEXIST);
3261
3262 return 0;
3263 }
3264
3265 /*
3266 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3267 */
3268 static int __add_tree_block(struct reloc_control *rc,
3269 u64 bytenr, u32 blocksize,
3270 struct rb_root *blocks)
3271 {
3272 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3273 struct btrfs_path *path;
3274 struct btrfs_key key;
3275 int ret;
3276 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3277
3278 if (tree_block_processed(bytenr, rc))
3279 return 0;
3280
3281 if (rb_simple_search(blocks, bytenr))
3282 return 0;
3283
3284 path = btrfs_alloc_path();
3285 if (!path)
3286 return -ENOMEM;
3287 again:
3288 key.objectid = bytenr;
3289 if (skinny) {
3290 key.type = BTRFS_METADATA_ITEM_KEY;
3291 key.offset = (u64)-1;
3292 } else {
3293 key.type = BTRFS_EXTENT_ITEM_KEY;
3294 key.offset = blocksize;
3295 }
3296
3297 path->search_commit_root = 1;
3298 path->skip_locking = 1;
3299 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3300 if (ret < 0)
3301 goto out;
3302
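/*
 * With skinny metadata the block may be indexed by a METADATA_ITEM_KEY
 * (offset == level) instead of a full EXTENT_ITEM_KEY.  If neither form
 * is found at this bytenr, retry once with the non-skinny key.
 */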
3303 if (ret > 0 && skinny) {
3304 if (path->slots[0]) {
3305 path->slots[0]--;
3306 btrfs_item_key_to_cpu(path->nodes[0], &key,
3307 path->slots[0]);
3308 if (key.objectid == bytenr &&
3309 (key.type == BTRFS_METADATA_ITEM_KEY ||
3310 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3311 key.offset == blocksize)))
3312 ret = 0;
3313 }
3314
3315 if (ret) {
3316 skinny = false;
3317 btrfs_release_path(path);
3318 goto again;
3319 }
3320 }
3321 if (ret) {
3322 ASSERT(ret == 1);
3323 btrfs_print_leaf(path->nodes[0]);
3324 btrfs_err(fs_info,
3325 "tree block extent item (%llu) is not found in extent tree",
3326 bytenr);
3327 WARN_ON(1);
3328 ret = -EINVAL;
3329 goto out;
3330 }
3331
3332 ret = add_tree_block(rc, &key, path, blocks);
3333 out:
3334 btrfs_free_path(path);
3335 return ret;
3336 }
3337
3338 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3339 struct btrfs_block_group *block_group,
3340 struct inode *inode,
3341 u64 ino)
3342 {
3343 struct btrfs_root *root = fs_info->tree_root;
3344 struct btrfs_trans_handle *trans;
3345 int ret = 0;
3346
3347 if (inode)
3348 goto truncate;
3349
3350 inode = btrfs_iget(fs_info->sb, ino, root);
3351 if (IS_ERR(inode))
3352 return -ENOENT;
3353
3354 truncate:
3355 ret = btrfs_check_trunc_cache_free_space(fs_info,
3356 &fs_info->global_block_rsv);
3357 if (ret)
3358 goto out;
3359
3360 trans = btrfs_join_transaction(root);
3361 if (IS_ERR(trans)) {
3362 ret = PTR_ERR(trans);
3363 goto out;
3364 }
3365
3366 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3367
3368 btrfs_end_transaction(trans);
3369 btrfs_btree_balance_dirty(fs_info);
3370 out:
3371 iput(inode);
3372 return ret;
3373 }
3374
3375 /*
3376 * Locate the free space cache EXTENT_DATA in the root tree leaf and delete the
3377 * cache inode, so the free space cache data extent does not block data relocation.
3378 */
3379 static int delete_v1_space_cache(struct extent_buffer *leaf,
3380 struct btrfs_block_group *block_group,
3381 u64 data_bytenr)
3382 {
3383 u64 space_cache_ino;
3384 struct btrfs_file_extent_item *ei;
3385 struct btrfs_key key;
3386 bool found = false;
3387 int i;
3388 int ret;
3389
3390 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3391 return 0;
3392
3393 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3394 u8 type;
3395
3396 btrfs_item_key_to_cpu(leaf, &key, i);
3397 if (key.type != BTRFS_EXTENT_DATA_KEY)
3398 continue;
3399 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3400 type = btrfs_file_extent_type(leaf, ei);
3401
3402 if ((type == BTRFS_FILE_EXTENT_REG ||
3403 type == BTRFS_FILE_EXTENT_PREALLOC) &&
3404 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3405 found = true;
3406 space_cache_ino = key.objectid;
3407 break;
3408 }
3409 }
3410 if (!found)
3411 return -ENOENT;
3412 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3413 space_cache_ino);
3414 return ret;
3415 }
3416
3417 /*
3418 * helper to find all tree blocks that reference a given data extent
3419 */
3420 static noinline_for_stack
3421 int add_data_references(struct reloc_control *rc,
3422 struct btrfs_key *extent_key,
3423 struct btrfs_path *path,
3424 struct rb_root *blocks)
3425 {
3426 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3427 struct ulist *leaves = NULL;
3428 struct ulist_iterator leaf_uiter;
3429 struct ulist_node *ref_node = NULL;
3430 const u32 blocksize = fs_info->nodesize;
3431 int ret = 0;
3432
3433 btrfs_release_path(path);
3434 ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
3435 0, &leaves, NULL, true);
3436 if (ret < 0)
3437 return ret;
3438
3439 ULIST_ITER_INIT(&leaf_uiter);
3440 while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
3441 struct extent_buffer *eb;
3442
3443 eb = read_tree_block(fs_info, ref_node->val, 0, 0, 0, NULL);
3444 if (IS_ERR(eb)) {
3445 ret = PTR_ERR(eb);
3446 break;
3447 }
3448 ret = delete_v1_space_cache(eb, rc->block_group,
3449 extent_key->objectid);
3450 free_extent_buffer(eb);
3451 if (ret < 0)
3452 break;
3453 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3454 if (ret < 0)
3455 break;
3456 }
3457 if (ret < 0)
3458 free_block_list(blocks);
3459 ulist_free(leaves);
3460 return ret;
3461 }
3462
3463 /*
3464 * helper to find next unprocessed extent
3465 */
3466 static noinline_for_stack
3467 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3468 struct btrfs_key *extent_key)
3469 {
3470 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3471 struct btrfs_key key;
3472 struct extent_buffer *leaf;
3473 u64 start, end, last;
3474 int ret;
3475
3476 last = rc->block_group->start + rc->block_group->length;
3477 while (1) {
3478 cond_resched();
3479 if (rc->search_start >= last) {
3480 ret = 1;
3481 break;
3482 }
3483
3484 key.objectid = rc->search_start;
3485 key.type = BTRFS_EXTENT_ITEM_KEY;
3486 key.offset = 0;
3487
3488 path->search_commit_root = 1;
3489 path->skip_locking = 1;
3490 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3491 0, 0);
3492 if (ret < 0)
3493 break;
3494 next:
3495 leaf = path->nodes[0];
3496 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3497 ret = btrfs_next_leaf(rc->extent_root, path);
3498 if (ret != 0)
3499 break;
3500 leaf = path->nodes[0];
3501 }
3502
3503 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3504 if (key.objectid >= last) {
3505 ret = 1;
3506 break;
3507 }
3508
3509 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3510 key.type != BTRFS_METADATA_ITEM_KEY) {
3511 path->slots[0]++;
3512 goto next;
3513 }
3514
3515 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3516 key.objectid + key.offset <= rc->search_start) {
3517 path->slots[0]++;
3518 goto next;
3519 }
3520
3521 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3522 key.objectid + fs_info->nodesize <=
3523 rc->search_start) {
3524 path->slots[0]++;
3525 goto next;
3526 }
3527
3528 ret = find_first_extent_bit(&rc->processed_blocks,
3529 key.objectid, &start, &end,
3530 EXTENT_DIRTY, NULL);
3531
3532 if (ret == 0 && start <= key.objectid) {
3533 btrfs_release_path(path);
3534 rc->search_start = end + 1;
3535 } else {
3536 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3537 rc->search_start = key.objectid + key.offset;
3538 else
3539 rc->search_start = key.objectid +
3540 fs_info->nodesize;
3541 memcpy(extent_key, &key, sizeof(key));
3542 return 0;
3543 }
3544 }
3545 btrfs_release_path(path);
3546 return ret;
3547 }
3548
3549 static void set_reloc_control(struct reloc_control *rc)
3550 {
3551 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3552
3553 mutex_lock(&fs_info->reloc_mutex);
3554 fs_info->reloc_ctl = rc;
3555 mutex_unlock(&fs_info->reloc_mutex);
3556 }
3557
3558 static void unset_reloc_control(struct reloc_control *rc)
3559 {
3560 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3561
3562 mutex_lock(&fs_info->reloc_mutex);
3563 fs_info->reloc_ctl = NULL;
3564 mutex_unlock(&fs_info->reloc_mutex);
3565 }
3566
3567 static noinline_for_stack
3568 int prepare_to_relocate(struct reloc_control *rc)
3569 {
3570 struct btrfs_trans_handle *trans;
3571 int ret;
3572
3573 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3574 BTRFS_BLOCK_RSV_TEMP);
3575 if (!rc->block_rsv)
3576 return -ENOMEM;
3577
3578 memset(&rc->cluster, 0, sizeof(rc->cluster));
3579 rc->search_start = rc->block_group->start;
3580 rc->extents_found = 0;
3581 rc->nodes_relocated = 0;
3582 rc->merging_rsv_size = 0;
3583 rc->reserved_bytes = 0;
3584 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3585 RELOCATION_RESERVED_NODES;
3586 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3587 rc->block_rsv, rc->block_rsv->size,
3588 BTRFS_RESERVE_FLUSH_ALL);
3589 if (ret)
3590 return ret;
3591
3592 rc->create_reloc_tree = 1;
3593 set_reloc_control(rc);
3594
3595 trans = btrfs_join_transaction(rc->extent_root);
3596 if (IS_ERR(trans)) {
3597 unset_reloc_control(rc);
3598 /*
3599 * The extent tree is not a ref_cow tree and has no reloc_root to
3600 * clean up.  Callers are responsible for freeing the above
3601 * block rsv.
3602 */
3603 return PTR_ERR(trans);
3604 }
3605
3606 ret = btrfs_commit_transaction(trans);
3607 if (ret)
3608 unset_reloc_control(rc);
3609
3610 return ret;
3611 }
3612
3613 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3614 {
3615 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3616 struct rb_root blocks = RB_ROOT;
3617 struct btrfs_key key;
3618 struct btrfs_trans_handle *trans = NULL;
3619 struct btrfs_path *path;
3620 struct btrfs_extent_item *ei;
3621 u64 flags;
3622 int ret;
3623 int err = 0;
3624 int progress = 0;
3625
3626 path = btrfs_alloc_path();
3627 if (!path)
3628 return -ENOMEM;
3629 path->reada = READA_FORWARD;
3630
3631 ret = prepare_to_relocate(rc);
3632 if (ret) {
3633 err = ret;
3634 goto out_free;
3635 }
3636
3637 while (1) {
3638 rc->reserved_bytes = 0;
3639 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3640 rc->block_rsv->size,
3641 BTRFS_RESERVE_FLUSH_ALL);
3642 if (ret) {
3643 err = ret;
3644 break;
3645 }
3646 progress++;
3647 trans = btrfs_start_transaction(rc->extent_root, 0);
3648 if (IS_ERR(trans)) {
3649 err = PTR_ERR(trans);
3650 trans = NULL;
3651 break;
3652 }
3653 restart:
3654 if (update_backref_cache(trans, &rc->backref_cache)) {
3655 btrfs_end_transaction(trans);
3656 trans = NULL;
3657 continue;
3658 }
3659
3660 ret = find_next_extent(rc, path, &key);
3661 if (ret < 0)
3662 err = ret;
3663 if (ret != 0)
3664 break;
3665
3666 rc->extents_found++;
3667
3668 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3669 struct btrfs_extent_item);
3670 flags = btrfs_extent_flags(path->nodes[0], ei);
3671
3672 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3673 ret = add_tree_block(rc, &key, path, &blocks);
3674 } else if (rc->stage == UPDATE_DATA_PTRS &&
3675 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3676 ret = add_data_references(rc, &key, path, &blocks);
3677 } else {
3678 btrfs_release_path(path);
3679 ret = 0;
3680 }
3681 if (ret < 0) {
3682 err = ret;
3683 break;
3684 }
3685
3686 if (!RB_EMPTY_ROOT(&blocks)) {
3687 ret = relocate_tree_blocks(trans, rc, &blocks);
3688 if (ret < 0) {
3689 if (ret != -EAGAIN) {
3690 err = ret;
3691 break;
3692 }
3693 rc->extents_found--;
3694 rc->search_start = key.objectid;
3695 }
3696 }
3697
3698 btrfs_end_transaction_throttle(trans);
3699 btrfs_btree_balance_dirty(fs_info);
3700 trans = NULL;
3701
3702 if (rc->stage == MOVE_DATA_EXTENTS &&
3703 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3704 rc->found_file_extent = 1;
3705 ret = relocate_data_extent(rc->data_inode,
3706 &key, &rc->cluster);
3707 if (ret < 0) {
3708 err = ret;
3709 break;
3710 }
3711 }
3712 if (btrfs_should_cancel_balance(fs_info)) {
3713 err = -ECANCELED;
3714 break;
3715 }
3716 }
3717 if (trans && progress && err == -ENOSPC) {
3718 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3719 if (ret == 1) {
3720 err = 0;
3721 progress = 0;
3722 goto restart;
3723 }
3724 }
3725
3726 btrfs_release_path(path);
3727 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3728
3729 if (trans) {
3730 btrfs_end_transaction_throttle(trans);
3731 btrfs_btree_balance_dirty(fs_info);
3732 }
3733
3734 if (!err) {
3735 ret = relocate_file_extent_cluster(rc->data_inode,
3736 &rc->cluster);
3737 if (ret < 0)
3738 err = ret;
3739 }
3740
3741 rc->create_reloc_tree = 0;
3742 set_reloc_control(rc);
3743
3744 btrfs_backref_release_cache(&rc->backref_cache);
3745 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3746
3747 /*
3748 * Even in the case when the relocation is cancelled, we should still go
3749 * through prepare_to_merge() and merge_reloc_roots().
3750 *
3751 * On error (including a cancelled balance), prepare_to_merge() will
3752 * mark all reloc trees orphan, then queue them for cleanup in
3753 * merge_reloc_roots().
3754 */
3755 err = prepare_to_merge(rc, err);
3756
3757 merge_reloc_roots(rc);
3758
3759 rc->merge_reloc_tree = 0;
3760 unset_reloc_control(rc);
3761 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3762
3763 /* get rid of pinned extents */
3764 trans = btrfs_join_transaction(rc->extent_root);
3765 if (IS_ERR(trans)) {
3766 err = PTR_ERR(trans);
3767 goto out_free;
3768 }
3769 ret = btrfs_commit_transaction(trans);
3770 if (ret && !err)
3771 err = ret;
3772 out_free:
3773 ret = clean_dirty_subvols(rc);
3774 if (ret < 0 && !err)
3775 err = ret;
3776 btrfs_free_block_rsv(fs_info, rc->block_rsv);
3777 btrfs_free_path(path);
3778 return err;
3779 }
3780
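/*
 * Insert a minimal inode item for the data reloc inode: a regular file
 * with size 0, link count 0 (never visible in any directory) and the
 * NOCOMPRESS|PREALLOC flags, so relocated data is written back without
 * transformation.
 */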
3781 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3782 struct btrfs_root *root, u64 objectid)
3783 {
3784 struct btrfs_path *path;
3785 struct btrfs_inode_item *item;
3786 struct extent_buffer *leaf;
3787 int ret;
3788
3789 path = btrfs_alloc_path();
3790 if (!path)
3791 return -ENOMEM;
3792
3793 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3794 if (ret)
3795 goto out;
3796
3797 leaf = path->nodes[0];
3798 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3799 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3800 btrfs_set_inode_generation(leaf, item, 1);
3801 btrfs_set_inode_size(leaf, item, 0);
3802 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3803 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3804 BTRFS_INODE_PREALLOC);
3805 btrfs_mark_buffer_dirty(leaf);
3806 out:
3807 btrfs_free_path(path);
3808 return ret;
3809 }
3810
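/*
 * Delete the inode item created by __insert_orphan_inode(). Any failure
 * here would leave the metadata inconsistent, so the transaction is
 * aborted on error.
 */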
3811 static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3812 struct btrfs_root *root, u64 objectid)
3813 {
3814 struct btrfs_path *path;
3815 struct btrfs_key key;
3816 int ret = 0;
3817
3818 path = btrfs_alloc_path();
3819 if (!path) {
3820 ret = -ENOMEM;
3821 goto out;
3822 }
3823
3824 key.objectid = objectid;
3825 key.type = BTRFS_INODE_ITEM_KEY;
3826 key.offset = 0;
3827 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3828 if (ret) {
3829 if (ret > 0)
3830 ret = -ENOENT;
3831 goto out;
3832 }
3833 ret = btrfs_del_item(trans, root, path);
3834 out:
3835 if (ret)
3836 btrfs_abort_transaction(trans, ret);
3837 btrfs_free_path(path);
3838 }
3839
3840 /*
3841 * Helper to create an inode for data relocation.
3842 * The inode lives in the data relocation tree and its link count is 0.
3843 */
3844 static noinline_for_stack
3845 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3846 struct btrfs_block_group *group)
3847 {
3848 struct inode *inode = NULL;
3849 struct btrfs_trans_handle *trans;
3850 struct btrfs_root *root;
3851 u64 objectid;
3852 int err = 0;
3853
3854 root = btrfs_grab_root(fs_info->data_reloc_root);
3855 trans = btrfs_start_transaction(root, 6);
3856 if (IS_ERR(trans)) {
3857 btrfs_put_root(root);
3858 return ERR_CAST(trans);
3859 }
3860
3861 err = btrfs_get_free_objectid(root, &objectid);
3862 if (err)
3863 goto out;
3864
3865 err = __insert_orphan_inode(trans, root, objectid);
3866 if (err)
3867 goto out;
3868
3869 inode = btrfs_iget(fs_info->sb, objectid, root);
3870 if (IS_ERR(inode)) {
3871 delete_orphan_inode(trans, root, objectid);
3872 err = PTR_ERR(inode);
3873 inode = NULL;
3874 goto out;
3875 }
3876 BTRFS_I(inode)->index_cnt = group->start;
3877
3878 err = btrfs_orphan_add(trans, BTRFS_I(inode));
3879 out:
3880 btrfs_put_root(root);
3881 btrfs_end_transaction(trans);
3882 btrfs_btree_balance_dirty(fs_info);
3883 if (err) {
3884 iput(inode);
3885 inode = ERR_PTR(err);
3886 }
3887 return inode;
3888 }
3889
3890 /*
3891 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3892 * has been requested meanwhile and don't start in that case.
3893 *
3894 * Return:
3895 * 0 success
3896 * -EINPROGRESS operation is already in progress, that's probably a bug
3897 * -ECANCELED cancellation request was set before the operation started
3898 */
3899 static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3900 {
3901 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3902 /* This should not happen */
3903 btrfs_err(fs_info, "reloc already running, cannot start");
3904 return -EINPROGRESS;
3905 }
3906
3907 if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3908 btrfs_info(fs_info, "chunk relocation canceled on start");
3909 /*
3910 * On cancel, clear all requests but let the caller mark
3911 * the end after cleanup operations.
3912 */
3913 atomic_set(&fs_info->reloc_cancel_req, 0);
3914 return -ECANCELED;
3915 }
3916 return 0;
3917 }
3918
3919 /*
3920 * Mark end of chunk relocation that is cancellable and wake any waiters.
3921 */
3922 static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3923 {
3924 /* Requested after start, clear bit first so any waiters can continue */
3925 if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3926 btrfs_info(fs_info, "chunk relocation canceled during operation");
3927 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3928 atomic_set(&fs_info->reloc_cancel_req, 0);
3929 }
3930
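/*
 * Allocate and initialize a reloc_control; returns NULL if the
 * allocation fails.
 */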
3931 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3932 {
3933 struct reloc_control *rc;
3934
3935 rc = kzalloc(sizeof(*rc), GFP_NOFS);
3936 if (!rc)
3937 return NULL;
3938
3939 INIT_LIST_HEAD(&rc->reloc_roots);
3940 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3941 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
3942 mapping_tree_init(&rc->reloc_root_tree);
3943 extent_io_tree_init(fs_info, &rc->processed_blocks,
3944 IO_TREE_RELOC_BLOCKS, NULL);
3945 return rc;
3946 }
3947
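/*
 * Free a reloc_control: drop any reloc roots still on the list and free
 * all mapping nodes left in the reloc root tree.
 */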
3948 static void free_reloc_control(struct reloc_control *rc)
3949 {
3950 struct mapping_node *node, *tmp;
3951
3952 free_reloc_roots(&rc->reloc_roots);
3953 rbtree_postorder_for_each_entry_safe(node, tmp,
3954 &rc->reloc_root_tree.rb_root, rb_node)
3955 kfree(node);
3956
3957 kfree(rc);
3958 }
3959
3960 /*
3961 * Print the block group being relocated
3962 */
3963 static void describe_relocation(struct btrfs_fs_info *fs_info,
3964 struct btrfs_block_group *block_group)
3965 {
3966 char buf[128] = {'\0'};
3967
3968 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3969
3970 btrfs_info(fs_info,
3971 "relocating block group %llu flags %s",
3972 block_group->start, buf);
3973 }
3974
3975 static const char *stage_to_string(int stage)
3976 {
3977 if (stage == MOVE_DATA_EXTENTS)
3978 return "move data extents";
3979 if (stage == UPDATE_DATA_PTRS)
3980 return "update data pointers";
3981 return "unknown";
3982 }
3983
3984 /*
3985 * Relocate all extents in a block group.
3986 */
3987 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3988 {
3989 struct btrfs_block_group *bg;
3990 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
3991 struct reloc_control *rc;
3992 struct inode *inode;
3993 struct btrfs_path *path;
3994 int ret;
3995 int rw = 0;
3996 int err = 0;
3997
3998 /*
3999 * This only gets set if we had a half-deleted snapshot on mount. We
4000 * cannot allow relocation to start while we're still trying to clean up
4001 * these pending deletions.
4002 */
4003 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
4004 if (ret)
4005 return ret;
4006
4007 /* We may have been woken up by close_ctree, so bail if we're closing. */
4008 if (btrfs_fs_closing(fs_info))
4009 return -EINTR;
4010
4011 bg = btrfs_lookup_block_group(fs_info, group_start);
4012 if (!bg)
4013 return -ENOENT;
4014
4015 /*
4016 * Relocation of a data block group creates ordered extents. Without
4017 * sb_start_write(), we can freeze the filesystem while unfinished
4018 * ordered extents are left. Such ordered extents can cause a deadlock
4019 * e.g. when syncfs() is waiting for their completion but they can't
4020 * finish because they block when joining a transaction, due to the
4021 * fact that the freeze locks are being held in write mode.
4022 */
4023 if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
4024 ASSERT(sb_write_started(fs_info->sb));
4025
4026 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4027 btrfs_put_block_group(bg);
4028 return -ETXTBSY;
4029 }
4030
4031 rc = alloc_reloc_control(fs_info);
4032 if (!rc) {
4033 btrfs_put_block_group(bg);
4034 return -ENOMEM;
4035 }
4036
4037 ret = reloc_chunk_start(fs_info);
4038 if (ret < 0) {
4039 err = ret;
4040 goto out_put_bg;
4041 }
4042
4043 rc->extent_root = extent_root;
4044 rc->block_group = bg;
4045
4046 ret = btrfs_inc_block_group_ro(rc->block_group, true);
4047 if (ret) {
4048 err = ret;
4049 goto out;
4050 }
4051 rw = 1;
4052
4053 path = btrfs_alloc_path();
4054 if (!path) {
4055 err = -ENOMEM;
4056 goto out;
4057 }
4058
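	/*
	 * Delete the v1 free space cache for this block group, if one exists;
	 * -ENOENT from the lookup or deletion just means there is no cache
	 * inode to remove.
	 */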
4059 inode = lookup_free_space_inode(rc->block_group, path);
4060 btrfs_free_path(path);
4061
4062 if (!IS_ERR(inode))
4063 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4064 else
4065 ret = PTR_ERR(inode);
4066
4067 if (ret && ret != -ENOENT) {
4068 err = ret;
4069 goto out;
4070 }
4071
4072 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4073 if (IS_ERR(rc->data_inode)) {
4074 err = PTR_ERR(rc->data_inode);
4075 rc->data_inode = NULL;
4076 goto out;
4077 }
4078
4079 describe_relocation(fs_info, rc->block_group);
4080
4081 btrfs_wait_block_group_reservations(rc->block_group);
4082 btrfs_wait_nocow_writers(rc->block_group);
4083 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4084 rc->block_group->start,
4085 rc->block_group->length);
4086
4087 ret = btrfs_zone_finish(rc->block_group);
4088 WARN_ON(ret && ret != -EAGAIN);
4089
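	/*
	 * Relocation runs in stages: MOVE_DATA_EXTENTS copies the data out of
	 * the block group, then UPDATE_DATA_PTRS rewrites the references.
	 * Keep looping until a pass finds no more extents.
	 */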
4090 while (1) {
4091 int finishes_stage;
4092
4093 mutex_lock(&fs_info->cleaner_mutex);
4094 ret = relocate_block_group(rc);
4095 mutex_unlock(&fs_info->cleaner_mutex);
4096 if (ret < 0)
4097 err = ret;
4098
4099 finishes_stage = rc->stage;
4100 /*
4101 * We may have gotten ENOSPC after we already dirtied some
4102 * extents. If writeout happens while we're relocating a
4103 * different block group we could end up hitting the
4104 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4105 * btrfs_reloc_cow_block. Make sure we write everything out
4106 * properly so we don't trip over this problem, and then break
4107 * out of the loop if we hit an error.
4108 */
4109 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4110 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4111 (u64)-1);
4112 if (ret)
4113 err = ret;
4114 invalidate_mapping_pages(rc->data_inode->i_mapping,
4115 0, -1);
4116 rc->stage = UPDATE_DATA_PTRS;
4117 }
4118
4119 if (err < 0)
4120 goto out;
4121
4122 if (rc->extents_found == 0)
4123 break;
4124
4125 btrfs_info(fs_info, "found %llu extents, stage: %s",
4126 rc->extents_found, stage_to_string(finishes_stage));
4127 }
4128
4129 WARN_ON(rc->block_group->pinned > 0);
4130 WARN_ON(rc->block_group->reserved > 0);
4131 WARN_ON(rc->block_group->used > 0);
4132 out:
4133 if (err && rw)
4134 btrfs_dec_block_group_ro(rc->block_group);
4135 iput(rc->data_inode);
4136 out_put_bg:
4137 btrfs_put_block_group(bg);
4138 reloc_chunk_end(fs_info);
4139 free_reloc_control(rc);
4140 return err;
4141 }
4142
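/*
 * Reset the drop progress of a dangling reloc root and set its refs to
 * zero so the tree is treated as garbage and cleaned up later.
 */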
4143 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4144 {
4145 struct btrfs_fs_info *fs_info = root->fs_info;
4146 struct btrfs_trans_handle *trans;
4147 int ret, err;
4148
4149 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4150 if (IS_ERR(trans))
4151 return PTR_ERR(trans);
4152
4153 memset(&root->root_item.drop_progress, 0,
4154 sizeof(root->root_item.drop_progress));
4155 btrfs_set_root_drop_level(&root->root_item, 0);
4156 btrfs_set_root_refs(&root->root_item, 0);
4157 ret = btrfs_update_root(trans, fs_info->tree_root,
4158 &root->root_key, &root->root_item);
4159
4160 err = btrfs_end_transaction(trans);
4161 if (err)
4162 return err;
4163 return ret;
4164 }
4165
4166 /*
4167 * Recover relocation interrupted by a system crash.
4168 *
4169 * This function resumes merging reloc trees with the corresponding fs trees.
4170 * This is important for preserving the sharing of tree blocks.
4171 */
4172 int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
4173 {
4174 LIST_HEAD(reloc_roots);
4175 struct btrfs_key key;
4176 struct btrfs_root *fs_root;
4177 struct btrfs_root *reloc_root;
4178 struct btrfs_path *path;
4179 struct extent_buffer *leaf;
4180 struct reloc_control *rc = NULL;
4181 struct btrfs_trans_handle *trans;
4182 int ret;
4183 int err = 0;
4184
4185 path = btrfs_alloc_path();
4186 if (!path)
4187 return -ENOMEM;
4188 path->reada = READA_BACK;
4189
4190 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4191 key.type = BTRFS_ROOT_ITEM_KEY;
4192 key.offset = (u64)-1;
4193
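	/*
	 * Walk all reloc root items backwards, starting from the highest
	 * possible key.offset, and read each reloc tree so it can be merged
	 * or cleaned up below.
	 */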
4194 while (1) {
4195 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4196 path, 0, 0);
4197 if (ret < 0) {
4198 err = ret;
4199 goto out;
4200 }
4201 if (ret > 0) {
4202 if (path->slots[0] == 0)
4203 break;
4204 path->slots[0]--;
4205 }
4206 leaf = path->nodes[0];
4207 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4208 btrfs_release_path(path);
4209
4210 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4211 key.type != BTRFS_ROOT_ITEM_KEY)
4212 break;
4213
4214 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
4215 if (IS_ERR(reloc_root)) {
4216 err = PTR_ERR(reloc_root);
4217 goto out;
4218 }
4219
4220 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4221 list_add(&reloc_root->root_list, &reloc_roots);
4222
4223 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4224 fs_root = btrfs_get_fs_root(fs_info,
4225 reloc_root->root_key.offset, false);
4226 if (IS_ERR(fs_root)) {
4227 ret = PTR_ERR(fs_root);
4228 if (ret != -ENOENT) {
4229 err = ret;
4230 goto out;
4231 }
4232 ret = mark_garbage_root(reloc_root);
4233 if (ret < 0) {
4234 err = ret;
4235 goto out;
4236 }
4237 } else {
4238 btrfs_put_root(fs_root);
4239 }
4240 }
4241
4242 if (key.offset == 0)
4243 break;
4244
4245 key.offset--;
4246 }
4247 btrfs_release_path(path);
4248
4249 if (list_empty(&reloc_roots))
4250 goto out;
4251
4252 rc = alloc_reloc_control(fs_info);
4253 if (!rc) {
4254 err = -ENOMEM;
4255 goto out;
4256 }
4257
4258 ret = reloc_chunk_start(fs_info);
4259 if (ret < 0) {
4260 err = ret;
4261 goto out_end;
4262 }
4263
4264 rc->extent_root = btrfs_extent_root(fs_info, 0);
4265
4266 set_reloc_control(rc);
4267
4268 trans = btrfs_join_transaction(rc->extent_root);
4269 if (IS_ERR(trans)) {
4270 err = PTR_ERR(trans);
4271 goto out_unset;
4272 }
4273
4274 rc->merge_reloc_tree = 1;
4275
4276 while (!list_empty(&reloc_roots)) {
4277 reloc_root = list_entry(reloc_roots.next,
4278 struct btrfs_root, root_list);
4279 list_del(&reloc_root->root_list);
4280
4281 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4282 list_add_tail(&reloc_root->root_list,
4283 &rc->reloc_roots);
4284 continue;
4285 }
4286
4287 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4288 false);
4289 if (IS_ERR(fs_root)) {
4290 err = PTR_ERR(fs_root);
4291 list_add_tail(&reloc_root->root_list, &reloc_roots);
4292 btrfs_end_transaction(trans);
4293 goto out_unset;
4294 }
4295
4296 err = __add_reloc_root(reloc_root);
4297 ASSERT(err != -EEXIST);
4298 if (err) {
4299 list_add_tail(&reloc_root->root_list, &reloc_roots);
4300 btrfs_put_root(fs_root);
4301 btrfs_end_transaction(trans);
4302 goto out_unset;
4303 }
4304 fs_root->reloc_root = btrfs_grab_root(reloc_root);
4305 btrfs_put_root(fs_root);
4306 }
4307
4308 err = btrfs_commit_transaction(trans);
4309 if (err)
4310 goto out_unset;
4311
4312 merge_reloc_roots(rc);
4313
4314 unset_reloc_control(rc);
4315
4316 trans = btrfs_join_transaction(rc->extent_root);
4317 if (IS_ERR(trans)) {
4318 err = PTR_ERR(trans);
4319 goto out_clean;
4320 }
4321 err = btrfs_commit_transaction(trans);
4322 out_clean:
4323 ret = clean_dirty_subvols(rc);
4324 if (ret < 0 && !err)
4325 err = ret;
4326 out_unset:
4327 unset_reloc_control(rc);
4328 out_end:
4329 reloc_chunk_end(fs_info);
4330 free_reloc_control(rc);
4331 out:
4332 free_reloc_roots(&reloc_roots);
4333
4334 btrfs_free_path(path);
4335
4336 if (err == 0) {
4337 /* cleanup orphan inode in data relocation tree */
4338 fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4339 ASSERT(fs_root);
4340 err = btrfs_orphan_cleanup(fs_root);
4341 btrfs_put_root(fs_root);
4342 }
4343 return err;
4344 }
4345
4346 /*
4347 * Helper to add ordered checksums for data relocation.
4348 *
4349 * Cloning the checksums properly handles the nodatasum extents.
4350 * It also saves the CPU time that would be spent recalculating the checksums.
4351 */
4352 int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
4353 {
4354 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4355 struct btrfs_root *csum_root;
4356 struct btrfs_ordered_sum *sums;
4357 struct btrfs_ordered_extent *ordered;
4358 int ret;
4359 u64 disk_bytenr;
4360 u64 new_bytenr;
4361 LIST_HEAD(list);
4362
4363 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4364 BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
4365
4366 disk_bytenr = file_pos + inode->index_cnt;
4367 csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4368 ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
4369 disk_bytenr + len - 1, &list, 0, false);
4370 if (ret)
4371 goto out;
4372
4373 while (!list_empty(&list)) {
4374 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4375 list_del_init(&sums->list);
4376
4377 /*
4378 * We need to offset the new_bytenr based on where the csum is.
4379 * We need to do this because we will read in the entire prealloc
4380 * extent, but we may have written to, say, the middle of the
4381 * prealloc extent, so we need to make sure the csum goes with
4382 * the right disk offset.
4383 *
4384 * We can do this because the data reloc inode refers strictly
4385 * to the on disk bytes, so we don't have to worry about
4386 * disk_len vs real len like with real inodes since it's all
4387 * disk length.
4388 */
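		/*
		 * With hypothetical numbers: if the old extent started at
		 * disk_bytenr 1M, this csum entry covers the bytes at 1M + 64K,
		 * and the relocated copy starts at 5M, new_bytenr ends up at
		 * 5M + 64K.
		 */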
4389 new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
4390 sums->bytenr = new_bytenr;
4391
4392 btrfs_add_ordered_sum(ordered, sums);
4393 }
4394 out:
4395 btrfs_put_ordered_extent(ordered);
4396 return ret;
4397 }
4398
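/*
 * Called whenever a tree block is COWed while relocation is running. For a
 * reloc tree that is being built, point the matching backref cache node at
 * the new copy; in the UPDATE_DATA_PTRS stage, also rewrite the file extent
 * pointers in the newly COWed leaf.
 */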
4399 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4400 struct btrfs_root *root, struct extent_buffer *buf,
4401 struct extent_buffer *cow)
4402 {
4403 struct btrfs_fs_info *fs_info = root->fs_info;
4404 struct reloc_control *rc;
4405 struct btrfs_backref_node *node;
4406 int first_cow = 0;
4407 int level;
4408 int ret = 0;
4409
4410 rc = fs_info->reloc_ctl;
4411 if (!rc)
4412 return 0;
4413
4414 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
4415
4416 level = btrfs_header_level(buf);
4417 if (btrfs_header_generation(buf) <=
4418 btrfs_root_last_snapshot(&root->root_item))
4419 first_cow = 1;
4420
4421 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4422 rc->create_reloc_tree) {
4423 WARN_ON(!first_cow && level == 0);
4424
4425 node = rc->backref_cache.path[level];
4426 BUG_ON(node->bytenr != buf->start &&
4427 node->new_bytenr != buf->start);
4428
4429 btrfs_backref_drop_node_buffer(node);
4430 atomic_inc(&cow->refs);
4431 node->eb = cow;
4432 node->new_bytenr = cow->start;
4433
4434 if (!node->pending) {
4435 list_move_tail(&node->list,
4436 &rc->backref_cache.pending[level]);
4437 node->pending = 1;
4438 }
4439
4440 if (first_cow)
4441 mark_block_processed(rc, node);
4442
4443 if (first_cow && level > 0)
4444 rc->nodes_relocated += buf->len;
4445 }
4446
4447 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4448 ret = replace_file_extents(trans, rc, root, cow);
4449 return ret;
4450 }
4451
4452 /*
4453 * Called before creating a snapshot. It calculates the metadata reservation
4454 * required for relocating tree blocks in the snapshot.
4455 */
4456 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4457 u64 *bytes_to_reserve)
4458 {
4459 struct btrfs_root *root = pending->root;
4460 struct reloc_control *rc = root->fs_info->reloc_ctl;
4461
4462 if (!rc || !have_reloc_root(root))
4463 return;
4464
4465 if (!rc->merge_reloc_tree)
4466 return;
4467
4468 root = root->reloc_root;
4469 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4470 /*
4471 * Relocation is in the stage of merging trees. In the worst
4472 * case, the space used by merging a reloc tree is twice the
4473 * size of the relocated tree nodes: half for COWing the
4474 * reloc tree, half for COWing the fs tree. The space used
4475 * by COWing the reloc tree will be freed after the tree is
4476 * dropped. If we create a snapshot, COWing the fs tree may
4477 * use more space than it frees, so we need to reserve
4478 * extra space.
4479 */
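	/*
	 * For illustration, with assumed numbers: if 8 MiB of tree nodes have
	 * been relocated so far, the pending snapshot reserves an extra 8 MiB
	 * on top of its normal reservation.
	 */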
4480 *bytes_to_reserve += rc->nodes_relocated;
4481 }
4482
4483 /*
4484 * Called after a snapshot is created. Migrate the block reservation
4485 * and create a reloc root for the newly created snapshot.
4486 *
4487 * This is similar to btrfs_init_reloc_root(), we come out of here with two
4488 * references held on the reloc_root, one for root->reloc_root and one for
4489 * rc->reloc_roots.
4490 */
4491 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4492 struct btrfs_pending_snapshot *pending)
4493 {
4494 struct btrfs_root *root = pending->root;
4495 struct btrfs_root *reloc_root;
4496 struct btrfs_root *new_root;
4497 struct reloc_control *rc = root->fs_info->reloc_ctl;
4498 int ret;
4499
4500 if (!rc || !have_reloc_root(root))
4501 return 0;
4502
4503 rc = root->fs_info->reloc_ctl;
4504 rc->merging_rsv_size += rc->nodes_relocated;
4505
4506 if (rc->merge_reloc_tree) {
4507 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4508 rc->block_rsv,
4509 rc->nodes_relocated, true);
4510 if (ret)
4511 return ret;
4512 }
4513
4514 new_root = pending->snap;
4515 reloc_root = create_reloc_root(trans, root->reloc_root,
4516 new_root->root_key.objectid);
4517 if (IS_ERR(reloc_root))
4518 return PTR_ERR(reloc_root);
4519
4520 ret = __add_reloc_root(reloc_root);
4521 ASSERT(ret != -EEXIST);
4522 if (ret) {
4523 /* Pairs with create_reloc_root */
4524 btrfs_put_root(reloc_root);
4525 return ret;
4526 }
4527 new_root->reloc_root = btrfs_grab_root(reloc_root);
4528
4529 if (rc->create_reloc_tree)
4530 ret = clone_backref_node(trans, rc, root, reloc_root);
4531 return ret;
4532 }
4533