// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};
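
/*
 * Illustrative only, not a call site in this file: an fsync of a regular
 * file logs that inode with LOG_INODE_ALL, while an ancestor directory may
 * only be logged with LOG_INODE_EXISTS so that replay can recreate it:
 *
 *	btrfs_log_inode(trans, root, inode, LOG_INODE_ALL, ctx);
 *	btrfs_log_inode(trans, root, parent, LOG_INODE_EXISTS, ctx);
 */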

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one.
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * Stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find.
 * The second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};
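
/*
 * For illustration, an assumed mapping rather than one spelled out in this
 * file: LOG_WALK_PIN_ONLY corresponds to the first read of the log described
 * in the tree-logging overview below (pin extents), LOG_WALK_REPLAY_INODES
 * to the second (create inodes), and LOG_WALK_REPLAY_DIR_INDEX plus
 * LOG_WALK_REPLAY_ALL to the final pass over directories, links and extents.
 */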

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
static void wait_log_commit(struct btrfs_root *root, int transid);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree, and once
 * to do all the other items.
 */
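
/*
 * A minimal sketch of the fast path described above, with assumed control
 * flow (simplified from the fsync path in file.c, not copied from it):
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	btrfs_log_dentry_safe(trans, dentry, &ctx);  (copy items into log tree)
 *	btrfs_sync_log(trans, root, &ctx);           (write log tree to disk)
 *	btrfs_end_transaction(trans);                (no full tree commit needed)
 */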

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;
	const bool zoned = btrfs_is_zoned(fs_info);
	int ret = 0;
	bool created = false;

	/*
	 * First check if the log root tree was already created. If not, create
	 * it before locking the root's log_mutex, just to keep lockdep happy.
	 */
	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
		mutex_lock(&tree_root->log_mutex);
		if (!fs_info->log_root_tree) {
			ret = btrfs_init_log_root_tree(trans, fs_info);
			if (!ret) {
				set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
				created = true;
			}
		}
		mutex_unlock(&tree_root->log_mutex);
		if (ret)
			return ret;
	}

	mutex_lock(&root->log_mutex);

again:
	if (root->log_root) {
		int index = (root->log_transid + 1) % 2;

		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		/*
		 * This means fs_info->log_root_tree was already created
		 * for some other FS trees.  Do a full commit so we do not mix
		 * nodes from multiple log transactions when doing sequential
		 * writing.
		 */
		if (zoned && !created) {
			ret = -EAGAIN;
			goto out;
		}

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_writers);
	if (ctx && !ctx->logging_new_name) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if no log transaction was in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	const bool zoned = btrfs_is_zoned(root->fs_info);
	int ret = -ENOENT;

	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
		return ret;

	mutex_lock(&root->log_mutex);
again:
	if (root->log_root) {
		int index = (root->log_transid + 1) % 2;

		ret = 0;
		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	atomic_inc(&root->log_writers);
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
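
/*
 * Illustrative pairing, an assumed caller pattern rather than a call site in
 * this file: a task that must keep the current log transaction alive across
 * an operation brackets it with the two helpers above:
 *
 *	btrfs_pin_log_trans(root);
 *	... operate while the log transaction cannot complete ...
 *	btrfs_end_log_trans(root);
 */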

static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
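
/*
 * For illustration, assumed caller configurations rather than definitions
 * from this file: freeing a log tree at transaction commit time walks it
 * with roughly { .free = 1, .process_func = process_one_buffer }, while log
 * replay starts with { .pin = 1, .stage = LOG_WALK_PIN_ONLY }.  The write
 * and wait flags make process_one_buffer() below flush and/or wait on each
 * buffer it visits.
 */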

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0)
				btrfs_set_inode_size(dst_eb, dst_item, ino_size);
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;

	inode = btrfs_iget(root->fs_info->sb, objectid, root);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}
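
/*
 * Illustrative caller pattern, assumed from the replay helpers below rather
 * than prescribed by this helper: a NULL return is mapped to an errno by the
 * caller, e.g.
 *
 *	inode = read_one_inode(root, objectid);
 *	if (!inode)
 *		return -EIO;	(or -ENOENT, depending on the call site)
 */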

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
				       btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	drop_args.start = start;
	drop_args.end = extent_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record the dirty extent: we did a shallow copy of
		 * the file extent item and skipped the normal backref update,
		 * modifying the extent tree all by ourselves.  So we need to
		 * record the dirty extent for qgroup manually, as the owner
		 * of the file extent changed from the log tree (doesn't
		 * affect qgroup) to the fs/file tree (affects qgroup).
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret < 0) {
				goto out;
			} else if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset, 0, false);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans,
							      fs_info->csum_root,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
							fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;

update_inode:
	btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
	if (inode)
		iput(inode);
	return ret;
}

static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *dir,
				       struct btrfs_inode *inode,
				       const char *name,
				       int name_len)
{
	int ret;

	ret = btrfs_unlink_inode(trans, dir, inode, name, name_len);
	if (ret)
		return ret;
	/*
	 * Whenever we need to check if a name exists or not, we check the
	 * fs/subvolume tree. So after an unlink we must run delayed items, so
	 * that future checks for a name during log replay see that the name
	 * does not exist anymore.
	 */
	return btrfs_run_delayed_items(trans);
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), name,
					  name_len);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * See if a given name and sequence number found in an inode back reference are
 * already in a directory and correctly point to this inode.
 *
 * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
 * exists.
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int ret = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}

	btrfs_release_path(path);
	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid == objectid)
			ret = 1;
	}
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 *
 * Returns 1 if the name is in the log, 0 if it is not, and < 0 on error.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
		ret = 0;
		goto out;
	}

	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = unlink_inode_for_log_replay(trans, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have already searched the root tree and checked
		 * the corresponding ref, so there is no need to check it
		 * again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (IS_ERR(extref)) {
		return PTR_ERR(extref);
	} else if (extref) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = unlink_inode_for_log_replay(trans,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (IS_ERR(di)) {
		return PTR_ERR(di);
	} else if (di) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (IS_ERR(di)) {
		return PTR_ERR(di);
	} else if (di) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
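
/*
 * Illustrative note on the on-disk layout assumed by the two helpers above:
 * an INODE_REF item packs (struct btrfs_inode_ref, name) pairs back to back,
 * and an INODE_EXTREF item packs (struct btrfs_inode_extref, name) pairs,
 * which is why the loops that walk these items advance their cursor by
 * sizeof(the struct) + name_len for each reference.
 */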

/*
 * Take an inode reference item from the log tree and iterate all names from
 * the inode reference item in the subvolume tree with the same key (if it
 * exists).  For any name that is not in the inode reference item from the log
 * tree, do a proper unlink of that name (that is, remove its entry from the
 * inode reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir),
						inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);

out:
	btrfs_free_path(path);
	return ret;
}

static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(other_inode),
					  name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		set_nlink(other_inode, 1);
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
				   btrfs_ino(BTRFS_I(inode)), ref_index,
				   name, namelen);
		if (ret < 0) {
			goto out;
		} else if (ret == 0) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = unlink_inode_for_log_replay(trans,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					set_nlink(inode, 1);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
			if (ret)
				goto out;
		}
		/* Else, ret == 1, we already have a perfect match, we're done. */

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
			       struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
			    struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (ret)
			goto out;
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = btrfs_insert_orphan_item(trans, root, ino);
		if (ret == -EEXIST)
			ret = 0;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			break;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode) {
			ret = -EIO;
			break;
		}

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			break;

1846 /*
1847 * fixup on a directory may create new entries,
1848 * make sure we always look for the highest possible
1849 * offset
1850 */
1851 key.offset = (u64)-1;
1852 }
1853 btrfs_release_path(path);
1854 return ret;
1855 }
1856
1857
1858 /*
1859 * record a given inode in the fixup dir so we can check its link
1860 * count when replay is done. The link count is incremented here
1861 * so the inode won't go away until we check it
1862 */
1863 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1864 struct btrfs_root *root,
1865 struct btrfs_path *path,
1866 u64 objectid)
1867 {
1868 struct btrfs_key key;
1869 int ret = 0;
1870 struct inode *inode;
1871
1872 inode = read_one_inode(root, objectid);
1873 if (!inode)
1874 return -EIO;
1875
1876 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1877 key.type = BTRFS_ORPHAN_ITEM_KEY;
1878 key.offset = objectid;
1879
1880 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1881
1882 btrfs_release_path(path);
1883 if (ret == 0) {
1884 if (!inode->i_nlink)
1885 set_nlink(inode, 1);
1886 else
1887 inc_nlink(inode);
1888 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1889 } else if (ret == -EEXIST) {
1890 ret = 0;
1891 }
1892 iput(inode);
1893
1894 return ret;
1895 }
1896
1897 /*
1898 * when replaying the log for a directory, we only insert names
1899 * for inodes that actually exist. This means an fsync on a directory
1900 * does not implicitly fsync all the new files in it
1901 */
1902 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1903 struct btrfs_root *root,
1904 u64 dirid, u64 index,
1905 char *name, int name_len,
1906 struct btrfs_key *location)
1907 {
1908 struct inode *inode;
1909 struct inode *dir;
1910 int ret;
1911
1912 inode = read_one_inode(root, location->objectid);
1913 if (!inode)
1914 return -ENOENT;
1915
1916 dir = read_one_inode(root, dirid);
1917 if (!dir) {
1918 iput(inode);
1919 return -EIO;
1920 }
1921
1922 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1923 name_len, 1, index);
1924
1925 /* FIXME, put inode into FIXUP list */
1926
1927 iput(inode);
1928 iput(dir);
1929 return ret;
1930 }
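
/*
 * Example (hedged illustration, not from the source): after
 *
 *	touch dir/file
 *	fsync(dir)
 *	<crash>
 *
 * replay does not re-create the name "file": read_one_inode() above
 * fails for an inode that was never copied into the subvolume, so
 * insert_one_name() returns -ENOENT, which the caller treats as "name
 * not replayed".
 */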
1931
1932 /*
1933 * take a single entry in a log directory item and replay it into
1934 * the subvolume.
1935 *
1936 * if a conflicting item exists in the subdirectory already,
1937 * the inode it points to is unlinked and put into the link count
1938 * fix up tree.
1939 *
1940 * If a name from the log points to a file or directory that does
1941 * not exist in the FS, it is skipped. fsyncs on directories
1942 * do not force down inodes inside that directory, just changes to the
1943 * names or unlinks in a directory.
1944 *
1945 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1946 * non-existing inode) and 1 if the name was replayed.
1947 */
1948 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1949 struct btrfs_root *root,
1950 struct btrfs_path *path,
1951 struct extent_buffer *eb,
1952 struct btrfs_dir_item *di,
1953 struct btrfs_key *key)
1954 {
1955 char *name;
1956 int name_len;
1957 struct btrfs_dir_item *dst_di;
1958 struct btrfs_key found_key;
1959 struct btrfs_key log_key;
1960 struct inode *dir;
1961 u8 log_type;
1962 bool exists;
1963 int ret;
1964 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1965 bool name_added = false;
1966
1967 dir = read_one_inode(root, key->objectid);
1968 if (!dir)
1969 return -EIO;
1970
1971 name_len = btrfs_dir_name_len(eb, di);
1972 name = kmalloc(name_len, GFP_NOFS);
1973 if (!name) {
1974 ret = -ENOMEM;
1975 goto out;
1976 }
1977
1978 log_type = btrfs_dir_type(eb, di);
1979 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1980 name_len);
1981
1982 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1983 ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1984 btrfs_release_path(path);
1985 if (ret < 0)
1986 goto out;
1987 exists = (ret == 0);
1988 ret = 0;
1989
1990 if (key->type == BTRFS_DIR_ITEM_KEY) {
1991 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1992 name, name_len, 1);
1993 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1994 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1995 key->objectid,
1996 key->offset, name,
1997 name_len, 1);
1998 } else {
1999 /* Corruption */
2000 ret = -EINVAL;
2001 goto out;
2002 }
2003
2004 if (IS_ERR(dst_di)) {
2005 ret = PTR_ERR(dst_di);
2006 goto out;
2007 } else if (!dst_di) {
2008 /* we need a sequence number to insert, so we only
2009 * do inserts for the BTRFS_DIR_INDEX_KEY types
2010 */
2011 if (key->type != BTRFS_DIR_INDEX_KEY)
2012 goto out;
2013 goto insert;
2014 }
2015
2016 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
2017 /* the existing item matches the logged item */
2018 if (found_key.objectid == log_key.objectid &&
2019 found_key.type == log_key.type &&
2020 found_key.offset == log_key.offset &&
2021 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
2022 update_size = false;
2023 goto out;
2024 }
2025
2026 /*
2027 * don't drop the conflicting directory entry if the inode
2028 * for the new entry doesn't exist
2029 */
2030 if (!exists)
2031 goto out;
2032
2033 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
2034 if (ret)
2035 goto out;
2036
2037 if (key->type == BTRFS_DIR_INDEX_KEY)
2038 goto insert;
2039 out:
2040 btrfs_release_path(path);
2041 if (!ret && update_size) {
2042 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2043 ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
2044 }
2045 kfree(name);
2046 iput(dir);
2047 if (!ret && name_added)
2048 ret = 1;
2049 return ret;
2050
2051 insert:
2052 /*
2053 * Check if the inode reference exists in the log for the given name,
2054 * inode and parent inode
2055 */
2056 found_key.objectid = log_key.objectid;
2057 found_key.type = BTRFS_INODE_REF_KEY;
2058 found_key.offset = key->objectid;
2059 ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
2060 if (ret < 0) {
2061 goto out;
2062 } else if (ret) {
2063 /* The dentry will be added later. */
2064 ret = 0;
2065 update_size = false;
2066 goto out;
2067 }
2068
2069 found_key.objectid = log_key.objectid;
2070 found_key.type = BTRFS_INODE_EXTREF_KEY;
2071 found_key.offset = key->objectid;
2072 ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
2073 name_len);
2074 if (ret < 0) {
2075 goto out;
2076 } else if (ret) {
2077 /* The dentry will be added later. */
2078 ret = 0;
2079 update_size = false;
2080 goto out;
2081 }
2082 btrfs_release_path(path);
2083 ret = insert_one_name(trans, root, key->objectid, key->offset,
2084 name, name_len, &log_key);
2085 if (ret && ret != -ENOENT && ret != -EEXIST)
2086 goto out;
2087 if (!ret)
2088 name_added = true;
2089 update_size = false;
2090 ret = 0;
2091 goto out;
2092 }
2093
2094 /*
2095 * find all the names in a directory item and reconcile them into
2096 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
2097 * one name in a directory item, but the same code gets used for
2098 * both directory index types
2099 */
2100 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2101 struct btrfs_root *root,
2102 struct btrfs_path *path,
2103 struct extent_buffer *eb, int slot,
2104 struct btrfs_key *key)
2105 {
2106 int ret = 0;
2107 u32 item_size = btrfs_item_size_nr(eb, slot);
2108 struct btrfs_dir_item *di;
2109 int name_len;
2110 unsigned long ptr;
2111 unsigned long ptr_end;
2112 struct btrfs_path *fixup_path = NULL;
2113
2114 ptr = btrfs_item_ptr_offset(eb, slot);
2115 ptr_end = ptr + item_size;
2116 while (ptr < ptr_end) {
2117 di = (struct btrfs_dir_item *)ptr;
2118 name_len = btrfs_dir_name_len(eb, di);
2119 ret = replay_one_name(trans, root, path, eb, di, key);
2120 if (ret < 0)
2121 break;
2122 ptr = (unsigned long)(di + 1);
2123 ptr += name_len;
2124
2125 /*
2126 * If this entry refers to a non-directory (directories can not
2127 * have a link count > 1) and it was added in the transaction
2128 * that was not committed, make sure we fixup the link count of
2129 * the inode the entry points to. Otherwise something like
2130 * the following would result in a directory pointing to an
2131 * inode with a wrong link count that does not account for this dir
2132 * entry:
2133 *
2134 * mkdir testdir
2135 * touch testdir/foo
2136 * touch testdir/bar
2137 * sync
2138 *
2139 * ln testdir/bar testdir/bar_link
2140 * ln testdir/foo testdir/foo_link
2141 * xfs_io -c "fsync" testdir/bar
2142 *
2143 * <power failure>
2144 *
2145 * mount fs, log replay happens
2146 *
2147 * File foo would remain with a link count of 1 when it has two
2148 * entries pointing to it in the directory testdir. This would
2149 * make it impossible to ever delete the parent directory, as
2150 * it would result in stale dentries that can never be deleted.
2151 */
2152 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2153 struct btrfs_key di_key;
2154
2155 if (!fixup_path) {
2156 fixup_path = btrfs_alloc_path();
2157 if (!fixup_path) {
2158 ret = -ENOMEM;
2159 break;
2160 }
2161 }
2162
2163 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2164 ret = link_to_fixup_dir(trans, root, fixup_path,
2165 di_key.objectid);
2166 if (ret)
2167 break;
2168 }
2169 ret = 0;
2170 }
2171 btrfs_free_path(fixup_path);
2172 return ret;
2173 }
2174
2175 /*
2176 * directory replay has two parts. There are the standard directory
2177 * items in the log copied from the subvolume, and range items
2178 * created in the log while the subvolume was logged.
2179 *
2180 * The range items tell us which parts of the key space the log
2181 * is authoritative for. During replay, if a key in the subvolume
2182 * directory is in a logged range item, but not actually in the log,
2183 * that means it was deleted from the directory before the fsync
2184 * and should be removed.
2185 */
2186 static noinline int find_dir_range(struct btrfs_root *root,
2187 struct btrfs_path *path,
2188 u64 dirid,
2189 u64 *start_ret, u64 *end_ret)
2190 {
2191 struct btrfs_key key;
2192 u64 found_end;
2193 struct btrfs_dir_log_item *item;
2194 int ret;
2195 int nritems;
2196
2197 if (*start_ret == (u64)-1)
2198 return 1;
2199
2200 key.objectid = dirid;
2201 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2202 key.offset = *start_ret;
2203
2204 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2205 if (ret < 0)
2206 goto out;
2207 if (ret > 0) {
2208 if (path->slots[0] == 0)
2209 goto out;
2210 path->slots[0]--;
2211 }
2212 if (ret != 0)
2213 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2214
2215 if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) {
2216 ret = 1;
2217 goto next;
2218 }
2219 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2220 struct btrfs_dir_log_item);
2221 found_end = btrfs_dir_log_end(path->nodes[0], item);
2222
2223 if (*start_ret >= key.offset && *start_ret <= found_end) {
2224 ret = 0;
2225 *start_ret = key.offset;
2226 *end_ret = found_end;
2227 goto out;
2228 }
2229 ret = 1;
2230 next:
2231 /* check the next slot in the tree to see if it is a valid item */
2232 nritems = btrfs_header_nritems(path->nodes[0]);
2233 path->slots[0]++;
2234 if (path->slots[0] >= nritems) {
2235 ret = btrfs_next_leaf(root, path);
2236 if (ret)
2237 goto out;
2238 }
2239
2240 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2241
2242 if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) {
2243 ret = 1;
2244 goto out;
2245 }
2246 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2247 struct btrfs_dir_log_item);
2248 found_end = btrfs_dir_log_end(path->nodes[0], item);
2249 *start_ret = key.offset;
2250 *end_ret = found_end;
2251 ret = 0;
2252 out:
2253 btrfs_release_path(path);
2254 return ret;
2255 }
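
/*
 * Hypothetical example of the range semantics (values made up): a
 * BTRFS_DIR_LOG_INDEX_KEY item at (dirid, offset 10) whose dir_log_end
 * is 25 makes the log authoritative for index offsets [10, 25]. Calling
 * this with *start_ret == 12 then returns 0 and sets *start_ret = 10
 * and *end_ret = 25.
 */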
2256
2257 /*
2258 * this looks for a given directory item in the log. If the directory
2259 * item is not in the log, the item is removed and the inode it points
2260 * to is unlinked
2261 */
2262 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2263 struct btrfs_root *root,
2264 struct btrfs_root *log,
2265 struct btrfs_path *path,
2266 struct btrfs_path *log_path,
2267 struct inode *dir,
2268 struct btrfs_key *dir_key)
2269 {
2270 int ret;
2271 struct extent_buffer *eb;
2272 int slot;
2273 struct btrfs_dir_item *di;
2274 int name_len;
2275 char *name;
2276 struct inode *inode = NULL;
2277 struct btrfs_key location;
2278
2279 /*
2280 * Currently we only log dir index keys. Even if we replay a log created
2281 * by an older kernel that logged both dir index and dir item keys, all
2282 * we need to do is process the dir index keys, we (and our caller) can
2283 * safely ignore dir item keys (key type BTRFS_DIR_ITEM_KEY).
2284 */
2285 ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY);
2286
2287 eb = path->nodes[0];
2288 slot = path->slots[0];
2289 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2290 name_len = btrfs_dir_name_len(eb, di);
2291 name = kmalloc(name_len, GFP_NOFS);
2292 if (!name) {
2293 ret = -ENOMEM;
2294 goto out;
2295 }
2296
2297 read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len);
2298
2299 if (log) {
2300 struct btrfs_dir_item *log_di;
2301
2302 log_di = btrfs_lookup_dir_index_item(trans, log, log_path,
2303 dir_key->objectid,
2304 dir_key->offset,
2305 name, name_len, 0);
2306 if (IS_ERR(log_di)) {
2307 ret = PTR_ERR(log_di);
2308 goto out;
2309 } else if (log_di) {
2310 /* The dentry exists in the log, we have nothing to do. */
2311 ret = 0;
2312 goto out;
2313 }
2314 }
2315
2316 btrfs_dir_item_key_to_cpu(eb, di, &location);
2317 btrfs_release_path(path);
2318 btrfs_release_path(log_path);
2319 inode = read_one_inode(root, location.objectid);
2320 if (!inode) {
2321 ret = -EIO;
2322 goto out;
2323 }
2324
2325 ret = link_to_fixup_dir(trans, root, path, location.objectid);
2326 if (ret)
2327 goto out;
2328
2329 inc_nlink(inode);
2330 ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode),
2331 name, name_len);
2332 /*
2333 * Unlike dir item keys, dir index keys can only have one name (entry) in
2334 * them, as there are no key collisions since each key has a unique offset
2335 * (an index number), so we're done.
2336 */
2337 out:
2338 btrfs_release_path(path);
2339 btrfs_release_path(log_path);
2340 kfree(name);
2341 iput(inode);
2342 return ret;
2343 }
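
/*
 * Illustrative scenario (assumed, not spelled out in the source): a name
 * present in the replayed directory whose index offset falls inside a
 * range the log is authoritative for, but which has no matching log
 * entry, was deleted before the fsync. The code above re-does that
 * unlink and parks the target inode in the fixup tree so its link count
 * is rechecked once replay finishes.
 */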
2344
2345 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2346 struct btrfs_root *root,
2347 struct btrfs_root *log,
2348 struct btrfs_path *path,
2349 const u64 ino)
2350 {
2351 struct btrfs_key search_key;
2352 struct btrfs_path *log_path;
2353 int i;
2354 int nritems;
2355 int ret;
2356
2357 log_path = btrfs_alloc_path();
2358 if (!log_path)
2359 return -ENOMEM;
2360
2361 search_key.objectid = ino;
2362 search_key.type = BTRFS_XATTR_ITEM_KEY;
2363 search_key.offset = 0;
2364 again:
2365 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2366 if (ret < 0)
2367 goto out;
2368 process_leaf:
2369 nritems = btrfs_header_nritems(path->nodes[0]);
2370 for (i = path->slots[0]; i < nritems; i++) {
2371 struct btrfs_key key;
2372 struct btrfs_dir_item *di;
2373 struct btrfs_dir_item *log_di;
2374 u32 total_size;
2375 u32 cur;
2376
2377 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2378 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2379 ret = 0;
2380 goto out;
2381 }
2382
2383 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2384 total_size = btrfs_item_size_nr(path->nodes[0], i);
2385 cur = 0;
2386 while (cur < total_size) {
2387 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2388 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2389 u32 this_len = sizeof(*di) + name_len + data_len;
2390 char *name;
2391
2392 name = kmalloc(name_len, GFP_NOFS);
2393 if (!name) {
2394 ret = -ENOMEM;
2395 goto out;
2396 }
2397 read_extent_buffer(path->nodes[0], name,
2398 (unsigned long)(di + 1), name_len);
2399
2400 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2401 name, name_len, 0);
2402 btrfs_release_path(log_path);
2403 if (!log_di) {
2404 /* Doesn't exist in log tree, so delete it. */
2405 btrfs_release_path(path);
2406 di = btrfs_lookup_xattr(trans, root, path, ino,
2407 name, name_len, -1);
2408 kfree(name);
2409 if (IS_ERR(di)) {
2410 ret = PTR_ERR(di);
2411 goto out;
2412 }
2413 ASSERT(di);
2414 ret = btrfs_delete_one_dir_name(trans, root,
2415 path, di);
2416 if (ret)
2417 goto out;
2418 btrfs_release_path(path);
2419 search_key = key;
2420 goto again;
2421 }
2422 kfree(name);
2423 if (IS_ERR(log_di)) {
2424 ret = PTR_ERR(log_di);
2425 goto out;
2426 }
2427 cur += this_len;
2428 di = (struct btrfs_dir_item *)((char *)di + this_len);
2429 }
2430 }
2431 ret = btrfs_next_leaf(root, path);
2432 if (ret > 0)
2433 ret = 0;
2434 else if (ret == 0)
2435 goto process_leaf;
2436 out:
2437 btrfs_free_path(log_path);
2438 btrfs_release_path(path);
2439 return ret;
2440 }
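
/*
 * Example (hedged): if an xattr was persisted by an earlier committed
 * transaction and then removed before an fsync of the inode, the log
 * holds the inode without that xattr. During replay the xattr exists in
 * the subvolume but the lookup in the log tree above misses, so the
 * stale xattr is deleted.
 */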
2441
2442
2443 /*
2444 * deletion replay happens before we copy any new directory items
2445 * out of the log or out of backreferences from inodes. It
2446 * scans the log to find ranges of keys that the log is authoritative for,
2447 * and then scans the directory to find items in those ranges that are
2448 * not present in the log.
2449 *
2450 * Anything we don't find in the log is unlinked and removed from the
2451 * directory.
2452 */
2453 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2454 struct btrfs_root *root,
2455 struct btrfs_root *log,
2456 struct btrfs_path *path,
2457 u64 dirid, int del_all)
2458 {
2459 u64 range_start;
2460 u64 range_end;
2461 int ret = 0;
2462 struct btrfs_key dir_key;
2463 struct btrfs_key found_key;
2464 struct btrfs_path *log_path;
2465 struct inode *dir;
2466
2467 dir_key.objectid = dirid;
2468 dir_key.type = BTRFS_DIR_INDEX_KEY;
2469 log_path = btrfs_alloc_path();
2470 if (!log_path)
2471 return -ENOMEM;
2472
2473 dir = read_one_inode(root, dirid);
2474 /* it isn't an error if the inode isn't there, that can happen
2475 * because we replay the deletes before we copy in the inode item
2476 * from the log
2477 */
2478 if (!dir) {
2479 btrfs_free_path(log_path);
2480 return 0;
2481 }
2482
2483 range_start = 0;
2484 range_end = 0;
2485 while (1) {
2486 if (del_all)
2487 range_end = (u64)-1;
2488 else {
2489 ret = find_dir_range(log, path, dirid,
2490 &range_start, &range_end);
2491 if (ret < 0)
2492 goto out;
2493 else if (ret > 0)
2494 break;
2495 }
2496
2497 dir_key.offset = range_start;
2498 while (1) {
2499 int nritems;
2500 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2501 0, 0);
2502 if (ret < 0)
2503 goto out;
2504
2505 nritems = btrfs_header_nritems(path->nodes[0]);
2506 if (path->slots[0] >= nritems) {
2507 ret = btrfs_next_leaf(root, path);
2508 if (ret == 1)
2509 break;
2510 else if (ret < 0)
2511 goto out;
2512 }
2513 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2514 path->slots[0]);
2515 if (found_key.objectid != dirid ||
2516 found_key.type != dir_key.type) {
2517 ret = 0;
2518 goto out;
2519 }
2520
2521 if (found_key.offset > range_end)
2522 break;
2523
2524 ret = check_item_in_log(trans, root, log, path,
2525 log_path, dir,
2526 &found_key);
2527 if (ret)
2528 goto out;
2529 if (found_key.offset == (u64)-1)
2530 break;
2531 dir_key.offset = found_key.offset + 1;
2532 }
2533 btrfs_release_path(path);
2534 if (range_end == (u64)-1)
2535 break;
2536 range_start = range_end + 1;
2537 }
2538 ret = 0;
2539 out:
2540 btrfs_release_path(path);
2541 btrfs_free_path(log_path);
2542 iput(dir);
2543 return ret;
2544 }
2545
2546 /*
2547 * the process_func used to replay items from the log tree. This
2548 * gets called in two different stages. The first stage just looks
2549 * for inodes and makes sure they are all copied into the subvolume.
2550 *
2551 * The second stage copies all the other item types from the log into
2552 * the subvolume. The two stage approach is slower, but gets rid of
2553 * lots of complexity around inodes referencing other inodes that exist
2554 * only in the log (references come from either directory items or inode
2555 * back refs).
2556 */
2557 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2558 struct walk_control *wc, u64 gen, int level)
2559 {
2560 int nritems;
2561 struct btrfs_path *path;
2562 struct btrfs_root *root = wc->replay_dest;
2563 struct btrfs_key key;
2564 int i;
2565 int ret;
2566
2567 ret = btrfs_read_buffer(eb, gen, level, NULL);
2568 if (ret)
2569 return ret;
2570
2571 level = btrfs_header_level(eb);
2572
2573 if (level != 0)
2574 return 0;
2575
2576 path = btrfs_alloc_path();
2577 if (!path)
2578 return -ENOMEM;
2579
2580 nritems = btrfs_header_nritems(eb);
2581 for (i = 0; i < nritems; i++) {
2582 btrfs_item_key_to_cpu(eb, &key, i);
2583
2584 /* inode keys are done during the first stage */
2585 if (key.type == BTRFS_INODE_ITEM_KEY &&
2586 wc->stage == LOG_WALK_REPLAY_INODES) {
2587 struct btrfs_inode_item *inode_item;
2588 u32 mode;
2589
2590 inode_item = btrfs_item_ptr(eb, i,
2591 struct btrfs_inode_item);
2592 /*
2593 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2594 * and never got linked before the fsync, skip it, as
2595 * replaying it is pointless since it would be deleted
2596 * later. We skip logging tmpfiles, but it's always
2597 * possible we are replaying a log created with a kernel
2598 * that used to log tmpfiles.
2599 */
2600 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2601 wc->ignore_cur_inode = true;
2602 continue;
2603 } else {
2604 wc->ignore_cur_inode = false;
2605 }
2606 ret = replay_xattr_deletes(wc->trans, root, log,
2607 path, key.objectid);
2608 if (ret)
2609 break;
2610 mode = btrfs_inode_mode(eb, inode_item);
2611 if (S_ISDIR(mode)) {
2612 ret = replay_dir_deletes(wc->trans,
2613 root, log, path, key.objectid, 0);
2614 if (ret)
2615 break;
2616 }
2617 ret = overwrite_item(wc->trans, root, path,
2618 eb, i, &key);
2619 if (ret)
2620 break;
2621
2622 /*
2623 * Before replaying extents, truncate the inode to its
2624 * size. We need to do it now and not after log replay
2625 * because before an fsync we can have prealloc extents
2626 * added beyond the inode's i_size. If we did it after,
2627 * through orphan cleanup for example, we would drop
2628 * those prealloc extents just after replaying them.
2629 */
2630 if (S_ISREG(mode)) {
2631 struct btrfs_drop_extents_args drop_args = { 0 };
2632 struct inode *inode;
2633 u64 from;
2634
2635 inode = read_one_inode(root, key.objectid);
2636 if (!inode) {
2637 ret = -EIO;
2638 break;
2639 }
2640 from = ALIGN(i_size_read(inode),
2641 root->fs_info->sectorsize);
2642 drop_args.start = from;
2643 drop_args.end = (u64)-1;
2644 drop_args.drop_cache = true;
2645 ret = btrfs_drop_extents(wc->trans, root,
2646 BTRFS_I(inode),
2647 &drop_args);
2648 if (!ret) {
2649 inode_sub_bytes(inode,
2650 drop_args.bytes_found);
2651 /* Update the inode's nbytes. */
2652 ret = btrfs_update_inode(wc->trans,
2653 root, BTRFS_I(inode));
2654 }
2655 iput(inode);
2656 if (ret)
2657 break;
2658 }
2659
2660 ret = link_to_fixup_dir(wc->trans, root,
2661 path, key.objectid);
2662 if (ret)
2663 break;
2664 }
2665
2666 if (wc->ignore_cur_inode)
2667 continue;
2668
2669 if (key.type == BTRFS_DIR_INDEX_KEY &&
2670 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2671 ret = replay_one_dir_item(wc->trans, root, path,
2672 eb, i, &key);
2673 if (ret)
2674 break;
2675 }
2676
2677 if (wc->stage < LOG_WALK_REPLAY_ALL)
2678 continue;
2679
2680 /* these keys are simply copied */
2681 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2682 ret = overwrite_item(wc->trans, root, path,
2683 eb, i, &key);
2684 if (ret)
2685 break;
2686 } else if (key.type == BTRFS_INODE_REF_KEY ||
2687 key.type == BTRFS_INODE_EXTREF_KEY) {
2688 ret = add_inode_ref(wc->trans, root, log, path,
2689 eb, i, &key);
2690 if (ret && ret != -ENOENT)
2691 break;
2692 ret = 0;
2693 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2694 ret = replay_one_extent(wc->trans, root, path,
2695 eb, i, &key);
2696 if (ret)
2697 break;
2698 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2699 ret = replay_one_dir_item(wc->trans, root, path,
2700 eb, i, &key);
2701 if (ret)
2702 break;
2703 }
2704 }
2705 btrfs_free_path(path);
2706 return ret;
2707 }
2708
2709 /*
2710 * Correctly adjust the reserved bytes occupied by a log tree extent buffer
2711 */
2712 static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
2713 {
2714 struct btrfs_block_group *cache;
2715
2716 cache = btrfs_lookup_block_group(fs_info, start);
2717 if (!cache) {
2718 btrfs_err(fs_info, "unable to find block group for %llu", start);
2719 return;
2720 }
2721
2722 spin_lock(&cache->space_info->lock);
2723 spin_lock(&cache->lock);
2724 cache->reserved -= fs_info->nodesize;
2725 cache->space_info->bytes_reserved -= fs_info->nodesize;
2726 spin_unlock(&cache->lock);
2727 spin_unlock(&cache->space_info->lock);
2728
2729 btrfs_put_block_group(cache);
2730 }
2731
2732 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2733 struct btrfs_root *root,
2734 struct btrfs_path *path, int *level,
2735 struct walk_control *wc)
2736 {
2737 struct btrfs_fs_info *fs_info = root->fs_info;
2738 u64 bytenr;
2739 u64 ptr_gen;
2740 struct extent_buffer *next;
2741 struct extent_buffer *cur;
2742 u32 blocksize;
2743 int ret = 0;
2744
2745 while (*level > 0) {
2746 struct btrfs_key first_key;
2747
2748 cur = path->nodes[*level];
2749
2750 WARN_ON(btrfs_header_level(cur) != *level);
2751
2752 if (path->slots[*level] >=
2753 btrfs_header_nritems(cur))
2754 break;
2755
2756 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2757 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2758 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2759 blocksize = fs_info->nodesize;
2760
2761 next = btrfs_find_create_tree_block(fs_info, bytenr,
2762 btrfs_header_owner(cur),
2763 *level - 1);
2764 if (IS_ERR(next))
2765 return PTR_ERR(next);
2766
2767 if (*level == 1) {
2768 ret = wc->process_func(root, next, wc, ptr_gen,
2769 *level - 1);
2770 if (ret) {
2771 free_extent_buffer(next);
2772 return ret;
2773 }
2774
2775 path->slots[*level]++;
2776 if (wc->free) {
2777 ret = btrfs_read_buffer(next, ptr_gen,
2778 *level - 1, &first_key);
2779 if (ret) {
2780 free_extent_buffer(next);
2781 return ret;
2782 }
2783
2784 if (trans) {
2785 btrfs_tree_lock(next);
2786 btrfs_clean_tree_block(next);
2787 btrfs_wait_tree_block_writeback(next);
2788 btrfs_tree_unlock(next);
2789 ret = btrfs_pin_reserved_extent(trans,
2790 bytenr, blocksize);
2791 if (ret) {
2792 free_extent_buffer(next);
2793 return ret;
2794 }
2795 btrfs_redirty_list_add(
2796 trans->transaction, next);
2797 } else {
2798 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2799 clear_extent_buffer_dirty(next);
2800 unaccount_log_buffer(fs_info, bytenr);
2801 }
2802 }
2803 free_extent_buffer(next);
2804 continue;
2805 }
2806 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2807 if (ret) {
2808 free_extent_buffer(next);
2809 return ret;
2810 }
2811
2812 if (path->nodes[*level-1])
2813 free_extent_buffer(path->nodes[*level-1]);
2814 path->nodes[*level-1] = next;
2815 *level = btrfs_header_level(next);
2816 path->slots[*level] = 0;
2817 cond_resched();
2818 }
2819 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2820
2821 cond_resched();
2822 return 0;
2823 }
2824
2825 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2826 struct btrfs_root *root,
2827 struct btrfs_path *path, int *level,
2828 struct walk_control *wc)
2829 {
2830 struct btrfs_fs_info *fs_info = root->fs_info;
2831 int i;
2832 int slot;
2833 int ret;
2834
2835 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2836 slot = path->slots[i];
2837 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2838 path->slots[i]++;
2839 *level = i;
2840 WARN_ON(*level == 0);
2841 return 0;
2842 } else {
2843 ret = wc->process_func(root, path->nodes[*level], wc,
2844 btrfs_header_generation(path->nodes[*level]),
2845 *level);
2846 if (ret)
2847 return ret;
2848
2849 if (wc->free) {
2850 struct extent_buffer *next;
2851
2852 next = path->nodes[*level];
2853
2854 if (trans) {
2855 btrfs_tree_lock(next);
2856 btrfs_clean_tree_block(next);
2857 btrfs_wait_tree_block_writeback(next);
2858 btrfs_tree_unlock(next);
2859 ret = btrfs_pin_reserved_extent(trans,
2860 path->nodes[*level]->start,
2861 path->nodes[*level]->len);
2862 if (ret)
2863 return ret;
2864 btrfs_redirty_list_add(trans->transaction,
2865 next);
2866 } else {
2867 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2868 clear_extent_buffer_dirty(next);
2869
2870 unaccount_log_buffer(fs_info,
2871 path->nodes[*level]->start);
2872 }
2873 }
2874 free_extent_buffer(path->nodes[*level]);
2875 path->nodes[*level] = NULL;
2876 *level = i + 1;
2877 }
2878 }
2879 return 1;
2880 }
2881
2882 /*
2883 * drop the reference count on the tree rooted at 'log'. This traverses
2884 * the tree freeing any blocks that have a ref count of zero after being
2885 * decremented.
2886 */
2887 static int walk_log_tree(struct btrfs_trans_handle *trans,
2888 struct btrfs_root *log, struct walk_control *wc)
2889 {
2890 struct btrfs_fs_info *fs_info = log->fs_info;
2891 int ret = 0;
2892 int wret;
2893 int level;
2894 struct btrfs_path *path;
2895 int orig_level;
2896
2897 path = btrfs_alloc_path();
2898 if (!path)
2899 return -ENOMEM;
2900
2901 level = btrfs_header_level(log->node);
2902 orig_level = level;
2903 path->nodes[level] = log->node;
2904 atomic_inc(&log->node->refs);
2905 path->slots[level] = 0;
2906
2907 while (1) {
2908 wret = walk_down_log_tree(trans, log, path, &level, wc);
2909 if (wret > 0)
2910 break;
2911 if (wret < 0) {
2912 ret = wret;
2913 goto out;
2914 }
2915
2916 wret = walk_up_log_tree(trans, log, path, &level, wc);
2917 if (wret > 0)
2918 break;
2919 if (wret < 0) {
2920 ret = wret;
2921 goto out;
2922 }
2923 }
2924
2925 /* was the root node processed? if not, catch it here */
2926 if (path->nodes[orig_level]) {
2927 ret = wc->process_func(log, path->nodes[orig_level], wc,
2928 btrfs_header_generation(path->nodes[orig_level]),
2929 orig_level);
2930 if (ret)
2931 goto out;
2932 if (wc->free) {
2933 struct extent_buffer *next;
2934
2935 next = path->nodes[orig_level];
2936
2937 if (trans) {
2938 btrfs_tree_lock(next);
2939 btrfs_clean_tree_block(next);
2940 btrfs_wait_tree_block_writeback(next);
2941 btrfs_tree_unlock(next);
2942 ret = btrfs_pin_reserved_extent(trans,
2943 next->start, next->len);
2944 if (ret)
2945 goto out;
2946 btrfs_redirty_list_add(trans->transaction, next);
2947 } else {
2948 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2949 clear_extent_buffer_dirty(next);
2950 unaccount_log_buffer(fs_info, next->start);
2951 }
2952 }
2953 }
2954
2955 out:
2956 btrfs_free_path(path);
2957 return ret;
2958 }
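
/*
 * Rough shape of the walk above (descriptive note, not from the source):
 * walk_down_log_tree() hands every child of a level 1 node to
 * wc->process_func, walk_up_log_tree() then processes the interior
 * nodes on the way back up, and the root node is handled explicitly at
 * the end, so each buffer of the log tree is visited exactly once.
 */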
2959
2960 /*
2961 * helper function to update the item for a given subvolume's log root
2962 * in the tree of log roots
2963 */
2964 static int update_log_root(struct btrfs_trans_handle *trans,
2965 struct btrfs_root *log,
2966 struct btrfs_root_item *root_item)
2967 {
2968 struct btrfs_fs_info *fs_info = log->fs_info;
2969 int ret;
2970
2971 if (log->log_transid == 1) {
2972 /* insert root item on the first sync */
2973 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2974 &log->root_key, root_item);
2975 } else {
2976 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2977 &log->root_key, root_item);
2978 }
2979 return ret;
2980 }
2981
2982 static void wait_log_commit(struct btrfs_root *root, int transid)
2983 {
2984 DEFINE_WAIT(wait);
2985 int index = transid % 2;
2986
2987 /*
2988 * we only allow two pending log transactions at a time,
2989 * so we know that if ours is more than 2 older than the
2990 * current transaction, we're done
2991 */
2992 for (;;) {
2993 prepare_to_wait(&root->log_commit_wait[index],
2994 &wait, TASK_UNINTERRUPTIBLE);
2995
2996 if (!(root->log_transid_committed < transid &&
2997 atomic_read(&root->log_commit[index])))
2998 break;
2999
3000 mutex_unlock(&root->log_mutex);
3001 schedule();
3002 mutex_lock(&root->log_mutex);
3003 }
3004 finish_wait(&root->log_commit_wait[index], &wait);
3005 }
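
/*
 * Note (descriptive): log transids alternate between the two commit
 * slots (transid % 2), so at most two log transactions are pending at
 * any time and a waiter only sleeps on the wait queue of its own
 * parity slot.
 */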
3006
3007 static void wait_for_writer(struct btrfs_root *root)
3008 {
3009 DEFINE_WAIT(wait);
3010
3011 for (;;) {
3012 prepare_to_wait(&root->log_writer_wait, &wait,
3013 TASK_UNINTERRUPTIBLE);
3014 if (!atomic_read(&root->log_writers))
3015 break;
3016
3017 mutex_unlock(&root->log_mutex);
3018 schedule();
3019 mutex_lock(&root->log_mutex);
3020 }
3021 finish_wait(&root->log_writer_wait, &wait);
3022 }
3023
3024 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
3025 struct btrfs_log_ctx *ctx)
3026 {
3027 if (!ctx)
3028 return;
3029
3030 mutex_lock(&root->log_mutex);
3031 list_del_init(&ctx->list);
3032 mutex_unlock(&root->log_mutex);
3033 }
3034
3035 /*
3036 * Invoked with the log mutex held, or when the caller is sure that no
3037 * other task can access the list.
3038 */
3039 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3040 int index, int error)
3041 {
3042 struct btrfs_log_ctx *ctx;
3043 struct btrfs_log_ctx *safe;
3044
3045 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3046 list_del_init(&ctx->list);
3047 ctx->log_ret = error;
3048 }
3049 }
3050
3051 /*
3052 * btrfs_sync_log sends a given tree log down to the disk and
3053 * updates the super blocks to record it. When this call is done,
3054 * you know that any inodes previously logged are safely on disk only
3055 * if it returns 0.
3056 *
3057 * Any other return value means you need to call btrfs_commit_transaction.
3058 * Some of the edge cases for fsyncing directories that have had unlinks
3059 * or renames done in the past mean that sometimes the only safe
3060 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3061 * that has happened.
3062 */
3063 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3064 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3065 {
3066 int index1;
3067 int index2;
3068 int mark;
3069 int ret;
3070 struct btrfs_fs_info *fs_info = root->fs_info;
3071 struct btrfs_root *log = root->log_root;
3072 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3073 struct btrfs_root_item new_root_item;
3074 int log_transid = 0;
3075 struct btrfs_log_ctx root_log_ctx;
3076 struct blk_plug plug;
3077 u64 log_root_start;
3078 u64 log_root_level;
3079
3080 mutex_lock(&root->log_mutex);
3081 log_transid = ctx->log_transid;
3082 if (root->log_transid_committed >= log_transid) {
3083 mutex_unlock(&root->log_mutex);
3084 return ctx->log_ret;
3085 }
3086
3087 index1 = log_transid % 2;
3088 if (atomic_read(&root->log_commit[index1])) {
3089 wait_log_commit(root, log_transid);
3090 mutex_unlock(&root->log_mutex);
3091 return ctx->log_ret;
3092 }
3093 ASSERT(log_transid == root->log_transid);
3094 atomic_set(&root->log_commit[index1], 1);
3095
3096 /* wait for previous tree log sync to complete */
3097 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3098 wait_log_commit(root, log_transid - 1);
3099
3100 while (1) {
3101 int batch = atomic_read(&root->log_batch);
3102 /* when we're on an ssd, just kick the log commit out */
3103 if (!btrfs_test_opt(fs_info, SSD) &&
3104 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3105 mutex_unlock(&root->log_mutex);
3106 schedule_timeout_uninterruptible(1);
3107 mutex_lock(&root->log_mutex);
3108 }
3109 wait_for_writer(root);
3110 if (batch == atomic_read(&root->log_batch))
3111 break;
3112 }
3113
3114 /* bail out if we need to do a full commit */
3115 if (btrfs_need_log_full_commit(trans)) {
3116 ret = -EAGAIN;
3117 mutex_unlock(&root->log_mutex);
3118 goto out;
3119 }
3120
3121 if (log_transid % 2 == 0)
3122 mark = EXTENT_DIRTY;
3123 else
3124 mark = EXTENT_NEW;
3125
3126 /* we start IO on all the marked extents here, but we don't actually
3127 * wait for them until later.
3128 */
3129 blk_start_plug(&plug);
3130 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3131 /*
3132 * -EAGAIN happens when someone, e.g., a concurrent transaction
3133 * commit, writes a dirty extent in this tree-log commit. This
3134 * concurrent write will create a hole writing out the extents,
3135 * and we cannot proceed on a zoned filesystem, requiring
3136 * sequential writing. While we could bail out to a full commit
3137 * here, we continue instead, hoping that the concurrent writing
3138 * fills the hole.
3139 */
3140 if (ret == -EAGAIN && btrfs_is_zoned(fs_info))
3141 ret = 0;
3142 if (ret) {
3143 blk_finish_plug(&plug);
3144 btrfs_set_log_full_commit(trans);
3145 mutex_unlock(&root->log_mutex);
3146 goto out;
3147 }
3148
3149 /*
3150 * We _must_ update under the root->log_mutex in order to make sure we
3151 * have a consistent view of the log root we are trying to commit at
3152 * this moment.
3153 *
3154 * We _must_ copy this into a local copy, because we are not holding the
3155 * log_root_tree->log_mutex yet. This is important because when we
3156 * commit the log_root_tree we must have a consistent view of the
3157 * log_root_tree when we update the super block to point at the
3158 * log_root_tree bytenr. If we update the log_root_tree here we'll race
3159 * with the commit and possibly point at the new block which we may not
3160 * have written out.
3161 */
3162 btrfs_set_root_node(&log->root_item, log->node);
3163 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3164
3165 root->log_transid++;
3166 log->log_transid = root->log_transid;
3167 root->log_start_pid = 0;
3168 /*
3169 * IO has been started, blocks of the log tree have WRITTEN flag set
3170 * in their headers. new modifications of the log will be written to
3171 * new positions. so it's safe to allow log writers to go in.
3172 */
3173 mutex_unlock(&root->log_mutex);
3174
3175 if (btrfs_is_zoned(fs_info)) {
3176 mutex_lock(&fs_info->tree_root->log_mutex);
3177 if (!log_root_tree->node) {
3178 ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
3179 if (ret) {
3180 mutex_unlock(&fs_info->tree_root->log_mutex);
3181 blk_finish_plug(&plug);
3182 goto out;
3183 }
3184 }
3185 mutex_unlock(&fs_info->tree_root->log_mutex);
3186 }
3187
3188 btrfs_init_log_ctx(&root_log_ctx, NULL);
3189
3190 mutex_lock(&log_root_tree->log_mutex);
3191
3192 index2 = log_root_tree->log_transid % 2;
3193 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3194 root_log_ctx.log_transid = log_root_tree->log_transid;
3195
3196 /*
3197 * Now we are safe to update the log_root_tree because we're under the
3198 * log_mutex, and we're a current writer so we're holding the commit
3199 * open until we drop the log_mutex.
3200 */
3201 ret = update_log_root(trans, log, &new_root_item);
3202 if (ret) {
3203 if (!list_empty(&root_log_ctx.list))
3204 list_del_init(&root_log_ctx.list);
3205
3206 blk_finish_plug(&plug);
3207 btrfs_set_log_full_commit(trans);
3208
3209 if (ret != -ENOSPC) {
3210 btrfs_abort_transaction(trans, ret);
3211 mutex_unlock(&log_root_tree->log_mutex);
3212 goto out;
3213 }
3214 btrfs_wait_tree_log_extents(log, mark);
3215 mutex_unlock(&log_root_tree->log_mutex);
3216 ret = -EAGAIN;
3217 goto out;
3218 }
3219
3220 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3221 blk_finish_plug(&plug);
3222 list_del_init(&root_log_ctx.list);
3223 mutex_unlock(&log_root_tree->log_mutex);
3224 ret = root_log_ctx.log_ret;
3225 goto out;
3226 }
3227
3228 index2 = root_log_ctx.log_transid % 2;
3229 if (atomic_read(&log_root_tree->log_commit[index2])) {
3230 blk_finish_plug(&plug);
3231 ret = btrfs_wait_tree_log_extents(log, mark);
3232 wait_log_commit(log_root_tree,
3233 root_log_ctx.log_transid);
3234 mutex_unlock(&log_root_tree->log_mutex);
3235 if (!ret)
3236 ret = root_log_ctx.log_ret;
3237 goto out;
3238 }
3239 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3240 atomic_set(&log_root_tree->log_commit[index2], 1);
3241
3242 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3243 wait_log_commit(log_root_tree,
3244 root_log_ctx.log_transid - 1);
3245 }
3246
3247 /*
3248 * now that we've moved on to the tree of log tree roots,
3249 * check the full commit flag again
3250 */
3251 if (btrfs_need_log_full_commit(trans)) {
3252 blk_finish_plug(&plug);
3253 btrfs_wait_tree_log_extents(log, mark);
3254 mutex_unlock(&log_root_tree->log_mutex);
3255 ret = -EAGAIN;
3256 goto out_wake_log_root;
3257 }
3258
3259 ret = btrfs_write_marked_extents(fs_info,
3260 &log_root_tree->dirty_log_pages,
3261 EXTENT_DIRTY | EXTENT_NEW);
3262 blk_finish_plug(&plug);
3263 /*
3264 * As described above, -EAGAIN indicates a hole in the extents. We
3265 * cannot wait for these write outs since waiting for them would
3266 * cause a deadlock. Bail out to a full commit instead.
3267 */
3268 if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) {
3269 btrfs_set_log_full_commit(trans);
3270 btrfs_wait_tree_log_extents(log, mark);
3271 mutex_unlock(&log_root_tree->log_mutex);
3272 goto out_wake_log_root;
3273 } else if (ret) {
3274 btrfs_set_log_full_commit(trans);
3275 mutex_unlock(&log_root_tree->log_mutex);
3276 goto out_wake_log_root;
3277 }
3278 ret = btrfs_wait_tree_log_extents(log, mark);
3279 if (!ret)
3280 ret = btrfs_wait_tree_log_extents(log_root_tree,
3281 EXTENT_NEW | EXTENT_DIRTY);
3282 if (ret) {
3283 btrfs_set_log_full_commit(trans);
3284 mutex_unlock(&log_root_tree->log_mutex);
3285 goto out_wake_log_root;
3286 }
3287
3288 log_root_start = log_root_tree->node->start;
3289 log_root_level = btrfs_header_level(log_root_tree->node);
3290 log_root_tree->log_transid++;
3291 mutex_unlock(&log_root_tree->log_mutex);
3292
3293 /*
3294 * Here we are guaranteed that nobody is going to write the superblock
3295 * for the current transaction before us and that neither we do write
3296 * our superblock before the previous transaction finishes its commit
3297 * and writes its superblock, because:
3298 *
3299 * 1) We are holding a handle on the current transaction, so nobody
3300 * can commit it until we release the handle;
3301 *
3302 * 2) Before writing our superblock we acquire the tree_log_mutex, so
3303 * if the previous transaction is still committing, and hasn't yet
3304 * written its superblock, we wait for it to do it, because a
3305 * transaction commit acquires the tree_log_mutex when the commit
3306 * begins and releases it only after writing its superblock.
3307 */
3308 mutex_lock(&fs_info->tree_log_mutex);
3309
3310 /*
3311 * The previous transaction writeout phase could have failed, and thus
3312 * marked the fs in an error state. We must not commit here, as we
3313 * could have updated our generation in the super_for_commit and
3314 * writing the super here would result in transid mismatches. If there
3315 * is an error here just bail.
3316 */
3317 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3318 ret = -EIO;
3319 btrfs_set_log_full_commit(trans);
3320 btrfs_abort_transaction(trans, ret);
3321 mutex_unlock(&fs_info->tree_log_mutex);
3322 goto out_wake_log_root;
3323 }
3324
3325 btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
3326 btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
3327 ret = write_all_supers(fs_info, 1);
3328 mutex_unlock(&fs_info->tree_log_mutex);
3329 if (ret) {
3330 btrfs_set_log_full_commit(trans);
3331 btrfs_abort_transaction(trans, ret);
3332 goto out_wake_log_root;
3333 }
3334
3335 /*
3336 * We know there can only be one task here, since we have not yet set
3337 * root->log_commit[index1] to 0 and any task attempting to sync the
3338 * log must wait for the previous log transaction to commit if it's
3339 * still in progress or wait for the current log transaction commit if
3340 * someone else already started it. We use <= and not < because the
3341 * first log transaction has an ID of 0.
3342 */
3343 ASSERT(root->last_log_commit <= log_transid);
3344 root->last_log_commit = log_transid;
3345
3346 out_wake_log_root:
3347 mutex_lock(&log_root_tree->log_mutex);
3348 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3349
3350 log_root_tree->log_transid_committed++;
3351 atomic_set(&log_root_tree->log_commit[index2], 0);
3352 mutex_unlock(&log_root_tree->log_mutex);
3353
3354 /*
3355 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3356 * all the updates above are seen by the woken threads. It might not be
3357 * necessary, but proving that seems to be hard.
3358 */
3359 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3360 out:
3361 mutex_lock(&root->log_mutex);
3362 btrfs_remove_all_log_ctxs(root, index1, ret);
3363 root->log_transid_committed++;
3364 atomic_set(&root->log_commit[index1], 0);
3365 mutex_unlock(&root->log_mutex);
3366
3367 /*
3368 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3369 * all the updates above are seen by the woken threads. It might not be
3370 * necessary, but proving that seems to be hard.
3371 */
3372 cond_wake_up(&root->log_commit_wait[index1]);
3373 return ret;
3374 }
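
/*
 * Hedged usage sketch (the caller shown is an assumption; the real fsync
 * path lives elsewhere and is more involved):
 *
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans);
 *	else
 *		ret = btrfs_commit_transaction(trans); // fall back to full commit
 */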
3375
3376 static void free_log_tree(struct btrfs_trans_handle *trans,
3377 struct btrfs_root *log)
3378 {
3379 int ret;
3380 struct walk_control wc = {
3381 .free = 1,
3382 .process_func = process_one_buffer
3383 };
3384
3385 if (log->node) {
3386 ret = walk_log_tree(trans, log, &wc);
3387 if (ret) {
3388 /*
3389 * We weren't able to traverse the entire log tree, the
3390 * typical scenario is getting an -EIO when reading an
3391 * extent buffer of the tree, due to a previous writeback
3392 * failure of it.
3393 */
3394 set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
3395 &log->fs_info->fs_state);
3396
3397 /*
3398 * Some extent buffers of the log tree may still be dirty
3399 * and not yet written back to storage, because we may
3400 * have updates to a log tree without syncing a log tree,
3401 * such as during rename and link operations. So flush
3402 * them out and wait for their writeback to complete, so
3403 * that we properly cleanup their state and pages.
3404 */
3405 btrfs_write_marked_extents(log->fs_info,
3406 &log->dirty_log_pages,
3407 EXTENT_DIRTY | EXTENT_NEW);
3408 btrfs_wait_tree_log_extents(log,
3409 EXTENT_DIRTY | EXTENT_NEW);
3410
3411 if (trans)
3412 btrfs_abort_transaction(trans, ret);
3413 else
3414 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3415 }
3416 }
3417
3418 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3419 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3420 extent_io_tree_release(&log->log_csum_range);
3421
3422 btrfs_put_root(log);
3423 }
3424
3425 /*
3426 * free all the extents used by the tree log. This should be called
3427 * at commit time of the full transaction
3428 */
3429 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3430 {
3431 if (root->log_root) {
3432 free_log_tree(trans, root->log_root);
3433 root->log_root = NULL;
3434 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3435 }
3436 return 0;
3437 }
3438
3439 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3440 struct btrfs_fs_info *fs_info)
3441 {
3442 if (fs_info->log_root_tree) {
3443 free_log_tree(trans, fs_info->log_root_tree);
3444 fs_info->log_root_tree = NULL;
3445 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state);
3446 }
3447 return 0;
3448 }
3449
3450 /*
3451 * Check if an inode was logged in the current transaction. This may often
3452 * return some false positives, because logged_trans is an in-memory only field,
3453 * not persisted anywhere. This is meant to be used in contexts where a false
3454 * positive has no functional consequences.
3455 */
3456 static bool inode_logged(struct btrfs_trans_handle *trans,
3457 struct btrfs_inode *inode)
3458 {
3459 if (inode->logged_trans == trans->transid)
3460 return true;
3461
3462 /*
3463 * The inode's logged_trans is always 0 when we load it (because it is
3464 * not persisted in the inode item or elsewhere). So if it is 0 and the
3465 * inode was last modified in the current transaction, the inode may
3466 * have been logged earlier in this transaction and then evicted and
3467 * loaded again - or it may never have been logged in this transaction
3468 * at all. Since we can not be sure, we have to assume it was,
3469 * otherwise our callers could leave an inconsistent log.
3470 */
3471 if (inode->logged_trans == 0 &&
3472 inode->last_trans == trans->transid &&
3473 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3474 return true;
3475
3476 return false;
3477 }
3478
3479 /*
3480 * If both a file and directory are logged, and unlinks or renames are
3481 * mixed in, we have a few interesting corners:
3482 *
3483 * create file X in dir Y
3484 * link file X to X.link in dir Y
3485 * fsync file X
3486 * unlink file X but leave X.link
3487 * fsync dir Y
3488 *
3489 * After a crash we would expect only X.link to exist. But file X
3490 * didn't get fsync'd again so the log has back refs for X and X.link.
3491 *
3492 * We solve this by removing directory entries and inode backrefs from the
3493 * log when a file that was logged in the current transaction is
3494 * unlinked. Any later fsync will include the updated log entries, and
3495 * we'll be able to reconstruct the proper directory items from backrefs.
3496 *
3497 * This optimization allows us to avoid relogging the entire inode
3498 * or the entire directory.
3499 */
3500 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3501 struct btrfs_root *root,
3502 const char *name, int name_len,
3503 struct btrfs_inode *dir, u64 index)
3504 {
3505 struct btrfs_root *log;
3506 struct btrfs_dir_item *di;
3507 struct btrfs_path *path;
3508 int ret;
3509 int err = 0;
3510 u64 dir_ino = btrfs_ino(dir);
3511
3512 if (!inode_logged(trans, dir))
3513 return 0;
3514
3515 ret = join_running_log_trans(root);
3516 if (ret)
3517 return 0;
3518
3519 mutex_lock(&dir->log_mutex);
3520
3521 log = root->log_root;
3522 path = btrfs_alloc_path();
3523 if (!path) {
3524 err = -ENOMEM;
3525 goto out_unlock;
3526 }
3527
3528 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3529 name, name_len, -1);
3530 if (IS_ERR(di)) {
3531 err = PTR_ERR(di);
3532 goto fail;
3533 }
3534 if (di) {
3535 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3536 if (ret) {
3537 err = ret;
3538 goto fail;
3539 }
3540 }
3541 btrfs_release_path(path);
3542 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3543 index, name, name_len, -1);
3544 if (IS_ERR(di)) {
3545 err = PTR_ERR(di);
3546 goto fail;
3547 }
3548 if (di) {
3549 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3550 if (ret) {
3551 err = ret;
3552 goto fail;
3553 }
3554 }
3555
3556 /*
3557 * We do not need to update the size field of the directory's inode item
3558 * because on log replay we update the field to reflect all existing
3559 * entries in the directory (see overwrite_item()).
3560 */
3561 fail:
3562 btrfs_free_path(path);
3563 out_unlock:
3564 mutex_unlock(&dir->log_mutex);
3565 if (err == -ENOSPC) {
3566 btrfs_set_log_full_commit(trans);
3567 err = 0;
3568 } else if (err < 0) {
3569 btrfs_abort_transaction(trans, err);
3570 }
3571
3572 btrfs_end_log_trans(root);
3573
3574 return err;
3575 }
3576
3577 /* see comments for btrfs_del_dir_entries_in_log */
3578 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3579 struct btrfs_root *root,
3580 const char *name, int name_len,
3581 struct btrfs_inode *inode, u64 dirid)
3582 {
3583 struct btrfs_root *log;
3584 u64 index;
3585 int ret;
3586
3587 if (!inode_logged(trans, inode))
3588 return 0;
3589
3590 ret = join_running_log_trans(root);
3591 if (ret)
3592 return 0;
3593 log = root->log_root;
3594 mutex_lock(&inode->log_mutex);
3595
3596 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3597 dirid, &index);
3598 mutex_unlock(&inode->log_mutex);
3599 if (ret == -ENOSPC) {
3600 btrfs_set_log_full_commit(trans);
3601 ret = 0;
3602 } else if (ret < 0 && ret != -ENOENT)
3603 btrfs_abort_transaction(trans, ret);
3604 btrfs_end_log_trans(root);
3605
3606 return ret;
3607 }
3608
3609 /*
3610 * creates a range item in the log for 'dirid'. first_offset and
3611 * last_offset tell us which parts of the key space the log should
3612 * be considered authoritative for.
3613 */
3614 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3615 struct btrfs_root *log,
3616 struct btrfs_path *path,
3617 int key_type, u64 dirid,
3618 u64 first_offset, u64 last_offset)
3619 {
3620 int ret;
3621 struct btrfs_key key;
3622 struct btrfs_dir_log_item *item;
3623
3624 key.objectid = dirid;
3625 key.offset = first_offset;
3626 if (key_type == BTRFS_DIR_ITEM_KEY)
3627 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3628 else
3629 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3630 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3631 if (ret)
3632 return ret;
3633
3634 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3635 struct btrfs_dir_log_item);
3636 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3637 btrfs_mark_buffer_dirty(path->nodes[0]);
3638 btrfs_release_path(path);
3639 return 0;
3640 }
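
/*
 * For illustration (values assumed): logging dir index offsets 3..7 for
 * directory 256 inserts the key (256, BTRFS_DIR_LOG_INDEX_KEY, 3) with
 * dir_log_end set to 7, declaring the log authoritative for that slice
 * of the index key space.
 */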
3641
3642 /*
3643 * log all the items included in the current transaction for a given
3644 * directory. This also creates the range items in the log tree required
3645 * to replay anything deleted before the fsync
3646 */
3647 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3648 struct btrfs_root *root, struct btrfs_inode *inode,
3649 struct btrfs_path *path,
3650 struct btrfs_path *dst_path, int key_type,
3651 struct btrfs_log_ctx *ctx,
3652 u64 min_offset, u64 *last_offset_ret)
3653 {
3654 struct btrfs_key min_key;
3655 struct btrfs_root *log = root->log_root;
3656 struct extent_buffer *src;
3657 int err = 0;
3658 int ret;
3659 int i;
3660 int nritems;
3661 u64 first_offset = min_offset;
3662 u64 last_offset = (u64)-1;
3663 u64 ino = btrfs_ino(inode);
3664
3666
3667 min_key.objectid = ino;
3668 min_key.type = key_type;
3669 min_key.offset = min_offset;
3670
3671 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3672
3673 /*
3674 * we didn't find anything from this transaction, see if there
3675 * is anything at all
3676 */
3677 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3678 min_key.objectid = ino;
3679 min_key.type = key_type;
3680 min_key.offset = (u64)-1;
3681 btrfs_release_path(path);
3682 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3683 if (ret < 0) {
3684 btrfs_release_path(path);
3685 return ret;
3686 }
3687 ret = btrfs_previous_item(root, path, ino, key_type);
3688
3689 /* if ret == 0 there are items for this type,
3690 * create a range to tell us the last key of this type.
3691 * otherwise, there are no items in this directory after
3692 * *min_offset, and we create a range to indicate that.
3693 */
3694 if (ret == 0) {
3695 struct btrfs_key tmp;
3696 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3697 path->slots[0]);
3698 if (key_type == tmp.type)
3699 first_offset = max(min_offset, tmp.offset) + 1;
3700 }
3701 goto done;
3702 }
3703
3704 /* go backward to find any previous key */
3705 ret = btrfs_previous_item(root, path, ino, key_type);
3706 if (ret == 0) {
3707 struct btrfs_key tmp;
3708 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3709 if (key_type == tmp.type) {
3710 first_offset = tmp.offset;
3711 ret = overwrite_item(trans, log, dst_path,
3712 path->nodes[0], path->slots[0],
3713 &tmp);
3714 if (ret) {
3715 err = ret;
3716 goto done;
3717 }
3718 }
3719 }
3720 btrfs_release_path(path);
3721
3722 /*
3723 * Find the first key from this transaction again. See the note for
3724 * log_new_dir_dentries, if we're logging a directory recursively we
3725 * won't be holding its i_mutex, which means we can modify the directory
3726 * while we're logging it. If we remove an entry between our first
3727 * search and this search we'll not find the key again and can just
3728 * bail.
3729 */
3730 search:
3731 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3732 if (ret != 0)
3733 goto done;
3734
3735 /*
3736 * we have a block from this transaction, log every item in it
3737 * from our directory
3738 */
3739 while (1) {
3740 struct btrfs_key tmp;
3741 src = path->nodes[0];
3742 nritems = btrfs_header_nritems(src);
3743 for (i = path->slots[0]; i < nritems; i++) {
3744 struct btrfs_dir_item *di;
3745
3746 btrfs_item_key_to_cpu(src, &min_key, i);
3747
3748 if (min_key.objectid != ino || min_key.type != key_type)
3749 goto done;
3750
3751 if (need_resched()) {
3752 btrfs_release_path(path);
3753 cond_resched();
3754 goto search;
3755 }
3756
3757 ret = overwrite_item(trans, log, dst_path, src, i,
3758 &min_key);
3759 if (ret) {
3760 err = ret;
3761 goto done;
3762 }
3763
3764 /*
3765 * We must make sure that when we log a directory entry,
3766 * the corresponding inode, after log replay, has a
3767 * matching link count. For example:
3768 *
3769 * touch foo
3770 * mkdir mydir
3771 * sync
3772 * ln foo mydir/bar
3773 * xfs_io -c "fsync" mydir
3774 * <crash>
3775 * <mount fs and log replay>
3776 *
3777 * This would result in an fsync log that, when replayed,
3778 * leaves our file inode with a link count of 1 while two
3779 * directory entries point to the same inode. After
3780 * removing one of the names, it would not be possible to
3781 * remove the other name, which always resulted in stale
3782 * file handle errors, and it would not be possible to
3783 * rmdir the parent directory, since its i_size could
3784 * never decrement to the value BTRFS_EMPTY_DIR_SIZE,
3785 * resulting in -ENOTEMPTY errors.
3786 */
3787 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3788 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3789 if (ctx &&
3790 (btrfs_dir_transid(src, di) == trans->transid ||
3791 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3792 tmp.type != BTRFS_ROOT_ITEM_KEY)
3793 ctx->log_new_dentries = true;
3794 }
3795 path->slots[0] = nritems;
3796
3797 /*
3798 * look ahead to the next item and see if it is also
3799 * from this directory and from this transaction
3800 */
3801 ret = btrfs_next_leaf(root, path);
3802 if (ret) {
3803 if (ret == 1)
3804 last_offset = (u64)-1;
3805 else
3806 err = ret;
3807 goto done;
3808 }
3809 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3810 if (tmp.objectid != ino || tmp.type != key_type) {
3811 last_offset = (u64)-1;
3812 goto done;
3813 }
3814 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3815 ret = overwrite_item(trans, log, dst_path,
3816 path->nodes[0], path->slots[0],
3817 &tmp);
3818 if (ret)
3819 err = ret;
3820 else
3821 last_offset = tmp.offset;
3822 goto done;
3823 }
3824 }
3825 done:
3826 btrfs_release_path(path);
3827 btrfs_release_path(dst_path);
3828
3829 if (err == 0) {
3830 *last_offset_ret = last_offset;
3831 /*
3832 * insert the log range keys to indicate where the log
3833 * is valid
3834 */
3835 ret = insert_dir_log_key(trans, log, path, key_type,
3836 ino, first_offset, last_offset);
3837 if (ret)
3838 err = ret;
3839 }
3840 return err;
3841 }
3842
3843 /*
3844 * Logging directories is very similar to logging inodes. We find all the items
3845 * from the current transaction and write them to the log.
3846 *
3847 * The recovery code scans the directory in the subvolume, and if it finds a
3848 * key in the range logged that is not present in the log tree, then it means
3849 * that dir entry was unlinked during the transaction.
3850 *
3851 * In order for that scan to work, we must include one key smaller than
3852 * the smallest logged by this transaction and one key larger than the largest
3853 * key logged by this transaction.
3854 */
3855 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3856 struct btrfs_root *root, struct btrfs_inode *inode,
3857 struct btrfs_path *path,
3858 struct btrfs_path *dst_path,
3859 struct btrfs_log_ctx *ctx)
3860 {
3861 u64 min_key;
3862 u64 max_key;
3863 int ret;
3864 int key_type = BTRFS_DIR_ITEM_KEY;
3865
3866 again:
3867 min_key = 0;
3868 max_key = 0;
3869 while (1) {
3870 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3871 ctx, min_key, &max_key);
3872 if (ret)
3873 return ret;
3874 if (max_key == (u64)-1)
3875 break;
3876 min_key = max_key + 1;
3877 }
3878
3879 if (key_type == BTRFS_DIR_ITEM_KEY) {
3880 key_type = BTRFS_DIR_INDEX_KEY;
3881 goto again;
3882 }
3883 return 0;
3884 }
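/*
 * Note on the loop above: each log_dir_items() call logs the key range
 * starting at min_key and reports, via max_key, the end of the range the
 * log is now authoritative for. The next iteration resumes at max_key + 1,
 * so consecutive range items chain together until one reaches (u64)-1 and
 * covers the rest of the key space. The walk is then repeated once more
 * for the BTRFS_DIR_INDEX_KEY items.
 */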
3885
3886 /*
3887 * a helper function to drop items from the log before we relog an
3888 * inode. max_key_type indicates the highest item type to remove.
3889 * This cannot be run for file data extents because it does not
3890 * free the extents they point to.
3891 */
3892 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3893 struct btrfs_root *log,
3894 struct btrfs_path *path,
3895 u64 objectid, int max_key_type)
3896 {
3897 int ret;
3898 struct btrfs_key key;
3899 struct btrfs_key found_key;
3900 int start_slot;
3901
3902 key.objectid = objectid;
3903 key.type = max_key_type;
3904 key.offset = (u64)-1;
3905
3906 while (1) {
3907 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3908 BUG_ON(ret == 0); /* Logic error */
3909 if (ret < 0)
3910 break;
3911
3912 if (path->slots[0] == 0)
3913 break;
3914
3915 path->slots[0]--;
3916 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3917 path->slots[0]);
3918
3919 if (found_key.objectid != objectid)
3920 break;
3921
3922 found_key.offset = 0;
3923 found_key.type = 0;
3924 ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
3925 if (ret < 0)
3926 break;
3927
3928 ret = btrfs_del_items(trans, log, path, start_slot,
3929 path->slots[0] - start_slot + 1);
3930 /*
3931 * If start slot isn't 0 then we don't need to re-search, we've
3932 * found the last guy with the objectid in this tree.
3933 */
3934 if (ret || start_slot != 0)
3935 break;
3936 btrfs_release_path(path);
3937 }
3938 btrfs_release_path(path);
3939 if (ret > 0)
3940 ret = 0;
3941 return ret;
3942 }
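/*
 * Note on the deletion loop above: each search lands just past the last key
 * with our objectid (the BUG_ON asserts an exact match is impossible, since
 * key.offset is (u64)-1), so stepping one slot back gives the highest
 * remaining item. The bin search for (objectid, 0, 0) then finds the first
 * slot with our objectid in that leaf, letting us delete a whole leaf's
 * worth of items per iteration, from the end of the range backwards.
 */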
3943
3944 static void fill_inode_item(struct btrfs_trans_handle *trans,
3945 struct extent_buffer *leaf,
3946 struct btrfs_inode_item *item,
3947 struct inode *inode, int log_inode_only,
3948 u64 logged_isize)
3949 {
3950 struct btrfs_map_token token;
3951 u64 flags;
3952
3953 btrfs_init_map_token(&token, leaf);
3954
3955 if (log_inode_only) {
3956 /* set the generation to zero so the recovery code
3957 * can tell the difference between logging
3958 * just to say 'this inode exists' and logging
3959 * to say 'update this inode with these values'
3960 */
3961 btrfs_set_token_inode_generation(&token, item, 0);
3962 btrfs_set_token_inode_size(&token, item, logged_isize);
3963 } else {
3964 btrfs_set_token_inode_generation(&token, item,
3965 BTRFS_I(inode)->generation);
3966 btrfs_set_token_inode_size(&token, item, inode->i_size);
3967 }
3968
3969 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
3970 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
3971 btrfs_set_token_inode_mode(&token, item, inode->i_mode);
3972 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
3973
3974 btrfs_set_token_timespec_sec(&token, &item->atime,
3975 inode->i_atime.tv_sec);
3976 btrfs_set_token_timespec_nsec(&token, &item->atime,
3977 inode->i_atime.tv_nsec);
3978
3979 btrfs_set_token_timespec_sec(&token, &item->mtime,
3980 inode->i_mtime.tv_sec);
3981 btrfs_set_token_timespec_nsec(&token, &item->mtime,
3982 inode->i_mtime.tv_nsec);
3983
3984 btrfs_set_token_timespec_sec(&token, &item->ctime,
3985 inode->i_ctime.tv_sec);
3986 btrfs_set_token_timespec_nsec(&token, &item->ctime,
3987 inode->i_ctime.tv_nsec);
3988
3989 /*
3990 * We do not need to set the nbytes field, in fact during a fast fsync
3991 * its value may not even be correct, since a fast fsync does not wait
3992 * for ordered extent completion, which is where we update nbytes, it
3993 * only waits for writeback to complete. During log replay as we find
3994 * file extent items and replay them, we adjust the nbytes field of the
3995 * inode item in subvolume tree as needed (see overwrite_item()).
3996 */
3997
3998 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
3999 btrfs_set_token_inode_transid(&token, item, trans->transid);
4000 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4001 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4002 BTRFS_I(inode)->ro_flags);
4003 btrfs_set_token_inode_flags(&token, item, flags);
4004 btrfs_set_token_inode_block_group(&token, item, 0);
4005 }
4006
4007 static int log_inode_item(struct btrfs_trans_handle *trans,
4008 struct btrfs_root *log, struct btrfs_path *path,
4009 struct btrfs_inode *inode, bool inode_item_dropped)
4010 {
4011 struct btrfs_inode_item *inode_item;
4012 int ret;
4013
4014 /*
4015 * If we are doing a fast fsync and the inode was logged before in the
4016 * current transaction, then we know the inode was previously logged and
4017 * it exists in the log tree. For performance reasons, in this case use
4018 * btrfs_search_slot() directly with ins_len set to 0 so that we never
4019 * attempt a write lock on the leaf's parent, which adds unnecessary lock
4020 * contention in case there are concurrent fsyncs for other inodes of the
4021 * same subvolume. Using btrfs_insert_empty_item() when the inode item
4022 * already exists can also result in unnecessarily splitting a leaf.
4023 */
4024 if (!inode_item_dropped && inode->logged_trans == trans->transid) {
4025 ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
4026 ASSERT(ret <= 0);
4027 if (ret > 0)
4028 ret = -ENOENT;
4029 } else {
4030 /*
4031 * This means it is the first fsync in the current transaction,
4032 * so the inode item is not in the log and we need to insert it.
4033 * We can never get -EEXIST because we are only called for a fast
4034 * fsync and in case an inode eviction happens after the inode was
4035 * logged before in the current transaction, when we load again
4036 * the inode, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime
4037 * flags and set ->logged_trans to 0.
4038 */
4039 ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
4040 sizeof(*inode_item));
4041 ASSERT(ret != -EEXIST);
4042 }
4043 if (ret)
4044 return ret;
4045 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4046 struct btrfs_inode_item);
4047 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
4048 0, 0);
4049 btrfs_release_path(path);
4050 return 0;
4051 }
4052
4053 static int log_csums(struct btrfs_trans_handle *trans,
4054 struct btrfs_inode *inode,
4055 struct btrfs_root *log_root,
4056 struct btrfs_ordered_sum *sums)
4057 {
4058 const u64 lock_end = sums->bytenr + sums->len - 1;
4059 struct extent_state *cached_state = NULL;
4060 int ret;
4061
4062 /*
4063 * If this inode was not used for reflink operations in the current
4064 * transaction with new extents, then do the fast path, no need to
4065 * worry about logging checksum items with overlapping ranges.
4066 */
4067 if (inode->last_reflink_trans < trans->transid)
4068 return btrfs_csum_file_blocks(trans, log_root, sums);
4069
4070 /*
4071 * Serialize logging for checksums. This is to avoid racing with the
4072 * same checksum being logged by another task that is logging another
4073 * file which happens to refer to the same extent as well. Such races
4074 * can leave checksum items in the log with overlapping ranges.
4075 */
4076 ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
4077 lock_end, &cached_state);
4078 if (ret)
4079 return ret;
4080 /*
4081 * Due to extent cloning, we might have logged a csum item that covers a
4082 * subrange of a cloned extent, and later we can end up logging a csum
4083 * item for a larger subrange of the same extent or the entire range.
4084 * This would leave csum items in the log tree that cover the same range
4085 * and break the searches for checksums in the log tree, resulting in
4086 * some checksums missing in the fs/subvolume tree. So just delete (or
4087 * trim and adjust) any existing csum items in the log for this range.
4088 */
4089 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
4090 if (!ret)
4091 ret = btrfs_csum_file_blocks(trans, log_root, sums);
4092
4093 unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
4094 &cached_state);
4095
4096 return ret;
4097 }
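/*
 * Concrete example of the overlap problem handled above (hypothetical
 * numbers): say extent E covers disk bytes [X, X + 64K). A clone first makes
 * us log a csum item for the subrange [X, X + 16K), and a later fsync in the
 * same transaction logs csums for all of [X, X + 64K). Without the
 * btrfs_del_csums() call, both items would coexist in the log with
 * overlapping ranges, breaking csum lookups during replay.
 */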
4098
4099 static noinline int copy_items(struct btrfs_trans_handle *trans,
4100 struct btrfs_inode *inode,
4101 struct btrfs_path *dst_path,
4102 struct btrfs_path *src_path,
4103 int start_slot, int nr, int inode_only,
4104 u64 logged_isize)
4105 {
4106 struct btrfs_fs_info *fs_info = trans->fs_info;
4107 unsigned long src_offset;
4108 unsigned long dst_offset;
4109 struct btrfs_root *log = inode->root->log_root;
4110 struct btrfs_file_extent_item *extent;
4111 struct btrfs_inode_item *inode_item;
4112 struct extent_buffer *src = src_path->nodes[0];
4113 int ret;
4114 struct btrfs_key *ins_keys;
4115 u32 *ins_sizes;
4116 char *ins_data;
4117 int i;
4118 struct list_head ordered_sums;
4119 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
4120
4121 INIT_LIST_HEAD(&ordered_sums);
4122
4123 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
4124 nr * sizeof(u32), GFP_NOFS);
4125 if (!ins_data)
4126 return -ENOMEM;
4127
4128 ins_sizes = (u32 *)ins_data;
4129 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
4130
4131 for (i = 0; i < nr; i++) {
4132 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
4133 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
4134 }
4135 ret = btrfs_insert_empty_items(trans, log, dst_path,
4136 ins_keys, ins_sizes, nr);
4137 if (ret) {
4138 kfree(ins_data);
4139 return ret;
4140 }
4141
4142 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
4143 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
4144 dst_path->slots[0]);
4145
4146 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
4147
4148 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
4149 inode_item = btrfs_item_ptr(dst_path->nodes[0],
4150 dst_path->slots[0],
4151 struct btrfs_inode_item);
4152 fill_inode_item(trans, dst_path->nodes[0], inode_item,
4153 &inode->vfs_inode,
4154 inode_only == LOG_INODE_EXISTS,
4155 logged_isize);
4156 } else {
4157 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4158 src_offset, ins_sizes[i]);
4159 }
4160
4161 /* take a reference on file data extents so that truncates
4162 * or deletes of this inode don't have to relog the inode
4163 * again
4164 */
4165 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4166 !skip_csum) {
4167 int found_type;
4168 extent = btrfs_item_ptr(src, start_slot + i,
4169 struct btrfs_file_extent_item);
4170
4171 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4172 continue;
4173
4174 found_type = btrfs_file_extent_type(src, extent);
4175 if (found_type == BTRFS_FILE_EXTENT_REG) {
4176 u64 ds, dl, cs, cl;
4177 ds = btrfs_file_extent_disk_bytenr(src,
4178 extent);
4179 /* ds == 0 is a hole */
4180 if (ds == 0)
4181 continue;
4182
4183 dl = btrfs_file_extent_disk_num_bytes(src,
4184 extent);
4185 cs = btrfs_file_extent_offset(src, extent);
4186 cl = btrfs_file_extent_num_bytes(src,
4187 extent);
4188 if (btrfs_file_extent_compression(src,
4189 extent)) {
4190 cs = 0;
4191 cl = dl;
4192 }
4193
4194 ret = btrfs_lookup_csums_range(
4195 fs_info->csum_root,
4196 ds + cs, ds + cs + cl - 1,
4197 &ordered_sums, 0);
4198 if (ret)
4199 break;
4200 }
4201 }
4202 }
4203
4204 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4205 btrfs_release_path(dst_path);
4206 kfree(ins_data);
4207
4208 /*
4209 * we have to do this after the loop above to avoid changing the
4210 * log tree while trying to change the log tree.
4211 */
4212 while (!list_empty(&ordered_sums)) {
4213 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4214 struct btrfs_ordered_sum,
4215 list);
4216 if (!ret)
4217 ret = log_csums(trans, inode, log, sums);
4218 list_del(&sums->list);
4219 kfree(sums);
4220 }
4221
4222 return ret;
4223 }
4224
4225 static int extent_cmp(void *priv, const struct list_head *a,
4226 const struct list_head *b)
4227 {
4228 const struct extent_map *em1, *em2;
4229
4230 em1 = list_entry(a, struct extent_map, list);
4231 em2 = list_entry(b, struct extent_map, list);
4232
4233 if (em1->start < em2->start)
4234 return -1;
4235 else if (em1->start > em2->start)
4236 return 1;
4237 return 0;
4238 }
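/*
 * The comparator above is handed to list_sort() so the private list of
 * modified extent maps is logged in ascending file offset order, e.g.:
 *
 *	list_sort(NULL, &extents, extent_cmp);
 *
 * (see btrfs_log_changed_extents() below).
 */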
4239
4240 static int log_extent_csums(struct btrfs_trans_handle *trans,
4241 struct btrfs_inode *inode,
4242 struct btrfs_root *log_root,
4243 const struct extent_map *em,
4244 struct btrfs_log_ctx *ctx)
4245 {
4246 struct btrfs_ordered_extent *ordered;
4247 u64 csum_offset;
4248 u64 csum_len;
4249 u64 mod_start = em->mod_start;
4250 u64 mod_len = em->mod_len;
4251 LIST_HEAD(ordered_sums);
4252 int ret = 0;
4253
4254 if (inode->flags & BTRFS_INODE_NODATASUM ||
4255 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4256 em->block_start == EXTENT_MAP_HOLE)
4257 return 0;
4258
4259 list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
4260 const u64 ordered_end = ordered->file_offset + ordered->num_bytes;
4261 const u64 mod_end = mod_start + mod_len;
4262 struct btrfs_ordered_sum *sums;
4263
4264 if (mod_len == 0)
4265 break;
4266
4267 if (ordered_end <= mod_start)
4268 continue;
4269 if (mod_end <= ordered->file_offset)
4270 break;
4271
4272 /*
4273 * We are going to copy all the csums on this ordered extent, so
4274 * go ahead and adjust mod_start and mod_len in case this ordered
4275 * extent has already been logged.
4276 */
4277 if (ordered->file_offset > mod_start) {
4278 if (ordered_end >= mod_end)
4279 mod_len = ordered->file_offset - mod_start;
4280 /*
4281 * If we have this case
4282 *
4283 * |--------- logged extent ---------|
4284 * |----- ordered extent ----|
4285 *
4286 * Just don't mess with mod_start and mod_len, we'll
4287 * just end up logging more csums than we need and it
4288 * will be ok.
4289 */
4290 } else {
4291 if (ordered_end < mod_end) {
4292 mod_len = mod_end - ordered_end;
4293 mod_start = ordered_end;
4294 } else {
4295 mod_len = 0;
4296 }
4297 }
4298
4299 /*
4300 * To keep us from looping for the above case of an ordered
4301 * extent that falls inside of the logged extent.
4302 */
4303 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
4304 continue;
4305
4306 list_for_each_entry(sums, &ordered->list, list) {
4307 ret = log_csums(trans, inode, log_root, sums);
4308 if (ret)
4309 return ret;
4310 }
4311 }
4312
4313 /* We're done, found all csums in the ordered extents. */
4314 if (mod_len == 0)
4315 return 0;
4316
4317 /* If we're compressed we have to save the entire range of csums. */
4318 if (em->compress_type) {
4319 csum_offset = 0;
4320 csum_len = max(em->block_len, em->orig_block_len);
4321 } else {
4322 csum_offset = mod_start - em->start;
4323 csum_len = mod_len;
4324 }
4325
4326 /* block start is already adjusted for the file extent offset. */
4327 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4328 em->block_start + csum_offset,
4329 em->block_start + csum_offset +
4330 csum_len - 1, &ordered_sums, 0);
4331 if (ret)
4332 return ret;
4333
4334 while (!list_empty(&ordered_sums)) {
4335 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4336 struct btrfs_ordered_sum,
4337 list);
4338 if (!ret)
4339 ret = log_csums(trans, inode, log_root, sums);
4340 list_del(&sums->list);
4341 kfree(sums);
4342 }
4343
4344 return ret;
4345 }
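/*
 * Minimal standalone sketch (hypothetical helper, not used by the kernel) of
 * the mod range trimming done in the loop above: given the logged range
 * [*start, *start + *len) and an already-covered ordered extent
 * [o_start, o_end), shrink the range so the final csum tree lookup skips
 * what the ordered extent's csum list already provided.
 */
static inline void trim_mod_range(u64 *start, u64 *len, u64 o_start, u64 o_end)
{
	const u64 end = *start + *len;

	if (o_start > *start) {
		/* The ordered extent covers (at least) the tail. */
		if (o_end >= end)
			*len = o_start - *start;
		/* Else it sits in the middle: over-logging a bit is fine. */
	} else if (o_end < end) {
		/* The ordered extent covers the head, advance the start. */
		*len = end - o_end;
		*start = o_end;
	} else {
		/* The ordered extent covers everything. */
		*len = 0;
	}
}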
4346
4347 static int log_one_extent(struct btrfs_trans_handle *trans,
4348 struct btrfs_inode *inode, struct btrfs_root *root,
4349 const struct extent_map *em,
4350 struct btrfs_path *path,
4351 struct btrfs_log_ctx *ctx)
4352 {
4353 struct btrfs_drop_extents_args drop_args = { 0 };
4354 struct btrfs_root *log = root->log_root;
4355 struct btrfs_file_extent_item *fi;
4356 struct extent_buffer *leaf;
4357 struct btrfs_map_token token;
4358 struct btrfs_key key;
4359 u64 extent_offset = em->start - em->orig_start;
4360 u64 block_len;
4361 int ret;
4362
4363 ret = log_extent_csums(trans, inode, log, em, ctx);
4364 if (ret)
4365 return ret;
4366
4367 drop_args.path = path;
4368 drop_args.start = em->start;
4369 drop_args.end = em->start + em->len;
4370 drop_args.replace_extent = true;
4371 drop_args.extent_item_size = sizeof(*fi);
4372 ret = btrfs_drop_extents(trans, log, inode, &drop_args);
4373 if (ret)
4374 return ret;
4375
4376 if (!drop_args.extent_inserted) {
4377 key.objectid = btrfs_ino(inode);
4378 key.type = BTRFS_EXTENT_DATA_KEY;
4379 key.offset = em->start;
4380
4381 ret = btrfs_insert_empty_item(trans, log, path, &key,
4382 sizeof(*fi));
4383 if (ret)
4384 return ret;
4385 }
4386 leaf = path->nodes[0];
4387 btrfs_init_map_token(&token, leaf);
4388 fi = btrfs_item_ptr(leaf, path->slots[0],
4389 struct btrfs_file_extent_item);
4390
4391 btrfs_set_token_file_extent_generation(&token, fi, trans->transid);
4392 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4393 btrfs_set_token_file_extent_type(&token, fi,
4394 BTRFS_FILE_EXTENT_PREALLOC);
4395 else
4396 btrfs_set_token_file_extent_type(&token, fi,
4397 BTRFS_FILE_EXTENT_REG);
4398
4399 block_len = max(em->block_len, em->orig_block_len);
4400 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4401 btrfs_set_token_file_extent_disk_bytenr(&token, fi,
4402 em->block_start);
4403 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
4404 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4405 btrfs_set_token_file_extent_disk_bytenr(&token, fi,
4406 em->block_start -
4407 extent_offset);
4408 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
4409 } else {
4410 btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0);
4411 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0);
4412 }
4413
4414 btrfs_set_token_file_extent_offset(&token, fi, extent_offset);
4415 btrfs_set_token_file_extent_num_bytes(&token, fi, em->len);
4416 btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes);
4417 btrfs_set_token_file_extent_compression(&token, fi, em->compress_type);
4418 btrfs_set_token_file_extent_encryption(&token, fi, 0);
4419 btrfs_set_token_file_extent_other_encoding(&token, fi, 0);
4420 btrfs_mark_buffer_dirty(leaf);
4421
4422 btrfs_release_path(path);
4423
4424 return ret;
4425 }
4426
4427 /*
4428 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4429 * lose them after doing a full/fast fsync and replaying the log. We scan the
4430 * subvolume's root instead of iterating the inode's extent map tree because
4431 * otherwise we can log incorrect extent items based on extent map conversion.
4432 * That can happen due to the fact that extent maps are merged when they
4433 * are not in the extent map tree's list of modified extents.
4434 */
4435 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4436 struct btrfs_inode *inode,
4437 struct btrfs_path *path)
4438 {
4439 struct btrfs_root *root = inode->root;
4440 struct btrfs_key key;
4441 const u64 i_size = i_size_read(&inode->vfs_inode);
4442 const u64 ino = btrfs_ino(inode);
4443 struct btrfs_path *dst_path = NULL;
4444 bool dropped_extents = false;
4445 u64 truncate_offset = i_size;
4446 struct extent_buffer *leaf;
4447 int slot;
4448 int ins_nr = 0;
4449 int start_slot = 0;
4450 int ret;
4451
4452 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4453 return 0;
4454
4455 key.objectid = ino;
4456 key.type = BTRFS_EXTENT_DATA_KEY;
4457 key.offset = i_size;
4458 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4459 if (ret < 0)
4460 goto out;
4461
4462 /*
4463 * We must check if there is a prealloc extent that starts before the
4464 * i_size and crosses the i_size boundary. This is to ensure later we
4465 * truncate down to the end of that extent and not to the i_size, as
4466 * otherwise we end up losing part of the prealloc extent after a log
4467 * replay and with an implicit hole if there is another prealloc extent
4468 * that starts at an offset beyond i_size.
4469 */
4470 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4471 if (ret < 0)
4472 goto out;
4473
4474 if (ret == 0) {
4475 struct btrfs_file_extent_item *ei;
4476
4477 leaf = path->nodes[0];
4478 slot = path->slots[0];
4479 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4480
4481 if (btrfs_file_extent_type(leaf, ei) ==
4482 BTRFS_FILE_EXTENT_PREALLOC) {
4483 u64 extent_end;
4484
4485 btrfs_item_key_to_cpu(leaf, &key, slot);
4486 extent_end = key.offset +
4487 btrfs_file_extent_num_bytes(leaf, ei);
4488
4489 if (extent_end > i_size)
4490 truncate_offset = extent_end;
4491 }
4492 } else {
4493 ret = 0;
4494 }
4495
4496 while (true) {
4497 leaf = path->nodes[0];
4498 slot = path->slots[0];
4499
4500 if (slot >= btrfs_header_nritems(leaf)) {
4501 if (ins_nr > 0) {
4502 ret = copy_items(trans, inode, dst_path, path,
4503 start_slot, ins_nr, 1, 0);
4504 if (ret < 0)
4505 goto out;
4506 ins_nr = 0;
4507 }
4508 ret = btrfs_next_leaf(root, path);
4509 if (ret < 0)
4510 goto out;
4511 if (ret > 0) {
4512 ret = 0;
4513 break;
4514 }
4515 continue;
4516 }
4517
4518 btrfs_item_key_to_cpu(leaf, &key, slot);
4519 if (key.objectid > ino)
4520 break;
4521 if (WARN_ON_ONCE(key.objectid < ino) ||
4522 key.type < BTRFS_EXTENT_DATA_KEY ||
4523 key.offset < i_size) {
4524 path->slots[0]++;
4525 continue;
4526 }
4527 if (!dropped_extents) {
4528 /*
4529 * Avoid logging extent items logged in past fsync calls
4530 * and leading to duplicate keys in the log tree.
4531 */
4532 do {
4533 ret = btrfs_truncate_inode_items(trans,
4534 root->log_root,
4535 inode, truncate_offset,
4536 BTRFS_EXTENT_DATA_KEY,
4537 NULL);
4538 } while (ret == -EAGAIN);
4539 if (ret)
4540 goto out;
4541 dropped_extents = true;
4542 }
4543 if (ins_nr == 0)
4544 start_slot = slot;
4545 ins_nr++;
4546 path->slots[0]++;
4547 if (!dst_path) {
4548 dst_path = btrfs_alloc_path();
4549 if (!dst_path) {
4550 ret = -ENOMEM;
4551 goto out;
4552 }
4553 }
4554 }
4555 if (ins_nr > 0)
4556 ret = copy_items(trans, inode, dst_path, path,
4557 start_slot, ins_nr, 1, 0);
4558 out:
4559 btrfs_release_path(path);
4560 btrfs_free_path(dst_path);
4561 return ret;
4562 }
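/*
 * Worked example for the truncate_offset logic above (hypothetical numbers):
 * with i_size = 8K, if the last extent item starting before i_size is a
 * prealloc extent at offset 4K with a length of 8K, its end is at 12K, so we
 * truncate the log's extent items at 12K instead of 8K. Truncating at i_size
 * would drop the tail of that extent from the log and leave an implicit hole
 * after replay.
 */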
4563
4564 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4565 struct btrfs_root *root,
4566 struct btrfs_inode *inode,
4567 struct btrfs_path *path,
4568 struct btrfs_log_ctx *ctx)
4569 {
4570 struct btrfs_ordered_extent *ordered;
4571 struct btrfs_ordered_extent *tmp;
4572 struct extent_map *em, *n;
4573 struct list_head extents;
4574 struct extent_map_tree *tree = &inode->extent_tree;
4575 int ret = 0;
4576 int num = 0;
4577
4578 INIT_LIST_HEAD(&extents);
4579
4580 write_lock(&tree->lock);
4581
4582 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4583 list_del_init(&em->list);
4584 /*
4585 * The limit below is just an arbitrary number. Logging can get
4586 * really CPU intensive once we start getting a lot of extents,
4587 * and once we have that many we are better off just committing
4588 * the transaction, since that will be faster.
4589 */
4590 if (++num > 32768) {
4591 list_del_init(&tree->modified_extents);
4592 ret = -EFBIG;
4593 goto process;
4594 }
4595
4596 if (em->generation < trans->transid)
4597 continue;
4598
4599 /* We log prealloc extents beyond eof later. */
4600 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4601 em->start >= i_size_read(&inode->vfs_inode))
4602 continue;
4603
4604 /* Need a ref to keep it from getting evicted from cache */
4605 refcount_inc(&em->refs);
4606 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4607 list_add_tail(&em->list, &extents);
4609 }
4610
4611 list_sort(NULL, &extents, extent_cmp);
4612 process:
4613 while (!list_empty(&extents)) {
4614 em = list_entry(extents.next, struct extent_map, list);
4615
4616 list_del_init(&em->list);
4617
4618 /*
4619 * If we had an error we just need to delete everybody from our
4620 * private list.
4621 */
4622 if (ret) {
4623 clear_em_logging(tree, em);
4624 free_extent_map(em);
4625 continue;
4626 }
4627
4628 write_unlock(&tree->lock);
4629
4630 ret = log_one_extent(trans, inode, root, em, path, ctx);
4631 write_lock(&tree->lock);
4632 clear_em_logging(tree, em);
4633 free_extent_map(em);
4634 }
4635 WARN_ON(!list_empty(&extents));
4636 write_unlock(&tree->lock);
4637
4638 btrfs_release_path(path);
4639 if (!ret)
4640 ret = btrfs_log_prealloc_extents(trans, inode, path);
4641 if (ret)
4642 return ret;
4643
4644 /*
4645 * We have logged all extents successfully, now make sure the commit of
4646 * the current transaction waits for the ordered extents to complete
4647 * before it commits and wipes out the log trees, otherwise we would
4648 * lose data if an ordered extent completes after the transaction
4649 * commits and a power failure happens after the transaction commit.
4650 */
4651 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
4652 list_del_init(&ordered->log_list);
4653 set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
4654
4655 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4656 spin_lock_irq(&inode->ordered_tree.lock);
4657 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4658 set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
4659 atomic_inc(&trans->transaction->pending_ordered);
4660 }
4661 spin_unlock_irq(&inode->ordered_tree.lock);
4662 }
4663 btrfs_put_ordered_extent(ordered);
4664 }
4665
4666 return 0;
4667 }
4668
4669 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4670 struct btrfs_path *path, u64 *size_ret)
4671 {
4672 struct btrfs_key key;
4673 int ret;
4674
4675 key.objectid = btrfs_ino(inode);
4676 key.type = BTRFS_INODE_ITEM_KEY;
4677 key.offset = 0;
4678
4679 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4680 if (ret < 0) {
4681 return ret;
4682 } else if (ret > 0) {
4683 *size_ret = 0;
4684 } else {
4685 struct btrfs_inode_item *item;
4686
4687 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4688 struct btrfs_inode_item);
4689 *size_ret = btrfs_inode_size(path->nodes[0], item);
4690 /*
4691 * If the in-memory inode's i_size is smaller than the inode
4692 * size stored in the btree, return the inode's i_size, so
4693 * that we get a correct inode size after replaying the log
4694 * when before a power failure we had a shrinking truncate
4695 * followed by addition of a new name (rename / new hard link).
4696 * Otherwise return the inode size from the btree, to avoid
4697 * data loss when replaying a log due to previously doing a
4698 * write that expands the inode's size and logging a new name
4699 * immediately after.
4700 */
4701 if (*size_ret > inode->vfs_inode.i_size)
4702 *size_ret = inode->vfs_inode.i_size;
4703 }
4704
4705 btrfs_release_path(path);
4706 return 0;
4707 }
4708
4709 /*
4710 * At the moment we always log all xattrs. This is to figure out at log replay
4711 * time which xattrs must have their deletion replayed. If an xattr is missing
4712 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4713 * because if an xattr is deleted, the inode is fsynced and then a power failure
4714 * happens, causing the log to be replayed the next time the fs is mounted,
4715 * we want the xattr to no longer exist (the same behaviour as other filesystems
4716 * with a journal: ext3/4, xfs, f2fs, etc).
4717 */
4718 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4719 struct btrfs_root *root,
4720 struct btrfs_inode *inode,
4721 struct btrfs_path *path,
4722 struct btrfs_path *dst_path)
4723 {
4724 int ret;
4725 struct btrfs_key key;
4726 const u64 ino = btrfs_ino(inode);
4727 int ins_nr = 0;
4728 int start_slot = 0;
4729 bool found_xattrs = false;
4730
4731 if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
4732 return 0;
4733
4734 key.objectid = ino;
4735 key.type = BTRFS_XATTR_ITEM_KEY;
4736 key.offset = 0;
4737
4738 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4739 if (ret < 0)
4740 return ret;
4741
4742 while (true) {
4743 int slot = path->slots[0];
4744 struct extent_buffer *leaf = path->nodes[0];
4745 int nritems = btrfs_header_nritems(leaf);
4746
4747 if (slot >= nritems) {
4748 if (ins_nr > 0) {
4749 ret = copy_items(trans, inode, dst_path, path,
4750 start_slot, ins_nr, 1, 0);
4751 if (ret < 0)
4752 return ret;
4753 ins_nr = 0;
4754 }
4755 ret = btrfs_next_leaf(root, path);
4756 if (ret < 0)
4757 return ret;
4758 else if (ret > 0)
4759 break;
4760 continue;
4761 }
4762
4763 btrfs_item_key_to_cpu(leaf, &key, slot);
4764 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4765 break;
4766
4767 if (ins_nr == 0)
4768 start_slot = slot;
4769 ins_nr++;
4770 path->slots[0]++;
4771 found_xattrs = true;
4772 cond_resched();
4773 }
4774 if (ins_nr > 0) {
4775 ret = copy_items(trans, inode, dst_path, path,
4776 start_slot, ins_nr, 1, 0);
4777 if (ret < 0)
4778 return ret;
4779 }
4780
4781 if (!found_xattrs)
4782 set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
4783
4784 return 0;
4785 }
4786
4787 /*
4788 * When using the NO_HOLES feature if we punched a hole that causes the
4789 * deletion of entire leafs or all the extent items of the first leaf (the one
4790 * that contains the inode item and references) we may end up not processing
4791 * any extents, because there are no leafs with a generation matching the
4792 * current transaction that have extent items for our inode. So we need to find
4793 * if any holes exist and then log them. We also need to log holes after any
4794 * truncate operation that changes the inode's size.
4795 */
4796 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
4797 struct btrfs_root *root,
4798 struct btrfs_inode *inode,
4799 struct btrfs_path *path)
4800 {
4801 struct btrfs_fs_info *fs_info = root->fs_info;
4802 struct btrfs_key key;
4803 const u64 ino = btrfs_ino(inode);
4804 const u64 i_size = i_size_read(&inode->vfs_inode);
4805 u64 prev_extent_end = 0;
4806 int ret;
4807
4808 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
4809 return 0;
4810
4811 key.objectid = ino;
4812 key.type = BTRFS_EXTENT_DATA_KEY;
4813 key.offset = 0;
4814
4815 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4816 if (ret < 0)
4817 return ret;
4818
4819 while (true) {
4820 struct extent_buffer *leaf = path->nodes[0];
4821
4822 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4823 ret = btrfs_next_leaf(root, path);
4824 if (ret < 0)
4825 return ret;
4826 if (ret > 0) {
4827 ret = 0;
4828 break;
4829 }
4830 leaf = path->nodes[0];
4831 }
4832
4833 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4834 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
4835 break;
4836
4837 /* We have a hole, log it. */
4838 if (prev_extent_end < key.offset) {
4839 const u64 hole_len = key.offset - prev_extent_end;
4840
4841 /*
4842 * Release the path to avoid deadlocks with other code
4843 * paths that search the root while holding locks on
4844 * leafs from the log root.
4845 */
4846 btrfs_release_path(path);
4847 ret = btrfs_insert_file_extent(trans, root->log_root,
4848 ino, prev_extent_end, 0,
4849 0, hole_len, 0, hole_len,
4850 0, 0, 0);
4851 if (ret < 0)
4852 return ret;
4853
4854 /*
4855 * Search for the same key again in the root. Since it's
4856 * an extent item and we are holding the inode lock, the
4857 * key must still exist. If it doesn't, just emit a
4858 * warning and return an error, to fall back to a
4859 * transaction commit.
4860 */
4861 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4862 if (ret < 0)
4863 return ret;
4864 if (WARN_ON(ret > 0))
4865 return -ENOENT;
4866 leaf = path->nodes[0];
4867 }
4868
4869 prev_extent_end = btrfs_file_extent_end(path);
4870 path->slots[0]++;
4871 cond_resched();
4872 }
4873
4874 if (prev_extent_end < i_size) {
4875 u64 hole_len;
4876
4877 btrfs_release_path(path);
4878 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
4879 ret = btrfs_insert_file_extent(trans, root->log_root,
4880 ino, prev_extent_end, 0, 0,
4881 hole_len, 0, hole_len,
4882 0, 0, 0);
4883 if (ret < 0)
4884 return ret;
4885 }
4886
4887 return 0;
4888 }
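/*
 * Worked example for the final implicit hole above (hypothetical numbers):
 * with NO_HOLES enabled, i_size = 10000 and sectorsize = 4096, if the last
 * extent ends at offset 4096 then the hole [4096, 10000) is logged with a
 * length of ALIGN(10000 - 4096, 4096) = 8192.
 */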
4889
4890 /*
4891 * When we are logging a new inode X, check if it doesn't have a reference that
4892 * matches the reference from some other inode Y created in a past transaction
4893 * and that was renamed in the current transaction. If we don't do this, then at
4894 * log replay time we can lose inode Y (and all its files if it's a directory):
4895 *
4896 * mkdir /mnt/x
4897 * echo "hello world" > /mnt/x/foobar
4898 * sync
4899 * mv /mnt/x /mnt/y
4900 * mkdir /mnt/x # or touch /mnt/x
4901 * xfs_io -c fsync /mnt/x
4902 * <power fail>
4903 * mount fs, trigger log replay
4904 *
4905 * After the log replay procedure, we would lose the first directory and all its
4906 * files (file foobar).
4907 * For the case where inode Y is not a directory we simply end up losing it:
4908 *
4909 * echo "123" > /mnt/foo
4910 * sync
4911 * mv /mnt/foo /mnt/bar
4912 * echo "abc" > /mnt/foo
4913 * xfs_io -c fsync /mnt/foo
4914 * <power fail>
4915 *
4916 * We also need this for cases where a snapshot entry is replaced by some other
4917 * entry (file or directory) otherwise we end up with an unreplayable log due to
4918 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4919 * if it were a regular entry:
4920 *
4921 * mkdir /mnt/x
4922 * btrfs subvolume snapshot /mnt /mnt/x/snap
4923 * btrfs subvolume delete /mnt/x/snap
4924 * rmdir /mnt/x
4925 * mkdir /mnt/x
4926 * fsync /mnt/x or fsync some new file inside it
4927 * <power fail>
4928 *
4929 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4930 * the same transaction.
4931 */
4932 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4933 const int slot,
4934 const struct btrfs_key *key,
4935 struct btrfs_inode *inode,
4936 u64 *other_ino, u64 *other_parent)
4937 {
4938 int ret;
4939 struct btrfs_path *search_path;
4940 char *name = NULL;
4941 u32 name_len = 0;
4942 u32 item_size = btrfs_item_size_nr(eb, slot);
4943 u32 cur_offset = 0;
4944 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4945
4946 search_path = btrfs_alloc_path();
4947 if (!search_path)
4948 return -ENOMEM;
4949 search_path->search_commit_root = 1;
4950 search_path->skip_locking = 1;
4951
4952 while (cur_offset < item_size) {
4953 u64 parent;
4954 u32 this_name_len;
4955 u32 this_len;
4956 unsigned long name_ptr;
4957 struct btrfs_dir_item *di;
4958
4959 if (key->type == BTRFS_INODE_REF_KEY) {
4960 struct btrfs_inode_ref *iref;
4961
4962 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4963 parent = key->offset;
4964 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4965 name_ptr = (unsigned long)(iref + 1);
4966 this_len = sizeof(*iref) + this_name_len;
4967 } else {
4968 struct btrfs_inode_extref *extref;
4969
4970 extref = (struct btrfs_inode_extref *)(ptr +
4971 cur_offset);
4972 parent = btrfs_inode_extref_parent(eb, extref);
4973 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4974 name_ptr = (unsigned long)&extref->name;
4975 this_len = sizeof(*extref) + this_name_len;
4976 }
4977
4978 if (this_name_len > name_len) {
4979 char *new_name;
4980
4981 new_name = krealloc(name, this_name_len, GFP_NOFS);
4982 if (!new_name) {
4983 ret = -ENOMEM;
4984 goto out;
4985 }
4986 name_len = this_name_len;
4987 name = new_name;
4988 }
4989
4990 read_extent_buffer(eb, name, name_ptr, this_name_len);
4991 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4992 parent, name, this_name_len, 0);
4993 if (di && !IS_ERR(di)) {
4994 struct btrfs_key di_key;
4995
4996 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4997 di, &di_key);
4998 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4999 if (di_key.objectid != key->objectid) {
5000 ret = 1;
5001 *other_ino = di_key.objectid;
5002 *other_parent = parent;
5003 } else {
5004 ret = 0;
5005 }
5006 } else {
5007 ret = -EAGAIN;
5008 }
5009 goto out;
5010 } else if (IS_ERR(di)) {
5011 ret = PTR_ERR(di);
5012 goto out;
5013 }
5014 btrfs_release_path(search_path);
5015
5016 cur_offset += this_len;
5017 }
5018 ret = 0;
5019 out:
5020 btrfs_free_path(search_path);
5021 kfree(name);
5022 return ret;
5023 }
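/*
 * Layout note for the walk above: an INODE_REF item packs a sequence of
 * (struct btrfs_inode_ref, name) records back to back, so each iteration
 * advances cur_offset by sizeof(*iref) + name_len. INODE_EXTREF items are
 * walked the same way, except the parent directory's objectid is stored in
 * the record itself rather than in the key offset.
 */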
5024
5025 struct btrfs_ino_list {
5026 u64 ino;
5027 u64 parent;
5028 struct list_head list;
5029 };
5030
5031 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5032 struct btrfs_root *root,
5033 struct btrfs_path *path,
5034 struct btrfs_log_ctx *ctx,
5035 u64 ino, u64 parent)
5036 {
5037 struct btrfs_ino_list *ino_elem;
5038 LIST_HEAD(inode_list);
5039 int ret = 0;
5040
5041 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5042 if (!ino_elem)
5043 return -ENOMEM;
5044 ino_elem->ino = ino;
5045 ino_elem->parent = parent;
5046 list_add_tail(&ino_elem->list, &inode_list);
5047
5048 while (!list_empty(&inode_list)) {
5049 struct btrfs_fs_info *fs_info = root->fs_info;
5050 struct btrfs_key key;
5051 struct inode *inode;
5052
5053 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
5054 list);
5055 ino = ino_elem->ino;
5056 parent = ino_elem->parent;
5057 list_del(&ino_elem->list);
5058 kfree(ino_elem);
5059 if (ret)
5060 continue;
5061
5062 btrfs_release_path(path);
5063
5064 inode = btrfs_iget(fs_info->sb, ino, root);
5065 /*
5066 * If the other inode that had a conflicting dir entry was
5067 * deleted in the current transaction, we need to log its parent
5068 * directory.
5069 */
5070 if (IS_ERR(inode)) {
5071 ret = PTR_ERR(inode);
5072 if (ret == -ENOENT) {
5073 inode = btrfs_iget(fs_info->sb, parent, root);
5074 if (IS_ERR(inode)) {
5075 ret = PTR_ERR(inode);
5076 } else {
5077 ret = btrfs_log_inode(trans, root,
5078 BTRFS_I(inode),
5079 LOG_OTHER_INODE_ALL,
5080 ctx);
5081 btrfs_add_delayed_iput(inode);
5082 }
5083 }
5084 continue;
5085 }
5086 /*
5087 * If the inode was already logged skip it - otherwise we can
5088 * hit an infinite loop. Example:
5089 *
5090 * From the commit root (previous transaction) we have the
5091 * following inodes:
5092 *
5093 * inode 257 a directory
5094 * inode 258 with references "zz" and "zz_link" on inode 257
5095 * inode 259 with reference "a" on inode 257
5096 *
5097 * And in the current (uncommitted) transaction we have:
5098 *
5099 * inode 257 a directory, unchanged
5100 * inode 258 with references "a" and "a2" on inode 257
5101 * inode 259 with reference "zz_link" on inode 257
5102 * inode 261 with reference "zz" on inode 257
5103 *
5104 * When logging inode 261 the following infinite loop could
5105 * happen if we don't skip already logged inodes:
5106 *
5107 * - we detect inode 258 as a conflicting inode, with inode 261
5108 * on reference "zz", and log it;
5109 *
5110 * - we detect inode 259 as a conflicting inode, with inode 258
5111 * on reference "a", and log it;
5112 *
5113 * - we detect inode 258 as a conflicting inode, with inode 259
5114 * on reference "zz_link", and log it - again! After this we
5115 * repeat the above steps forever.
5116 */
5117 spin_lock(&BTRFS_I(inode)->lock);
5118 /*
5119 * Check the inode's logged_trans only instead of
5120 * btrfs_inode_in_log(). This is because the last_log_commit of
5121 * the inode is not updated when we only log that it exists (see
5122 * btrfs_log_inode()).
5123 */
5124 if (BTRFS_I(inode)->logged_trans == trans->transid) {
5125 spin_unlock(&BTRFS_I(inode)->lock);
5126 btrfs_add_delayed_iput(inode);
5127 continue;
5128 }
5129 spin_unlock(&BTRFS_I(inode)->lock);
5130 /*
5131 * We are safe logging the other inode without acquiring its
5132 * lock as long as we log with the LOG_INODE_EXISTS mode. We
5133 * are safe against concurrent renames of the other inode as
5134 * well because during a rename we pin the log and update the
5135 * log with the new name before we unpin it.
5136 */
5137 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5138 LOG_OTHER_INODE, ctx);
5139 if (ret) {
5140 btrfs_add_delayed_iput(inode);
5141 continue;
5142 }
5143
5144 key.objectid = ino;
5145 key.type = BTRFS_INODE_REF_KEY;
5146 key.offset = 0;
5147 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5148 if (ret < 0) {
5149 btrfs_add_delayed_iput(inode);
5150 continue;
5151 }
5152
5153 while (true) {
5154 struct extent_buffer *leaf = path->nodes[0];
5155 int slot = path->slots[0];
5156 u64 other_ino = 0;
5157 u64 other_parent = 0;
5158
5159 if (slot >= btrfs_header_nritems(leaf)) {
5160 ret = btrfs_next_leaf(root, path);
5161 if (ret < 0) {
5162 break;
5163 } else if (ret > 0) {
5164 ret = 0;
5165 break;
5166 }
5167 continue;
5168 }
5169
5170 btrfs_item_key_to_cpu(leaf, &key, slot);
5171 if (key.objectid != ino ||
5172 (key.type != BTRFS_INODE_REF_KEY &&
5173 key.type != BTRFS_INODE_EXTREF_KEY)) {
5174 ret = 0;
5175 break;
5176 }
5177
5178 ret = btrfs_check_ref_name_override(leaf, slot, &key,
5179 BTRFS_I(inode), &other_ino,
5180 &other_parent);
5181 if (ret < 0)
5182 break;
5183 if (ret > 0) {
5184 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5185 if (!ino_elem) {
5186 ret = -ENOMEM;
5187 break;
5188 }
5189 ino_elem->ino = other_ino;
5190 ino_elem->parent = other_parent;
5191 list_add_tail(&ino_elem->list, &inode_list);
5192 ret = 0;
5193 }
5194 path->slots[0]++;
5195 }
5196 btrfs_add_delayed_iput(inode);
5197 }
5198
5199 return ret;
5200 }
5201
5202 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
5203 struct btrfs_inode *inode,
5204 struct btrfs_key *min_key,
5205 const struct btrfs_key *max_key,
5206 struct btrfs_path *path,
5207 struct btrfs_path *dst_path,
5208 const u64 logged_isize,
5209 const bool recursive_logging,
5210 const int inode_only,
5211 struct btrfs_log_ctx *ctx,
5212 bool *need_log_inode_item)
5213 {
5214 const u64 i_size = i_size_read(&inode->vfs_inode);
5215 struct btrfs_root *root = inode->root;
5216 int ins_start_slot = 0;
5217 int ins_nr = 0;
5218 int ret;
5219
5220 while (1) {
5221 ret = btrfs_search_forward(root, min_key, path, trans->transid);
5222 if (ret < 0)
5223 return ret;
5224 if (ret > 0) {
5225 ret = 0;
5226 break;
5227 }
5228 again:
5229 /* Note, ins_nr might be > 0 here, cleanup outside the loop */
5230 if (min_key->objectid != max_key->objectid)
5231 break;
5232 if (min_key->type > max_key->type)
5233 break;
5234
5235 if (min_key->type == BTRFS_INODE_ITEM_KEY) {
5236 *need_log_inode_item = false;
5237 } else if (min_key->type == BTRFS_EXTENT_DATA_KEY &&
5238 min_key->offset >= i_size) {
5239 /*
5240 * Extents at and beyond eof are logged with
5241 * btrfs_log_prealloc_extents().
5242 * Only regular files have BTRFS_EXTENT_DATA_KEY keys,
5243 * and no keys greater than that, so bail out.
5244 */
5245 break;
5246 } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
5247 min_key->type == BTRFS_INODE_EXTREF_KEY) &&
5248 inode->generation == trans->transid &&
5249 !recursive_logging) {
5250 u64 other_ino = 0;
5251 u64 other_parent = 0;
5252
5253 ret = btrfs_check_ref_name_override(path->nodes[0],
5254 path->slots[0], min_key, inode,
5255 &other_ino, &other_parent);
5256 if (ret < 0) {
5257 return ret;
5258 } else if (ret > 0 && ctx &&
5259 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5260 if (ins_nr > 0) {
5261 ins_nr++;
5262 } else {
5263 ins_nr = 1;
5264 ins_start_slot = path->slots[0];
5265 }
5266 ret = copy_items(trans, inode, dst_path, path,
5267 ins_start_slot, ins_nr,
5268 inode_only, logged_isize);
5269 if (ret < 0)
5270 return ret;
5271 ins_nr = 0;
5272
5273 ret = log_conflicting_inodes(trans, root, path,
5274 ctx, other_ino, other_parent);
5275 if (ret)
5276 return ret;
5277 btrfs_release_path(path);
5278 goto next_key;
5279 }
5280 } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5281 /* Skip xattrs, logged later with btrfs_log_all_xattrs() */
5282 if (ins_nr == 0)
5283 goto next_slot;
5284 ret = copy_items(trans, inode, dst_path, path,
5285 ins_start_slot,
5286 ins_nr, inode_only, logged_isize);
5287 if (ret < 0)
5288 return ret;
5289 ins_nr = 0;
5290 goto next_slot;
5291 }
5292
5293 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5294 ins_nr++;
5295 goto next_slot;
5296 } else if (!ins_nr) {
5297 ins_start_slot = path->slots[0];
5298 ins_nr = 1;
5299 goto next_slot;
5300 }
5301
5302 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5303 ins_nr, inode_only, logged_isize);
5304 if (ret < 0)
5305 return ret;
5306 ins_nr = 1;
5307 ins_start_slot = path->slots[0];
5308 next_slot:
5309 path->slots[0]++;
5310 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5311 btrfs_item_key_to_cpu(path->nodes[0], min_key,
5312 path->slots[0]);
5313 goto again;
5314 }
5315 if (ins_nr) {
5316 ret = copy_items(trans, inode, dst_path, path,
5317 ins_start_slot, ins_nr, inode_only,
5318 logged_isize);
5319 if (ret < 0)
5320 return ret;
5321 ins_nr = 0;
5322 }
5323 btrfs_release_path(path);
5324 next_key:
5325 if (min_key->offset < (u64)-1) {
5326 min_key->offset++;
5327 } else if (min_key->type < max_key->type) {
5328 min_key->type++;
5329 min_key->offset = 0;
5330 } else {
5331 break;
5332 }
5333 }
5334 if (ins_nr) {
5335 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5336 ins_nr, inode_only, logged_isize);
5337 if (ret)
5338 return ret;
5339 }
5340
5341 if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) {
5342 /*
5343 * Release the path because otherwise we might attempt to double
5344 * lock the same leaf with btrfs_log_prealloc_extents() below.
5345 */
5346 btrfs_release_path(path);
5347 ret = btrfs_log_prealloc_extents(trans, inode, dst_path);
5348 }
5349
5350 return ret;
5351 }
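/*
 * Minimal sketch (hypothetical helper, not part of the kernel) of the
 * contiguous-slot batching used by the loop above: items are only copied
 * with copy_items() in runs of adjacent leaf slots, so a batch may be
 * extended only while the next slot immediately follows it; otherwise the
 * caller flushes the current batch and starts a new one.
 */
static inline bool extend_batch(int *ins_nr, int *ins_start_slot, int slot)
{
	if (*ins_nr && *ins_start_slot + *ins_nr == slot) {
		(*ins_nr)++;	/* slot is contiguous with the batch */
		return true;
	}
	return false;		/* caller must flush and start a new batch */
}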
5352
5353 /* log a single inode in the tree log.
5354 * At least one parent directory for this inode must exist in the tree
5355 * or be logged already.
5356 *
5357 * Any items from this inode changed by the current transaction are copied
5358 * to the log tree. An extra reference is taken on any extents in this
5359 * file, allowing us to avoid a whole pile of corner cases around logging
5360 * blocks that have been removed from the tree.
5361 *
5362 * See LOG_INODE_ALL and related defines for a description of what inode_only
5363 * does.
5364 *
5365 * This handles both files and directories.
5366 */
5367 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5368 struct btrfs_root *root, struct btrfs_inode *inode,
5369 int inode_only,
5370 struct btrfs_log_ctx *ctx)
5371 {
5372 struct btrfs_path *path;
5373 struct btrfs_path *dst_path;
5374 struct btrfs_key min_key;
5375 struct btrfs_key max_key;
5376 struct btrfs_root *log = root->log_root;
5377 int err = 0;
5378 int ret = 0;
5379 bool fast_search = false;
5380 u64 ino = btrfs_ino(inode);
5381 struct extent_map_tree *em_tree = &inode->extent_tree;
5382 u64 logged_isize = 0;
5383 bool need_log_inode_item = true;
5384 bool xattrs_logged = false;
5385 bool recursive_logging = false;
5386 bool inode_item_dropped = true;
5387
5388 path = btrfs_alloc_path();
5389 if (!path)
5390 return -ENOMEM;
5391 dst_path = btrfs_alloc_path();
5392 if (!dst_path) {
5393 btrfs_free_path(path);
5394 return -ENOMEM;
5395 }
5396
5397 min_key.objectid = ino;
5398 min_key.type = BTRFS_INODE_ITEM_KEY;
5399 min_key.offset = 0;
5400
5401 max_key.objectid = ino;
5402
5404 /* today the code can only do partial logging of directories */
5405 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5406 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5407 &inode->runtime_flags) &&
5408 inode_only >= LOG_INODE_EXISTS))
5409 max_key.type = BTRFS_XATTR_ITEM_KEY;
5410 else
5411 max_key.type = (u8)-1;
5412 max_key.offset = (u64)-1;
5413
5414 /*
5415 * Only run delayed items if we are a directory. We want to make sure
5416 * all directory indexes hit the fs/subvolume tree so we can find them
5417 * and figure out which index ranges have to be logged.
5418 *
5419 * Otherwise commit the delayed inode only if the full sync flag is set,
5420 * as we want to make sure an up to date version is in the subvolume
5421 * tree so copy_inode_items_to_log() / copy_items() can find it and copy
5422 * it to the log tree. For a non full sync, we always log the inode item
5423 * based on the in-memory struct btrfs_inode which is always up to date.
5424 */
5425 if (S_ISDIR(inode->vfs_inode.i_mode))
5426 ret = btrfs_commit_inode_delayed_items(trans, inode);
5427 else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5428 ret = btrfs_commit_inode_delayed_inode(inode);
5429
5430 if (ret) {
5431 btrfs_free_path(path);
5432 btrfs_free_path(dst_path);
5433 return ret;
5434 }
5435
5436 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5437 recursive_logging = true;
5438 if (inode_only == LOG_OTHER_INODE)
5439 inode_only = LOG_INODE_EXISTS;
5440 else
5441 inode_only = LOG_INODE_ALL;
5442 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5443 } else {
5444 mutex_lock(&inode->log_mutex);
5445 }
5446
5447 /*
5448 * For symlinks, we must always log their content, which is stored in an
5449 * inline extent, otherwise we could end up with an empty symlink after
5450 * log replay, which is invalid on Linux (symlink(2) returns -ENOENT if
5451 * one attempts to create an empty symlink).
5452 * We don't need to worry about flushing delalloc, because we create
5453 * the inline extent when the symlink is created (we never have delalloc
5454 * for symlinks).
5455 */
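/*
 * Illustrative reproducer (hypothetical paths) of the empty symlink
 * problem avoided by the forced LOG_INODE_ALL below:
 *
 *   mkdir testdir
 *   sync
 *   ln -s some_target testdir/link
 *   xfs_io -c fsync testdir
 *   <power fail>
 *
 * Fsyncing the parent directory ends up logging the symlink inode; if
 * that were done in LOG_INODE_EXISTS mode, log replay could recreate
 * the symlink without its inline extent, i.e. as an empty, invalid
 * symlink.
 */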
5456 if (S_ISLNK(inode->vfs_inode.i_mode))
5457 inode_only = LOG_INODE_ALL;
5458
5459 /*
5460 * This is for cases where logging a directory could result in losing
5461 * a file after replaying the log. For example, if we move a file from a
5462 * directory A to a directory B, then fsync directory A, we have no way
5463 * to know the file was moved from A to B, so logging just A would
5464 * result in losing the file after a log replay.
5465 */
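/*
 * Illustrative sequence (hypothetical names) for the case above:
 *
 *   mkdir A B
 *   touch A/file
 *   sync
 *   mv A/file B/file
 *   xfs_io -c fsync A
 *   <power fail>
 *
 * Logging only A cannot record that the file now lives in B, hence
 * the fallback to a full transaction commit below.
 */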
5466 if (S_ISDIR(inode->vfs_inode.i_mode) &&
5467 inode_only == LOG_INODE_ALL &&
5468 inode->last_unlink_trans >= trans->transid) {
5469 btrfs_set_log_full_commit(trans);
5470 err = 1;
5471 goto out_unlock;
5472 }
5473
5474 /*
5475 * a brute force approach to making sure we get the most up-to-date
5476 * copies of everything.
5477 */
5478 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5479 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5480
5481 clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
5482 if (inode_only == LOG_INODE_EXISTS)
5483 max_key_type = BTRFS_XATTR_ITEM_KEY;
5484 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5485 } else {
5486 if (inode_only == LOG_INODE_EXISTS) {
5487 /*
5488 * Make sure the new inode item we write to the log has
5489 * the same isize as the current one (if it exists).
5490 * This is necessary to prevent data loss after log
5491 * replay, and also to prevent doing a wrong expanding
5492 * truncate - e.g. create a file, write 4K into offset
5493 * 0, fsync, write 4K into offset 4096, add a hard link,
5494 * fsync some other file (to sync the log), power fail - if
5495 * we use the inode's current i_size, after log replay
5496 * we get an 8K file, with the last 4K extent as a hole
5497 * (zeroes), as if an expanding truncate happened,
5498 * instead of getting a file of 4K only.
5499 */
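/*
 * The sequence described above, spelled out with xfs_io for
 * illustration (file names hypothetical):
 *
 *   xfs_io -f -c "pwrite 0 4K" -c "fsync" foo
 *   xfs_io -c "pwrite 4K 4K" foo
 *   ln foo bar
 *   xfs_io -c "fsync" some_other_file
 *   <power fail>
 */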
5500 err = logged_inode_size(log, inode, path, &logged_isize);
5501 if (err)
5502 goto out_unlock;
5503 }
5504 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5505 &inode->runtime_flags)) {
5506 if (inode_only == LOG_INODE_EXISTS) {
5507 max_key.type = BTRFS_XATTR_ITEM_KEY;
5508 ret = drop_objectid_items(trans, log, path, ino,
5509 max_key.type);
5510 } else {
5511 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5512 &inode->runtime_flags);
5513 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5514 &inode->runtime_flags);
5515 while (1) {
5516 ret = btrfs_truncate_inode_items(trans,
5517 log, inode, 0, 0, NULL);
5518 if (ret != -EAGAIN)
5519 break;
5520 }
5521 }
5522 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5523 &inode->runtime_flags) ||
5524 inode_only == LOG_INODE_EXISTS) {
5525 if (inode_only == LOG_INODE_ALL)
5526 fast_search = true;
5527 max_key.type = BTRFS_XATTR_ITEM_KEY;
5528 ret = drop_objectid_items(trans, log, path, ino,
5529 max_key.type);
5530 } else {
5531 if (inode_only == LOG_INODE_ALL)
5532 fast_search = true;
5533 inode_item_dropped = false;
5534 goto log_extents;
5535 }
5537 }
5538 if (ret) {
5539 err = ret;
5540 goto out_unlock;
5541 }
5542
5543 err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5544 path, dst_path, logged_isize,
5545 recursive_logging, inode_only, ctx,
5546 &need_log_inode_item);
5547 if (err)
5548 goto out_unlock;
5549
5550 btrfs_release_path(path);
5551 btrfs_release_path(dst_path);
5552 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5553 if (err)
5554 goto out_unlock;
5555 xattrs_logged = true;
5556 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5557 btrfs_release_path(path);
5558 btrfs_release_path(dst_path);
5559 err = btrfs_log_holes(trans, root, inode, path);
5560 if (err)
5561 goto out_unlock;
5562 }
5563 log_extents:
5564 btrfs_release_path(path);
5565 btrfs_release_path(dst_path);
5566 if (need_log_inode_item) {
5567 err = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
5568 if (err)
5569 goto out_unlock;
5570 /*
5571 * If we are doing a fast fsync and the inode was logged before
5572 * in this transaction, we don't need to log the xattrs because
5573 * they were logged before. If xattrs were added, changed or
5574 * deleted since the last time we logged the inode, then we have
5575 * already logged them because the inode had the runtime flag
5576 * BTRFS_INODE_COPY_EVERYTHING set.
5577 */
5578 if (!xattrs_logged && inode->logged_trans < trans->transid) {
5579 err = btrfs_log_all_xattrs(trans, root, inode, path,
5580 dst_path);
5581 if (err)
5582 goto out_unlock;
5583 btrfs_release_path(path);
5584 }
5585 }
5586 if (fast_search) {
5587 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5588 ctx);
5589 if (ret) {
5590 err = ret;
5591 goto out_unlock;
5592 }
5593 } else if (inode_only == LOG_INODE_ALL) {
5594 struct extent_map *em, *n;
5595
5596 write_lock(&em_tree->lock);
5597 list_for_each_entry_safe(em, n, &em_tree->modified_extents, list)
5598 list_del_init(&em->list);
5599 write_unlock(&em_tree->lock);
5600 }
5601
5602 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5603 ret = log_directory_changes(trans, root, inode, path, dst_path,
5604 ctx);
5605 if (ret) {
5606 err = ret;
5607 goto out_unlock;
5608 }
5609 }
5610
5611 /*
5612 * If we are logging that an ancestor inode exists as part of logging a
5613 * new name from a link or rename operation, don't mark the inode as
5614 * logged - otherwise if an explicit fsync is made against an ancestor,
5615 * the fsync considers the inode in the log and doesn't sync the log,
5616 * resulting in the ancestor missing after a power failure unless the
5617 * log was synced as part of an fsync against any other unrelated inode.
5618 * So keep it simple for this case and just don't flag the ancestors as
5619 * logged.
5620 */
5621 if (!ctx ||
5622 !(S_ISDIR(inode->vfs_inode.i_mode) && ctx->logging_new_name &&
5623 &inode->vfs_inode != ctx->inode)) {
5624 spin_lock(&inode->lock);
5625 inode->logged_trans = trans->transid;
5626 /*
5627 * Don't update last_log_commit if we logged that an inode exists.
5628 * We do this for two reasons:
5629 *
5630 * 1) We might have had buffered writes to this inode that were
5631 * flushed and had their ordered extents completed in this
5632 * transaction, but we did not previously log the inode with
5633 * LOG_INODE_ALL. Later the inode was evicted and after that
5634 * it was loaded again and this LOG_INODE_EXISTS log operation
5635 * happened. We must make sure that if an explicit fsync against
5636 * the inode is performed later, it logs the new extents, an
5637 * updated inode item, etc, and syncs the log. The same logic
5638 * applies to direct IO writes instead of buffered writes.
5639 *
5640 * 2) When we log the inode with LOG_INODE_EXISTS, its inode item
5641 * is logged with an i_size of 0 or whatever value was logged
5642 * before. If later the i_size of the inode is increased by a
5643 * truncate operation, the log is synced through an fsync of
5644 * some other inode and then finally an explicit fsync against
5645 * this inode is made, we must make sure this fsync logs the
5646 * inode with the new i_size, the hole between old i_size and
5647 * the new i_size, and syncs the log.
5648 */
5649 if (inode_only != LOG_INODE_EXISTS)
5650 inode->last_log_commit = inode->last_sub_trans;
5651 spin_unlock(&inode->lock);
5652 }
5653 out_unlock:
5654 mutex_unlock(&inode->log_mutex);
5655
5656 btrfs_free_path(path);
5657 btrfs_free_path(dst_path);
5658 return err;
5659 }
5660
5661 /*
5662 * Check if we need to log an inode. This is used in contexts where while
5663 * logging an inode we need to log another inode (either that it exists or in
5664 * full mode). This is used instead of btrfs_inode_in_log() because the latter
5665 * requires the inode to be in the log and have the log transaction committed,
5666 * while here we do not care if the log transaction was already committed - our
5667 * caller will commit the log later - and we want to avoid logging an inode
5668 * multiple times when multiple tasks have joined the same log transaction.
5669 */
5670 static bool need_log_inode(struct btrfs_trans_handle *trans,
5671 struct btrfs_inode *inode)
5672 {
5673 /*
5674 * If a directory was not modified, no dentries added or removed, we can
5675 * and should avoid logging it.
5676 */
5677 if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
5678 return false;
5679
5680 /*
5681 * If this inode does not have new/updated/deleted xattrs since the last
5682 * time it was logged and is flagged as logged in the current transaction,
5683 * we can skip logging it. As for new/deleted names, those are updated in
5684 * the log by link/unlink/rename operations.
5685 * In case the inode was logged and then evicted and reloaded, its
5686 * logged_trans will be 0, in which case we have to fully log it since
5687 * logged_trans is a transient field, not persisted.
5688 */
5689 if (inode->logged_trans == trans->transid &&
5690 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
5691 return false;
5692
5693 return true;
5694 }
5695
5696 struct btrfs_dir_list {
5697 u64 ino;
5698 struct list_head list;
5699 };
5700
5701 /*
5702 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5703 * details about why it is needed.
5704 * This is a recursive operation - if an existing dentry corresponds to a
5705 * directory, that directory's new entries are logged too (same behaviour as
5706 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5707 * the dentries point to, we do not lock their i_mutex; otherwise lockdep
5708 * complains about the following circular lock dependency / possible deadlock:
5709 *
5710 * CPU0 CPU1
5711 * ---- ----
5712 * lock(&type->i_mutex_dir_key#3/2);
5713 * lock(sb_internal#2);
5714 * lock(&type->i_mutex_dir_key#3/2);
5715 * lock(&sb->s_type->i_mutex_key#14);
5716 *
5717 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5718 * sb_start_intwrite() in btrfs_start_transaction().
5719 * Not locking i_mutex of the inodes is still safe because:
5720 *
5721 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5722 * that while logging the inode new references (names) are added or removed
5723 * from the inode, leaving the logged inode item with a link count that does
5724 * not match the number of logged inode reference items. This is fine because
5725 * at log replay time we compute the real number of links and correct the
5726 * link count in the inode item (see replay_one_buffer() and
5727 * link_to_fixup_dir());
5728 *
5729 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5730 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5731 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5732 * has a size that doesn't match the sum of the lengths of all the logged
5733 * names. This does not result in a problem because if a dir_item key is
5734 * logged but its matching dir_index key is not logged, at log replay time we
5735 * don't use it to replay the respective name (see replay_one_name()). On the
5736 * other hand if only the dir_index key ends up being logged, the respective
5737 * name is added to the fs/subvol tree with both the dir_item and dir_index
5738 * keys created (see replay_one_name()).
5739 * The directory's inode item with a wrong i_size is not a problem as well,
5740 * since we don't use it at log replay time to set the i_size in the inode
5741 * item of the fs/subvol tree (see overwrite_item()).
5742 */
5743 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5744 struct btrfs_root *root,
5745 struct btrfs_inode *start_inode,
5746 struct btrfs_log_ctx *ctx)
5747 {
5748 struct btrfs_fs_info *fs_info = root->fs_info;
5749 struct btrfs_root *log = root->log_root;
5750 struct btrfs_path *path;
5751 LIST_HEAD(dir_list);
5752 struct btrfs_dir_list *dir_elem;
5753 int ret = 0;
5754
5755 path = btrfs_alloc_path();
5756 if (!path)
5757 return -ENOMEM;
5758
5759 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5760 if (!dir_elem) {
5761 btrfs_free_path(path);
5762 return -ENOMEM;
5763 }
5764 dir_elem->ino = btrfs_ino(start_inode);
5765 list_add_tail(&dir_elem->list, &dir_list);
5766
5767 while (!list_empty(&dir_list)) {
5768 struct extent_buffer *leaf;
5769 struct btrfs_key min_key;
5770 int nritems;
5771 int i;
5772
5773 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5774 list);
5775 if (ret)
5776 goto next_dir_inode;
5777
5778 min_key.objectid = dir_elem->ino;
5779 min_key.type = BTRFS_DIR_ITEM_KEY;
5780 min_key.offset = 0;
5781 again:
5782 btrfs_release_path(path);
5783 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5784 if (ret < 0) {
5785 goto next_dir_inode;
5786 } else if (ret > 0) {
5787 ret = 0;
5788 goto next_dir_inode;
5789 }
5790
5791 process_leaf:
5792 leaf = path->nodes[0];
5793 nritems = btrfs_header_nritems(leaf);
5794 for (i = path->slots[0]; i < nritems; i++) {
5795 struct btrfs_dir_item *di;
5796 struct btrfs_key di_key;
5797 struct inode *di_inode;
5798 struct btrfs_dir_list *new_dir_elem;
5799 int log_mode = LOG_INODE_EXISTS;
5800 int type;
5801
5802 btrfs_item_key_to_cpu(leaf, &min_key, i);
5803 if (min_key.objectid != dir_elem->ino ||
5804 min_key.type != BTRFS_DIR_ITEM_KEY)
5805 goto next_dir_inode;
5806
5807 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5808 type = btrfs_dir_type(leaf, di);
5809 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5810 type != BTRFS_FT_DIR)
5811 continue;
5812 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5813 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5814 continue;
5815
5816 btrfs_release_path(path);
5817 di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
5818 if (IS_ERR(di_inode)) {
5819 ret = PTR_ERR(di_inode);
5820 goto next_dir_inode;
5821 }
5822
5823 if (!need_log_inode(trans, BTRFS_I(di_inode))) {
5824 btrfs_add_delayed_iput(di_inode);
5825 break;
5826 }
5827
5828 ctx->log_new_dentries = false;
5829 if (type == BTRFS_FT_DIR)
5830 log_mode = LOG_INODE_ALL;
5831 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5832 log_mode, ctx);
5833 btrfs_add_delayed_iput(di_inode);
5834 if (ret)
5835 goto next_dir_inode;
5836 if (ctx->log_new_dentries) {
5837 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5838 GFP_NOFS);
5839 if (!new_dir_elem) {
5840 ret = -ENOMEM;
5841 goto next_dir_inode;
5842 }
5843 new_dir_elem->ino = di_key.objectid;
5844 list_add_tail(&new_dir_elem->list, &dir_list);
5845 }
5846 break;
5847 }
5848 if (i == nritems) {
5849 ret = btrfs_next_leaf(log, path);
5850 if (ret < 0) {
5851 goto next_dir_inode;
5852 } else if (ret > 0) {
5853 ret = 0;
5854 goto next_dir_inode;
5855 }
5856 goto process_leaf;
5857 }
5858 if (min_key.offset < (u64)-1) {
5859 min_key.offset++;
5860 goto again;
5861 }
5862 next_dir_inode:
5863 list_del(&dir_elem->list);
5864 kfree(dir_elem);
5865 }
5866
5867 btrfs_free_path(path);
5868 return ret;
5869 }
5870
5871 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5872 struct btrfs_inode *inode,
5873 struct btrfs_log_ctx *ctx)
5874 {
5875 struct btrfs_fs_info *fs_info = trans->fs_info;
5876 int ret;
5877 struct btrfs_path *path;
5878 struct btrfs_key key;
5879 struct btrfs_root *root = inode->root;
5880 const u64 ino = btrfs_ino(inode);
5881
5882 path = btrfs_alloc_path();
5883 if (!path)
5884 return -ENOMEM;
5885 path->skip_locking = 1;
5886 path->search_commit_root = 1;
5887
5888 key.objectid = ino;
5889 key.type = BTRFS_INODE_REF_KEY;
5890 key.offset = 0;
5891 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5892 if (ret < 0)
5893 goto out;
5894
5895 while (true) {
5896 struct extent_buffer *leaf = path->nodes[0];
5897 int slot = path->slots[0];
5898 u32 cur_offset = 0;
5899 u32 item_size;
5900 unsigned long ptr;
5901
5902 if (slot >= btrfs_header_nritems(leaf)) {
5903 ret = btrfs_next_leaf(root, path);
5904 if (ret < 0)
5905 goto out;
5906 else if (ret > 0)
5907 break;
5908 continue;
5909 }
5910
5911 btrfs_item_key_to_cpu(leaf, &key, slot);
5912 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5913 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5914 break;
5915
5916 item_size = btrfs_item_size_nr(leaf, slot);
5917 ptr = btrfs_item_ptr_offset(leaf, slot);
5918 while (cur_offset < item_size) {
5919 struct btrfs_key inode_key;
5920 struct inode *dir_inode;
5921
5922 inode_key.type = BTRFS_INODE_ITEM_KEY;
5923 inode_key.offset = 0;
5924
5925 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5926 struct btrfs_inode_extref *extref;
5927
5928 extref = (struct btrfs_inode_extref *)
5929 (ptr + cur_offset);
5930 inode_key.objectid = btrfs_inode_extref_parent(
5931 leaf, extref);
5932 cur_offset += sizeof(*extref);
5933 cur_offset += btrfs_inode_extref_name_len(leaf,
5934 extref);
5935 } else {
5936 inode_key.objectid = key.offset;
5937 cur_offset = item_size;
5938 }
5939
5940 dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
5941 root);
5942 /*
5943 * If the parent inode was deleted, return an error to
5944 * fall back to a transaction commit. This prevents an
5945 * inode that was moved from one parent A to a parent B,
5946 * had its former parent A deleted and was then fsync'ed,
5947 * from existing at both parents after a log replay (with
5948 * the old parent still existing).
5949 * Example:
5950 *
5951 * mkdir /mnt/A
5952 * mkdir /mnt/B
5953 * touch /mnt/B/bar
5954 * sync
5955 * mv /mnt/B/bar /mnt/A/bar
5956 * mv -T /mnt/A /mnt/B
5957 * fsync /mnt/B/bar
5958 * <power fail>
5959 *
5960 * If we ignore the old parent B which got deleted,
5961 * after a log replay we would have file bar linked
5962 * at both parents and the old parent B would still
5963 * exist.
5964 */
5965 if (IS_ERR(dir_inode)) {
5966 ret = PTR_ERR(dir_inode);
5967 goto out;
5968 }
5969
5970 if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
5971 btrfs_add_delayed_iput(dir_inode);
5972 continue;
5973 }
5974
5975 if (ctx)
5976 ctx->log_new_dentries = false;
5977 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5978 LOG_INODE_ALL, ctx);
5979 if (!ret && ctx && ctx->log_new_dentries)
5980 ret = log_new_dir_dentries(trans, root,
5981 BTRFS_I(dir_inode), ctx);
5982 btrfs_add_delayed_iput(dir_inode);
5983 if (ret)
5984 goto out;
5985 }
5986 path->slots[0]++;
5987 }
5988 ret = 0;
5989 out:
5990 btrfs_free_path(path);
5991 return ret;
5992 }
5993
5994 static int log_new_ancestors(struct btrfs_trans_handle *trans,
5995 struct btrfs_root *root,
5996 struct btrfs_path *path,
5997 struct btrfs_log_ctx *ctx)
5998 {
5999 struct btrfs_key found_key;
6000
6001 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
6002
6003 while (true) {
6004 struct btrfs_fs_info *fs_info = root->fs_info;
6005 struct extent_buffer *leaf = path->nodes[0];
6006 int slot = path->slots[0];
6007 struct btrfs_key search_key;
6008 struct inode *inode;
6009 u64 ino;
6010 int ret = 0;
6011
6012 btrfs_release_path(path);
6013
6014 ino = found_key.offset;
6015
6016 search_key.objectid = found_key.offset;
6017 search_key.type = BTRFS_INODE_ITEM_KEY;
6018 search_key.offset = 0;
6019 inode = btrfs_iget(fs_info->sb, ino, root);
6020 if (IS_ERR(inode))
6021 return PTR_ERR(inode);
6022
6023 if (BTRFS_I(inode)->generation >= trans->transid &&
6024 need_log_inode(trans, BTRFS_I(inode)))
6025 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
6026 LOG_INODE_EXISTS, ctx);
6027 btrfs_add_delayed_iput(inode);
6028 if (ret)
6029 return ret;
6030
6031 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
6032 break;
6033
6034 search_key.type = BTRFS_INODE_REF_KEY;
6035 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6036 if (ret < 0)
6037 return ret;
6038
6039 leaf = path->nodes[0];
6040 slot = path->slots[0];
6041 if (slot >= btrfs_header_nritems(leaf)) {
6042 ret = btrfs_next_leaf(root, path);
6043 if (ret < 0)
6044 return ret;
6045 else if (ret > 0)
6046 return -ENOENT;
6047 leaf = path->nodes[0];
6048 slot = path->slots[0];
6049 }
6050
6051 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6052 if (found_key.objectid != search_key.objectid ||
6053 found_key.type != BTRFS_INODE_REF_KEY)
6054 return -ENOENT;
6055 }
6056 return 0;
6057 }
6058
6059 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
6060 struct btrfs_inode *inode,
6061 struct dentry *parent,
6062 struct btrfs_log_ctx *ctx)
6063 {
6064 struct btrfs_root *root = inode->root;
6065 struct dentry *old_parent = NULL;
6066 struct super_block *sb = inode->vfs_inode.i_sb;
6067 int ret = 0;
6068
6069 while (true) {
6070 if (!parent || d_really_is_negative(parent) ||
6071 sb != parent->d_sb)
6072 break;
6073
6074 inode = BTRFS_I(d_inode(parent));
6075 if (root != inode->root)
6076 break;
6077
6078 if (inode->generation >= trans->transid &&
6079 need_log_inode(trans, inode)) {
6080 ret = btrfs_log_inode(trans, root, inode,
6081 LOG_INODE_EXISTS, ctx);
6082 if (ret)
6083 break;
6084 }
6085 if (IS_ROOT(parent))
6086 break;
6087
6088 parent = dget_parent(parent);
6089 dput(old_parent);
6090 old_parent = parent;
6091 }
6092 dput(old_parent);
6093
6094 return ret;
6095 }
6096
6097 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
6098 struct btrfs_inode *inode,
6099 struct dentry *parent,
6100 struct btrfs_log_ctx *ctx)
6101 {
6102 struct btrfs_root *root = inode->root;
6103 const u64 ino = btrfs_ino(inode);
6104 struct btrfs_path *path;
6105 struct btrfs_key search_key;
6106 int ret;
6107
6108 /*
6109 * For a single hard link case, go through a fast path that does not
6110 * need to iterate the fs/subvolume tree.
6111 */
6112 if (inode->vfs_inode.i_nlink < 2)
6113 return log_new_ancestors_fast(trans, inode, parent, ctx);
6114
6115 path = btrfs_alloc_path();
6116 if (!path)
6117 return -ENOMEM;
6118
6119 search_key.objectid = ino;
6120 search_key.type = BTRFS_INODE_REF_KEY;
6121 search_key.offset = 0;
6122 again:
6123 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6124 if (ret < 0)
6125 goto out;
6126 if (ret == 0)
6127 path->slots[0]++;
6128
6129 while (true) {
6130 struct extent_buffer *leaf = path->nodes[0];
6131 int slot = path->slots[0];
6132 struct btrfs_key found_key;
6133
6134 if (slot >= btrfs_header_nritems(leaf)) {
6135 ret = btrfs_next_leaf(root, path);
6136 if (ret < 0)
6137 goto out;
6138 else if (ret > 0)
6139 break;
6140 continue;
6141 }
6142
6143 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6144 if (found_key.objectid != ino ||
6145 found_key.type > BTRFS_INODE_EXTREF_KEY)
6146 break;
6147
6148 /*
6149 * Don't deal with extended references because they are rare
6150 * cases and too complex to handle (we would need to keep
6151 * track of which subitem we are processing for each item in
6152 * this loop, etc). So just return an error to fall back to
6153 * a transaction commit.
6154 */
6155 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
6156 ret = -EMLINK;
6157 goto out;
6158 }
6159
6160 /*
6161 * Logging ancestors needs to do more searches on the fs/subvol
6162 * tree, so it releases the path as needed to avoid deadlocks.
6163 * Keep track of the last inode ref key and resume from that key
6164 * after logging all new ancestors for the current hard link.
6165 */
6166 memcpy(&search_key, &found_key, sizeof(search_key));
6167
6168 ret = log_new_ancestors(trans, root, path, ctx);
6169 if (ret)
6170 goto out;
6171 btrfs_release_path(path);
6172 goto again;
6173 }
6174 ret = 0;
6175 out:
6176 btrfs_free_path(path);
6177 return ret;
6178 }
6179
6180 /*
6181 * helper function around btrfs_log_inode to make sure newly created
6182 * parent directories also end up in the log. A minimal, inode and
6183 * backref only, logging is done for any parent directories that are
6184 * older than the last committed transaction
6185 */
6186 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6187 struct btrfs_inode *inode,
6188 struct dentry *parent,
6189 int inode_only,
6190 struct btrfs_log_ctx *ctx)
6191 {
6192 struct btrfs_root *root = inode->root;
6193 struct btrfs_fs_info *fs_info = root->fs_info;
6194 int ret = 0;
6195 bool log_dentries = false;
6196
6197 if (btrfs_test_opt(fs_info, NOTREELOG)) {
6198 ret = 1;
6199 goto end_no_trans;
6200 }
6201
6202 if (btrfs_root_refs(&root->root_item) == 0) {
6203 ret = 1;
6204 goto end_no_trans;
6205 }
6206
6207 /*
6208 * Skip already logged inodes or inodes corresponding to tmpfiles
6209 * (since logging them is pointless, a link count of 0 means they
6210 * will never be accessible).
6211 */
6212 if ((btrfs_inode_in_log(inode, trans->transid) &&
6213 list_empty(&ctx->ordered_extents)) ||
6214 inode->vfs_inode.i_nlink == 0) {
6215 ret = BTRFS_NO_LOG_SYNC;
6216 goto end_no_trans;
6217 }
6218
6219 ret = start_log_trans(trans, root, ctx);
6220 if (ret)
6221 goto end_no_trans;
6222
6223 ret = btrfs_log_inode(trans, root, inode, inode_only, ctx);
6224 if (ret)
6225 goto end_trans;
6226
6227 /*
6228 * for a regular file, if its inode is already on disk, we don't
6229 * have to worry about the parents at all. This is because
6230 * we can use the last_unlink_trans field to record renames
6231 * and other fun in this file.
6232 */
6233 if (S_ISREG(inode->vfs_inode.i_mode) &&
6234 inode->generation < trans->transid &&
6235 inode->last_unlink_trans < trans->transid) {
6236 ret = 0;
6237 goto end_trans;
6238 }
6239
6240 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
6241 log_dentries = true;
6242
6243 /*
6244 * On unlink we must make sure all our current and old parent directory
6245 * inodes are fully logged. This is to prevent leaving dangling
6246 * directory index entries in directories that were our parents but are
6247 * not anymore. Not doing this results in old parent directory being
6248 * impossible to delete after log replay (rmdir will always fail with
6249 * error -ENOTEMPTY).
6250 *
6251 * Example 1:
6252 *
6253 * mkdir testdir
6254 * touch testdir/foo
6255 * ln testdir/foo testdir/bar
6256 * sync
6257 * unlink testdir/bar
6258 * xfs_io -c fsync testdir/foo
6259 * <power failure>
6260 * mount fs, triggers log replay
6261 *
6262 * If we don't log the parent directory (testdir), after log replay the
6263 * directory still has an entry pointing to the file inode using the bar
6264 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6265 * the file inode has a link count of 1.
6266 *
6267 * Example 2:
6268 *
6269 * mkdir testdir
6270 * touch foo
6271 * ln foo testdir/foo2
6272 * ln foo testdir/foo3
6273 * sync
6274 * unlink testdir/foo3
6275 * xfs_io -c fsync foo
6276 * <power failure>
6277 * mount fs, triggers log replay
6278 *
6279 * Similar to the first example, after log replay the parent directory
6280 * testdir still has an entry pointing to the file inode with name foo3
6281 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6282 * and has a link count of 2.
6283 */
6284 if (inode->last_unlink_trans >= trans->transid) {
6285 ret = btrfs_log_all_parents(trans, inode, ctx);
6286 if (ret)
6287 goto end_trans;
6288 }
6289
6290 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6291 if (ret)
6292 goto end_trans;
6293
6294 if (log_dentries)
6295 ret = log_new_dir_dentries(trans, root, inode, ctx);
6296 else
6297 ret = 0;
6298 end_trans:
6299 if (ret < 0) {
6300 btrfs_set_log_full_commit(trans);
6301 ret = 1;
6302 }
6303
6304 if (ret)
6305 btrfs_remove_log_ctx(root, ctx);
6306 btrfs_end_log_trans(root);
6307 end_no_trans:
6308 return ret;
6309 }
6310
6311 /*
6312 * it is not safe to log a dentry if the chunk root has added new
6313 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
6314 * If this returns 1, you must commit the transaction to safely get your
6315 * data on disk.
6316 */
6317 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6318 struct dentry *dentry,
6319 struct btrfs_log_ctx *ctx)
6320 {
6321 struct dentry *parent = dget_parent(dentry);
6322 int ret;
6323
6324 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6325 LOG_INODE_ALL, ctx);
6326 dput(parent);
6327
6328 return ret;
6329 }
6330
6331 /*
6332 * should be called during mount to recover and replay any log trees
6333 * from the FS
6334 */
6335 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6336 {
6337 int ret;
6338 struct btrfs_path *path;
6339 struct btrfs_trans_handle *trans;
6340 struct btrfs_key key;
6341 struct btrfs_key found_key;
6342 struct btrfs_root *log;
6343 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6344 struct walk_control wc = {
6345 .process_func = process_one_buffer,
6346 .stage = LOG_WALK_PIN_ONLY,
6347 };
6348
6349 path = btrfs_alloc_path();
6350 if (!path)
6351 return -ENOMEM;
6352
6353 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6354
6355 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6356 if (IS_ERR(trans)) {
6357 ret = PTR_ERR(trans);
6358 goto error;
6359 }
6360
6361 wc.trans = trans;
6362 wc.pin = 1;
6363
6364 ret = walk_log_tree(trans, log_root_tree, &wc);
6365 if (ret) {
6366 btrfs_handle_fs_error(fs_info, ret,
6367 "Failed to pin buffers while recovering log root tree.");
6368 goto error;
6369 }
6370
6371 again:
6372 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6373 key.offset = (u64)-1;
6374 key.type = BTRFS_ROOT_ITEM_KEY;
6375
6376 while (1) {
6377 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6378
6379 if (ret < 0) {
6380 btrfs_handle_fs_error(fs_info, ret,
6381 "Couldn't find tree log root.");
6382 goto error;
6383 }
6384 if (ret > 0) {
6385 if (path->slots[0] == 0)
6386 break;
6387 path->slots[0]--;
6388 }
6389 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6390 path->slots[0]);
6391 btrfs_release_path(path);
6392 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6393 break;
6394
6395 log = btrfs_read_tree_root(log_root_tree, &found_key);
6396 if (IS_ERR(log)) {
6397 ret = PTR_ERR(log);
6398 btrfs_handle_fs_error(fs_info, ret,
6399 "Couldn't read tree log root.");
6400 goto error;
6401 }
6402
6403 wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
6404 true);
6405 if (IS_ERR(wc.replay_dest)) {
6406 ret = PTR_ERR(wc.replay_dest);
6407
6408 /*
6409 * We didn't find the subvol, likely because it was
6410 * deleted. This is ok, simply skip this log and go to
6411 * the next one.
6412 *
6413 * We need to exclude the root because we can't have
6414 * other log replays overwriting this log as we'll read
6415 * it back in a few more times. This will keep our
6416 * block from being modified, and we'll just bail for
6417 * each subsequent pass.
6418 */
6419 if (ret == -ENOENT)
6420 ret = btrfs_pin_extent_for_log_replay(trans,
6421 log->node->start,
6422 log->node->len);
6423 btrfs_put_root(log);
6424
6425 if (!ret)
6426 goto next;
6427 btrfs_handle_fs_error(fs_info, ret,
6428 "Couldn't read target root for tree log recovery.");
6429 goto error;
6430 }
6431
6432 wc.replay_dest->log_root = log;
6433 ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
6434 if (ret)
6435 /* The loop needs to continue due to the root refs */
6436 btrfs_handle_fs_error(fs_info, ret,
6437 "failed to record the log root in transaction");
6438 else
6439 ret = walk_log_tree(trans, log, &wc);
6440
6441 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6442 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6443 path);
6444 }
6445
6446 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6447 struct btrfs_root *root = wc.replay_dest;
6448
6449 btrfs_release_path(path);
6450
6451 /*
6452 * We have just replayed everything, and the highest
6453 * objectid of fs roots probably has changed in case
6454 * some inode_items got replayed.
6455 *
6456 * root->objectid_mutex is not acquired as log replay
6457 * could only happen during mount.
6458 */
6459 ret = btrfs_init_root_free_objectid(root);
6460 }
6461
6462 wc.replay_dest->log_root = NULL;
6463 btrfs_put_root(wc.replay_dest);
6464 btrfs_put_root(log);
6465
6466 if (ret)
6467 goto error;
6468 next:
6469 if (found_key.offset == 0)
6470 break;
6471 key.offset = found_key.offset - 1;
6472 }
6473 btrfs_release_path(path);
6474
6475 /* step one is to pin it all, step two is to replay just inodes */
6476 if (wc.pin) {
6477 wc.pin = 0;
6478 wc.process_func = replay_one_buffer;
6479 wc.stage = LOG_WALK_REPLAY_INODES;
6480 goto again;
6481 }
6482 /* step three is to replay everything */
6483 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6484 wc.stage++;
6485 goto again;
6486 }
6487
6488 btrfs_free_path(path);
6489
6490 /* step 4: commit the transaction, which also unpins the blocks */
6491 ret = btrfs_commit_transaction(trans);
6492 if (ret)
6493 return ret;
6494
6495 log_root_tree->log_root = NULL;
6496 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6497 btrfs_put_root(log_root_tree);
6498
6499 return 0;
6500 error:
6501 if (wc.trans)
6502 btrfs_end_transaction(wc.trans);
6503 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6504 btrfs_free_path(path);
6505 return ret;
6506 }
6507
6508 /*
6509 * there are some corner cases where we want to force a full
6510 * commit instead of allowing a directory to be logged.
6511 *
6512 * They revolve around files that were unlinked from the directory, and
6513 * this function updates the parent directory so that a full commit is
6514 * properly done if it is fsync'd later after the unlinks are done.
6515 *
6516 * Must be called before the unlink operations (updates to the subvolume tree,
6517 * inodes, etc) are done.
6518 */
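/*
 * Illustrative rename case (hypothetical names) that reaches the
 * "record" label below:
 *
 *   mkdir A B
 *   touch A/foo
 *   sync
 *   mv A/foo B/foo      (this function runs before the rename updates)
 *   xfs_io -c fsync A
 *
 * Recording A's last_unlink_trans makes the later fsync of A force a
 * full transaction commit, so the new name under B is not lost.
 */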
6519 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6520 struct btrfs_inode *dir, struct btrfs_inode *inode,
6521 int for_rename)
6522 {
6523 /*
6524 * when we're logging a file, if it hasn't been renamed
6525 * or unlinked, and its inode is fully committed on disk,
6526 * we don't have to worry about walking up the directory chain
6527 * to log its parents.
6528 *
6529 * So, we use the last_unlink_trans field to put this transid
6530 * into the file. When the file is logged we check it and
6531 * don't log the parents if the file is fully on disk.
6532 */
6533 mutex_lock(&inode->log_mutex);
6534 inode->last_unlink_trans = trans->transid;
6535 mutex_unlock(&inode->log_mutex);
6536
6537 /*
6538 * if this directory was already logged, any new
6539 * names for this file/dir will get recorded
6540 */
6541 if (dir->logged_trans == trans->transid)
6542 return;
6543
6544 /*
6545 * if the inode we're about to unlink was logged,
6546 * the log will be properly updated for any new names
6547 */
6548 if (inode->logged_trans == trans->transid)
6549 return;
6550
6551 /*
6552 * when renaming files across directories, if the directory
6553 * we're unlinking from gets fsync'd later on, there's
6554 * no way to find the destination directory later and fsync it
6555 * properly. So, we have to be conservative and force commits
6556 * so the new name gets discovered.
6557 */
6558 if (for_rename)
6559 goto record;
6560
6561 /* we can safely do the unlink without any special recording */
6562 return;
6563
6564 record:
6565 mutex_lock(&dir->log_mutex);
6566 dir->last_unlink_trans = trans->transid;
6567 mutex_unlock(&dir->log_mutex);
6568 }
6569
6570 /*
6571 * Make sure that if someone attempts to fsync the parent directory of a deleted
6572 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6573 * that after replaying the log tree of the parent directory's root we will not
6574 * see the snapshot anymore and at log replay time we will not see any log tree
6575 * corresponding to the deleted snapshot's root, which could lead to replaying
6576 * it after replaying the log tree of the parent directory (which would replay
6577 * the snapshot delete operation).
6578 *
6579 * Must be called before the actual snapshot destroy operation (updates to the
6580 * parent root and tree of tree roots trees, etc) are done.
6581 */
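/*
 * Illustrative sequence (hypothetical names):
 *
 *   btrfs subvolume snapshot /mnt /mnt/snap
 *   sync
 *   btrfs subvolume delete /mnt/snap
 *   xfs_io -c fsync /mnt
 *   <power fail>
 *
 * Updating the parent directory's last_unlink_trans makes the fsync
 * fall back to a transaction commit, so log replay cannot resurrect
 * the deleted snapshot.
 */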
6582 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6583 struct btrfs_inode *dir)
6584 {
6585 mutex_lock(&dir->log_mutex);
6586 dir->last_unlink_trans = trans->transid;
6587 mutex_unlock(&dir->log_mutex);
6588 }
6589
6590 /**
6591 * btrfs_log_new_name - Update the log after adding a new name for an inode.
6592 *
6593 * @trans: Transaction handle.
6594 * @old_dentry: The dentry associated with the old name and the old
6595 * parent directory.
6596 * @old_dir: The inode of the previous parent directory for the case
6597 * of a rename. For a link operation, it must be NULL.
6598 * @parent: The dentry associated with the directory under which the
6599 * new name is located.
6600 *
6601 * Call this after adding a new name for an inode, as a result of a link or
6602 * rename operation, and it will properly update the log to reflect the new name.
6603 */
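/*
 * Illustrative calls (hypothetical names):
 *
 *   ln foo dir/bar     -> old_dentry for foo, old_dir == NULL,
 *                         parent is dir's dentry
 *   mv A/foo B/foo     -> old_dentry for A/foo, old_dir is A's inode,
 *                         parent is B's dentry
 */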
6604 void btrfs_log_new_name(struct btrfs_trans_handle *trans,
6605 struct dentry *old_dentry, struct btrfs_inode *old_dir,
6606 struct dentry *parent)
6607 {
6608 struct btrfs_inode *inode = BTRFS_I(d_inode(old_dentry));
6609 struct btrfs_log_ctx ctx;
6610
6611 /*
6612 * this will force the logging code to walk the dentry chain
6613 * up for the file
6614 */
6615 if (!S_ISDIR(inode->vfs_inode.i_mode))
6616 inode->last_unlink_trans = trans->transid;
6617
6618 /*
6619 * if this inode hasn't been logged and the directory we're renaming it
6620 * from hasn't been logged, we don't need to log it
6621 */
6622 if (!inode_logged(trans, inode) &&
6623 (!old_dir || !inode_logged(trans, old_dir)))
6624 return;
6625
6626 /*
6627 * If we are doing a rename (old_dir is not NULL) from a directory that
6628 * was previously logged, make sure the next log attempt on the directory
6629 * is not skipped and logs the inode again. This is because the log may
6630 * not currently be authoritative for a range including the old
6631 * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
6632 * sure after a log replay we do not end up with both the new and old
6633 * dentries around (in case the inode is a directory we would have a
6634 * directory with two hard links and 2 inode references for different
6635 * parents). The next log attempt of old_dir will happen at
6636 * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
6637 * below, because we have previously set inode->last_unlink_trans to the
6638 * current transaction ID, either here or at btrfs_record_unlink_dir() in
6639 * case inode is a directory.
6640 */
6641 if (old_dir)
6642 old_dir->logged_trans = 0;
6643
6644 btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
6645 ctx.logging_new_name = true;
6646 /*
6647 * We don't care about the return value. If we fail to log the new name
6648 * then we know the next attempt to sync the log will fall back to a full
6649 * transaction commit (due to a call to btrfs_set_log_full_commit()), so
6650 * we don't need to worry about getting a log committed that has an
6651 * inconsistent state after a rename operation.
6652 */
6653 btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
6654 }
6655
6656