1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/blkdev.h>
9 #include <linux/list_sort.h>
10 #include <linux/iversion.h>
11 #include "misc.h"
12 #include "ctree.h"
13 #include "tree-log.h"
14 #include "disk-io.h"
15 #include "locking.h"
16 #include "print-tree.h"
17 #include "backref.h"
18 #include "compression.h"
19 #include "qgroup.h"
20 #include "inode-map.h"
21
22 /* magic values for the inode_only field in btrfs_log_inode:
23 *
24 * LOG_INODE_ALL means to log everything
25 * LOG_INODE_EXISTS means to log just enough to recreate the inode
26 * during log replay
27 */
28 enum {
29 LOG_INODE_ALL,
30 LOG_INODE_EXISTS,
31 LOG_OTHER_INODE,
32 LOG_OTHER_INODE_ALL,
33 };
34
35 /*
36 * directory trouble cases
37 *
38 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
39 * log, we must force a full commit before doing an fsync of the directory
40 * where the unlink was done.
41 * ---> record transid of last unlink/rename per directory
42 *
43 * mkdir foo/some_dir
44 * normal commit
45 * rename foo/some_dir foo2/some_dir
46 * mkdir foo/some_dir
47 * fsync foo/some_dir/some_file
48 *
49 * The fsync above will unlink the original some_dir without recording
50 * it in its new location (foo2). After a crash, some_dir will be gone
51 * unless the fsync of some_file forces a full commit
52 *
53 * 2) we must log any new names for any file or dir that is in the fsync
54 * log. ---> check inode while renaming/linking.
55 *
56 * 2a) we must log any new names for any file or dir during rename
57 * when the directory they are being removed from was logged.
58 * ---> check inode and old parent dir during rename
59 *
60 * 2a is actually the more important variant. With the extra logging
61 * a crash might unlink the old name without recreating the new one
62 *
63 * 3) after a crash, we must go through any directories with a link count
64 * of zero and redo the rm -rf
65 *
66 * mkdir f1/foo
67 * normal commit
68 * rm -rf f1/foo
69 * fsync(f1)
70 *
71 * The directory f1 was fully removed from the FS, but fsync was never
72 * called on f1, only its parent dir. After a crash the rm -rf must
73 * be replayed. This must be able to recurse down the entire
74 * directory tree. The inode link count fixup code takes care of the
75 * ugly details.
76 */
77
78 /*
79 * stages for the tree walking. The first
80 * stage (0) is to only pin down the blocks we find,
81 * the second stage (1) is to make sure that all the inodes
82 * we find in the log are created in the subvolume.
83 *
84 * The last stage is to deal with directories and links and extents
85 * and all the other fun semantics
86 */
87 enum {
88 LOG_WALK_PIN_ONLY,
89 LOG_WALK_REPLAY_INODES,
90 LOG_WALK_REPLAY_DIR_INDEX,
91 LOG_WALK_REPLAY_ALL,
92 };
93
94 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
95 struct btrfs_root *root, struct btrfs_inode *inode,
96 int inode_only,
97 const loff_t start,
98 const loff_t end,
99 struct btrfs_log_ctx *ctx);
100 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
101 struct btrfs_root *root,
102 struct btrfs_path *path, u64 objectid);
103 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
104 struct btrfs_root *root,
105 struct btrfs_root *log,
106 struct btrfs_path *path,
107 u64 dirid, int del_all);
108
109 /*
110 * tree logging is a special write ahead log used to make sure that
111 * fsyncs and O_SYNCs can happen without doing full tree commits.
112 *
113 * Full tree commits are expensive because they require commonly
114 * modified blocks to be recowed, creating many dirty pages in the
115 * extent tree and a 4x-6x higher write load than ext3.
116 *
117 * Instead of doing a tree commit on every fsync, we use the
118 * key ranges and transaction ids to find items for a given file or directory
119 * that have changed in this transaction. Those items are copied into
120 * a special tree (one per subvolume root), that tree is written to disk
121 * and then the fsync is considered complete.
122 *
123 * After a crash, items are copied out of the log-tree back into the
124 * subvolume tree. Any file data extents found are recorded in the extent
125 * allocation tree, and the log-tree freed.
126 *
127 * The log tree is read three times: once to pin down all the extents it is
128 * using in RAM, once to create all the inodes logged in the tree,
129 * and once to do all the other items.
130 */
131
132 /*
133 * start a sub transaction and set up the log tree.
134 * This increments the log tree writer count to make the people
135 * syncing the tree wait for us to finish.
136 */
137 static int start_log_trans(struct btrfs_trans_handle *trans,
138 struct btrfs_root *root,
139 struct btrfs_log_ctx *ctx)
140 {
141 struct btrfs_fs_info *fs_info = root->fs_info;
142 int ret = 0;
143
144 mutex_lock(&root->log_mutex);
145
146 if (root->log_root) {
147 if (btrfs_need_log_full_commit(trans)) {
148 ret = -EAGAIN;
149 goto out;
150 }
151
152 if (!root->log_start_pid) {
153 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
154 root->log_start_pid = current->pid;
155 } else if (root->log_start_pid != current->pid) {
156 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
157 }
158 } else {
159 mutex_lock(&fs_info->tree_log_mutex);
160 if (!fs_info->log_root_tree)
161 ret = btrfs_init_log_root_tree(trans, fs_info);
162 mutex_unlock(&fs_info->tree_log_mutex);
163 if (ret)
164 goto out;
165
166 ret = btrfs_add_log_tree(trans, root);
167 if (ret)
168 goto out;
169
170 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
171 root->log_start_pid = current->pid;
172 }
173
174 atomic_inc(&root->log_batch);
175 atomic_inc(&root->log_writers);
176 if (ctx) {
177 int index = root->log_transid % 2;
178 list_add_tail(&ctx->list, &root->log_ctxs[index]);
179 ctx->log_transid = root->log_transid;
180 }
181
182 out:
183 mutex_unlock(&root->log_mutex);
184 return ret;
185 }
186
187 /*
188 * returns 0 if there was a log transaction running and we were able
189 * to join, or returns -ENOENT if there was no transaction
190 * in progress
191 */
192 static int join_running_log_trans(struct btrfs_root *root)
193 {
194 int ret = -ENOENT;
195
196 mutex_lock(&root->log_mutex);
197 if (root->log_root) {
198 ret = 0;
199 atomic_inc(&root->log_writers);
200 }
201 mutex_unlock(&root->log_mutex);
202 return ret;
203 }
204
205 /*
206 * This either makes the current running log transaction wait
207 * until you call btrfs_end_log_trans() or it makes any future
208 * log transactions wait until you call btrfs_end_log_trans()
209 */
210 void btrfs_pin_log_trans(struct btrfs_root *root)
211 {
212 mutex_lock(&root->log_mutex);
213 atomic_inc(&root->log_writers);
214 mutex_unlock(&root->log_mutex);
215 }
216
217 /*
218 * indicate we're done making changes to the log tree
219 * and wake up anyone waiting to do a sync
220 */
221 void btrfs_end_log_trans(struct btrfs_root *root)
222 {
223 if (atomic_dec_and_test(&root->log_writers)) {
224 /* atomic_dec_and_test implies a barrier */
225 cond_wake_up_nomb(&root->log_writer_wait);
226 }
227 }
228
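/*
 * Helpers to start writeback of, or wait for writeback on, the pages backing
 * a single tree block; used when flushing log tree blocks during a sync.
 */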
229 static int btrfs_write_tree_block(struct extent_buffer *buf)
230 {
231 return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
232 buf->start + buf->len - 1);
233 }
234
235 static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
236 {
237 filemap_fdatawait_range(buf->pages[0]->mapping,
238 buf->start, buf->start + buf->len - 1);
239 }
240
241 /*
242 * the walk control struct is used to pass state down the chain when
243 * processing the log tree. The stage field tells us which part
244 * of the log tree processing we are currently doing. The others
245 * are state fields used for that specific part
246 */
247 struct walk_control {
248 /* should we free the extent on disk when done? This is used
249 * at transaction commit time while freeing a log tree
250 */
251 int free;
252
253 /* should we write out the extent buffer? This is used
254 * while flushing the log tree to disk during a sync
255 */
256 int write;
257
258 /* should we wait for the extent buffer io to finish? Also used
259 * while flushing the log tree to disk for a sync
260 */
261 int wait;
262
263 /* pin only walk, we record which extents on disk belong to the
264 * log trees
265 */
266 int pin;
267
268 /* what stage of the replay code we're currently in */
269 int stage;
270
271 /*
272 * Ignore any items from the inode currently being processed. Needs
273 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
274 * the LOG_WALK_REPLAY_INODES stage.
275 */
276 bool ignore_cur_inode;
277
278 /* the root we are currently replaying */
279 struct btrfs_root *replay_dest;
280
281 /* the trans handle for the current replay */
282 struct btrfs_trans_handle *trans;
283
284 /* the function that gets used to process blocks we find in the
285 * tree. Note the extent_buffer might not be up to date when it is
286 * passed in, and it must be checked or read if you need the data
287 * inside it
288 */
289 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
290 struct walk_control *wc, u64 gen, int level);
291 };
292
293 /*
294 * process_func used to pin down extents, write them or wait on them
295 */
296 static int process_one_buffer(struct btrfs_root *log,
297 struct extent_buffer *eb,
298 struct walk_control *wc, u64 gen, int level)
299 {
300 struct btrfs_fs_info *fs_info = log->fs_info;
301 int ret = 0;
302
303 /*
304 * If this fs is mixed then we need to be able to process the leaves to
305 * pin down any logged extents, so we have to read the block.
306 */
307 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
308 ret = btrfs_read_buffer(eb, gen, level, NULL);
309 if (ret)
310 return ret;
311 }
312
313 if (wc->pin)
314 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
315 eb->len);
316
317 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
318 if (wc->pin && btrfs_header_level(eb) == 0)
319 ret = btrfs_exclude_logged_extents(eb);
320 if (wc->write)
321 btrfs_write_tree_block(eb);
322 if (wc->wait)
323 btrfs_wait_tree_block_writeback(eb);
324 }
325 return ret;
326 }
327
328 /*
329 * Item overwrite used by replay and tree logging. eb, slot and key all refer
330 * to the src data we are copying out.
331 *
332 * root is the tree we are copying into, and path is a scratch
333 * path for use in this function (it should be released on entry and
334 * will be released on exit).
335 *
336 * If the key is already in the destination tree the existing item is
337 * overwritten. If the existing item isn't big enough, it is extended.
338 * If it is too large, it is truncated.
339 *
340 * If the key isn't in the destination yet, a new item is inserted.
341 */
342 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
343 struct btrfs_root *root,
344 struct btrfs_path *path,
345 struct extent_buffer *eb, int slot,
346 struct btrfs_key *key)
347 {
348 int ret;
349 u32 item_size;
350 u64 saved_i_size = 0;
351 int save_old_i_size = 0;
352 unsigned long src_ptr;
353 unsigned long dst_ptr;
354 int overwrite_root = 0;
355 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
356
357 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
358 overwrite_root = 1;
359
360 item_size = btrfs_item_size_nr(eb, slot);
361 src_ptr = btrfs_item_ptr_offset(eb, slot);
362
363 /* look for the key in the destination tree */
364 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
365 if (ret < 0)
366 return ret;
367
368 if (ret == 0) {
369 char *src_copy;
370 char *dst_copy;
371 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
372 path->slots[0]);
373 if (dst_size != item_size)
374 goto insert;
375
376 if (item_size == 0) {
377 btrfs_release_path(path);
378 return 0;
379 }
380 dst_copy = kmalloc(item_size, GFP_NOFS);
381 src_copy = kmalloc(item_size, GFP_NOFS);
382 if (!dst_copy || !src_copy) {
383 btrfs_release_path(path);
384 kfree(dst_copy);
385 kfree(src_copy);
386 return -ENOMEM;
387 }
388
389 read_extent_buffer(eb, src_copy, src_ptr, item_size);
390
391 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
392 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
393 item_size);
394 ret = memcmp(dst_copy, src_copy, item_size);
395
396 kfree(dst_copy);
397 kfree(src_copy);
398 /*
399 * they have the same contents, just return, this saves
400 * us from cowing blocks in the destination tree and doing
401 * extra writes that may not have been done by a previous
402 * sync
403 */
404 if (ret == 0) {
405 btrfs_release_path(path);
406 return 0;
407 }
408
409 /*
410 * We need to load the old nbytes into the inode so when we
411 * replay the extents we've logged we get the right nbytes.
412 */
413 if (inode_item) {
414 struct btrfs_inode_item *item;
415 u64 nbytes;
416 u32 mode;
417
418 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
419 struct btrfs_inode_item);
420 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
421 item = btrfs_item_ptr(eb, slot,
422 struct btrfs_inode_item);
423 btrfs_set_inode_nbytes(eb, item, nbytes);
424
425 /*
426 * If this is a directory we need to reset the i_size to
427 * 0 so that we can set it up properly when replaying
428 * the rest of the items in this log.
429 */
430 mode = btrfs_inode_mode(eb, item);
431 if (S_ISDIR(mode))
432 btrfs_set_inode_size(eb, item, 0);
433 }
434 } else if (inode_item) {
435 struct btrfs_inode_item *item;
436 u32 mode;
437
438 /*
439 * New inode, set nbytes to 0 so that the nbytes comes out
440 * properly when we replay the extents.
441 */
442 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
443 btrfs_set_inode_nbytes(eb, item, 0);
444
445 /*
446 * If this is a directory we need to reset the i_size to 0 so
447 * that we can set it up properly when replaying the rest of
448 * the items in this log.
449 */
450 mode = btrfs_inode_mode(eb, item);
451 if (S_ISDIR(mode))
452 btrfs_set_inode_size(eb, item, 0);
453 }
454 insert:
455 btrfs_release_path(path);
456 /* try to insert the key into the destination tree */
457 path->skip_release_on_error = 1;
458 ret = btrfs_insert_empty_item(trans, root, path,
459 key, item_size);
460 path->skip_release_on_error = 0;
461
462 /* make sure any existing item is the correct size */
463 if (ret == -EEXIST || ret == -EOVERFLOW) {
464 u32 found_size;
465 found_size = btrfs_item_size_nr(path->nodes[0],
466 path->slots[0]);
467 if (found_size > item_size)
468 btrfs_truncate_item(path, item_size, 1);
469 else if (found_size < item_size)
470 btrfs_extend_item(path, item_size - found_size);
471 } else if (ret) {
472 return ret;
473 }
474 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
475 path->slots[0]);
476
477 /* don't overwrite an existing inode if the generation number
478 * was logged as zero. This is done when the tree logging code
479 * is just logging an inode to make sure it exists after recovery.
480 *
481 * Also, don't overwrite i_size on directories during replay.
482 * log replay inserts and removes directory items based on the
483 * state of the tree found in the subvolume, and i_size is modified
484 * as it goes
485 */
486 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
487 struct btrfs_inode_item *src_item;
488 struct btrfs_inode_item *dst_item;
489
490 src_item = (struct btrfs_inode_item *)src_ptr;
491 dst_item = (struct btrfs_inode_item *)dst_ptr;
492
493 if (btrfs_inode_generation(eb, src_item) == 0) {
494 struct extent_buffer *dst_eb = path->nodes[0];
495 const u64 ino_size = btrfs_inode_size(eb, src_item);
496
497 /*
498 * For regular files an ino_size == 0 is used only when
499 * logging that an inode exists, as part of a directory
500 * fsync, and the inode wasn't fsynced before. In this
501 * case don't set the size of the inode in the fs/subvol
502 * tree, otherwise we would be throwing valid data away.
503 */
504 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
505 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
506 ino_size != 0) {
507 struct btrfs_map_token token;
508
509 btrfs_init_map_token(&token, dst_eb);
510 btrfs_set_token_inode_size(dst_eb, dst_item,
511 ino_size, &token);
512 }
513 goto no_copy;
514 }
515
516 if (overwrite_root &&
517 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
518 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
519 save_old_i_size = 1;
520 saved_i_size = btrfs_inode_size(path->nodes[0],
521 dst_item);
522 }
523 }
524
525 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
526 src_ptr, item_size);
527
528 if (save_old_i_size) {
529 struct btrfs_inode_item *dst_item;
530 dst_item = (struct btrfs_inode_item *)dst_ptr;
531 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
532 }
533
534 /* make sure the generation is filled in */
535 if (key->type == BTRFS_INODE_ITEM_KEY) {
536 struct btrfs_inode_item *dst_item;
537 dst_item = (struct btrfs_inode_item *)dst_ptr;
538 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
539 btrfs_set_inode_generation(path->nodes[0], dst_item,
540 trans->transid);
541 }
542 }
543 no_copy:
544 btrfs_mark_buffer_dirty(path->nodes[0]);
545 btrfs_release_path(path);
546 return 0;
547 }
548
549 /*
550 * simple helper to read an inode off the disk from a given root.
551 * This can only be called for subvolume roots and not for the log
552 */
553 static noinline struct inode *read_one_inode(struct btrfs_root *root,
554 u64 objectid)
555 {
556 struct btrfs_key key;
557 struct inode *inode;
558
559 key.objectid = objectid;
560 key.type = BTRFS_INODE_ITEM_KEY;
561 key.offset = 0;
562 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
563 if (IS_ERR(inode))
564 inode = NULL;
565 return inode;
566 }
567
568 /* replays a single extent in 'eb' at 'slot' with 'key' into the
569 * subvolume 'root'. path is released on entry and should be released
570 * on exit.
571 *
572 * extents in the log tree have not been allocated out of the extent
573 * tree yet. So, this completes the allocation, taking a reference
574 * as required if the extent already exists or creating a new extent
575 * if it isn't in the extent allocation tree yet.
576 *
577 * The extent is inserted into the file, dropping any existing extents
578 * from the file that overlap the new one.
579 */
580 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
581 struct btrfs_root *root,
582 struct btrfs_path *path,
583 struct extent_buffer *eb, int slot,
584 struct btrfs_key *key)
585 {
586 struct btrfs_fs_info *fs_info = root->fs_info;
587 int found_type;
588 u64 extent_end;
589 u64 start = key->offset;
590 u64 nbytes = 0;
591 struct btrfs_file_extent_item *item;
592 struct inode *inode = NULL;
593 unsigned long size;
594 int ret = 0;
595
596 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
597 found_type = btrfs_file_extent_type(eb, item);
598
599 if (found_type == BTRFS_FILE_EXTENT_REG ||
600 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
601 nbytes = btrfs_file_extent_num_bytes(eb, item);
602 extent_end = start + nbytes;
603
604 /*
605 * We don't add to the inode's nbytes if we are prealloc or a
606 * hole.
607 */
608 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
609 nbytes = 0;
610 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
611 size = btrfs_file_extent_ram_bytes(eb, item);
612 nbytes = btrfs_file_extent_ram_bytes(eb, item);
613 extent_end = ALIGN(start + size,
614 fs_info->sectorsize);
615 } else {
616 ret = 0;
617 goto out;
618 }
619
620 inode = read_one_inode(root, key->objectid);
621 if (!inode) {
622 ret = -EIO;
623 goto out;
624 }
625
626 /*
627 * first check to see if we already have this extent in the
628 * file. This must be done before the btrfs_drop_extents run
629 * so we don't try to drop this extent.
630 */
631 ret = btrfs_lookup_file_extent(trans, root, path,
632 btrfs_ino(BTRFS_I(inode)), start, 0);
633
634 if (ret == 0 &&
635 (found_type == BTRFS_FILE_EXTENT_REG ||
636 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
637 struct btrfs_file_extent_item cmp1;
638 struct btrfs_file_extent_item cmp2;
639 struct btrfs_file_extent_item *existing;
640 struct extent_buffer *leaf;
641
642 leaf = path->nodes[0];
643 existing = btrfs_item_ptr(leaf, path->slots[0],
644 struct btrfs_file_extent_item);
645
646 read_extent_buffer(eb, &cmp1, (unsigned long)item,
647 sizeof(cmp1));
648 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
649 sizeof(cmp2));
650
651 /*
652 * we already have a pointer to this exact extent,
653 * we don't have to do anything
654 */
655 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
656 btrfs_release_path(path);
657 goto out;
658 }
659 }
660 btrfs_release_path(path);
661
662 /* drop any overlapping extents */
663 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
664 if (ret)
665 goto out;
666
667 if (found_type == BTRFS_FILE_EXTENT_REG ||
668 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
669 u64 offset;
670 unsigned long dest_offset;
671 struct btrfs_key ins;
672
673 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
674 btrfs_fs_incompat(fs_info, NO_HOLES))
675 goto update_inode;
676
677 ret = btrfs_insert_empty_item(trans, root, path, key,
678 sizeof(*item));
679 if (ret)
680 goto out;
681 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
682 path->slots[0]);
683 copy_extent_buffer(path->nodes[0], eb, dest_offset,
684 (unsigned long)item, sizeof(*item));
685
686 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
687 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
688 ins.type = BTRFS_EXTENT_ITEM_KEY;
689 offset = key->offset - btrfs_file_extent_offset(eb, item);
690
691 /*
692 * Manually record the dirty extent, as here we did a shallow
693 * file extent item copy and skipped the normal backref update,
694 * modifying the extent tree all by ourselves.
695 * So we need to manually record the dirty extent for qgroup,
696 * as the owner of the file extent changed from the log tree
697 * (doesn't affect qgroup) to the fs/file tree (affects qgroup).
698 */
699 ret = btrfs_qgroup_trace_extent(trans,
700 btrfs_file_extent_disk_bytenr(eb, item),
701 btrfs_file_extent_disk_num_bytes(eb, item),
702 GFP_NOFS);
703 if (ret < 0)
704 goto out;
705
706 if (ins.objectid > 0) {
707 struct btrfs_ref ref = { 0 };
708 u64 csum_start;
709 u64 csum_end;
710 LIST_HEAD(ordered_sums);
711
712 /*
713 * is this extent already allocated in the extent
714 * allocation tree? If so, just add a reference
715 */
716 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
717 ins.offset);
718 if (ret == 0) {
719 btrfs_init_generic_ref(&ref,
720 BTRFS_ADD_DELAYED_REF,
721 ins.objectid, ins.offset, 0);
722 btrfs_init_data_ref(&ref,
723 root->root_key.objectid,
724 key->objectid, offset);
725 ret = btrfs_inc_extent_ref(trans, &ref);
726 if (ret)
727 goto out;
728 } else {
729 /*
730 * insert the extent pointer in the extent
731 * allocation tree
732 */
733 ret = btrfs_alloc_logged_file_extent(trans,
734 root->root_key.objectid,
735 key->objectid, offset, &ins);
736 if (ret)
737 goto out;
738 }
739 btrfs_release_path(path);
740
741 if (btrfs_file_extent_compression(eb, item)) {
742 csum_start = ins.objectid;
743 csum_end = csum_start + ins.offset;
744 } else {
745 csum_start = ins.objectid +
746 btrfs_file_extent_offset(eb, item);
747 csum_end = csum_start +
748 btrfs_file_extent_num_bytes(eb, item);
749 }
750
751 ret = btrfs_lookup_csums_range(root->log_root,
752 csum_start, csum_end - 1,
753 &ordered_sums, 0);
754 if (ret)
755 goto out;
756 /*
757 * Now delete all existing csums in the csum root that
758 * cover our range. We do this because we can have an
759 * extent that is completely referenced by one file
760 * extent item and partially referenced by another
761 * file extent item (like after using the clone or
762 * extent_same ioctls). In this case if we end up doing
763 * the replay of the one that partially references the
764 * extent first, and we do not do the csum deletion
765 * below, we can get 2 csum items in the csum tree that
766 * overlap each other. For example, imagine our log has
767 * the two following file extent items:
768 *
769 * key (257 EXTENT_DATA 409600)
770 * extent data disk byte 12845056 nr 102400
771 * extent data offset 20480 nr 20480 ram 102400
772 *
773 * key (257 EXTENT_DATA 819200)
774 * extent data disk byte 12845056 nr 102400
775 * extent data offset 0 nr 102400 ram 102400
776 *
777 * Where the second one fully references the 100K extent
778 * that starts at disk byte 12845056, and the log tree
779 * has a single csum item that covers the entire range
780 * of the extent:
781 *
782 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
783 *
784 * After the first file extent item is replayed, the
785 * csum tree gets the following csum item:
786 *
787 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
788 *
789 * Which covers the 20K sub-range starting at offset 20K
790 * of our extent. Now when we replay the second file
791 * extent item, if we do not delete existing csum items
792 * that cover any of its blocks, we end up getting two
793 * csum items in our csum tree that overlap each other:
794 *
795 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
796 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
797 *
798 * Which is a problem, because after this anyone trying
799 * to look up the checksum of any block of our
800 * extent starting at an offset of 40K or higher, will
801 * end up looking at the second csum item only, which
802 * does not contain the checksum for any block starting
803 * at offset 40K or higher of our extent.
804 */
805 while (!list_empty(&ordered_sums)) {
806 struct btrfs_ordered_sum *sums;
807 sums = list_entry(ordered_sums.next,
808 struct btrfs_ordered_sum,
809 list);
810 if (!ret)
811 ret = btrfs_del_csums(trans,
812 fs_info->csum_root,
813 sums->bytenr,
814 sums->len);
815 if (!ret)
816 ret = btrfs_csum_file_blocks(trans,
817 fs_info->csum_root, sums);
818 list_del(&sums->list);
819 kfree(sums);
820 }
821 if (ret)
822 goto out;
823 } else {
824 btrfs_release_path(path);
825 }
826 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
827 /* inline extents are easy, we just overwrite them */
828 ret = overwrite_item(trans, root, path, eb, slot, key);
829 if (ret)
830 goto out;
831 }
832
833 inode_add_bytes(inode, nbytes);
834 update_inode:
835 ret = btrfs_update_inode(trans, root, inode);
836 out:
837 if (inode)
838 iput(inode);
839 return ret;
840 }
841
842 /*
843 * when cleaning up conflicts between the directory names in the
844 * subvolume, directory names in the log and directory names in the
845 * inode back references, we may have to unlink inodes from directories.
846 *
847 * This is a helper function to do the unlink of a specific directory
848 * item
849 */
850 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
851 struct btrfs_root *root,
852 struct btrfs_path *path,
853 struct btrfs_inode *dir,
854 struct btrfs_dir_item *di)
855 {
856 struct inode *inode;
857 char *name;
858 int name_len;
859 struct extent_buffer *leaf;
860 struct btrfs_key location;
861 int ret;
862
863 leaf = path->nodes[0];
864
865 btrfs_dir_item_key_to_cpu(leaf, di, &location);
866 name_len = btrfs_dir_name_len(leaf, di);
867 name = kmalloc(name_len, GFP_NOFS);
868 if (!name)
869 return -ENOMEM;
870
871 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
872 btrfs_release_path(path);
873
874 inode = read_one_inode(root, location.objectid);
875 if (!inode) {
876 ret = -EIO;
877 goto out;
878 }
879
880 ret = link_to_fixup_dir(trans, root, path, location.objectid);
881 if (ret)
882 goto out;
883
884 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
885 name_len);
886 if (ret)
887 goto out;
888 else
889 ret = btrfs_run_delayed_items(trans);
890 out:
891 kfree(name);
892 iput(inode);
893 return ret;
894 }
895
896 /*
897 * helper function to see if a given name and sequence number found
898 * in an inode back reference are already in a directory and correctly
899 * point to this inode
900 */
901 static noinline int inode_in_dir(struct btrfs_root *root,
902 struct btrfs_path *path,
903 u64 dirid, u64 objectid, u64 index,
904 const char *name, int name_len)
905 {
906 struct btrfs_dir_item *di;
907 struct btrfs_key location;
908 int match = 0;
909
910 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
911 index, name, name_len, 0);
912 if (di && !IS_ERR(di)) {
913 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
914 if (location.objectid != objectid)
915 goto out;
916 } else
917 goto out;
918 btrfs_release_path(path);
919
920 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
921 if (di && !IS_ERR(di)) {
922 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
923 if (location.objectid != objectid)
924 goto out;
925 } else
926 goto out;
927 match = 1;
928 out:
929 btrfs_release_path(path);
930 return match;
931 }
932
933 /*
934 * helper function to check a log tree for a named back reference in
935 * an inode. This is used to decide if a back reference that is
936 * found in the subvolume conflicts with what we find in the log.
937 *
938 * inode backreferences may have multiple refs in a single item,
939 * during replay we process one reference at a time, and we don't
940 * want to delete valid links to a file from the subvolume if that
941 * link is also in the log.
942 */
943 static noinline int backref_in_log(struct btrfs_root *log,
944 struct btrfs_key *key,
945 u64 ref_objectid,
946 const char *name, int namelen)
947 {
948 struct btrfs_path *path;
949 struct btrfs_inode_ref *ref;
950 unsigned long ptr;
951 unsigned long ptr_end;
952 unsigned long name_ptr;
953 int found_name_len;
954 int item_size;
955 int ret;
956 int match = 0;
957
958 path = btrfs_alloc_path();
959 if (!path)
960 return -ENOMEM;
961
962 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
963 if (ret != 0)
964 goto out;
965
966 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
967
968 if (key->type == BTRFS_INODE_EXTREF_KEY) {
969 if (btrfs_find_name_in_ext_backref(path->nodes[0],
970 path->slots[0],
971 ref_objectid,
972 name, namelen))
973 match = 1;
974
975 goto out;
976 }
977
978 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
979 ptr_end = ptr + item_size;
980 while (ptr < ptr_end) {
981 ref = (struct btrfs_inode_ref *)ptr;
982 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
983 if (found_name_len == namelen) {
984 name_ptr = (unsigned long)(ref + 1);
985 ret = memcmp_extent_buffer(path->nodes[0], name,
986 name_ptr, namelen);
987 if (ret == 0) {
988 match = 1;
989 goto out;
990 }
991 }
992 ptr = (unsigned long)(ref + 1) + found_name_len;
993 }
994 out:
995 btrfs_free_path(path);
996 return match;
997 }
998
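/*
 * Helper for add_inode_ref(): resolve conflicts before a new name is added.
 * Any name found in the subvolume's ref items (old style or extended) for
 * this inode/parent pair that is not present in the log is unlinked, and any
 * conflicting directory entry for the same index or name is dropped.
 * Returns 1 if the ref being processed is for the root directory itself,
 * 0 on success and a negative errno on failure. *search_done is set once the
 * subvolume refs have been examined so the caller does not repeat the search.
 */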
999 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
1000 struct btrfs_root *root,
1001 struct btrfs_path *path,
1002 struct btrfs_root *log_root,
1003 struct btrfs_inode *dir,
1004 struct btrfs_inode *inode,
1005 u64 inode_objectid, u64 parent_objectid,
1006 u64 ref_index, char *name, int namelen,
1007 int *search_done)
1008 {
1009 int ret;
1010 char *victim_name;
1011 int victim_name_len;
1012 struct extent_buffer *leaf;
1013 struct btrfs_dir_item *di;
1014 struct btrfs_key search_key;
1015 struct btrfs_inode_extref *extref;
1016
1017 again:
1018 /* Search old style refs */
1019 search_key.objectid = inode_objectid;
1020 search_key.type = BTRFS_INODE_REF_KEY;
1021 search_key.offset = parent_objectid;
1022 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1023 if (ret == 0) {
1024 struct btrfs_inode_ref *victim_ref;
1025 unsigned long ptr;
1026 unsigned long ptr_end;
1027
1028 leaf = path->nodes[0];
1029
1030 /* are we trying to overwrite a back ref for the root directory?
1031 * if so, just jump out, we're done
1032 */
1033 if (search_key.objectid == search_key.offset)
1034 return 1;
1035
1036 /* check all the names in this back reference to see
1037 * if they are in the log. if so, we allow them to stay,
1038 * otherwise they must be unlinked as a conflict
1039 */
1040 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1041 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1042 while (ptr < ptr_end) {
1043 victim_ref = (struct btrfs_inode_ref *)ptr;
1044 victim_name_len = btrfs_inode_ref_name_len(leaf,
1045 victim_ref);
1046 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1047 if (!victim_name)
1048 return -ENOMEM;
1049
1050 read_extent_buffer(leaf, victim_name,
1051 (unsigned long)(victim_ref + 1),
1052 victim_name_len);
1053
1054 if (!backref_in_log(log_root, &search_key,
1055 parent_objectid,
1056 victim_name,
1057 victim_name_len)) {
1058 inc_nlink(&inode->vfs_inode);
1059 btrfs_release_path(path);
1060
1061 ret = btrfs_unlink_inode(trans, root, dir, inode,
1062 victim_name, victim_name_len);
1063 kfree(victim_name);
1064 if (ret)
1065 return ret;
1066 ret = btrfs_run_delayed_items(trans);
1067 if (ret)
1068 return ret;
1069 *search_done = 1;
1070 goto again;
1071 }
1072 kfree(victim_name);
1073
1074 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1075 }
1076
1077 /*
1078 * NOTE: we have searched the root tree and checked the
1079 * corresponding ref, so there is no need to check it again.
1080 */
1081 *search_done = 1;
1082 }
1083 btrfs_release_path(path);
1084
1085 /* Same search but for extended refs */
1086 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1087 inode_objectid, parent_objectid, 0,
1088 0);
1089 if (!IS_ERR_OR_NULL(extref)) {
1090 u32 item_size;
1091 u32 cur_offset = 0;
1092 unsigned long base;
1093 struct inode *victim_parent;
1094
1095 leaf = path->nodes[0];
1096
1097 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1098 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1099
1100 while (cur_offset < item_size) {
1101 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1102
1103 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1104
1105 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1106 goto next;
1107
1108 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1109 if (!victim_name)
1110 return -ENOMEM;
1111 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1112 victim_name_len);
1113
1114 search_key.objectid = inode_objectid;
1115 search_key.type = BTRFS_INODE_EXTREF_KEY;
1116 search_key.offset = btrfs_extref_hash(parent_objectid,
1117 victim_name,
1118 victim_name_len);
1119 ret = 0;
1120 if (!backref_in_log(log_root, &search_key,
1121 parent_objectid, victim_name,
1122 victim_name_len)) {
1123 ret = -ENOENT;
1124 victim_parent = read_one_inode(root,
1125 parent_objectid);
1126 if (victim_parent) {
1127 inc_nlink(&inode->vfs_inode);
1128 btrfs_release_path(path);
1129
1130 ret = btrfs_unlink_inode(trans, root,
1131 BTRFS_I(victim_parent),
1132 inode,
1133 victim_name,
1134 victim_name_len);
1135 if (!ret)
1136 ret = btrfs_run_delayed_items(
1137 trans);
1138 }
1139 iput(victim_parent);
1140 kfree(victim_name);
1141 if (ret)
1142 return ret;
1143 *search_done = 1;
1144 goto again;
1145 }
1146 kfree(victim_name);
1147 next:
1148 cur_offset += victim_name_len + sizeof(*extref);
1149 }
1150 *search_done = 1;
1151 }
1152 btrfs_release_path(path);
1153
1154 /* look for a conflicting sequence number */
1155 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1156 ref_index, name, namelen, 0);
1157 if (di && !IS_ERR(di)) {
1158 ret = drop_one_dir_item(trans, root, path, dir, di);
1159 if (ret)
1160 return ret;
1161 }
1162 btrfs_release_path(path);
1163
1164 /* look for a conflicting name */
1165 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1166 name, namelen, 0);
1167 if (di && !IS_ERR(di)) {
1168 ret = drop_one_dir_item(trans, root, path, dir, di);
1169 if (ret)
1170 return ret;
1171 }
1172 btrfs_release_path(path);
1173
1174 return 0;
1175 }
1176
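/*
 * Decode one extended inode ref at @ref_ptr inside @eb, returning a
 * kmalloc'ed copy of the name and, when requested, the directory index and
 * parent directory objectid it refers to.
 */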
1177 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1178 u32 *namelen, char **name, u64 *index,
1179 u64 *parent_objectid)
1180 {
1181 struct btrfs_inode_extref *extref;
1182
1183 extref = (struct btrfs_inode_extref *)ref_ptr;
1184
1185 *namelen = btrfs_inode_extref_name_len(eb, extref);
1186 *name = kmalloc(*namelen, GFP_NOFS);
1187 if (*name == NULL)
1188 return -ENOMEM;
1189
1190 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1191 *namelen);
1192
1193 if (index)
1194 *index = btrfs_inode_extref_index(eb, extref);
1195 if (parent_objectid)
1196 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1197
1198 return 0;
1199 }
1200
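/*
 * Same as extref_get_fields() but for an old style inode ref, which does not
 * embed the parent objectid in the item (that is the key offset instead).
 */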
1201 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1202 u32 *namelen, char **name, u64 *index)
1203 {
1204 struct btrfs_inode_ref *ref;
1205
1206 ref = (struct btrfs_inode_ref *)ref_ptr;
1207
1208 *namelen = btrfs_inode_ref_name_len(eb, ref);
1209 *name = kmalloc(*namelen, GFP_NOFS);
1210 if (*name == NULL)
1211 return -ENOMEM;
1212
1213 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1214
1215 if (index)
1216 *index = btrfs_inode_ref_index(eb, ref);
1217
1218 return 0;
1219 }
1220
1221 /*
1222 * Take an inode reference item from the log tree and iterate all names from the
1223 * inode reference item in the subvolume tree with the same key (if it exists).
1224 * For any name that is not in the inode reference item from the log tree, do a
1225 * proper unlink of that name (that is, remove its entry from the inode
1226 * reference item and both dir index keys).
1227 */
1228 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1229 struct btrfs_root *root,
1230 struct btrfs_path *path,
1231 struct btrfs_inode *inode,
1232 struct extent_buffer *log_eb,
1233 int log_slot,
1234 struct btrfs_key *key)
1235 {
1236 int ret;
1237 unsigned long ref_ptr;
1238 unsigned long ref_end;
1239 struct extent_buffer *eb;
1240
1241 again:
1242 btrfs_release_path(path);
1243 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1244 if (ret > 0) {
1245 ret = 0;
1246 goto out;
1247 }
1248 if (ret < 0)
1249 goto out;
1250
1251 eb = path->nodes[0];
1252 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1253 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
1254 while (ref_ptr < ref_end) {
1255 char *name = NULL;
1256 int namelen;
1257 u64 parent_id;
1258
1259 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1260 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1261 NULL, &parent_id);
1262 } else {
1263 parent_id = key->offset;
1264 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1265 NULL);
1266 }
1267 if (ret)
1268 goto out;
1269
1270 if (key->type == BTRFS_INODE_EXTREF_KEY)
1271 ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
1272 parent_id, name,
1273 namelen);
1274 else
1275 ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
1276 name, namelen);
1277
1278 if (!ret) {
1279 struct inode *dir;
1280
1281 btrfs_release_path(path);
1282 dir = read_one_inode(root, parent_id);
1283 if (!dir) {
1284 ret = -ENOENT;
1285 kfree(name);
1286 goto out;
1287 }
1288 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
1289 inode, name, namelen);
1290 kfree(name);
1291 iput(dir);
1292 if (ret)
1293 goto out;
1294 goto again;
1295 }
1296
1297 kfree(name);
1298 ref_ptr += namelen;
1299 if (key->type == BTRFS_INODE_EXTREF_KEY)
1300 ref_ptr += sizeof(struct btrfs_inode_extref);
1301 else
1302 ref_ptr += sizeof(struct btrfs_inode_ref);
1303 }
1304 ret = 0;
1305 out:
1306 btrfs_release_path(path);
1307 return ret;
1308 }
1309
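/*
 * Check whether the subvolume tree already has an inode ref (or extref,
 * depending on @ref_type) linking @inode to @dir under @name. Returns 1 if
 * it exists, 0 if not, and a negative errno on error.
 */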
1310 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
1311 const u8 ref_type, const char *name,
1312 const int namelen)
1313 {
1314 struct btrfs_key key;
1315 struct btrfs_path *path;
1316 const u64 parent_id = btrfs_ino(BTRFS_I(dir));
1317 int ret;
1318
1319 path = btrfs_alloc_path();
1320 if (!path)
1321 return -ENOMEM;
1322
1323 key.objectid = btrfs_ino(BTRFS_I(inode));
1324 key.type = ref_type;
1325 if (key.type == BTRFS_INODE_REF_KEY)
1326 key.offset = parent_id;
1327 else
1328 key.offset = btrfs_extref_hash(parent_id, name, namelen);
1329
1330 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
1331 if (ret < 0)
1332 goto out;
1333 if (ret > 0) {
1334 ret = 0;
1335 goto out;
1336 }
1337 if (key.type == BTRFS_INODE_EXTREF_KEY)
1338 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1339 path->slots[0], parent_id, name, namelen);
1340 else
1341 ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
1342 name, namelen);
1343
1344 out:
1345 btrfs_free_path(path);
1346 return ret;
1347 }
1348
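/*
 * Add a directory entry for @inode in @dir using @name at @ref_index. If
 * another inode (one that appears later in the log and has not been replayed
 * yet) already owns a dentry with this name, that conflicting dentry is
 * unlinked first.
 */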
1349 static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1350 struct inode *dir, struct inode *inode, const char *name,
1351 int namelen, u64 ref_index)
1352 {
1353 struct btrfs_dir_item *dir_item;
1354 struct btrfs_key key;
1355 struct btrfs_path *path;
1356 struct inode *other_inode = NULL;
1357 int ret;
1358
1359 path = btrfs_alloc_path();
1360 if (!path)
1361 return -ENOMEM;
1362
1363 dir_item = btrfs_lookup_dir_item(NULL, root, path,
1364 btrfs_ino(BTRFS_I(dir)),
1365 name, namelen, 0);
1366 if (!dir_item) {
1367 btrfs_release_path(path);
1368 goto add_link;
1369 } else if (IS_ERR(dir_item)) {
1370 ret = PTR_ERR(dir_item);
1371 goto out;
1372 }
1373
1374 /*
1375 * Our inode's dentry collides with the dentry of another inode which is
1376 * in the log but not yet processed since it has a higher inode number.
1377 * So delete that other dentry.
1378 */
1379 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
1380 btrfs_release_path(path);
1381 other_inode = read_one_inode(root, key.objectid);
1382 if (!other_inode) {
1383 ret = -ENOENT;
1384 goto out;
1385 }
1386 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
1387 name, namelen);
1388 if (ret)
1389 goto out;
1390 /*
1391 * If we dropped the link count to 0, bump it so that later the iput()
1392 * on the inode will not free it. We will fixup the link count later.
1393 */
1394 if (other_inode->i_nlink == 0)
1395 inc_nlink(other_inode);
1396
1397 ret = btrfs_run_delayed_items(trans);
1398 if (ret)
1399 goto out;
1400 add_link:
1401 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
1402 name, namelen, 0, ref_index);
1403 out:
1404 iput(other_inode);
1405 btrfs_free_path(path);
1406
1407 return ret;
1408 }
1409
1410 /*
1411 * replay one inode back reference item found in the log tree.
1412 * eb, slot and key refer to the buffer and key found in the log tree.
1413 * root is the destination we are replaying into, and path is for temp
1414 * use by this function. (it should be released on return).
1415 */
1416 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1417 struct btrfs_root *root,
1418 struct btrfs_root *log,
1419 struct btrfs_path *path,
1420 struct extent_buffer *eb, int slot,
1421 struct btrfs_key *key)
1422 {
1423 struct inode *dir = NULL;
1424 struct inode *inode = NULL;
1425 unsigned long ref_ptr;
1426 unsigned long ref_end;
1427 char *name = NULL;
1428 int namelen;
1429 int ret;
1430 int search_done = 0;
1431 int log_ref_ver = 0;
1432 u64 parent_objectid;
1433 u64 inode_objectid;
1434 u64 ref_index = 0;
1435 int ref_struct_size;
1436
1437 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1438 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1439
1440 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1441 struct btrfs_inode_extref *r;
1442
1443 ref_struct_size = sizeof(struct btrfs_inode_extref);
1444 log_ref_ver = 1;
1445 r = (struct btrfs_inode_extref *)ref_ptr;
1446 parent_objectid = btrfs_inode_extref_parent(eb, r);
1447 } else {
1448 ref_struct_size = sizeof(struct btrfs_inode_ref);
1449 parent_objectid = key->offset;
1450 }
1451 inode_objectid = key->objectid;
1452
1453 /*
1454 * it is possible that we didn't log all the parent directories
1455 * for a given inode. If we don't find the dir, just don't
1456 * copy the back ref in. The link count fixup code will take
1457 * care of the rest
1458 */
1459 dir = read_one_inode(root, parent_objectid);
1460 if (!dir) {
1461 ret = -ENOENT;
1462 goto out;
1463 }
1464
1465 inode = read_one_inode(root, inode_objectid);
1466 if (!inode) {
1467 ret = -EIO;
1468 goto out;
1469 }
1470
1471 while (ref_ptr < ref_end) {
1472 if (log_ref_ver) {
1473 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1474 &ref_index, &parent_objectid);
1475 /*
1476 * parent object can change from one array
1477 * item to another.
1478 */
1479 if (!dir)
1480 dir = read_one_inode(root, parent_objectid);
1481 if (!dir) {
1482 ret = -ENOENT;
1483 goto out;
1484 }
1485 } else {
1486 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1487 &ref_index);
1488 }
1489 if (ret)
1490 goto out;
1491
1492 /* if we already have a perfect match, we're done */
1493 if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1494 btrfs_ino(BTRFS_I(inode)), ref_index,
1495 name, namelen)) {
1496 /*
1497 * look for a conflicting back reference in the
1498 * metadata. if we find one we have to unlink that name
1499 * of the file before we add our new link. Later on, we
1500 * overwrite any existing back reference, and we don't
1501 * want to create dangling pointers in the directory.
1502 */
1503
1504 if (!search_done) {
1505 ret = __add_inode_ref(trans, root, path, log,
1506 BTRFS_I(dir),
1507 BTRFS_I(inode),
1508 inode_objectid,
1509 parent_objectid,
1510 ref_index, name, namelen,
1511 &search_done);
1512 if (ret) {
1513 if (ret == 1)
1514 ret = 0;
1515 goto out;
1516 }
1517 }
1518
1519 /*
1520 * If a reference item already exists for this inode
1521 * with the same parent and name, but different index,
1522 * drop it and the corresponding directory index entries
1523 * from the parent before adding the new reference item
1524 * and dir index entries, otherwise we would fail with
1525 * -EEXIST returned from btrfs_add_link() below.
1526 */
1527 ret = btrfs_inode_ref_exists(inode, dir, key->type,
1528 name, namelen);
1529 if (ret > 0) {
1530 ret = btrfs_unlink_inode(trans, root,
1531 BTRFS_I(dir),
1532 BTRFS_I(inode),
1533 name, namelen);
1534 /*
1535 * If we dropped the link count to 0, bump it so
1536 * that later the iput() on the inode will not
1537 * free it. We will fixup the link count later.
1538 */
1539 if (!ret && inode->i_nlink == 0)
1540 inc_nlink(inode);
1541 }
1542 if (ret < 0)
1543 goto out;
1544
1545 /* insert our name */
1546 ret = add_link(trans, root, dir, inode, name, namelen,
1547 ref_index);
1548 if (ret)
1549 goto out;
1550
1551 btrfs_update_inode(trans, root, inode);
1552 }
1553
1554 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1555 kfree(name);
1556 name = NULL;
1557 if (log_ref_ver) {
1558 iput(dir);
1559 dir = NULL;
1560 }
1561 }
1562
1563 /*
1564 * Before we overwrite the inode reference item in the subvolume tree
1565 * with the item from the log tree, we must unlink all names from the
1566 * parent directory that are in the subvolume's tree inode reference
1567 * item, otherwise we end up with an inconsistent subvolume tree where
1568 * dir index entries exist for a name but there is no inode reference
1569 * item with the same name.
1570 */
1571 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1572 key);
1573 if (ret)
1574 goto out;
1575
1576 /* finally write the back reference in the inode */
1577 ret = overwrite_item(trans, root, path, eb, slot, key);
1578 out:
1579 btrfs_release_path(path);
1580 kfree(name);
1581 iput(dir);
1582 iput(inode);
1583 return ret;
1584 }
1585
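/*
 * Insert an orphan item for @ino, ignoring -EEXIST so the insertion is
 * idempotent; used when replay leaves an inode with a link count of zero.
 */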
1586 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1587 struct btrfs_root *root, u64 ino)
1588 {
1589 int ret;
1590
1591 ret = btrfs_insert_orphan_item(trans, root, ino);
1592 if (ret == -EEXIST)
1593 ret = 0;
1594
1595 return ret;
1596 }
1597
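/*
 * Count the number of names referring to @inode via extended inode refs.
 * Returns the count, or a negative errno (other than -ENOENT) on failure.
 */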
1598 static int count_inode_extrefs(struct btrfs_root *root,
1599 struct btrfs_inode *inode, struct btrfs_path *path)
1600 {
1601 int ret = 0;
1602 int name_len;
1603 unsigned int nlink = 0;
1604 u32 item_size;
1605 u32 cur_offset = 0;
1606 u64 inode_objectid = btrfs_ino(inode);
1607 u64 offset = 0;
1608 unsigned long ptr;
1609 struct btrfs_inode_extref *extref;
1610 struct extent_buffer *leaf;
1611
1612 while (1) {
1613 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1614 &extref, &offset);
1615 if (ret)
1616 break;
1617
1618 leaf = path->nodes[0];
1619 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1620 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1621 cur_offset = 0;
1622
1623 while (cur_offset < item_size) {
1624 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1625 name_len = btrfs_inode_extref_name_len(leaf, extref);
1626
1627 nlink++;
1628
1629 cur_offset += name_len + sizeof(*extref);
1630 }
1631
1632 offset++;
1633 btrfs_release_path(path);
1634 }
1635 btrfs_release_path(path);
1636
1637 if (ret < 0 && ret != -ENOENT)
1638 return ret;
1639 return nlink;
1640 }
1641
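/*
 * Count the number of names referring to @inode via old style inode refs,
 * walking the INODE_REF items from the highest key offset downwards.
 */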
1642 static int count_inode_refs(struct btrfs_root *root,
1643 struct btrfs_inode *inode, struct btrfs_path *path)
1644 {
1645 int ret;
1646 struct btrfs_key key;
1647 unsigned int nlink = 0;
1648 unsigned long ptr;
1649 unsigned long ptr_end;
1650 int name_len;
1651 u64 ino = btrfs_ino(inode);
1652
1653 key.objectid = ino;
1654 key.type = BTRFS_INODE_REF_KEY;
1655 key.offset = (u64)-1;
1656
1657 while (1) {
1658 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1659 if (ret < 0)
1660 break;
1661 if (ret > 0) {
1662 if (path->slots[0] == 0)
1663 break;
1664 path->slots[0]--;
1665 }
1666 process_slot:
1667 btrfs_item_key_to_cpu(path->nodes[0], &key,
1668 path->slots[0]);
1669 if (key.objectid != ino ||
1670 key.type != BTRFS_INODE_REF_KEY)
1671 break;
1672 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1673 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1674 path->slots[0]);
1675 while (ptr < ptr_end) {
1676 struct btrfs_inode_ref *ref;
1677
1678 ref = (struct btrfs_inode_ref *)ptr;
1679 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1680 ref);
1681 ptr = (unsigned long)(ref + 1) + name_len;
1682 nlink++;
1683 }
1684
1685 if (key.offset == 0)
1686 break;
1687 if (path->slots[0] > 0) {
1688 path->slots[0]--;
1689 goto process_slot;
1690 }
1691 key.offset--;
1692 btrfs_release_path(path);
1693 }
1694 btrfs_release_path(path);
1695
1696 return nlink;
1697 }
1698
1699 /*
1700 * There are a few corners where the link count of the file can't
1701 * be properly maintained during replay. So, instead of adding
1702 * lots of complexity to the log code, we just scan the backrefs
1703 * for any file that has been through replay.
1704 *
1705 * The scan will update the link count on the inode to reflect the
1706 * number of back refs found. If it goes down to zero, the iput
1707 * will free the inode.
1708 */
1709 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1710 struct btrfs_root *root,
1711 struct inode *inode)
1712 {
1713 struct btrfs_path *path;
1714 int ret;
1715 u64 nlink = 0;
1716 u64 ino = btrfs_ino(BTRFS_I(inode));
1717
1718 path = btrfs_alloc_path();
1719 if (!path)
1720 return -ENOMEM;
1721
1722 ret = count_inode_refs(root, BTRFS_I(inode), path);
1723 if (ret < 0)
1724 goto out;
1725
1726 nlink = ret;
1727
1728 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1729 if (ret < 0)
1730 goto out;
1731
1732 nlink += ret;
1733
1734 ret = 0;
1735
1736 if (nlink != inode->i_nlink) {
1737 set_nlink(inode, nlink);
1738 btrfs_update_inode(trans, root, inode);
1739 }
1740 BTRFS_I(inode)->index_cnt = (u64)-1;
1741
1742 if (inode->i_nlink == 0) {
1743 if (S_ISDIR(inode->i_mode)) {
1744 ret = replay_dir_deletes(trans, root, NULL, path,
1745 ino, 1);
1746 if (ret)
1747 goto out;
1748 }
1749 ret = insert_orphan_item(trans, root, ino);
1750 }
1751
1752 out:
1753 btrfs_free_path(path);
1754 return ret;
1755 }
1756
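/*
 * Walk every fixup entry (ORPHAN_ITEM keys under BTRFS_TREE_LOG_FIXUP_OBJECTID)
 * left in the subvolume by link_to_fixup_dir(), delete each entry and correct
 * the link count of the inode it refers to.
 */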
1757 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1758 struct btrfs_root *root,
1759 struct btrfs_path *path)
1760 {
1761 int ret;
1762 struct btrfs_key key;
1763 struct inode *inode;
1764
1765 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1766 key.type = BTRFS_ORPHAN_ITEM_KEY;
1767 key.offset = (u64)-1;
1768 while (1) {
1769 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1770 if (ret < 0)
1771 break;
1772
1773 if (ret == 1) {
1774 if (path->slots[0] == 0)
1775 break;
1776 path->slots[0]--;
1777 }
1778
1779 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1780 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1781 key.type != BTRFS_ORPHAN_ITEM_KEY)
1782 break;
1783
1784 ret = btrfs_del_item(trans, root, path);
1785 if (ret)
1786 goto out;
1787
1788 btrfs_release_path(path);
1789 inode = read_one_inode(root, key.offset);
1790 if (!inode)
1791 return -EIO;
1792
1793 ret = fixup_inode_link_count(trans, root, inode);
1794 iput(inode);
1795 if (ret)
1796 goto out;
1797
1798 /*
1799 * fixup on a directory may create new entries,
1800 * make sure we always look for the highest possible
1801 * offset
1802 */
1803 key.offset = (u64)-1;
1804 }
1805 ret = 0;
1806 out:
1807 btrfs_release_path(path);
1808 return ret;
1809 }
1810
1811
1812 /*
1813 * record a given inode in the fixup dir so we can check its link
1814 * count when replay is done. The link count is incremented here
1815 * so the inode won't go away until we check it
1816 */
1817 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1818 struct btrfs_root *root,
1819 struct btrfs_path *path,
1820 u64 objectid)
1821 {
1822 struct btrfs_key key;
1823 int ret = 0;
1824 struct inode *inode;
1825
1826 inode = read_one_inode(root, objectid);
1827 if (!inode)
1828 return -EIO;
1829
1830 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1831 key.type = BTRFS_ORPHAN_ITEM_KEY;
1832 key.offset = objectid;
1833
1834 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1835
1836 btrfs_release_path(path);
1837 if (ret == 0) {
1838 if (!inode->i_nlink)
1839 set_nlink(inode, 1);
1840 else
1841 inc_nlink(inode);
1842 ret = btrfs_update_inode(trans, root, inode);
1843 } else if (ret == -EEXIST) {
1844 ret = 0;
1845 } else {
1846 BUG(); /* Logic Error */
1847 }
1848 iput(inode);
1849
1850 return ret;
1851 }
1852
1853 /*
1854 * when replaying the log for a directory, we only insert names
1855 * for inodes that actually exist. This means an fsync on a directory
1856 * does not implicitly fsync all the new files in it
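 *
 * As implemented below, this returns -ENOENT when the inode the name
 * points to is not present in the subvolume, -EIO when the parent
 * directory cannot be read, and otherwise the result of btrfs_add_link().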
1857 */
1858 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1859 struct btrfs_root *root,
1860 u64 dirid, u64 index,
1861 char *name, int name_len,
1862 struct btrfs_key *location)
1863 {
1864 struct inode *inode;
1865 struct inode *dir;
1866 int ret;
1867
1868 inode = read_one_inode(root, location->objectid);
1869 if (!inode)
1870 return -ENOENT;
1871
1872 dir = read_one_inode(root, dirid);
1873 if (!dir) {
1874 iput(inode);
1875 return -EIO;
1876 }
1877
1878 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1879 name_len, 1, index);
1880
1881 /* FIXME, put inode into FIXUP list */
1882
1883 iput(inode);
1884 iput(dir);
1885 return ret;
1886 }
1887
1888 /*
1889 * Return true if an inode reference exists in the log for the given name,
1890 * inode and parent inode.
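 *
 * Both encodings of back references are checked: the plain
 * BTRFS_INODE_REF_KEY keyed by the parent dirid, and the hashed
 * BTRFS_INODE_EXTREF_KEY keyed by btrfs_extref_hash(dirid, name).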
1891 */
1892 static bool name_in_log_ref(struct btrfs_root *log_root,
1893 const char *name, const int name_len,
1894 const u64 dirid, const u64 ino)
1895 {
1896 struct btrfs_key search_key;
1897
1898 search_key.objectid = ino;
1899 search_key.type = BTRFS_INODE_REF_KEY;
1900 search_key.offset = dirid;
1901 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1902 return true;
1903
1904 search_key.type = BTRFS_INODE_EXTREF_KEY;
1905 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1906 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1907 return true;
1908
1909 return false;
1910 }
1911
1912 /*
1913 * take a single entry in a log directory item and replay it into
1914 * the subvolume.
1915 *
1916 * if a conflicting item exists in the subdirectory already,
1917 * the inode it points to is unlinked and put into the link count
1918 * fix up tree.
1919 *
1920 * If a name from the log points to a file or directory that does
1921 * not exist in the FS, it is skipped. fsyncs on directories
1922 * do not force down inodes inside that directory, just changes to the
1923 * names or unlinks in a directory.
1924 *
1925 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1926 * non-existing inode) and 1 if the name was replayed.
1927 */
1928 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1929 struct btrfs_root *root,
1930 struct btrfs_path *path,
1931 struct extent_buffer *eb,
1932 struct btrfs_dir_item *di,
1933 struct btrfs_key *key)
1934 {
1935 char *name;
1936 int name_len;
1937 struct btrfs_dir_item *dst_di;
1938 struct btrfs_key found_key;
1939 struct btrfs_key log_key;
1940 struct inode *dir;
1941 u8 log_type;
1942 int exists;
1943 int ret = 0;
1944 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1945 bool name_added = false;
1946
1947 dir = read_one_inode(root, key->objectid);
1948 if (!dir)
1949 return -EIO;
1950
1951 name_len = btrfs_dir_name_len(eb, di);
1952 name = kmalloc(name_len, GFP_NOFS);
1953 if (!name) {
1954 ret = -ENOMEM;
1955 goto out;
1956 }
1957
1958 log_type = btrfs_dir_type(eb, di);
1959 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1960 name_len);
1961
1962 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1963 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1964 if (exists == 0)
1965 exists = 1;
1966 else
1967 exists = 0;
1968 btrfs_release_path(path);
1969
1970 if (key->type == BTRFS_DIR_ITEM_KEY) {
1971 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1972 name, name_len, 1);
1973 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1974 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1975 key->objectid,
1976 key->offset, name,
1977 name_len, 1);
1978 } else {
1979 /* Corruption */
1980 ret = -EINVAL;
1981 goto out;
1982 }
1983 if (IS_ERR_OR_NULL(dst_di)) {
1984 /* we need a sequence number to insert, so we only
1985 * do inserts for the BTRFS_DIR_INDEX_KEY types
1986 */
1987 if (key->type != BTRFS_DIR_INDEX_KEY)
1988 goto out;
1989 goto insert;
1990 }
1991
1992 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1993 /* the existing item matches the logged item */
1994 if (found_key.objectid == log_key.objectid &&
1995 found_key.type == log_key.type &&
1996 found_key.offset == log_key.offset &&
1997 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1998 update_size = false;
1999 goto out;
2000 }
2001
2002 /*
2003 * don't drop the conflicting directory entry if the inode
2004 * for the new entry doesn't exist
2005 */
2006 if (!exists)
2007 goto out;
2008
2009 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
2010 if (ret)
2011 goto out;
2012
2013 if (key->type == BTRFS_DIR_INDEX_KEY)
2014 goto insert;
2015 out:
2016 btrfs_release_path(path);
2017 if (!ret && update_size) {
2018 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2019 ret = btrfs_update_inode(trans, root, dir);
2020 }
2021 kfree(name);
2022 iput(dir);
2023 if (!ret && name_added)
2024 ret = 1;
2025 return ret;
2026
2027 insert:
2028 if (name_in_log_ref(root->log_root, name, name_len,
2029 key->objectid, log_key.objectid)) {
2030 /* The dentry will be added later. */
2031 ret = 0;
2032 update_size = false;
2033 goto out;
2034 }
2035 btrfs_release_path(path);
2036 ret = insert_one_name(trans, root, key->objectid, key->offset,
2037 name, name_len, &log_key);
2038 if (ret && ret != -ENOENT && ret != -EEXIST)
2039 goto out;
2040 if (!ret)
2041 name_added = true;
2042 update_size = false;
2043 ret = 0;
2044 goto out;
2045 }
2046
2047 /*
2048 * find all the names in a directory item and reconcile them into
2049 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
2050 * one name in a directory item, but the same code gets used for
2051 * both directory index types
2052 */
2053 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2054 struct btrfs_root *root,
2055 struct btrfs_path *path,
2056 struct extent_buffer *eb, int slot,
2057 struct btrfs_key *key)
2058 {
2059 int ret = 0;
2060 u32 item_size = btrfs_item_size_nr(eb, slot);
2061 struct btrfs_dir_item *di;
2062 int name_len;
2063 unsigned long ptr;
2064 unsigned long ptr_end;
2065 struct btrfs_path *fixup_path = NULL;
2066
2067 ptr = btrfs_item_ptr_offset(eb, slot);
2068 ptr_end = ptr + item_size;
2069 while (ptr < ptr_end) {
2070 di = (struct btrfs_dir_item *)ptr;
2071 name_len = btrfs_dir_name_len(eb, di);
2072 ret = replay_one_name(trans, root, path, eb, di, key);
2073 if (ret < 0)
2074 break;
2075 ptr = (unsigned long)(di + 1);
2076 ptr += name_len;
2077
2078 /*
2079 * If this entry refers to a non-directory (directories can not
2080 * have a link count > 1) and it was added in the transaction
2081 * that was not committed, make sure we fixup the link count of
2082 * the inode the entry points to. Otherwise something like
2083 * the following would result in a directory pointing to an
2084 * inode with a wrong link count that does not account for this dir
2085 * entry:
2086 *
2087 * mkdir testdir
2088 * touch testdir/foo
2089 * touch testdir/bar
2090 * sync
2091 *
2092 * ln testdir/bar testdir/bar_link
2093 * ln testdir/foo testdir/foo_link
2094 * xfs_io -c "fsync" testdir/bar
2095 *
2096 * <power failure>
2097 *
2098 * mount fs, log replay happens
2099 *
2100 * File foo would remain with a link count of 1 when it has two
2101 * entries pointing to it in the directory testdir. This would
2102 * make it impossible to ever delete the parent directory as
2103 * it would result in stale dentries that can never be deleted.
2104 */
2105 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2106 struct btrfs_key di_key;
2107
2108 if (!fixup_path) {
2109 fixup_path = btrfs_alloc_path();
2110 if (!fixup_path) {
2111 ret = -ENOMEM;
2112 break;
2113 }
2114 }
2115
2116 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2117 ret = link_to_fixup_dir(trans, root, fixup_path,
2118 di_key.objectid);
2119 if (ret)
2120 break;
2121 }
2122 ret = 0;
2123 }
2124 btrfs_free_path(fixup_path);
2125 return ret;
2126 }
2127
2128 /*
2129 * directory replay has two parts. There are the standard directory
2130 * items in the log copied from the subvolume, and range items
2131 * created in the log while the subvolume was logged.
2132 *
2133 * The range items tell us which parts of the key space the log
2134 * is authoritative for. During replay, if a key in the subvolume
2135 * directory is in a logged range item, but not actually in the log
2136 * that means it was deleted from the directory before the fsync
2137 * and should be removed.
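 *
 * find_dir_range() below is the lookup half of this: for a given starting
 * offset it returns 0 and fills *start_ret/*end_ret with the logged range
 * that contains or follows that offset, returns 1 when no further range
 * exists for this dirid and key type, and < 0 on search errors.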
2138 */
2139 static noinline int find_dir_range(struct btrfs_root *root,
2140 struct btrfs_path *path,
2141 u64 dirid, int key_type,
2142 u64 *start_ret, u64 *end_ret)
2143 {
2144 struct btrfs_key key;
2145 u64 found_end;
2146 struct btrfs_dir_log_item *item;
2147 int ret;
2148 int nritems;
2149
2150 if (*start_ret == (u64)-1)
2151 return 1;
2152
2153 key.objectid = dirid;
2154 key.type = key_type;
2155 key.offset = *start_ret;
2156
2157 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2158 if (ret < 0)
2159 goto out;
2160 if (ret > 0) {
2161 if (path->slots[0] == 0)
2162 goto out;
2163 path->slots[0]--;
2164 }
2165 if (ret != 0)
2166 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2167
2168 if (key.type != key_type || key.objectid != dirid) {
2169 ret = 1;
2170 goto next;
2171 }
2172 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2173 struct btrfs_dir_log_item);
2174 found_end = btrfs_dir_log_end(path->nodes[0], item);
2175
2176 if (*start_ret >= key.offset && *start_ret <= found_end) {
2177 ret = 0;
2178 *start_ret = key.offset;
2179 *end_ret = found_end;
2180 goto out;
2181 }
2182 ret = 1;
2183 next:
2184 /* check the next slot in the tree to see if it is a valid item */
2185 nritems = btrfs_header_nritems(path->nodes[0]);
2186 path->slots[0]++;
2187 if (path->slots[0] >= nritems) {
2188 ret = btrfs_next_leaf(root, path);
2189 if (ret)
2190 goto out;
2191 }
2192
2193 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2194
2195 if (key.type != key_type || key.objectid != dirid) {
2196 ret = 1;
2197 goto out;
2198 }
2199 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2200 struct btrfs_dir_log_item);
2201 found_end = btrfs_dir_log_end(path->nodes[0], item);
2202 *start_ret = key.offset;
2203 *end_ret = found_end;
2204 ret = 0;
2205 out:
2206 btrfs_release_path(path);
2207 return ret;
2208 }
2209
2210 /*
2211 * this looks for a given directory item in the log. If the directory
2212 * item is not in the log, the item is removed and the inode it points
2213 * to is unlinked
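 *
 * Before the unlink, the inode is added to the fixup tree (via
 * link_to_fixup_dir()) so that its link count is re-checked once replay
 * completes.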
2214 */
2215 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2216 struct btrfs_root *root,
2217 struct btrfs_root *log,
2218 struct btrfs_path *path,
2219 struct btrfs_path *log_path,
2220 struct inode *dir,
2221 struct btrfs_key *dir_key)
2222 {
2223 int ret;
2224 struct extent_buffer *eb;
2225 int slot;
2226 u32 item_size;
2227 struct btrfs_dir_item *di;
2228 struct btrfs_dir_item *log_di;
2229 int name_len;
2230 unsigned long ptr;
2231 unsigned long ptr_end;
2232 char *name;
2233 struct inode *inode;
2234 struct btrfs_key location;
2235
2236 again:
2237 eb = path->nodes[0];
2238 slot = path->slots[0];
2239 item_size = btrfs_item_size_nr(eb, slot);
2240 ptr = btrfs_item_ptr_offset(eb, slot);
2241 ptr_end = ptr + item_size;
2242 while (ptr < ptr_end) {
2243 di = (struct btrfs_dir_item *)ptr;
2244 name_len = btrfs_dir_name_len(eb, di);
2245 name = kmalloc(name_len, GFP_NOFS);
2246 if (!name) {
2247 ret = -ENOMEM;
2248 goto out;
2249 }
2250 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2251 name_len);
2252 log_di = NULL;
2253 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2254 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2255 dir_key->objectid,
2256 name, name_len, 0);
2257 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2258 log_di = btrfs_lookup_dir_index_item(trans, log,
2259 log_path,
2260 dir_key->objectid,
2261 dir_key->offset,
2262 name, name_len, 0);
2263 }
2264 if (!log_di || log_di == ERR_PTR(-ENOENT)) {
2265 btrfs_dir_item_key_to_cpu(eb, di, &location);
2266 btrfs_release_path(path);
2267 btrfs_release_path(log_path);
2268 inode = read_one_inode(root, location.objectid);
2269 if (!inode) {
2270 kfree(name);
2271 return -EIO;
2272 }
2273
2274 ret = link_to_fixup_dir(trans, root,
2275 path, location.objectid);
2276 if (ret) {
2277 kfree(name);
2278 iput(inode);
2279 goto out;
2280 }
2281
2282 inc_nlink(inode);
2283 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2284 BTRFS_I(inode), name, name_len);
2285 if (!ret)
2286 ret = btrfs_run_delayed_items(trans);
2287 kfree(name);
2288 iput(inode);
2289 if (ret)
2290 goto out;
2291
2292 /* there might still be more names under this key
2293 * check and repeat if required
2294 */
2295 ret = btrfs_search_slot(NULL, root, dir_key, path,
2296 0, 0);
2297 if (ret == 0)
2298 goto again;
2299 ret = 0;
2300 goto out;
2301 } else if (IS_ERR(log_di)) {
2302 kfree(name);
2303 return PTR_ERR(log_di);
2304 }
2305 btrfs_release_path(log_path);
2306 kfree(name);
2307
2308 ptr = (unsigned long)(di + 1);
2309 ptr += name_len;
2310 }
2311 ret = 0;
2312 out:
2313 btrfs_release_path(path);
2314 btrfs_release_path(log_path);
2315 return ret;
2316 }
2317
2318 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2319 struct btrfs_root *root,
2320 struct btrfs_root *log,
2321 struct btrfs_path *path,
2322 const u64 ino)
2323 {
2324 struct btrfs_key search_key;
2325 struct btrfs_path *log_path;
2326 int i;
2327 int nritems;
2328 int ret;
2329
2330 log_path = btrfs_alloc_path();
2331 if (!log_path)
2332 return -ENOMEM;
2333
2334 search_key.objectid = ino;
2335 search_key.type = BTRFS_XATTR_ITEM_KEY;
2336 search_key.offset = 0;
2337 again:
2338 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2339 if (ret < 0)
2340 goto out;
2341 process_leaf:
2342 nritems = btrfs_header_nritems(path->nodes[0]);
2343 for (i = path->slots[0]; i < nritems; i++) {
2344 struct btrfs_key key;
2345 struct btrfs_dir_item *di;
2346 struct btrfs_dir_item *log_di;
2347 u32 total_size;
2348 u32 cur;
2349
2350 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2351 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2352 ret = 0;
2353 goto out;
2354 }
2355
2356 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2357 total_size = btrfs_item_size_nr(path->nodes[0], i);
2358 cur = 0;
2359 while (cur < total_size) {
2360 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2361 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2362 u32 this_len = sizeof(*di) + name_len + data_len;
2363 char *name;
2364
2365 name = kmalloc(name_len, GFP_NOFS);
2366 if (!name) {
2367 ret = -ENOMEM;
2368 goto out;
2369 }
2370 read_extent_buffer(path->nodes[0], name,
2371 (unsigned long)(di + 1), name_len);
2372
2373 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2374 name, name_len, 0);
2375 btrfs_release_path(log_path);
2376 if (!log_di) {
2377 /* Doesn't exist in log tree, so delete it. */
2378 btrfs_release_path(path);
2379 di = btrfs_lookup_xattr(trans, root, path, ino,
2380 name, name_len, -1);
2381 kfree(name);
2382 if (IS_ERR(di)) {
2383 ret = PTR_ERR(di);
2384 goto out;
2385 }
2386 ASSERT(di);
2387 ret = btrfs_delete_one_dir_name(trans, root,
2388 path, di);
2389 if (ret)
2390 goto out;
2391 btrfs_release_path(path);
2392 search_key = key;
2393 goto again;
2394 }
2395 kfree(name);
2396 if (IS_ERR(log_di)) {
2397 ret = PTR_ERR(log_di);
2398 goto out;
2399 }
2400 cur += this_len;
2401 di = (struct btrfs_dir_item *)((char *)di + this_len);
2402 }
2403 }
2404 ret = btrfs_next_leaf(root, path);
2405 if (ret > 0)
2406 ret = 0;
2407 else if (ret == 0)
2408 goto process_leaf;
2409 out:
2410 btrfs_free_path(log_path);
2411 btrfs_release_path(path);
2412 return ret;
2413 }
2414
2415
2416 /*
2417 * deletion replay happens before we copy any new directory items
2418 * out of the log or out of backreferences from inodes. It
2419 * scans the log to find ranges of keys that the log is authoritative for,
2420 * and then scans the directory to find items in those ranges that are
2421 * not present in the log.
2422 *
2423 * Anything we don't find in the log is unlinked and removed from the
2424 * directory.
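 *
 * When del_all is set (the link count fixup code passes it, together with
 * a NULL log root, for directories whose link count dropped to zero), the
 * whole key space is treated as authoritative and every remaining entry
 * is removed.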
2425 */
2426 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2427 struct btrfs_root *root,
2428 struct btrfs_root *log,
2429 struct btrfs_path *path,
2430 u64 dirid, int del_all)
2431 {
2432 u64 range_start;
2433 u64 range_end;
2434 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2435 int ret = 0;
2436 struct btrfs_key dir_key;
2437 struct btrfs_key found_key;
2438 struct btrfs_path *log_path;
2439 struct inode *dir;
2440
2441 dir_key.objectid = dirid;
2442 dir_key.type = BTRFS_DIR_ITEM_KEY;
2443 log_path = btrfs_alloc_path();
2444 if (!log_path)
2445 return -ENOMEM;
2446
2447 dir = read_one_inode(root, dirid);
2448 /* it isn't an error if the inode isn't there, that can happen
2449 * because we replay the deletes before we copy in the inode item
2450 * from the log
2451 */
2452 if (!dir) {
2453 btrfs_free_path(log_path);
2454 return 0;
2455 }
2456 again:
2457 range_start = 0;
2458 range_end = 0;
2459 while (1) {
2460 if (del_all)
2461 range_end = (u64)-1;
2462 else {
2463 ret = find_dir_range(log, path, dirid, key_type,
2464 &range_start, &range_end);
2465 if (ret != 0)
2466 break;
2467 }
2468
2469 dir_key.offset = range_start;
2470 while (1) {
2471 int nritems;
2472 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2473 0, 0);
2474 if (ret < 0)
2475 goto out;
2476
2477 nritems = btrfs_header_nritems(path->nodes[0]);
2478 if (path->slots[0] >= nritems) {
2479 ret = btrfs_next_leaf(root, path);
2480 if (ret == 1)
2481 break;
2482 else if (ret < 0)
2483 goto out;
2484 }
2485 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2486 path->slots[0]);
2487 if (found_key.objectid != dirid ||
2488 found_key.type != dir_key.type)
2489 goto next_type;
2490
2491 if (found_key.offset > range_end)
2492 break;
2493
2494 ret = check_item_in_log(trans, root, log, path,
2495 log_path, dir,
2496 &found_key);
2497 if (ret)
2498 goto out;
2499 if (found_key.offset == (u64)-1)
2500 break;
2501 dir_key.offset = found_key.offset + 1;
2502 }
2503 btrfs_release_path(path);
2504 if (range_end == (u64)-1)
2505 break;
2506 range_start = range_end + 1;
2507 }
2508
2509 next_type:
2510 ret = 0;
2511 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2512 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2513 dir_key.type = BTRFS_DIR_INDEX_KEY;
2514 btrfs_release_path(path);
2515 goto again;
2516 }
2517 out:
2518 btrfs_release_path(path);
2519 btrfs_free_path(log_path);
2520 iput(dir);
2521 return ret;
2522 }
2523
2524 /*
2525 * the process_func used to replay items from the log tree. This
2526 * gets called in two different stages. The first stage just looks
2527 * for inodes and makes sure they are all copied into the subvolume.
2528 *
2529 * The second stage copies all the other item types from the log into
2530 * the subvolume. The two stage approach is slower, but gets rid of
2531 * lots of complexity around inodes referencing other inodes that exist
2532 * only in the log (references come from either directory items or inode
2533 * back refs).
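 *
 * Concretely, wc->stage selects the work done per item below:
 * LOG_WALK_REPLAY_INODES copies inode items (and handles xattr/dir
 * deletions and i_size truncation), LOG_WALK_REPLAY_DIR_INDEX replays
 * BTRFS_DIR_INDEX_KEY entries, and LOG_WALK_REPLAY_ALL copies the
 * remaining item types (xattrs, inode refs, extents, dir items).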
2534 */
2535 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2536 struct walk_control *wc, u64 gen, int level)
2537 {
2538 int nritems;
2539 struct btrfs_path *path;
2540 struct btrfs_root *root = wc->replay_dest;
2541 struct btrfs_key key;
2542 int i;
2543 int ret;
2544
2545 ret = btrfs_read_buffer(eb, gen, level, NULL);
2546 if (ret)
2547 return ret;
2548
2549 level = btrfs_header_level(eb);
2550
2551 if (level != 0)
2552 return 0;
2553
2554 path = btrfs_alloc_path();
2555 if (!path)
2556 return -ENOMEM;
2557
2558 nritems = btrfs_header_nritems(eb);
2559 for (i = 0; i < nritems; i++) {
2560 btrfs_item_key_to_cpu(eb, &key, i);
2561
2562 /* inode keys are done during the first stage */
2563 if (key.type == BTRFS_INODE_ITEM_KEY &&
2564 wc->stage == LOG_WALK_REPLAY_INODES) {
2565 struct btrfs_inode_item *inode_item;
2566 u32 mode;
2567
2568 inode_item = btrfs_item_ptr(eb, i,
2569 struct btrfs_inode_item);
2570 /*
2571 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2572 * and never got linked before the fsync, skip it, as
2573 * replaying it is pointless since it would be deleted
2574 * later. We skip logging tmpfiles, but it's always
2575 * possible we are replaying a log created with a kernel
2576 * that used to log tmpfiles.
2577 */
2578 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2579 wc->ignore_cur_inode = true;
2580 continue;
2581 } else {
2582 wc->ignore_cur_inode = false;
2583 }
2584 ret = replay_xattr_deletes(wc->trans, root, log,
2585 path, key.objectid);
2586 if (ret)
2587 break;
2588 mode = btrfs_inode_mode(eb, inode_item);
2589 if (S_ISDIR(mode)) {
2590 ret = replay_dir_deletes(wc->trans,
2591 root, log, path, key.objectid, 0);
2592 if (ret)
2593 break;
2594 }
2595 ret = overwrite_item(wc->trans, root, path,
2596 eb, i, &key);
2597 if (ret)
2598 break;
2599
2600 /*
2601 * Before replaying extents, truncate the inode to its
2602 * size. We need to do it now and not after log replay
2603 * because before an fsync we can have prealloc extents
2604 * added beyond the inode's i_size. If we did it after,
2605 * through orphan cleanup for example, we would drop
2606 * those prealloc extents just after replaying them.
2607 */
2608 if (S_ISREG(mode)) {
2609 struct inode *inode;
2610 u64 from;
2611
2612 inode = read_one_inode(root, key.objectid);
2613 if (!inode) {
2614 ret = -EIO;
2615 break;
2616 }
2617 from = ALIGN(i_size_read(inode),
2618 root->fs_info->sectorsize);
2619 ret = btrfs_drop_extents(wc->trans, root, inode,
2620 from, (u64)-1, 1);
2621 if (!ret) {
2622 /* Update the inode's nbytes. */
2623 ret = btrfs_update_inode(wc->trans,
2624 root, inode);
2625 }
2626 iput(inode);
2627 if (ret)
2628 break;
2629 }
2630
2631 ret = link_to_fixup_dir(wc->trans, root,
2632 path, key.objectid);
2633 if (ret)
2634 break;
2635 }
2636
2637 if (wc->ignore_cur_inode)
2638 continue;
2639
2640 if (key.type == BTRFS_DIR_INDEX_KEY &&
2641 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2642 ret = replay_one_dir_item(wc->trans, root, path,
2643 eb, i, &key);
2644 if (ret)
2645 break;
2646 }
2647
2648 if (wc->stage < LOG_WALK_REPLAY_ALL)
2649 continue;
2650
2651 /* these keys are simply copied */
2652 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2653 ret = overwrite_item(wc->trans, root, path,
2654 eb, i, &key);
2655 if (ret)
2656 break;
2657 } else if (key.type == BTRFS_INODE_REF_KEY ||
2658 key.type == BTRFS_INODE_EXTREF_KEY) {
2659 ret = add_inode_ref(wc->trans, root, log, path,
2660 eb, i, &key);
2661 if (ret && ret != -ENOENT)
2662 break;
2663 ret = 0;
2664 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2665 ret = replay_one_extent(wc->trans, root, path,
2666 eb, i, &key);
2667 if (ret)
2668 break;
2669 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2670 ret = replay_one_dir_item(wc->trans, root, path,
2671 eb, i, &key);
2672 if (ret)
2673 break;
2674 }
2675 }
2676 btrfs_free_path(path);
2677 return ret;
2678 }
2679
2680 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2681 struct btrfs_root *root,
2682 struct btrfs_path *path, int *level,
2683 struct walk_control *wc)
2684 {
2685 struct btrfs_fs_info *fs_info = root->fs_info;
2686 u64 root_owner;
2687 u64 bytenr;
2688 u64 ptr_gen;
2689 struct extent_buffer *next;
2690 struct extent_buffer *cur;
2691 struct extent_buffer *parent;
2692 u32 blocksize;
2693 int ret = 0;
2694
2695 WARN_ON(*level < 0);
2696 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2697
2698 while (*level > 0) {
2699 struct btrfs_key first_key;
2700
2701 WARN_ON(*level < 0);
2702 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2703 cur = path->nodes[*level];
2704
2705 WARN_ON(btrfs_header_level(cur) != *level);
2706
2707 if (path->slots[*level] >=
2708 btrfs_header_nritems(cur))
2709 break;
2710
2711 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2712 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2713 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2714 blocksize = fs_info->nodesize;
2715
2716 parent = path->nodes[*level];
2717 root_owner = btrfs_header_owner(parent);
2718
2719 next = btrfs_find_create_tree_block(fs_info, bytenr);
2720 if (IS_ERR(next))
2721 return PTR_ERR(next);
2722
2723 if (*level == 1) {
2724 ret = wc->process_func(root, next, wc, ptr_gen,
2725 *level - 1);
2726 if (ret) {
2727 free_extent_buffer(next);
2728 return ret;
2729 }
2730
2731 path->slots[*level]++;
2732 if (wc->free) {
2733 ret = btrfs_read_buffer(next, ptr_gen,
2734 *level - 1, &first_key);
2735 if (ret) {
2736 free_extent_buffer(next);
2737 return ret;
2738 }
2739
2740 if (trans) {
2741 btrfs_tree_lock(next);
2742 btrfs_set_lock_blocking_write(next);
2743 btrfs_clean_tree_block(next);
2744 btrfs_wait_tree_block_writeback(next);
2745 btrfs_tree_unlock(next);
2746 } else {
2747 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2748 clear_extent_buffer_dirty(next);
2749 }
2750
2751 WARN_ON(root_owner !=
2752 BTRFS_TREE_LOG_OBJECTID);
2753 ret = btrfs_free_and_pin_reserved_extent(
2754 fs_info, bytenr,
2755 blocksize);
2756 if (ret) {
2757 free_extent_buffer(next);
2758 return ret;
2759 }
2760 }
2761 free_extent_buffer(next);
2762 continue;
2763 }
2764 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2765 if (ret) {
2766 free_extent_buffer(next);
2767 return ret;
2768 }
2769
2770 WARN_ON(*level <= 0);
2771 if (path->nodes[*level-1])
2772 free_extent_buffer(path->nodes[*level-1]);
2773 path->nodes[*level-1] = next;
2774 *level = btrfs_header_level(next);
2775 path->slots[*level] = 0;
2776 cond_resched();
2777 }
2778 WARN_ON(*level < 0);
2779 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2780
2781 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2782
2783 cond_resched();
2784 return 0;
2785 }
2786
2787 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2788 struct btrfs_root *root,
2789 struct btrfs_path *path, int *level,
2790 struct walk_control *wc)
2791 {
2792 struct btrfs_fs_info *fs_info = root->fs_info;
2793 u64 root_owner;
2794 int i;
2795 int slot;
2796 int ret;
2797
2798 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2799 slot = path->slots[i];
2800 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2801 path->slots[i]++;
2802 *level = i;
2803 WARN_ON(*level == 0);
2804 return 0;
2805 } else {
2806 struct extent_buffer *parent;
2807 if (path->nodes[*level] == root->node)
2808 parent = path->nodes[*level];
2809 else
2810 parent = path->nodes[*level + 1];
2811
2812 root_owner = btrfs_header_owner(parent);
2813 ret = wc->process_func(root, path->nodes[*level], wc,
2814 btrfs_header_generation(path->nodes[*level]),
2815 *level);
2816 if (ret)
2817 return ret;
2818
2819 if (wc->free) {
2820 struct extent_buffer *next;
2821
2822 next = path->nodes[*level];
2823
2824 if (trans) {
2825 btrfs_tree_lock(next);
2826 btrfs_set_lock_blocking_write(next);
2827 btrfs_clean_tree_block(next);
2828 btrfs_wait_tree_block_writeback(next);
2829 btrfs_tree_unlock(next);
2830 } else {
2831 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2832 clear_extent_buffer_dirty(next);
2833 }
2834
2835 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2836 ret = btrfs_free_and_pin_reserved_extent(
2837 fs_info,
2838 path->nodes[*level]->start,
2839 path->nodes[*level]->len);
2840 if (ret)
2841 return ret;
2842 }
2843 free_extent_buffer(path->nodes[*level]);
2844 path->nodes[*level] = NULL;
2845 *level = i + 1;
2846 }
2847 }
2848 return 1;
2849 }
2850
2851 /*
2852 * drop the reference count on the tree rooted at 'log'. This traverses
2853 * the tree freeing any blocks that have a ref count of zero after being
2854 * decremented.
2855 */
2856 static int walk_log_tree(struct btrfs_trans_handle *trans,
2857 struct btrfs_root *log, struct walk_control *wc)
2858 {
2859 struct btrfs_fs_info *fs_info = log->fs_info;
2860 int ret = 0;
2861 int wret;
2862 int level;
2863 struct btrfs_path *path;
2864 int orig_level;
2865
2866 path = btrfs_alloc_path();
2867 if (!path)
2868 return -ENOMEM;
2869
2870 level = btrfs_header_level(log->node);
2871 orig_level = level;
2872 path->nodes[level] = log->node;
2873 extent_buffer_get(log->node);
2874 path->slots[level] = 0;
2875
2876 while (1) {
2877 wret = walk_down_log_tree(trans, log, path, &level, wc);
2878 if (wret > 0)
2879 break;
2880 if (wret < 0) {
2881 ret = wret;
2882 goto out;
2883 }
2884
2885 wret = walk_up_log_tree(trans, log, path, &level, wc);
2886 if (wret > 0)
2887 break;
2888 if (wret < 0) {
2889 ret = wret;
2890 goto out;
2891 }
2892 }
2893
2894 /* was the root node processed? if not, catch it here */
2895 if (path->nodes[orig_level]) {
2896 ret = wc->process_func(log, path->nodes[orig_level], wc,
2897 btrfs_header_generation(path->nodes[orig_level]),
2898 orig_level);
2899 if (ret)
2900 goto out;
2901 if (wc->free) {
2902 struct extent_buffer *next;
2903
2904 next = path->nodes[orig_level];
2905
2906 if (trans) {
2907 btrfs_tree_lock(next);
2908 btrfs_set_lock_blocking_write(next);
2909 btrfs_clean_tree_block(next);
2910 btrfs_wait_tree_block_writeback(next);
2911 btrfs_tree_unlock(next);
2912 } else {
2913 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2914 clear_extent_buffer_dirty(next);
2915 }
2916
2917 WARN_ON(log->root_key.objectid !=
2918 BTRFS_TREE_LOG_OBJECTID);
2919 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2920 next->start, next->len);
2921 if (ret)
2922 goto out;
2923 }
2924 }
2925
2926 out:
2927 btrfs_free_path(path);
2928 return ret;
2929 }
2930
2931 /*
2932 * helper function to update the item for a given subvolumes log root
2933 * in the tree of log roots
2934 */
2935 static int update_log_root(struct btrfs_trans_handle *trans,
2936 struct btrfs_root *log,
2937 struct btrfs_root_item *root_item)
2938 {
2939 struct btrfs_fs_info *fs_info = log->fs_info;
2940 int ret;
2941
2942 if (log->log_transid == 1) {
2943 /* insert root item on the first sync */
2944 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2945 &log->root_key, root_item);
2946 } else {
2947 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2948 &log->root_key, root_item);
2949 }
2950 return ret;
2951 }
2952
2953 static void wait_log_commit(struct btrfs_root *root, int transid)
2954 {
2955 DEFINE_WAIT(wait);
2956 int index = transid % 2;
2957
2958 /*
2959 * we only allow two pending log transactions at a time,
2960 * so we know that if ours is more than 2 older than the
2961 * current transaction, we're done
2962 */
2963 for (;;) {
2964 prepare_to_wait(&root->log_commit_wait[index],
2965 &wait, TASK_UNINTERRUPTIBLE);
2966
2967 if (!(root->log_transid_committed < transid &&
2968 atomic_read(&root->log_commit[index])))
2969 break;
2970
2971 mutex_unlock(&root->log_mutex);
2972 schedule();
2973 mutex_lock(&root->log_mutex);
2974 }
2975 finish_wait(&root->log_commit_wait[index], &wait);
2976 }
2977
2978 static void wait_for_writer(struct btrfs_root *root)
2979 {
2980 DEFINE_WAIT(wait);
2981
2982 for (;;) {
2983 prepare_to_wait(&root->log_writer_wait, &wait,
2984 TASK_UNINTERRUPTIBLE);
2985 if (!atomic_read(&root->log_writers))
2986 break;
2987
2988 mutex_unlock(&root->log_mutex);
2989 schedule();
2990 mutex_lock(&root->log_mutex);
2991 }
2992 finish_wait(&root->log_writer_wait, &wait);
2993 }
2994
2995 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2996 struct btrfs_log_ctx *ctx)
2997 {
2998 if (!ctx)
2999 return;
3000
3001 mutex_lock(&root->log_mutex);
3002 list_del_init(&ctx->list);
3003 mutex_unlock(&root->log_mutex);
3004 }
3005
3006 /*
3007 * Invoked in log mutex context, or when it is certain that no other task
3008 * can access the list.
3009 */
3010 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3011 int index, int error)
3012 {
3013 struct btrfs_log_ctx *ctx;
3014 struct btrfs_log_ctx *safe;
3015
3016 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3017 list_del_init(&ctx->list);
3018 ctx->log_ret = error;
3019 }
3020
3021 INIT_LIST_HEAD(&root->log_ctxs[index]);
3022 }
3023
3024 /*
3025 * btrfs_sync_log sends a given tree log down to the disk and
3026 * updates the super blocks to record it. When this call is done,
3027 * you know that any inodes previously logged are safely on disk only
3028 * if it returns 0.
3029 *
3030 * Any other return value means you need to call btrfs_commit_transaction.
3031 * Some of the edge cases for fsyncing directories that have had unlinks
3032 * or renames done in the past mean that sometimes the only safe
3033 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3034 * that has happened.
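 *
 * Roughly, the commit below happens in three steps: write out the dirty
 * pages of this subvolume's log tree (under root->log_mutex), then update
 * and write out the log root tree, and finally update the super blocks to
 * point at the new log root tree.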
3035 */
3036 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3037 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3038 {
3039 int index1;
3040 int index2;
3041 int mark;
3042 int ret;
3043 struct btrfs_fs_info *fs_info = root->fs_info;
3044 struct btrfs_root *log = root->log_root;
3045 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3046 struct btrfs_root_item new_root_item;
3047 int log_transid = 0;
3048 struct btrfs_log_ctx root_log_ctx;
3049 struct blk_plug plug;
3050
3051 mutex_lock(&root->log_mutex);
3052 log_transid = ctx->log_transid;
3053 if (root->log_transid_committed >= log_transid) {
3054 mutex_unlock(&root->log_mutex);
3055 return ctx->log_ret;
3056 }
3057
3058 index1 = log_transid % 2;
3059 if (atomic_read(&root->log_commit[index1])) {
3060 wait_log_commit(root, log_transid);
3061 mutex_unlock(&root->log_mutex);
3062 return ctx->log_ret;
3063 }
3064 ASSERT(log_transid == root->log_transid);
3065 atomic_set(&root->log_commit[index1], 1);
3066
3067 /* wait for previous tree log sync to complete */
3068 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3069 wait_log_commit(root, log_transid - 1);
3070
3071 while (1) {
3072 int batch = atomic_read(&root->log_batch);
3073 /* when we're on an ssd, just kick the log commit out */
3074 if (!btrfs_test_opt(fs_info, SSD) &&
3075 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3076 mutex_unlock(&root->log_mutex);
3077 schedule_timeout_uninterruptible(1);
3078 mutex_lock(&root->log_mutex);
3079 }
3080 wait_for_writer(root);
3081 if (batch == atomic_read(&root->log_batch))
3082 break;
3083 }
3084
3085 /* bail out if we need to do a full commit */
3086 if (btrfs_need_log_full_commit(trans)) {
3087 ret = -EAGAIN;
3088 mutex_unlock(&root->log_mutex);
3089 goto out;
3090 }
3091
3092 if (log_transid % 2 == 0)
3093 mark = EXTENT_DIRTY;
3094 else
3095 mark = EXTENT_NEW;
3096
3097 /* we start IO on all the marked extents here, but we don't actually
3098 * wait for them until later.
3099 */
3100 blk_start_plug(&plug);
3101 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3102 if (ret) {
3103 blk_finish_plug(&plug);
3104 btrfs_abort_transaction(trans, ret);
3105 btrfs_set_log_full_commit(trans);
3106 mutex_unlock(&root->log_mutex);
3107 goto out;
3108 }
3109
3110 /*
3111 * We _must_ update under the root->log_mutex in order to make sure we
3112 * have a consistent view of the log root we are trying to commit at
3113 * this moment.
3114 *
3115 * We _must_ copy this into a local copy, because we are not holding the
3116 * log_root_tree->log_mutex yet. This is important because when we
3117 * commit the log_root_tree we must have a consistent view of the
3118 * log_root_tree when we update the super block to point at the
3119 * log_root_tree bytenr. If we update the log_root_tree here we'll race
3120 * with the commit and possibly point at the new block which we may not
3121 * have written out.
3122 */
3123 btrfs_set_root_node(&log->root_item, log->node);
3124 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3125
3126 root->log_transid++;
3127 log->log_transid = root->log_transid;
3128 root->log_start_pid = 0;
3129 /*
3130 * IO has been started, blocks of the log tree have WRITTEN flag set
3131 * in their headers. new modifications of the log will be written to
3132 * new positions. so it's safe to allow log writers to go in.
3133 */
3134 mutex_unlock(&root->log_mutex);
3135
3136 btrfs_init_log_ctx(&root_log_ctx, NULL);
3137
3138 mutex_lock(&log_root_tree->log_mutex);
3139 atomic_inc(&log_root_tree->log_batch);
3140 atomic_inc(&log_root_tree->log_writers);
3141
3142 index2 = log_root_tree->log_transid % 2;
3143 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3144 root_log_ctx.log_transid = log_root_tree->log_transid;
3145
3146 mutex_unlock(&log_root_tree->log_mutex);
3147
3148 mutex_lock(&log_root_tree->log_mutex);
3149
3150 /*
3151 * Now we are safe to update the log_root_tree because we're under the
3152 * log_mutex, and we're a current writer so we're holding the commit
3153 * open until we drop the log_mutex.
3154 */
3155 ret = update_log_root(trans, log, &new_root_item);
3156
3157 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3158 /* atomic_dec_and_test implies a barrier */
3159 cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3160 }
3161
3162 if (ret) {
3163 if (!list_empty(&root_log_ctx.list))
3164 list_del_init(&root_log_ctx.list);
3165
3166 blk_finish_plug(&plug);
3167 btrfs_set_log_full_commit(trans);
3168
3169 if (ret != -ENOSPC) {
3170 btrfs_abort_transaction(trans, ret);
3171 mutex_unlock(&log_root_tree->log_mutex);
3172 goto out;
3173 }
3174 btrfs_wait_tree_log_extents(log, mark);
3175 mutex_unlock(&log_root_tree->log_mutex);
3176 ret = -EAGAIN;
3177 goto out;
3178 }
3179
3180 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3181 blk_finish_plug(&plug);
3182 list_del_init(&root_log_ctx.list);
3183 mutex_unlock(&log_root_tree->log_mutex);
3184 ret = root_log_ctx.log_ret;
3185 goto out;
3186 }
3187
3188 index2 = root_log_ctx.log_transid % 2;
3189 if (atomic_read(&log_root_tree->log_commit[index2])) {
3190 blk_finish_plug(&plug);
3191 ret = btrfs_wait_tree_log_extents(log, mark);
3192 wait_log_commit(log_root_tree,
3193 root_log_ctx.log_transid);
3194 mutex_unlock(&log_root_tree->log_mutex);
3195 if (!ret)
3196 ret = root_log_ctx.log_ret;
3197 goto out;
3198 }
3199 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3200 atomic_set(&log_root_tree->log_commit[index2], 1);
3201
3202 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3203 wait_log_commit(log_root_tree,
3204 root_log_ctx.log_transid - 1);
3205 }
3206
3207 wait_for_writer(log_root_tree);
3208
3209 /*
3210 * now that we've moved on to the tree of log tree roots,
3211 * check the full commit flag again
3212 */
3213 if (btrfs_need_log_full_commit(trans)) {
3214 blk_finish_plug(&plug);
3215 btrfs_wait_tree_log_extents(log, mark);
3216 mutex_unlock(&log_root_tree->log_mutex);
3217 ret = -EAGAIN;
3218 goto out_wake_log_root;
3219 }
3220
3221 ret = btrfs_write_marked_extents(fs_info,
3222 &log_root_tree->dirty_log_pages,
3223 EXTENT_DIRTY | EXTENT_NEW);
3224 blk_finish_plug(&plug);
3225 if (ret) {
3226 btrfs_set_log_full_commit(trans);
3227 btrfs_abort_transaction(trans, ret);
3228 mutex_unlock(&log_root_tree->log_mutex);
3229 goto out_wake_log_root;
3230 }
3231 ret = btrfs_wait_tree_log_extents(log, mark);
3232 if (!ret)
3233 ret = btrfs_wait_tree_log_extents(log_root_tree,
3234 EXTENT_NEW | EXTENT_DIRTY);
3235 if (ret) {
3236 btrfs_set_log_full_commit(trans);
3237 mutex_unlock(&log_root_tree->log_mutex);
3238 goto out_wake_log_root;
3239 }
3240
3241 btrfs_set_super_log_root(fs_info->super_for_commit,
3242 log_root_tree->node->start);
3243 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3244 btrfs_header_level(log_root_tree->node));
3245
3246 log_root_tree->log_transid++;
3247 mutex_unlock(&log_root_tree->log_mutex);
3248
3249 /*
3250 * Nobody else is going to jump in and write the ctree
3251 * super here because the log_commit atomic below is protecting
3252 * us. We must be called with a transaction handle pinning
3253 * the running transaction open, so a full commit can't hop
3254 * in and cause problems either.
3255 */
3256 ret = write_all_supers(fs_info, 1);
3257 if (ret) {
3258 btrfs_set_log_full_commit(trans);
3259 btrfs_abort_transaction(trans, ret);
3260 goto out_wake_log_root;
3261 }
3262
3263 mutex_lock(&root->log_mutex);
3264 if (root->last_log_commit < log_transid)
3265 root->last_log_commit = log_transid;
3266 mutex_unlock(&root->log_mutex);
3267
3268 out_wake_log_root:
3269 mutex_lock(&log_root_tree->log_mutex);
3270 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3271
3272 log_root_tree->log_transid_committed++;
3273 atomic_set(&log_root_tree->log_commit[index2], 0);
3274 mutex_unlock(&log_root_tree->log_mutex);
3275
3276 /*
3277 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3278 * all the updates above are seen by the woken threads. It might not be
3279 * necessary, but proving that seems to be hard.
3280 */
3281 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3282 out:
3283 mutex_lock(&root->log_mutex);
3284 btrfs_remove_all_log_ctxs(root, index1, ret);
3285 root->log_transid_committed++;
3286 atomic_set(&root->log_commit[index1], 0);
3287 mutex_unlock(&root->log_mutex);
3288
3289 /*
3290 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3291 * all the updates above are seen by the woken threads. It might not be
3292 * necessary, but proving that seems to be hard.
3293 */
3294 cond_wake_up(&root->log_commit_wait[index1]);
3295 return ret;
3296 }
3297
3298 static void free_log_tree(struct btrfs_trans_handle *trans,
3299 struct btrfs_root *log)
3300 {
3301 int ret;
3302 struct walk_control wc = {
3303 .free = 1,
3304 .process_func = process_one_buffer
3305 };
3306
3307 ret = walk_log_tree(trans, log, &wc);
3308 if (ret) {
3309 if (trans)
3310 btrfs_abort_transaction(trans, ret);
3311 else
3312 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3313 }
3314
3315 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3316 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3317 free_extent_buffer(log->node);
3318 kfree(log);
3319 }
3320
3321 /*
3322 * free all the extents used by the tree log. This should be called
3323 * at commit time of the full transaction
3324 */
3325 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3326 {
3327 if (root->log_root) {
3328 free_log_tree(trans, root->log_root);
3329 root->log_root = NULL;
3330 }
3331 return 0;
3332 }
3333
3334 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3335 struct btrfs_fs_info *fs_info)
3336 {
3337 if (fs_info->log_root_tree) {
3338 free_log_tree(trans, fs_info->log_root_tree);
3339 fs_info->log_root_tree = NULL;
3340 }
3341 return 0;
3342 }
3343
3344 /*
3345 * Check if an inode was logged in the current transaction. We can't always rely
3346 * on an inode's logged_trans value, because it's an in-memory only field and
3347 * therefore not persisted. This means that its value is lost if the inode gets
3348 * evicted and loaded again from disk (in which case it has a value of 0, and
3349 * certainly it is smaller than any possible transaction ID), when that happens
3350 * the full_sync flag is set in the inode's runtime flags, so in that case we
3351 * assume eviction happened and ignore the logged_trans value, assuming the
3352 * worst case, that the inode was logged before in the current transaction.
3353 */
3354 static bool inode_logged(struct btrfs_trans_handle *trans,
3355 struct btrfs_inode *inode)
3356 {
3357 if (inode->logged_trans == trans->transid)
3358 return true;
3359
3360 if (inode->last_trans == trans->transid &&
3361 test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
3362 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3363 return true;
3364
3365 return false;
3366 }
3367
3368 /*
3369 * If both a file and directory are logged, and unlinks or renames are
3370 * mixed in, we have a few interesting corners:
3371 *
3372 * create file X in dir Y
3373 * link file X to X.link in dir Y
3374 * fsync file X
3375 * unlink file X but leave X.link
3376 * fsync dir Y
3377 *
3378 * After a crash we would expect only X.link to exist. But file X
3379 * didn't get fsync'd again so the log has back refs for X and X.link.
3380 *
3381 * We solve this by removing directory entries and inode backrefs from the
3382 * log when a file that was logged in the current transaction is
3383 * unlinked. Any later fsync will include the updated log entries, and
3384 * we'll be able to reconstruct the proper directory items from backrefs.
3385 *
3386 * This optimization allows us to avoid relogging the entire inode
3387 * or the entire directory.
3388 */
3389 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3390 struct btrfs_root *root,
3391 const char *name, int name_len,
3392 struct btrfs_inode *dir, u64 index)
3393 {
3394 struct btrfs_root *log;
3395 struct btrfs_dir_item *di;
3396 struct btrfs_path *path;
3397 int ret;
3398 int err = 0;
3399 int bytes_del = 0;
3400 u64 dir_ino = btrfs_ino(dir);
3401
3402 if (!inode_logged(trans, dir))
3403 return 0;
3404
3405 ret = join_running_log_trans(root);
3406 if (ret)
3407 return 0;
3408
3409 mutex_lock(&dir->log_mutex);
3410
3411 log = root->log_root;
3412 path = btrfs_alloc_path();
3413 if (!path) {
3414 err = -ENOMEM;
3415 goto out_unlock;
3416 }
3417
3418 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3419 name, name_len, -1);
3420 if (IS_ERR(di)) {
3421 err = PTR_ERR(di);
3422 goto fail;
3423 }
3424 if (di) {
3425 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3426 bytes_del += name_len;
3427 if (ret) {
3428 err = ret;
3429 goto fail;
3430 }
3431 }
3432 btrfs_release_path(path);
3433 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3434 index, name, name_len, -1);
3435 if (IS_ERR(di)) {
3436 err = PTR_ERR(di);
3437 goto fail;
3438 }
3439 if (di) {
3440 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3441 bytes_del += name_len;
3442 if (ret) {
3443 err = ret;
3444 goto fail;
3445 }
3446 }
3447
3448 /* update the directory size in the log to reflect the names
3449 * we have removed
3450 */
3451 if (bytes_del) {
3452 struct btrfs_key key;
3453
3454 key.objectid = dir_ino;
3455 key.offset = 0;
3456 key.type = BTRFS_INODE_ITEM_KEY;
3457 btrfs_release_path(path);
3458
3459 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3460 if (ret < 0) {
3461 err = ret;
3462 goto fail;
3463 }
3464 if (ret == 0) {
3465 struct btrfs_inode_item *item;
3466 u64 i_size;
3467
3468 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3469 struct btrfs_inode_item);
3470 i_size = btrfs_inode_size(path->nodes[0], item);
3471 if (i_size > bytes_del)
3472 i_size -= bytes_del;
3473 else
3474 i_size = 0;
3475 btrfs_set_inode_size(path->nodes[0], item, i_size);
3476 btrfs_mark_buffer_dirty(path->nodes[0]);
3477 } else
3478 ret = 0;
3479 btrfs_release_path(path);
3480 }
3481 fail:
3482 btrfs_free_path(path);
3483 out_unlock:
3484 mutex_unlock(&dir->log_mutex);
3485 if (ret == -ENOSPC) {
3486 btrfs_set_log_full_commit(trans);
3487 ret = 0;
3488 } else if (ret < 0)
3489 btrfs_abort_transaction(trans, ret);
3490
3491 btrfs_end_log_trans(root);
3492
3493 return err;
3494 }
3495
3496 /* see comments for btrfs_del_dir_entries_in_log */
3497 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3498 struct btrfs_root *root,
3499 const char *name, int name_len,
3500 struct btrfs_inode *inode, u64 dirid)
3501 {
3502 struct btrfs_root *log;
3503 u64 index;
3504 int ret;
3505
3506 if (!inode_logged(trans, inode))
3507 return 0;
3508
3509 ret = join_running_log_trans(root);
3510 if (ret)
3511 return 0;
3512 log = root->log_root;
3513 mutex_lock(&inode->log_mutex);
3514
3515 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3516 dirid, &index);
3517 mutex_unlock(&inode->log_mutex);
3518 if (ret == -ENOSPC) {
3519 btrfs_set_log_full_commit(trans);
3520 ret = 0;
3521 } else if (ret < 0 && ret != -ENOENT)
3522 btrfs_abort_transaction(trans, ret);
3523 btrfs_end_log_trans(root);
3524
3525 return ret;
3526 }
3527
3528 /*
3529 * creates a range item in the log for 'dirid'. first_offset and
3530 * last_offset tell us which parts of the key space the log should
3531 * be considered authoritative for.
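 *
 * The range is stored as a BTRFS_DIR_LOG_ITEM_KEY (or
 * BTRFS_DIR_LOG_INDEX_KEY for index items) keyed at (dirid, first_offset),
 * with last_offset recorded in the item's dir_log_end field.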
3532 */
3533 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3534 struct btrfs_root *log,
3535 struct btrfs_path *path,
3536 int key_type, u64 dirid,
3537 u64 first_offset, u64 last_offset)
3538 {
3539 int ret;
3540 struct btrfs_key key;
3541 struct btrfs_dir_log_item *item;
3542
3543 key.objectid = dirid;
3544 key.offset = first_offset;
3545 if (key_type == BTRFS_DIR_ITEM_KEY)
3546 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3547 else
3548 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3549 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3550 if (ret)
3551 return ret;
3552
3553 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3554 struct btrfs_dir_log_item);
3555 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3556 btrfs_mark_buffer_dirty(path->nodes[0]);
3557 btrfs_release_path(path);
3558 return 0;
3559 }
3560
3561 /*
3562 * log all the items included in the current transaction for a given
3563 * directory. This also creates the range items in the log tree required
3564 * to replay anything deleted before the fsync
3565 */
3566 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3567 struct btrfs_root *root, struct btrfs_inode *inode,
3568 struct btrfs_path *path,
3569 struct btrfs_path *dst_path, int key_type,
3570 struct btrfs_log_ctx *ctx,
3571 u64 min_offset, u64 *last_offset_ret)
3572 {
3573 struct btrfs_key min_key;
3574 struct btrfs_root *log = root->log_root;
3575 struct extent_buffer *src;
3576 int err = 0;
3577 int ret;
3578 int i;
3579 int nritems;
3580 u64 first_offset = min_offset;
3581 u64 last_offset = (u64)-1;
3582 u64 ino = btrfs_ino(inode);
3583
3584 log = root->log_root;
3585
3586 min_key.objectid = ino;
3587 min_key.type = key_type;
3588 min_key.offset = min_offset;
3589
3590 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3591
3592 /*
3593 * we didn't find anything from this transaction, see if there
3594 * is anything at all
3595 */
3596 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3597 min_key.objectid = ino;
3598 min_key.type = key_type;
3599 min_key.offset = (u64)-1;
3600 btrfs_release_path(path);
3601 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3602 if (ret < 0) {
3603 btrfs_release_path(path);
3604 return ret;
3605 }
3606 ret = btrfs_previous_item(root, path, ino, key_type);
3607
3608 /* if ret == 0 there are items for this type,
3609 * create a range to tell us the last key of this type.
3610 * otherwise, there are no items in this directory after
3611 * *min_offset, and we create a range to indicate that.
3612 */
3613 if (ret == 0) {
3614 struct btrfs_key tmp;
3615 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3616 path->slots[0]);
3617 if (key_type == tmp.type)
3618 first_offset = max(min_offset, tmp.offset) + 1;
3619 }
3620 goto done;
3621 }
3622
3623 /* go backward to find any previous key */
3624 ret = btrfs_previous_item(root, path, ino, key_type);
3625 if (ret == 0) {
3626 struct btrfs_key tmp;
3627 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3628 if (key_type == tmp.type) {
3629 first_offset = tmp.offset;
3630 ret = overwrite_item(trans, log, dst_path,
3631 path->nodes[0], path->slots[0],
3632 &tmp);
3633 if (ret) {
3634 err = ret;
3635 goto done;
3636 }
3637 }
3638 }
3639 btrfs_release_path(path);
3640
3641 /*
3642 * Find the first key from this transaction again. See the note for
3643 * log_new_dir_dentries, if we're logging a directory recursively we
3644 * won't be holding its i_mutex, which means we can modify the directory
3645 * while we're logging it. If we remove an entry between our first
3646 * search and this search we'll not find the key again and can just
3647 * bail.
3648 */
3649 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3650 if (ret != 0)
3651 goto done;
3652
3653 /*
3654 * we have a block from this transaction, log every item in it
3655 * from our directory
3656 */
3657 while (1) {
3658 struct btrfs_key tmp;
3659 src = path->nodes[0];
3660 nritems = btrfs_header_nritems(src);
3661 for (i = path->slots[0]; i < nritems; i++) {
3662 struct btrfs_dir_item *di;
3663
3664 btrfs_item_key_to_cpu(src, &min_key, i);
3665
3666 if (min_key.objectid != ino || min_key.type != key_type)
3667 goto done;
3668 ret = overwrite_item(trans, log, dst_path, src, i,
3669 &min_key);
3670 if (ret) {
3671 err = ret;
3672 goto done;
3673 }
3674
3675 /*
3676 * We must make sure that when we log a directory entry,
3677 * the corresponding inode, after log replay, has a
3678 * matching link count. For example:
3679 *
3680 * touch foo
3681 * mkdir mydir
3682 * sync
3683 * ln foo mydir/bar
3684 * xfs_io -c "fsync" mydir
3685 * <crash>
3686 * <mount fs and log replay>
3687 *
3688 * Would result in a fsync log that when replayed, our
3689 * file inode would have a link count of 1, but we get
3690 * two directory entries pointing to the same inode.
3691 * After removing one of the names, it would not be
3692 * possible to remove the other name, which resulted
3693 * always in stale file handle errors, and would not
3694 * be possible to rmdir the parent directory, since
3695 * its i_size could never decrement to the value
3696 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3697 */
3698 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3699 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3700 if (ctx &&
3701 (btrfs_dir_transid(src, di) == trans->transid ||
3702 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3703 tmp.type != BTRFS_ROOT_ITEM_KEY)
3704 ctx->log_new_dentries = true;
3705 }
3706 path->slots[0] = nritems;
3707
3708 /*
3709 * look ahead to the next item and see if it is also
3710 * from this directory and from this transaction
3711 */
3712 ret = btrfs_next_leaf(root, path);
3713 if (ret) {
3714 if (ret == 1)
3715 last_offset = (u64)-1;
3716 else
3717 err = ret;
3718 goto done;
3719 }
3720 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3721 if (tmp.objectid != ino || tmp.type != key_type) {
3722 last_offset = (u64)-1;
3723 goto done;
3724 }
3725 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3726 ret = overwrite_item(trans, log, dst_path,
3727 path->nodes[0], path->slots[0],
3728 &tmp);
3729 if (ret)
3730 err = ret;
3731 else
3732 last_offset = tmp.offset;
3733 goto done;
3734 }
3735 }
3736 done:
3737 btrfs_release_path(path);
3738 btrfs_release_path(dst_path);
3739
3740 if (err == 0) {
3741 *last_offset_ret = last_offset;
3742 /*
3743 * insert the log range keys to indicate where the log
3744 * is valid
3745 */
3746 ret = insert_dir_log_key(trans, log, path, key_type,
3747 ino, first_offset, last_offset);
3748 if (ret)
3749 err = ret;
3750 }
3751 return err;
3752 }
3753
3754 /*
3755 * Logging directories is very similar to logging inodes: we find all the items
3756 * from the current transaction and write them to the log.
3757 *
3758 * The recovery code scans the directory in the subvolume, and if it finds a
3759 * key in the range logged that is not present in the log tree, then it means
3760 * that dir entry was unlinked during the transaction.
3761 *
3762 * In order for that scan to work, we must include one key smaller than
3763 * the smallest logged by this transaction and one key larger than the largest
3764 * key logged by this transaction.
3765 */
3766 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3767 struct btrfs_root *root, struct btrfs_inode *inode,
3768 struct btrfs_path *path,
3769 struct btrfs_path *dst_path,
3770 struct btrfs_log_ctx *ctx)
3771 {
3772 u64 min_key;
3773 u64 max_key;
3774 int ret;
3775 int key_type = BTRFS_DIR_ITEM_KEY;
3776
3777 again:
3778 min_key = 0;
3779 max_key = 0;
3780 while (1) {
3781 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3782 ctx, min_key, &max_key);
3783 if (ret)
3784 return ret;
3785 if (max_key == (u64)-1)
3786 break;
3787 min_key = max_key + 1;
3788 }
3789
3790 if (key_type == BTRFS_DIR_ITEM_KEY) {
3791 key_type = BTRFS_DIR_INDEX_KEY;
3792 goto again;
3793 }
3794 return 0;
3795 }
3796
3797 /*
3798 * a helper function to drop items from the log before we relog an
3799 * inode. max_key_type indicates the highest item type to remove.
3800 * This cannot be run for file data extents because it does not
3801 * free the extents they point to.
3802 */
3803 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3804 struct btrfs_root *log,
3805 struct btrfs_path *path,
3806 u64 objectid, int max_key_type)
3807 {
3808 int ret;
3809 struct btrfs_key key;
3810 struct btrfs_key found_key;
3811 int start_slot;
3812
3813 key.objectid = objectid;
3814 key.type = max_key_type;
3815 key.offset = (u64)-1;
3816
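/*
 * Delete, one leaf at a time, every item of this objectid whose type is
 * <= max_key_type: find the last such item, locate the first slot in the
 * leaf that belongs to the objectid, and remove the whole run with a
 * single btrfs_del_items() call.
 */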
3817 while (1) {
3818 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3819 BUG_ON(ret == 0); /* Logic error */
3820 if (ret < 0)
3821 break;
3822
3823 if (path->slots[0] == 0)
3824 break;
3825
3826 path->slots[0]--;
3827 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3828 path->slots[0]);
3829
3830 if (found_key.objectid != objectid)
3831 break;
3832
3833 found_key.offset = 0;
3834 found_key.type = 0;
3835 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3836 &start_slot);
3837 if (ret < 0)
3838 break;
3839
3840 ret = btrfs_del_items(trans, log, path, start_slot,
3841 path->slots[0] - start_slot + 1);
3842 /*
3843 * If start slot isn't 0 then we don't need to re-search, we've
3844 * found the last guy with the objectid in this tree.
3845 */
3846 if (ret || start_slot != 0)
3847 break;
3848 btrfs_release_path(path);
3849 }
3850 btrfs_release_path(path);
3851 if (ret > 0)
3852 ret = 0;
3853 return ret;
3854 }
3855
3856 static void fill_inode_item(struct btrfs_trans_handle *trans,
3857 struct extent_buffer *leaf,
3858 struct btrfs_inode_item *item,
3859 struct inode *inode, int log_inode_only,
3860 u64 logged_isize)
3861 {
3862 struct btrfs_map_token token;
3863
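/*
 * The map token caches the extent buffer mapping so the many field
 * setters below don't have to re-map the buffer for every call.
 */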
3864 btrfs_init_map_token(&token, leaf);
3865
3866 if (log_inode_only) {
3867 /* set the generation to zero so the recovery code
3868 * can tell the difference between a log entry made
3869 * just to say 'this inode exists' and one made
3870 * to say 'update this inode with these values'
3871 */
3872 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3873 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3874 } else {
3875 btrfs_set_token_inode_generation(leaf, item,
3876 BTRFS_I(inode)->generation,
3877 &token);
3878 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3879 }
3880
3881 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3882 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3883 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3884 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3885
3886 btrfs_set_token_timespec_sec(leaf, &item->atime,
3887 inode->i_atime.tv_sec, &token);
3888 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3889 inode->i_atime.tv_nsec, &token);
3890
3891 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3892 inode->i_mtime.tv_sec, &token);
3893 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3894 inode->i_mtime.tv_nsec, &token);
3895
3896 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3897 inode->i_ctime.tv_sec, &token);
3898 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3899 inode->i_ctime.tv_nsec, &token);
3900
3901 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3902 &token);
3903
3904 btrfs_set_token_inode_sequence(leaf, item,
3905 inode_peek_iversion(inode), &token);
3906 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3907 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3908 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3909 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3910 }
3911
3912 static int log_inode_item(struct btrfs_trans_handle *trans,
3913 struct btrfs_root *log, struct btrfs_path *path,
3914 struct btrfs_inode *inode)
3915 {
3916 struct btrfs_inode_item *inode_item;
3917 int ret;
3918
3919 ret = btrfs_insert_empty_item(trans, log, path,
3920 &inode->location, sizeof(*inode_item));
3921 if (ret && ret != -EEXIST)
3922 return ret;
3923 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3924 struct btrfs_inode_item);
3925 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3926 0, 0);
3927 btrfs_release_path(path);
3928 return 0;
3929 }
3930
3931 static int log_csums(struct btrfs_trans_handle *trans,
3932 struct btrfs_root *log_root,
3933 struct btrfs_ordered_sum *sums)
3934 {
3935 int ret;
3936
3937 /*
3938 * Due to extent cloning, we might have logged a csum item that covers a
3939 * subrange of a cloned extent, and later we can end up logging a csum
3940 * item for a larger subrange of the same extent or the entire range.
3941 * This would leave csum items in the log tree that cover the same range
3942 * and break the searches for checksums in the log tree, resulting in
3943 * some checksums missing in the fs/subvolume tree. So just delete (or
3944 * trim and adjust) any existing csum items in the log for this range.
3945 */
3946 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
3947 if (ret)
3948 return ret;
3949
3950 return btrfs_csum_file_blocks(trans, log_root, sums);
3951 }
3952
3953 static noinline int copy_items(struct btrfs_trans_handle *trans,
3954 struct btrfs_inode *inode,
3955 struct btrfs_path *dst_path,
3956 struct btrfs_path *src_path, u64 *last_extent,
3957 int start_slot, int nr, int inode_only,
3958 u64 logged_isize)
3959 {
3960 struct btrfs_fs_info *fs_info = trans->fs_info;
3961 unsigned long src_offset;
3962 unsigned long dst_offset;
3963 struct btrfs_root *log = inode->root->log_root;
3964 struct btrfs_file_extent_item *extent;
3965 struct btrfs_inode_item *inode_item;
3966 struct extent_buffer *src = src_path->nodes[0];
3967 struct btrfs_key first_key, last_key, key;
3968 int ret;
3969 struct btrfs_key *ins_keys;
3970 u32 *ins_sizes;
3971 char *ins_data;
3972 int i;
3973 struct list_head ordered_sums;
3974 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3975 bool has_extents = false;
3976 bool need_find_last_extent = true;
3977 bool done = false;
3978
3979 INIT_LIST_HEAD(&ordered_sums);
3980
3981 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3982 nr * sizeof(u32), GFP_NOFS);
3983 if (!ins_data)
3984 return -ENOMEM;
3985
3986 first_key.objectid = (u64)-1;
3987
3988 ins_sizes = (u32 *)ins_data;
3989 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3990
3991 for (i = 0; i < nr; i++) {
3992 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3993 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3994 }
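/*
 * Reserve room in the log tree for all the items in one go; their
 * contents are copied or generated in the loop below.
 */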
3995 ret = btrfs_insert_empty_items(trans, log, dst_path,
3996 ins_keys, ins_sizes, nr);
3997 if (ret) {
3998 kfree(ins_data);
3999 return ret;
4000 }
4001
4002 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
4003 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
4004 dst_path->slots[0]);
4005
4006 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
4007
4008 if (i == nr - 1)
4009 last_key = ins_keys[i];
4010
4011 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
4012 inode_item = btrfs_item_ptr(dst_path->nodes[0],
4013 dst_path->slots[0],
4014 struct btrfs_inode_item);
4015 fill_inode_item(trans, dst_path->nodes[0], inode_item,
4016 &inode->vfs_inode,
4017 inode_only == LOG_INODE_EXISTS,
4018 logged_isize);
4019 } else {
4020 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4021 src_offset, ins_sizes[i]);
4022 }
4023
4024 /*
4025 * We set need_find_last_extent here in case we know we were
4026 * processing other items and then walk into the first extent in
4027 * the inode. If we don't hit an extent then nothing changes,
4028 * we'll do the last search the next time around.
4029 */
4030 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
4031 has_extents = true;
4032 if (first_key.objectid == (u64)-1)
4033 first_key = ins_keys[i];
4034 } else {
4035 need_find_last_extent = false;
4036 }
4037
4038 /* take a reference on file data extents so that truncates
4039 * or deletes of this inode don't have to relog the inode
4040 * again
4041 */
4042 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4043 !skip_csum) {
4044 int found_type;
4045 extent = btrfs_item_ptr(src, start_slot + i,
4046 struct btrfs_file_extent_item);
4047
4048 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4049 continue;
4050
4051 found_type = btrfs_file_extent_type(src, extent);
4052 if (found_type == BTRFS_FILE_EXTENT_REG) {
4053 u64 ds, dl, cs, cl;
4054 ds = btrfs_file_extent_disk_bytenr(src,
4055 extent);
4056 /* ds == 0 is a hole */
4057 if (ds == 0)
4058 continue;
4059
4060 dl = btrfs_file_extent_disk_num_bytes(src,
4061 extent);
4062 cs = btrfs_file_extent_offset(src, extent);
4063 cl = btrfs_file_extent_num_bytes(src,
4064 extent);
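/*
 * Compressed extents have checksums for the whole on-disk
 * extent, so adjust the range to cover [ds, ds + dl).
 */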
4065 if (btrfs_file_extent_compression(src,
4066 extent)) {
4067 cs = 0;
4068 cl = dl;
4069 }
4070
4071 ret = btrfs_lookup_csums_range(
4072 fs_info->csum_root,
4073 ds + cs, ds + cs + cl - 1,
4074 &ordered_sums, 0);
4075 if (ret) {
4076 btrfs_release_path(dst_path);
4077 kfree(ins_data);
4078 return ret;
4079 }
4080 }
4081 }
4082 }
4083
4084 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4085 btrfs_release_path(dst_path);
4086 kfree(ins_data);
4087
4088 /*
4089 * we have to do this after the loop above to avoid changing the
4090 * log tree while trying to change the log tree.
4091 */
4092 ret = 0;
4093 while (!list_empty(&ordered_sums)) {
4094 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4095 struct btrfs_ordered_sum,
4096 list);
4097 if (!ret)
4098 ret = log_csums(trans, log, sums);
4099 list_del(&sums->list);
4100 kfree(sums);
4101 }
4102
4103 if (!has_extents)
4104 return ret;
4105
4106 if (need_find_last_extent && *last_extent == first_key.offset) {
4107 /*
4108 * We don't have any leaves between our current one and the one
4109 * we processed before that can have file extent items for our
4110 * inode (and have a generation number smaller than our current
4111 * transaction id).
4112 */
4113 need_find_last_extent = false;
4114 }
4115
4116 /*
4117 * Because we use btrfs_search_forward we could skip leaves that were
4118 * not modified and then assume *last_extent is valid when it really
4119 * isn't. So back up to the previous leaf and read the end of the last
4120 * extent before we go and fill in holes.
4121 */
4122 if (need_find_last_extent) {
4123 u64 len;
4124
4125 ret = btrfs_prev_leaf(inode->root, src_path);
4126 if (ret < 0)
4127 return ret;
4128 if (ret)
4129 goto fill_holes;
4130 if (src_path->slots[0])
4131 src_path->slots[0]--;
4132 src = src_path->nodes[0];
4133 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
4134 if (key.objectid != btrfs_ino(inode) ||
4135 key.type != BTRFS_EXTENT_DATA_KEY)
4136 goto fill_holes;
4137 extent = btrfs_item_ptr(src, src_path->slots[0],
4138 struct btrfs_file_extent_item);
4139 if (btrfs_file_extent_type(src, extent) ==
4140 BTRFS_FILE_EXTENT_INLINE) {
4141 len = btrfs_file_extent_ram_bytes(src, extent);
4142 *last_extent = ALIGN(key.offset + len,
4143 fs_info->sectorsize);
4144 } else {
4145 len = btrfs_file_extent_num_bytes(src, extent);
4146 *last_extent = key.offset + len;
4147 }
4148 }
4149 fill_holes:
4150 /* So we did prev_leaf, now we need to move to the next leaf, but a few
4151 * things could have happened
4152 *
4153 * 1) A merge could have happened, so we could currently be on a leaf
4154 * that holds what we were copying in the first place.
4155 * 2) A split could have happened, and now not all of the items we want
4156 * are on the same leaf.
4157 *
4158 * So we need to adjust how we search for holes, we need to drop the
4159 * path and re-search for the first extent key we found, and then walk
4160 * forward until we hit the last one we copied.
4161 */
4162 if (need_find_last_extent) {
4163 /* btrfs_prev_leaf could return 1 without releasing the path */
4164 btrfs_release_path(src_path);
4165 ret = btrfs_search_slot(NULL, inode->root, &first_key,
4166 src_path, 0, 0);
4167 if (ret < 0)
4168 return ret;
4169 ASSERT(ret == 0);
4170 src = src_path->nodes[0];
4171 i = src_path->slots[0];
4172 } else {
4173 i = start_slot;
4174 }
4175
4176 /*
4177 * Ok so here we need to go through and fill in any holes we may have
4178 * to make sure that holes are punched for those areas in case they had
4179 * extents previously.
4180 */
4181 while (!done) {
4182 u64 offset, len;
4183 u64 extent_end;
4184
4185 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
4186 ret = btrfs_next_leaf(inode->root, src_path);
4187 if (ret < 0)
4188 return ret;
4189 ASSERT(ret == 0);
4190 src = src_path->nodes[0];
4191 i = 0;
4192 need_find_last_extent = true;
4193 }
4194
4195 btrfs_item_key_to_cpu(src, &key, i);
4196 if (!btrfs_comp_cpu_keys(&key, &last_key))
4197 done = true;
4198 if (key.objectid != btrfs_ino(inode) ||
4199 key.type != BTRFS_EXTENT_DATA_KEY) {
4200 i++;
4201 continue;
4202 }
4203 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
4204 if (btrfs_file_extent_type(src, extent) ==
4205 BTRFS_FILE_EXTENT_INLINE) {
4206 len = btrfs_file_extent_ram_bytes(src, extent);
4207 extent_end = ALIGN(key.offset + len,
4208 fs_info->sectorsize);
4209 } else {
4210 len = btrfs_file_extent_num_bytes(src, extent);
4211 extent_end = key.offset + len;
4212 }
4213 i++;
4214
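/*
 * A gap between the end of the previous extent and the start of
 * this one is a hole; log it as a file extent item with a zero
 * disk_bytenr so that replay punches the hole.
 */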
4215 if (*last_extent == key.offset) {
4216 *last_extent = extent_end;
4217 continue;
4218 }
4219 offset = *last_extent;
4220 len = key.offset - *last_extent;
4221 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
4222 offset, 0, 0, len, 0, len, 0, 0, 0);
4223 if (ret)
4224 break;
4225 *last_extent = extent_end;
4226 }
4227
4228 /*
4229 * Check if there is a hole between the last extent found in our leaf
4230 * and the first extent in the next leaf. If there is one, we need to
4231 * log an explicit hole so that at replay time we can punch the hole.
4232 */
4233 if (ret == 0 &&
4234 key.objectid == btrfs_ino(inode) &&
4235 key.type == BTRFS_EXTENT_DATA_KEY &&
4236 i == btrfs_header_nritems(src_path->nodes[0])) {
4237 ret = btrfs_next_leaf(inode->root, src_path);
4238 need_find_last_extent = true;
4239 if (ret > 0) {
4240 ret = 0;
4241 } else if (ret == 0) {
4242 btrfs_item_key_to_cpu(src_path->nodes[0], &key,
4243 src_path->slots[0]);
4244 if (key.objectid == btrfs_ino(inode) &&
4245 key.type == BTRFS_EXTENT_DATA_KEY &&
4246 *last_extent < key.offset) {
4247 const u64 len = key.offset - *last_extent;
4248
4249 ret = btrfs_insert_file_extent(trans, log,
4250 btrfs_ino(inode),
4251 *last_extent, 0,
4252 0, len, 0, len,
4253 0, 0, 0);
4254 *last_extent += len;
4255 }
4256 }
4257 }
4258 /*
4259 * Need to let the callers know we dropped the path so they should
4260 * re-search.
4261 */
4262 if (!ret && need_find_last_extent)
4263 ret = 1;
4264 return ret;
4265 }
4266
4267 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4268 {
4269 struct extent_map *em1, *em2;
4270
4271 em1 = list_entry(a, struct extent_map, list);
4272 em2 = list_entry(b, struct extent_map, list);
4273
4274 if (em1->start < em2->start)
4275 return -1;
4276 else if (em1->start > em2->start)
4277 return 1;
4278 return 0;
4279 }
4280
4281 static int log_extent_csums(struct btrfs_trans_handle *trans,
4282 struct btrfs_inode *inode,
4283 struct btrfs_root *log_root,
4284 const struct extent_map *em)
4285 {
4286 u64 csum_offset;
4287 u64 csum_len;
4288 LIST_HEAD(ordered_sums);
4289 int ret = 0;
4290
4291 if (inode->flags & BTRFS_INODE_NODATASUM ||
4292 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4293 em->block_start == EXTENT_MAP_HOLE)
4294 return 0;
4295
4296 /* If we're compressed we have to save the entire range of csums. */
4297 if (em->compress_type) {
4298 csum_offset = 0;
4299 csum_len = max(em->block_len, em->orig_block_len);
4300 } else {
4301 csum_offset = em->mod_start - em->start;
4302 csum_len = em->mod_len;
4303 }
4304
4305 /* block start is already adjusted for the file extent offset. */
4306 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4307 em->block_start + csum_offset,
4308 em->block_start + csum_offset +
4309 csum_len - 1, &ordered_sums, 0);
4310 if (ret)
4311 return ret;
4312
4313 while (!list_empty(&ordered_sums)) {
4314 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4315 struct btrfs_ordered_sum,
4316 list);
4317 if (!ret)
4318 ret = log_csums(trans, log_root, sums);
4319 list_del(&sums->list);
4320 kfree(sums);
4321 }
4322
4323 return ret;
4324 }
4325
4326 static int log_one_extent(struct btrfs_trans_handle *trans,
4327 struct btrfs_inode *inode, struct btrfs_root *root,
4328 const struct extent_map *em,
4329 struct btrfs_path *path,
4330 struct btrfs_log_ctx *ctx)
4331 {
4332 struct btrfs_root *log = root->log_root;
4333 struct btrfs_file_extent_item *fi;
4334 struct extent_buffer *leaf;
4335 struct btrfs_map_token token;
4336 struct btrfs_key key;
4337 u64 extent_offset = em->start - em->orig_start;
4338 u64 block_len;
4339 int ret;
4340 int extent_inserted = 0;
4341
4342 ret = log_extent_csums(trans, inode, log, em);
4343 if (ret)
4344 return ret;
4345
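/*
 * Drop any file extent items previously logged for this range first;
 * __btrfs_drop_extents() may also insert the slot for the replacement
 * item for us, in which case extent_inserted is set.
 */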
4346 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4347 em->start + em->len, NULL, 0, 1,
4348 sizeof(*fi), &extent_inserted);
4349 if (ret)
4350 return ret;
4351
4352 if (!extent_inserted) {
4353 key.objectid = btrfs_ino(inode);
4354 key.type = BTRFS_EXTENT_DATA_KEY;
4355 key.offset = em->start;
4356
4357 ret = btrfs_insert_empty_item(trans, log, path, &key,
4358 sizeof(*fi));
4359 if (ret)
4360 return ret;
4361 }
4362 leaf = path->nodes[0];
4363 btrfs_init_map_token(&token, leaf);
4364 fi = btrfs_item_ptr(leaf, path->slots[0],
4365 struct btrfs_file_extent_item);
4366
4367 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4368 &token);
4369 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4370 btrfs_set_token_file_extent_type(leaf, fi,
4371 BTRFS_FILE_EXTENT_PREALLOC,
4372 &token);
4373 else
4374 btrfs_set_token_file_extent_type(leaf, fi,
4375 BTRFS_FILE_EXTENT_REG,
4376 &token);
4377
4378 block_len = max(em->block_len, em->orig_block_len);
4379 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4380 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4381 em->block_start,
4382 &token);
4383 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4384 &token);
4385 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4386 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4387 em->block_start -
4388 extent_offset, &token);
4389 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4390 &token);
4391 } else {
4392 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4393 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4394 &token);
4395 }
4396
4397 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4398 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4399 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4400 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4401 &token);
4402 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4403 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4404 btrfs_mark_buffer_dirty(leaf);
4405
4406 btrfs_release_path(path);
4407
4408 return ret;
4409 }
4410
4411 /*
4412 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4413 * lose them after doing a fast fsync and replaying the log. We scan the
4414 * subvolume's root instead of iterating the inode's extent map tree because
4415 * otherwise we can log incorrect extent items based on extent map conversion.
4416 * That can happen due to the fact that extent maps are merged when they
4417 * are not in the extent map tree's list of modified extents.
4418 */
4419 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4420 struct btrfs_inode *inode,
4421 struct btrfs_path *path)
4422 {
4423 struct btrfs_root *root = inode->root;
4424 struct btrfs_key key;
4425 const u64 i_size = i_size_read(&inode->vfs_inode);
4426 const u64 ino = btrfs_ino(inode);
4427 struct btrfs_path *dst_path = NULL;
4428 u64 last_extent = (u64)-1;
4429 int ins_nr = 0;
4430 int start_slot;
4431 int ret;
4432
4433 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4434 return 0;
4435
4436 key.objectid = ino;
4437 key.type = BTRFS_EXTENT_DATA_KEY;
4438 key.offset = i_size;
4439 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4440 if (ret < 0)
4441 goto out;
4442
4443 while (true) {
4444 struct extent_buffer *leaf = path->nodes[0];
4445 int slot = path->slots[0];
4446
4447 if (slot >= btrfs_header_nritems(leaf)) {
4448 if (ins_nr > 0) {
4449 ret = copy_items(trans, inode, dst_path, path,
4450 &last_extent, start_slot,
4451 ins_nr, 1, 0);
4452 if (ret < 0)
4453 goto out;
4454 ins_nr = 0;
4455 }
4456 ret = btrfs_next_leaf(root, path);
4457 if (ret < 0)
4458 goto out;
4459 if (ret > 0) {
4460 ret = 0;
4461 break;
4462 }
4463 continue;
4464 }
4465
4466 btrfs_item_key_to_cpu(leaf, &key, slot);
4467 if (key.objectid > ino)
4468 break;
4469 if (WARN_ON_ONCE(key.objectid < ino) ||
4470 key.type < BTRFS_EXTENT_DATA_KEY ||
4471 key.offset < i_size) {
4472 path->slots[0]++;
4473 continue;
4474 }
4475 if (last_extent == (u64)-1) {
4476 last_extent = key.offset;
4477 /*
4478 * Avoid logging extent items logged in past fsync calls
4479 * and leading to duplicate keys in the log tree.
4480 */
4481 do {
4482 ret = btrfs_truncate_inode_items(trans,
4483 root->log_root,
4484 &inode->vfs_inode,
4485 i_size,
4486 BTRFS_EXTENT_DATA_KEY);
4487 } while (ret == -EAGAIN);
4488 if (ret)
4489 goto out;
4490 }
4491 if (ins_nr == 0)
4492 start_slot = slot;
4493 ins_nr++;
4494 path->slots[0]++;
4495 if (!dst_path) {
4496 dst_path = btrfs_alloc_path();
4497 if (!dst_path) {
4498 ret = -ENOMEM;
4499 goto out;
4500 }
4501 }
4502 }
4503 if (ins_nr > 0) {
4504 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4505 start_slot, ins_nr, 1, 0);
4506 if (ret > 0)
4507 ret = 0;
4508 }
4509 out:
4510 btrfs_release_path(path);
4511 btrfs_free_path(dst_path);
4512 return ret;
4513 }
4514
4515 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4516 struct btrfs_root *root,
4517 struct btrfs_inode *inode,
4518 struct btrfs_path *path,
4519 struct btrfs_log_ctx *ctx,
4520 const u64 start,
4521 const u64 end)
4522 {
4523 struct extent_map *em, *n;
4524 struct list_head extents;
4525 struct extent_map_tree *tree = &inode->extent_tree;
4526 u64 test_gen;
4527 int ret = 0;
4528 int num = 0;
4529
4530 INIT_LIST_HEAD(&extents);
4531
4532 write_lock(&tree->lock);
4533 test_gen = root->fs_info->last_trans_committed;
4534
4535 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4536 /*
4537 * Skip extents outside our logging range. It's important to do
4538 * it for correctness because if we don't ignore them, we may
4539 * log them before their ordered extent completes, and therefore
4540 * we could log them without logging their respective checksums
4541 * (the checksum items are added to the csum tree at the very
4542 * end of btrfs_finish_ordered_io()). Also leave such extents
4543 * outside of our range in the list, since we may have another
4544 * ranged fsync in the near future that needs them. If an extent
4545 * outside our range corresponds to a hole, log it to avoid
4546 * leaving gaps between extents (fsck will complain when we are
4547 * not using the NO_HOLES feature).
4548 */
4549 if ((em->start > end || em->start + em->len <= start) &&
4550 em->block_start != EXTENT_MAP_HOLE)
4551 continue;
4552
4553 list_del_init(&em->list);
4554 /*
4555 * Just an arbitrary number; this can be really CPU intensive
4556 * once we start getting a lot of extents, and really once we
4557 * have a bunch of extents we just want to commit since it will
4558 * be faster.
4559 */
4560 if (++num > 32768) {
4561 list_del_init(&tree->modified_extents);
4562 ret = -EFBIG;
4563 goto process;
4564 }
4565
4566 if (em->generation <= test_gen)
4567 continue;
4568
4569 /* We log prealloc extents beyond eof later. */
4570 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4571 em->start >= i_size_read(&inode->vfs_inode))
4572 continue;
4573
4574 /* Need a ref to keep it from getting evicted from cache */
4575 refcount_inc(&em->refs);
4576 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4577 list_add_tail(&em->list, &extents);
4578 num++;
4579 }
4580
4581 list_sort(NULL, &extents, extent_cmp);
4582 process:
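/*
 * Log each extent in increasing file offset order, dropping the tree
 * lock around log_one_extent(). If an error occurred, only clear the
 * logging flag and drop the reference taken above.
 */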
4583 while (!list_empty(&extents)) {
4584 em = list_entry(extents.next, struct extent_map, list);
4585
4586 list_del_init(&em->list);
4587
4588 /*
4589 * If we had an error we just need to delete everybody from our
4590 * private list.
4591 */
4592 if (ret) {
4593 clear_em_logging(tree, em);
4594 free_extent_map(em);
4595 continue;
4596 }
4597
4598 write_unlock(&tree->lock);
4599
4600 ret = log_one_extent(trans, inode, root, em, path, ctx);
4601 write_lock(&tree->lock);
4602 clear_em_logging(tree, em);
4603 free_extent_map(em);
4604 }
4605 WARN_ON(!list_empty(&extents));
4606 write_unlock(&tree->lock);
4607
4608 btrfs_release_path(path);
4609 if (!ret)
4610 ret = btrfs_log_prealloc_extents(trans, inode, path);
4611
4612 return ret;
4613 }
4614
4615 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4616 struct btrfs_path *path, u64 *size_ret)
4617 {
4618 struct btrfs_key key;
4619 int ret;
4620
4621 key.objectid = btrfs_ino(inode);
4622 key.type = BTRFS_INODE_ITEM_KEY;
4623 key.offset = 0;
4624
4625 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4626 if (ret < 0) {
4627 return ret;
4628 } else if (ret > 0) {
4629 *size_ret = 0;
4630 } else {
4631 struct btrfs_inode_item *item;
4632
4633 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4634 struct btrfs_inode_item);
4635 *size_ret = btrfs_inode_size(path->nodes[0], item);
4636 /*
4637 * If the in-memory inode's i_size is smaller than the inode
4638 * size stored in the btree, return the inode's i_size, so
4639 * that we get a correct inode size after replaying the log
4640 * when before a power failure we had a shrinking truncate
4641 * followed by addition of a new name (rename / new hard link).
4642 * Otherwise return the inode size from the btree, to avoid
4643 * data loss when replaying a log due to previously doing a
4644 * write that expands the inode's size and logging a new name
4645 * immediately after.
4646 */
4647 if (*size_ret > inode->vfs_inode.i_size)
4648 *size_ret = inode->vfs_inode.i_size;
4649 }
4650
4651 btrfs_release_path(path);
4652 return 0;
4653 }
4654
4655 /*
4656 * At the moment we always log all xattrs. This is to figure out at log replay
4657 * time which xattrs must have their deletion replayed. If an xattr is missing
4658 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4659 * because if an xattr is deleted, the inode is fsynced and a power failure
4660 * happens, causing the log to be replayed the next time the fs is mounted,
4661 * then we want the xattr to not exist anymore (same behaviour as other filesystems
4662 * with a journal, ext3/4, xfs, f2fs, etc).
4663 */
4664 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4665 struct btrfs_root *root,
4666 struct btrfs_inode *inode,
4667 struct btrfs_path *path,
4668 struct btrfs_path *dst_path)
4669 {
4670 int ret;
4671 struct btrfs_key key;
4672 const u64 ino = btrfs_ino(inode);
4673 int ins_nr = 0;
4674 int start_slot = 0;
4675
4676 key.objectid = ino;
4677 key.type = BTRFS_XATTR_ITEM_KEY;
4678 key.offset = 0;
4679
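/*
 * Walk all xattr items of the inode and copy contiguous runs of them
 * to the log in batches with copy_items().
 */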
4680 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4681 if (ret < 0)
4682 return ret;
4683
4684 while (true) {
4685 int slot = path->slots[0];
4686 struct extent_buffer *leaf = path->nodes[0];
4687 int nritems = btrfs_header_nritems(leaf);
4688
4689 if (slot >= nritems) {
4690 if (ins_nr > 0) {
4691 u64 last_extent = 0;
4692
4693 ret = copy_items(trans, inode, dst_path, path,
4694 &last_extent, start_slot,
4695 ins_nr, 1, 0);
4696 /* can't be 1, extent items aren't processed */
4697 ASSERT(ret <= 0);
4698 if (ret < 0)
4699 return ret;
4700 ins_nr = 0;
4701 }
4702 ret = btrfs_next_leaf(root, path);
4703 if (ret < 0)
4704 return ret;
4705 else if (ret > 0)
4706 break;
4707 continue;
4708 }
4709
4710 btrfs_item_key_to_cpu(leaf, &key, slot);
4711 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4712 break;
4713
4714 if (ins_nr == 0)
4715 start_slot = slot;
4716 ins_nr++;
4717 path->slots[0]++;
4718 cond_resched();
4719 }
4720 if (ins_nr > 0) {
4721 u64 last_extent = 0;
4722
4723 ret = copy_items(trans, inode, dst_path, path,
4724 &last_extent, start_slot,
4725 ins_nr, 1, 0);
4726 /* can't be 1, extent items aren't processed */
4727 ASSERT(ret <= 0);
4728 if (ret < 0)
4729 return ret;
4730 }
4731
4732 return 0;
4733 }
4734
4735 /*
4736 * If the no holes feature is enabled we need to make sure any hole between the
4737 * last extent and the i_size of our inode is explicitly marked in the log. This
4738 * is to make sure that doing something like:
4739 *
4740 * 1) create file with 128Kb of data
4741 * 2) truncate file to 64Kb
4742 * 3) truncate file to 256Kb
4743 * 4) fsync file
4744 * 5) <crash/power failure>
4745 * 6) mount fs and trigger log replay
4746 *
4747 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4748 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4749 * file correspond to a hole. The presence of explicit holes in a log tree is
4750 * what guarantees that log replay will remove/adjust file extent items in the
4751 * fs/subvol tree.
4752 *
4753 * Here we do not need to care about holes between extents; that is already done
4754 * by copy_items(). We also only need to do this in the full sync path, where we
4755 * look up extents from the fs/subvol tree only. In the fast path case, we
4756 * walk the list of modified extent maps and if any represents a hole, we
4757 * insert a corresponding extent representing a hole in the log tree.
4758 */
4759 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4760 struct btrfs_root *root,
4761 struct btrfs_inode *inode,
4762 struct btrfs_path *path)
4763 {
4764 struct btrfs_fs_info *fs_info = root->fs_info;
4765 int ret;
4766 struct btrfs_key key;
4767 u64 hole_start;
4768 u64 hole_size;
4769 struct extent_buffer *leaf;
4770 struct btrfs_root *log = root->log_root;
4771 const u64 ino = btrfs_ino(inode);
4772 const u64 i_size = i_size_read(&inode->vfs_inode);
4773
4774 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4775 return 0;
4776
4777 key.objectid = ino;
4778 key.type = BTRFS_EXTENT_DATA_KEY;
4779 key.offset = (u64)-1;
4780
4781 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4782 ASSERT(ret != 0);
4783 if (ret < 0)
4784 return ret;
4785
4786 ASSERT(path->slots[0] > 0);
4787 path->slots[0]--;
4788 leaf = path->nodes[0];
4789 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4790
4791 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4792 /* inode does not have any extents */
4793 hole_start = 0;
4794 hole_size = i_size;
4795 } else {
4796 struct btrfs_file_extent_item *extent;
4797 u64 len;
4798
4799 /*
4800 * If there's an extent beyond i_size, an explicit hole was
4801 * already inserted by copy_items().
4802 */
4803 if (key.offset >= i_size)
4804 return 0;
4805
4806 extent = btrfs_item_ptr(leaf, path->slots[0],
4807 struct btrfs_file_extent_item);
4808
4809 if (btrfs_file_extent_type(leaf, extent) ==
4810 BTRFS_FILE_EXTENT_INLINE)
4811 return 0;
4812
4813 len = btrfs_file_extent_num_bytes(leaf, extent);
4814 /* Last extent goes beyond i_size, no need to log a hole. */
4815 if (key.offset + len > i_size)
4816 return 0;
4817 hole_start = key.offset + len;
4818 hole_size = i_size - hole_start;
4819 }
4820 btrfs_release_path(path);
4821
4822 /* Last extent ends at i_size. */
4823 if (hole_size == 0)
4824 return 0;
4825
4826 hole_size = ALIGN(hole_size, fs_info->sectorsize);
4827 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4828 hole_size, 0, hole_size, 0, 0, 0);
4829 return ret;
4830 }
4831
4832 /*
4833 * When we are logging a new inode X, check whether it has a reference that
4834 * matches a reference from some other inode Y created in a past transaction
4835 * and renamed in the current transaction. If we don't do this, then at
4836 * log replay time we can lose inode Y (and all its files if it's a directory):
4837 *
4838 * mkdir /mnt/x
4839 * echo "hello world" > /mnt/x/foobar
4840 * sync
4841 * mv /mnt/x /mnt/y
4842 * mkdir /mnt/x # or touch /mnt/x
4843 * xfs_io -c fsync /mnt/x
4844 * <power fail>
4845 * mount fs, trigger log replay
4846 *
4847 * After the log replay procedure, we would lose the first directory and all its
4848 * files (file foobar).
4849 * For the case where inode Y is not a directory we simply end up losing it:
4850 *
4851 * echo "123" > /mnt/foo
4852 * sync
4853 * mv /mnt/foo /mnt/bar
4854 * echo "abc" > /mnt/foo
4855 * xfs_io -c fsync /mnt/foo
4856 * <power fail>
4857 *
4858 * We also need this for cases where a snapshot entry is replaced by some other
4859 * entry (file or directory) otherwise we end up with an unreplayable log due to
4860 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4861 * if it were a regular entry:
4862 *
4863 * mkdir /mnt/x
4864 * btrfs subvolume snapshot /mnt /mnt/x/snap
4865 * btrfs subvolume delete /mnt/x/snap
4866 * rmdir /mnt/x
4867 * mkdir /mnt/x
4868 * fsync /mnt/x or fsync some new file inside it
4869 * <power fail>
4870 *
4871 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4872 * the same transaction.
4873 */
4874 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4875 const int slot,
4876 const struct btrfs_key *key,
4877 struct btrfs_inode *inode,
4878 u64 *other_ino, u64 *other_parent)
4879 {
4880 int ret;
4881 struct btrfs_path *search_path;
4882 char *name = NULL;
4883 u32 name_len = 0;
4884 u32 item_size = btrfs_item_size_nr(eb, slot);
4885 u32 cur_offset = 0;
4886 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4887
4888 search_path = btrfs_alloc_path();
4889 if (!search_path)
4890 return -ENOMEM;
4891 search_path->search_commit_root = 1;
4892 search_path->skip_locking = 1;
4893
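/*
 * Inode ref and extref items pack several (parent, name) entries back
 * to back; check each name against the matching directory entry in the
 * commit root.
 */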
4894 while (cur_offset < item_size) {
4895 u64 parent;
4896 u32 this_name_len;
4897 u32 this_len;
4898 unsigned long name_ptr;
4899 struct btrfs_dir_item *di;
4900
4901 if (key->type == BTRFS_INODE_REF_KEY) {
4902 struct btrfs_inode_ref *iref;
4903
4904 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4905 parent = key->offset;
4906 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4907 name_ptr = (unsigned long)(iref + 1);
4908 this_len = sizeof(*iref) + this_name_len;
4909 } else {
4910 struct btrfs_inode_extref *extref;
4911
4912 extref = (struct btrfs_inode_extref *)(ptr +
4913 cur_offset);
4914 parent = btrfs_inode_extref_parent(eb, extref);
4915 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4916 name_ptr = (unsigned long)&extref->name;
4917 this_len = sizeof(*extref) + this_name_len;
4918 }
4919
4920 if (this_name_len > name_len) {
4921 char *new_name;
4922
4923 new_name = krealloc(name, this_name_len, GFP_NOFS);
4924 if (!new_name) {
4925 ret = -ENOMEM;
4926 goto out;
4927 }
4928 name_len = this_name_len;
4929 name = new_name;
4930 }
4931
4932 read_extent_buffer(eb, name, name_ptr, this_name_len);
4933 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4934 parent, name, this_name_len, 0);
4935 if (di && !IS_ERR(di)) {
4936 struct btrfs_key di_key;
4937
4938 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4939 di, &di_key);
4940 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4941 if (di_key.objectid != key->objectid) {
4942 ret = 1;
4943 *other_ino = di_key.objectid;
4944 *other_parent = parent;
4945 } else {
4946 ret = 0;
4947 }
4948 } else {
4949 ret = -EAGAIN;
4950 }
4951 goto out;
4952 } else if (IS_ERR(di)) {
4953 ret = PTR_ERR(di);
4954 goto out;
4955 }
4956 btrfs_release_path(search_path);
4957
4958 cur_offset += this_len;
4959 }
4960 ret = 0;
4961 out:
4962 btrfs_free_path(search_path);
4963 kfree(name);
4964 return ret;
4965 }
4966
4967 struct btrfs_ino_list {
4968 u64 ino;
4969 u64 parent;
4970 struct list_head list;
4971 };
4972
4973 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4974 struct btrfs_root *root,
4975 struct btrfs_path *path,
4976 struct btrfs_log_ctx *ctx,
4977 u64 ino, u64 parent)
4978 {
4979 struct btrfs_ino_list *ino_elem;
4980 LIST_HEAD(inode_list);
4981 int ret = 0;
4982
4983 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4984 if (!ino_elem)
4985 return -ENOMEM;
4986 ino_elem->ino = ino;
4987 ino_elem->parent = parent;
4988 list_add_tail(&ino_elem->list, &inode_list);
4989
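/*
 * Process the conflicting inodes breadth-first: logging one inode may
 * reveal further conflicting names, which are appended to inode_list
 * and handled on later iterations.
 */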
4990 while (!list_empty(&inode_list)) {
4991 struct btrfs_fs_info *fs_info = root->fs_info;
4992 struct btrfs_key key;
4993 struct inode *inode;
4994
4995 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
4996 list);
4997 ino = ino_elem->ino;
4998 parent = ino_elem->parent;
4999 list_del(&ino_elem->list);
5000 kfree(ino_elem);
5001 if (ret)
5002 continue;
5003
5004 btrfs_release_path(path);
5005
5006 key.objectid = ino;
5007 key.type = BTRFS_INODE_ITEM_KEY;
5008 key.offset = 0;
5009 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
5010 /*
5011 * If the other inode that had a conflicting dir entry was
5012 * deleted in the current transaction, we need to log its parent
5013 * directory.
5014 */
5015 if (IS_ERR(inode)) {
5016 ret = PTR_ERR(inode);
5017 if (ret == -ENOENT) {
5018 key.objectid = parent;
5019 inode = btrfs_iget(fs_info->sb, &key, root,
5020 NULL);
5021 if (IS_ERR(inode)) {
5022 ret = PTR_ERR(inode);
5023 } else {
5024 ret = btrfs_log_inode(trans, root,
5025 BTRFS_I(inode),
5026 LOG_OTHER_INODE_ALL,
5027 0, LLONG_MAX, ctx);
5028 btrfs_add_delayed_iput(inode);
5029 }
5030 }
5031 continue;
5032 }
5033 /*
5034 * We are safe logging the other inode without acquiring its
5035 * lock as long as we log with the LOG_INODE_EXISTS mode. We
5036 * are safe against concurrent renames of the other inode as
5037 * well because during a rename we pin the log and update the
5038 * log with the new name before we unpin it.
5039 */
5040 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5041 LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
5042 if (ret) {
5043 btrfs_add_delayed_iput(inode);
5044 continue;
5045 }
5046
5047 key.objectid = ino;
5048 key.type = BTRFS_INODE_REF_KEY;
5049 key.offset = 0;
5050 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5051 if (ret < 0) {
5052 btrfs_add_delayed_iput(inode);
5053 continue;
5054 }
5055
5056 while (true) {
5057 struct extent_buffer *leaf = path->nodes[0];
5058 int slot = path->slots[0];
5059 u64 other_ino = 0;
5060 u64 other_parent = 0;
5061
5062 if (slot >= btrfs_header_nritems(leaf)) {
5063 ret = btrfs_next_leaf(root, path);
5064 if (ret < 0) {
5065 break;
5066 } else if (ret > 0) {
5067 ret = 0;
5068 break;
5069 }
5070 continue;
5071 }
5072
5073 btrfs_item_key_to_cpu(leaf, &key, slot);
5074 if (key.objectid != ino ||
5075 (key.type != BTRFS_INODE_REF_KEY &&
5076 key.type != BTRFS_INODE_EXTREF_KEY)) {
5077 ret = 0;
5078 break;
5079 }
5080
5081 ret = btrfs_check_ref_name_override(leaf, slot, &key,
5082 BTRFS_I(inode), &other_ino,
5083 &other_parent);
5084 if (ret < 0)
5085 break;
5086 if (ret > 0) {
5087 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5088 if (!ino_elem) {
5089 ret = -ENOMEM;
5090 break;
5091 }
5092 ino_elem->ino = other_ino;
5093 ino_elem->parent = other_parent;
5094 list_add_tail(&ino_elem->list, &inode_list);
5095 ret = 0;
5096 }
5097 path->slots[0]++;
5098 }
5099 btrfs_add_delayed_iput(inode);
5100 }
5101
5102 return ret;
5103 }
5104
5105 /* log a single inode in the tree log.
5106 * At least one parent directory for this inode must exist in the tree
5107 * or be logged already.
5108 *
5109 * Any items from this inode changed by the current transaction are copied
5110 * to the log tree. An extra reference is taken on any extents in this
5111 * file, allowing us to avoid a whole pile of corner cases around logging
5112 * blocks that have been removed from the tree.
5113 *
5114 * See LOG_INODE_ALL and related defines for a description of what inode_only
5115 * does.
5116 *
5117 * This handles both files and directories.
5118 */
5119 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5120 struct btrfs_root *root, struct btrfs_inode *inode,
5121 int inode_only,
5122 const loff_t start,
5123 const loff_t end,
5124 struct btrfs_log_ctx *ctx)
5125 {
5126 struct btrfs_fs_info *fs_info = root->fs_info;
5127 struct btrfs_path *path;
5128 struct btrfs_path *dst_path;
5129 struct btrfs_key min_key;
5130 struct btrfs_key max_key;
5131 struct btrfs_root *log = root->log_root;
5132 u64 last_extent = 0;
5133 int err = 0;
5134 int ret;
5135 int nritems;
5136 int ins_start_slot = 0;
5137 int ins_nr;
5138 bool fast_search = false;
5139 u64 ino = btrfs_ino(inode);
5140 struct extent_map_tree *em_tree = &inode->extent_tree;
5141 u64 logged_isize = 0;
5142 bool need_log_inode_item = true;
5143 bool xattrs_logged = false;
5144 bool recursive_logging = false;
5145
5146 path = btrfs_alloc_path();
5147 if (!path)
5148 return -ENOMEM;
5149 dst_path = btrfs_alloc_path();
5150 if (!dst_path) {
5151 btrfs_free_path(path);
5152 return -ENOMEM;
5153 }
5154
5155 min_key.objectid = ino;
5156 min_key.type = BTRFS_INODE_ITEM_KEY;
5157 min_key.offset = 0;
5158
5159 max_key.objectid = ino;
5160
5161
5162 /* today the code can only do partial logging of directories */
5163 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5164 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5165 &inode->runtime_flags) &&
5166 inode_only >= LOG_INODE_EXISTS))
5167 max_key.type = BTRFS_XATTR_ITEM_KEY;
5168 else
5169 max_key.type = (u8)-1;
5170 max_key.offset = (u64)-1;
5171
5172 /*
5173 * Only run delayed items if we are a dir or a new file.
5174 * Otherwise commit the delayed inode only, which is needed in
5175 * order for the log replay code to mark inodes for link count
5176 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
5177 */
5178 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5179 inode->generation > fs_info->last_trans_committed)
5180 ret = btrfs_commit_inode_delayed_items(trans, inode);
5181 else
5182 ret = btrfs_commit_inode_delayed_inode(inode);
5183
5184 if (ret) {
5185 btrfs_free_path(path);
5186 btrfs_free_path(dst_path);
5187 return ret;
5188 }
5189
5190 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5191 recursive_logging = true;
5192 if (inode_only == LOG_OTHER_INODE)
5193 inode_only = LOG_INODE_EXISTS;
5194 else
5195 inode_only = LOG_INODE_ALL;
5196 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5197 } else {
5198 mutex_lock(&inode->log_mutex);
5199 }
5200
5201 /*
5202 * a brute force approach to making sure we get the most uptodate
5203 * copies of everything.
5204 */
5205 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5206 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5207
5208 if (inode_only == LOG_INODE_EXISTS)
5209 max_key_type = BTRFS_XATTR_ITEM_KEY;
5210 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5211 } else {
5212 if (inode_only == LOG_INODE_EXISTS) {
5213 /*
5214 * Make sure the new inode item we write to the log has
5215 * the same isize as the current one (if it exists).
5216 * This is necessary to prevent data loss after log
5217 * replay, and also to prevent doing a wrong expanding
5218 * truncate - for e.g. create file, write 4K into offset
5219 * 0, fsync, write 4K into offset 4096, add hard link,
5220 * fsync some other file (to sync log), power fail - if
5221 * we use the inode's current i_size, after log replay
5222 * we get a 8Kb file, with the last 4Kb extent as a hole
5223 * (zeroes), as if an expanding truncate happened,
5224 * instead of getting a file of 4Kb only.
5225 */
5226 err = logged_inode_size(log, inode, path, &logged_isize);
5227 if (err)
5228 goto out_unlock;
5229 }
5230 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5231 &inode->runtime_flags)) {
5232 if (inode_only == LOG_INODE_EXISTS) {
5233 max_key.type = BTRFS_XATTR_ITEM_KEY;
5234 ret = drop_objectid_items(trans, log, path, ino,
5235 max_key.type);
5236 } else {
5237 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5238 &inode->runtime_flags);
5239 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5240 &inode->runtime_flags);
5241 while (1) {
5242 ret = btrfs_truncate_inode_items(trans,
5243 log, &inode->vfs_inode, 0, 0);
5244 if (ret != -EAGAIN)
5245 break;
5246 }
5247 }
5248 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5249 &inode->runtime_flags) ||
5250 inode_only == LOG_INODE_EXISTS) {
5251 if (inode_only == LOG_INODE_ALL)
5252 fast_search = true;
5253 max_key.type = BTRFS_XATTR_ITEM_KEY;
5254 ret = drop_objectid_items(trans, log, path, ino,
5255 max_key.type);
5256 } else {
5257 if (inode_only == LOG_INODE_ALL)
5258 fast_search = true;
5259 goto log_extents;
5260 }
5261
5262 }
5263 if (ret) {
5264 err = ret;
5265 goto out_unlock;
5266 }
5267
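/*
 * Copy to the log every item of the inode changed in the current
 * transaction, using btrfs_search_forward() and batching runs of
 * contiguous leaf slots into single copy_items() calls.
 */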
5268 while (1) {
5269 ins_nr = 0;
5270 ret = btrfs_search_forward(root, &min_key,
5271 path, trans->transid);
5272 if (ret < 0) {
5273 err = ret;
5274 goto out_unlock;
5275 }
5276 if (ret != 0)
5277 break;
5278 again:
5279 /* note, ins_nr might be > 0 here, cleanup outside the loop */
5280 if (min_key.objectid != ino)
5281 break;
5282 if (min_key.type > max_key.type)
5283 break;
5284
5285 if (min_key.type == BTRFS_INODE_ITEM_KEY)
5286 need_log_inode_item = false;
5287
5288 if ((min_key.type == BTRFS_INODE_REF_KEY ||
5289 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
5290 inode->generation == trans->transid &&
5291 !recursive_logging) {
5292 u64 other_ino = 0;
5293 u64 other_parent = 0;
5294
5295 ret = btrfs_check_ref_name_override(path->nodes[0],
5296 path->slots[0], &min_key, inode,
5297 &other_ino, &other_parent);
5298 if (ret < 0) {
5299 err = ret;
5300 goto out_unlock;
5301 } else if (ret > 0 && ctx &&
5302 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5303 if (ins_nr > 0) {
5304 ins_nr++;
5305 } else {
5306 ins_nr = 1;
5307 ins_start_slot = path->slots[0];
5308 }
5309 ret = copy_items(trans, inode, dst_path, path,
5310 &last_extent, ins_start_slot,
5311 ins_nr, inode_only,
5312 logged_isize);
5313 if (ret < 0) {
5314 err = ret;
5315 goto out_unlock;
5316 }
5317 ins_nr = 0;
5318
5319 err = log_conflicting_inodes(trans, root, path,
5320 ctx, other_ino, other_parent);
5321 if (err)
5322 goto out_unlock;
5323 btrfs_release_path(path);
5324 goto next_key;
5325 }
5326 }
5327
5328 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5329 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
5330 if (ins_nr == 0)
5331 goto next_slot;
5332 ret = copy_items(trans, inode, dst_path, path,
5333 &last_extent, ins_start_slot,
5334 ins_nr, inode_only, logged_isize);
5335 if (ret < 0) {
5336 err = ret;
5337 goto out_unlock;
5338 }
5339 ins_nr = 0;
5340 if (ret) {
5341 btrfs_release_path(path);
5342 continue;
5343 }
5344 goto next_slot;
5345 }
5346
5347 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5348 ins_nr++;
5349 goto next_slot;
5350 } else if (!ins_nr) {
5351 ins_start_slot = path->slots[0];
5352 ins_nr = 1;
5353 goto next_slot;
5354 }
5355
5356 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5357 ins_start_slot, ins_nr, inode_only,
5358 logged_isize);
5359 if (ret < 0) {
5360 err = ret;
5361 goto out_unlock;
5362 }
5363 if (ret) {
5364 ins_nr = 0;
5365 btrfs_release_path(path);
5366 continue;
5367 }
5368 ins_nr = 1;
5369 ins_start_slot = path->slots[0];
5370 next_slot:
5371
5372 nritems = btrfs_header_nritems(path->nodes[0]);
5373 path->slots[0]++;
5374 if (path->slots[0] < nritems) {
5375 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
5376 path->slots[0]);
5377 goto again;
5378 }
5379 if (ins_nr) {
5380 ret = copy_items(trans, inode, dst_path, path,
5381 &last_extent, ins_start_slot,
5382 ins_nr, inode_only, logged_isize);
5383 if (ret < 0) {
5384 err = ret;
5385 goto out_unlock;
5386 }
5387 ret = 0;
5388 ins_nr = 0;
5389 }
5390 btrfs_release_path(path);
5391 next_key:
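/*
 * Advance to the next possible key: bump the offset first, then the
 * type (up to max_key.type), and stop once both are exhausted.
 */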
5392 if (min_key.offset < (u64)-1) {
5393 min_key.offset++;
5394 } else if (min_key.type < max_key.type) {
5395 min_key.type++;
5396 min_key.offset = 0;
5397 } else {
5398 break;
5399 }
5400 }
5401 if (ins_nr) {
5402 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5403 ins_start_slot, ins_nr, inode_only,
5404 logged_isize);
5405 if (ret < 0) {
5406 err = ret;
5407 goto out_unlock;
5408 }
5409 ret = 0;
5410 ins_nr = 0;
5411 }
5412
5413 btrfs_release_path(path);
5414 btrfs_release_path(dst_path);
5415 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5416 if (err)
5417 goto out_unlock;
5418 xattrs_logged = true;
5419 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5420 btrfs_release_path(path);
5421 btrfs_release_path(dst_path);
5422 err = btrfs_log_trailing_hole(trans, root, inode, path);
5423 if (err)
5424 goto out_unlock;
5425 }
5426 log_extents:
5427 btrfs_release_path(path);
5428 btrfs_release_path(dst_path);
5429 if (need_log_inode_item) {
5430 err = log_inode_item(trans, log, dst_path, inode);
5431 if (!err && !xattrs_logged) {
5432 err = btrfs_log_all_xattrs(trans, root, inode, path,
5433 dst_path);
5434 btrfs_release_path(path);
5435 }
5436 if (err)
5437 goto out_unlock;
5438 }
5439 if (fast_search) {
5440 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5441 ctx, start, end);
5442 if (ret) {
5443 err = ret;
5444 goto out_unlock;
5445 }
5446 } else if (inode_only == LOG_INODE_ALL) {
5447 struct extent_map *em, *n;
5448
5449 write_lock(&em_tree->lock);
5450 /*
5451 * We can't just remove every em if we're called for a ranged
5452 * fsync - that is, one that doesn't cover the whole possible
5453 * file range (0 to LLONG_MAX). This is because we can have
5454 * em's that fall outside the range we're logging and therefore
5455 * their ordered operations haven't completed yet
5456 * (btrfs_finish_ordered_io() not invoked yet). This means we
5457 * didn't get their respective file extent item in the fs/subvol
5458 * tree yet, and need to let the next fast fsync (one which
5459 * consults the list of modified extent maps) find the em so
5460 * that it logs a matching file extent item and waits for the
5461 * respective ordered operation to complete (if it's still
5462 * running).
5463 *
5464 * Removing every em outside the range we're logging would make
5465 * the next fast fsync not log their matching file extent items,
5466 * therefore making us lose data after a log replay.
5467 */
5468 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5469 list) {
5470 const u64 mod_end = em->mod_start + em->mod_len - 1;
5471
5472 if (em->mod_start >= start && mod_end <= end)
5473 list_del_init(&em->list);
5474 }
5475 write_unlock(&em_tree->lock);
5476 }
5477
5478 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5479 ret = log_directory_changes(trans, root, inode, path, dst_path,
5480 ctx);
5481 if (ret) {
5482 err = ret;
5483 goto out_unlock;
5484 }
5485 }
5486
5487 /*
5488 * Don't update last_log_commit if we logged that an inode exists after
5489 * it was loaded to memory (full_sync bit set).
5490 * This is to prevent data loss when we do a write to the inode, then
5491 * the inode gets evicted after all delalloc was flushed, then we log
5492 * it exists (due to a rename for example) and then fsync it. This last
5493 * fsync would do nothing (not logging the extents previously written).
5494 */
5495 spin_lock(&inode->lock);
5496 inode->logged_trans = trans->transid;
5497 if (inode_only != LOG_INODE_EXISTS ||
5498 !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5499 inode->last_log_commit = inode->last_sub_trans;
5500 spin_unlock(&inode->lock);
5501 out_unlock:
5502 mutex_unlock(&inode->log_mutex);
5503
5504 btrfs_free_path(path);
5505 btrfs_free_path(dst_path);
5506 return err;
5507 }
5508
5509 /*
5510 * Check if we must fall back to a transaction commit when logging an inode.
5511 * This must be called after logging the inode and is used only in the context
5512 * when fsyncing an inode requires logging some other inode - in which case we
5513 * can't lock the i_mutex of each of the other inodes we need to log, as that
5514 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5515 * log inodes up or down in the hierarchy) or rename operations for example. So
5516 * we take the log_mutex of the inode after we have logged it and then check for
5517 * its last_unlink_trans value - this is safe because any task setting
5518 * last_unlink_trans must take the log_mutex and it must do this before it does
5519 * the actual unlink operation, so if we do this check before a concurrent task
5520 * sets last_unlink_trans it means we've logged a consistent version/state of
5521 * all the inode items, otherwise we are not sure and must do a transaction
5522 * commit (the concurrent task might have only updated last_unlink_trans before
5523 * we logged the inode or it might have also done the unlink).
5524 */
5525 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5526 struct btrfs_inode *inode)
5527 {
5528 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5529 bool ret = false;
5530
5531 mutex_lock(&inode->log_mutex);
5532 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5533 /*
5534 * Make sure any commits to the log are forced to be full
5535 * commits.
5536 */
5537 btrfs_set_log_full_commit(trans);
5538 ret = true;
5539 }
5540 mutex_unlock(&inode->log_mutex);
5541
5542 return ret;
5543 }
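/*
 * Illustrative usage sketch (this is the pattern followed by callers such as
 * log_new_dir_dentries() and btrfs_log_all_parents() below): log the inode
 * first, then check whether a transaction commit is required.
 *
 *	ret = btrfs_log_inode(trans, root, inode, log_mode, 0, LLONG_MAX, ctx);
 *	if (!ret && btrfs_must_commit_transaction(trans, inode))
 *		ret = 1;	(caller then falls back to a transaction commit)
 */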
5544
5545 /*
5546 * follow the dentry parent pointers up the chain and see if any
5547 * of the directories in it require a full commit before they can
5548 * be logged. Returns zero if nothing special needs to be done or 1 if
5549 * a full commit is required.
5550 */
5551 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5552 struct btrfs_inode *inode,
5553 struct dentry *parent,
5554 struct super_block *sb,
5555 u64 last_committed)
5556 {
5557 int ret = 0;
5558 struct dentry *old_parent = NULL;
5559
5560 /*
5561 * for regular files, if its inode is already on disk, we don't
5562 * have to worry about the parents at all. This is because
5563 * we can use the last_unlink_trans field to record renames
5564 * and other fun in this file.
5565 */
5566 if (S_ISREG(inode->vfs_inode.i_mode) &&
5567 inode->generation <= last_committed &&
5568 inode->last_unlink_trans <= last_committed)
5569 goto out;
5570
5571 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5572 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5573 goto out;
5574 inode = BTRFS_I(d_inode(parent));
5575 }
5576
5577 while (1) {
5578 if (btrfs_must_commit_transaction(trans, inode)) {
5579 ret = 1;
5580 break;
5581 }
5582
5583 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5584 break;
5585
5586 if (IS_ROOT(parent)) {
5587 inode = BTRFS_I(d_inode(parent));
5588 if (btrfs_must_commit_transaction(trans, inode))
5589 ret = 1;
5590 break;
5591 }
5592
5593 parent = dget_parent(parent);
5594 dput(old_parent);
5595 old_parent = parent;
5596 inode = BTRFS_I(d_inode(parent));
5597
5598 }
5599 dput(old_parent);
5600 out:
5601 return ret;
5602 }
5603
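/*
 * Queue element used by log_new_dir_dentries(): it holds the inode number of
 * a directory whose new dentries still have to be logged, so the directory
 * tree can be processed iteratively instead of by recursion.
 */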
5604 struct btrfs_dir_list {
5605 u64 ino;
5606 struct list_head list;
5607 };
5608
5609 /*
5610 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5611 * details about why it is needed.
5612 * This is a recursive operation - if an existing dentry corresponds to a
5613 * directory, that directory's new entries are logged too (same behaviour as
5614 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5615 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5616 * complains about the following circular lock dependency / possible deadlock:
5617 *
5618 *             CPU0                                    CPU1
5619 *             ----                                    ----
5620 * lock(&type->i_mutex_dir_key#3/2);
5621 *                                        lock(sb_internal#2);
5622 *                                        lock(&type->i_mutex_dir_key#3/2);
5623 * lock(&sb->s_type->i_mutex_key#14);
5624 *
5625 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5626 * sb_start_intwrite() in btrfs_start_transaction().
5627 * Not locking i_mutex of the inodes is still safe because:
5628 *
5629 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5630 * that while logging the inode new references (names) are added or removed
5631 * from the inode, leaving the logged inode item with a link count that does
5632 * not match the number of logged inode reference items. This is fine because
5633 * at log replay time we compute the real number of links and correct the
5634 * link count in the inode item (see replay_one_buffer() and
5635 * link_to_fixup_dir());
5636 *
5637 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5638 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5639 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5640 * has a size that doesn't match the sum of the lengths of all the logged
5641 * names. This does not result in a problem because if a dir_item key is
5642 * logged but its matching dir_index key is not logged, at log replay time we
5643 * don't use it to replay the respective name (see replay_one_name()). On the
5644 * other hand if only the dir_index key ends up being logged, the respective
5645 * name is added to the fs/subvol tree with both the dir_item and dir_index
5646 * keys created (see replay_one_name()).
5647 * The directory's inode item with a wrong i_size is not a problem as well,
5648 * since we don't use it at log replay time to set the i_size in the inode
5649 * item of the fs/subvol tree (see overwrite_item()).
5650 */
5651 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5652 struct btrfs_root *root,
5653 struct btrfs_inode *start_inode,
5654 struct btrfs_log_ctx *ctx)
5655 {
5656 struct btrfs_fs_info *fs_info = root->fs_info;
5657 struct btrfs_root *log = root->log_root;
5658 struct btrfs_path *path;
5659 LIST_HEAD(dir_list);
5660 struct btrfs_dir_list *dir_elem;
5661 int ret = 0;
5662
5663 path = btrfs_alloc_path();
5664 if (!path)
5665 return -ENOMEM;
5666
5667 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5668 if (!dir_elem) {
5669 btrfs_free_path(path);
5670 return -ENOMEM;
5671 }
5672 dir_elem->ino = btrfs_ino(start_inode);
5673 list_add_tail(&dir_elem->list, &dir_list);
5674
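/*
 * Breadth-first processing: the queue starts with the directory we were
 * asked to log and grows as new sub-directories are discovered while
 * walking the log tree below.
 */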
5675 while (!list_empty(&dir_list)) {
5676 struct extent_buffer *leaf;
5677 struct btrfs_key min_key;
5678 int nritems;
5679 int i;
5680
5681 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5682 list);
5683 if (ret)
5684 goto next_dir_inode;
5685
5686 min_key.objectid = dir_elem->ino;
5687 min_key.type = BTRFS_DIR_ITEM_KEY;
5688 min_key.offset = 0;
5689 again:
5690 btrfs_release_path(path);
5691 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5692 if (ret < 0) {
5693 goto next_dir_inode;
5694 } else if (ret > 0) {
5695 ret = 0;
5696 goto next_dir_inode;
5697 }
5698
5699 process_leaf:
5700 leaf = path->nodes[0];
5701 nritems = btrfs_header_nritems(leaf);
5702 for (i = path->slots[0]; i < nritems; i++) {
5703 struct btrfs_dir_item *di;
5704 struct btrfs_key di_key;
5705 struct inode *di_inode;
5706 struct btrfs_dir_list *new_dir_elem;
5707 int log_mode = LOG_INODE_EXISTS;
5708 int type;
5709
5710 btrfs_item_key_to_cpu(leaf, &min_key, i);
5711 if (min_key.objectid != dir_elem->ino ||
5712 min_key.type != BTRFS_DIR_ITEM_KEY)
5713 goto next_dir_inode;
5714
5715 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5716 type = btrfs_dir_type(leaf, di);
5717 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5718 type != BTRFS_FT_DIR)
5719 continue;
5720 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
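/* Entries pointing to a subvolume/snapshot root are not logged here. */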
5721 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5722 continue;
5723
5724 btrfs_release_path(path);
5725 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5726 if (IS_ERR(di_inode)) {
5727 ret = PTR_ERR(di_inode);
5728 goto next_dir_inode;
5729 }
5730
5731 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5732 btrfs_add_delayed_iput(di_inode);
5733 break;
5734 }
5735
5736 ctx->log_new_dentries = false;
5737 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5738 log_mode = LOG_INODE_ALL;
5739 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5740 log_mode, 0, LLONG_MAX, ctx);
5741 if (!ret &&
5742 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5743 ret = 1;
5744 btrfs_add_delayed_iput(di_inode);
5745 if (ret)
5746 goto next_dir_inode;
5747 if (ctx->log_new_dentries) {
5748 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5749 GFP_NOFS);
5750 if (!new_dir_elem) {
5751 ret = -ENOMEM;
5752 goto next_dir_inode;
5753 }
5754 new_dir_elem->ino = di_key.objectid;
5755 list_add_tail(&new_dir_elem->list, &dir_list);
5756 }
5757 break;
5758 }
5759 if (i == nritems) {
5760 ret = btrfs_next_leaf(log, path);
5761 if (ret < 0) {
5762 goto next_dir_inode;
5763 } else if (ret > 0) {
5764 ret = 0;
5765 goto next_dir_inode;
5766 }
5767 goto process_leaf;
5768 }
5769 if (min_key.offset < (u64)-1) {
5770 min_key.offset++;
5771 goto again;
5772 }
5773 next_dir_inode:
5774 list_del(&dir_elem->list);
5775 kfree(dir_elem);
5776 }
5777
5778 btrfs_free_path(path);
5779 return ret;
5780 }
5781
5782 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5783 struct btrfs_inode *inode,
5784 struct btrfs_log_ctx *ctx)
5785 {
5786 struct btrfs_fs_info *fs_info = trans->fs_info;
5787 int ret;
5788 struct btrfs_path *path;
5789 struct btrfs_key key;
5790 struct btrfs_root *root = inode->root;
5791 const u64 ino = btrfs_ino(inode);
5792
5793 path = btrfs_alloc_path();
5794 if (!path)
5795 return -ENOMEM;
5796 path->skip_locking = 1;
5797 path->search_commit_root = 1;
5798
5799 key.objectid = ino;
5800 key.type = BTRFS_INODE_REF_KEY;
5801 key.offset = 0;
5802 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5803 if (ret < 0)
5804 goto out;
5805
5806 while (true) {
5807 struct extent_buffer *leaf = path->nodes[0];
5808 int slot = path->slots[0];
5809 u32 cur_offset = 0;
5810 u32 item_size;
5811 unsigned long ptr;
5812
5813 if (slot >= btrfs_header_nritems(leaf)) {
5814 ret = btrfs_next_leaf(root, path);
5815 if (ret < 0)
5816 goto out;
5817 else if (ret > 0)
5818 break;
5819 continue;
5820 }
5821
5822 btrfs_item_key_to_cpu(leaf, &key, slot);
5823 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5824 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5825 break;
5826
5827 item_size = btrfs_item_size_nr(leaf, slot);
5828 ptr = btrfs_item_ptr_offset(leaf, slot);
5829 while (cur_offset < item_size) {
5830 struct btrfs_key inode_key;
5831 struct inode *dir_inode;
5832
5833 inode_key.type = BTRFS_INODE_ITEM_KEY;
5834 inode_key.offset = 0;
5835
5836 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5837 struct btrfs_inode_extref *extref;
5838
5839 extref = (struct btrfs_inode_extref *)
5840 (ptr + cur_offset);
5841 inode_key.objectid = btrfs_inode_extref_parent(
5842 leaf, extref);
5843 cur_offset += sizeof(*extref);
5844 cur_offset += btrfs_inode_extref_name_len(leaf,
5845 extref);
5846 } else {
5847 inode_key.objectid = key.offset;
5848 cur_offset = item_size;
5849 }
5850
5851 dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5852 root, NULL);
5853 /*
5854 * If the parent inode was deleted, return an error to
5855 * fallback to a transaction commit. This is to prevent
5856 * getting an inode that was moved from one parent A to
5857 * a parent B, got its former parent A deleted and then
5858 * it got fsync'ed, from existing at both parents after
5859 * a log replay (and the old parent still existing).
5860 * Example:
5861 *
5862 * mkdir /mnt/A
5863 * mkdir /mnt/B
5864 * touch /mnt/B/bar
5865 * sync
5866 * mv /mnt/B/bar /mnt/A/bar
5867 * mv -T /mnt/A /mnt/B
5868 * fsync /mnt/B/bar
5869 * <power fail>
5870 *
5871 * If we ignore the old parent B which got deleted,
5872 * after a log replay we would have file bar linked
5873 * at both parents and the old parent B would still
5874 * exist.
5875 */
5876 if (IS_ERR(dir_inode)) {
5877 ret = PTR_ERR(dir_inode);
5878 goto out;
5879 }
5880
5881 if (ctx)
5882 ctx->log_new_dentries = false;
5883 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5884 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5885 if (!ret &&
5886 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5887 ret = 1;
5888 if (!ret && ctx && ctx->log_new_dentries)
5889 ret = log_new_dir_dentries(trans, root,
5890 BTRFS_I(dir_inode), ctx);
5891 btrfs_add_delayed_iput(dir_inode);
5892 if (ret)
5893 goto out;
5894 }
5895 path->slots[0]++;
5896 }
5897 ret = 0;
5898 out:
5899 btrfs_free_path(path);
5900 return ret;
5901 }
5902
5903 static int log_new_ancestors(struct btrfs_trans_handle *trans,
5904 struct btrfs_root *root,
5905 struct btrfs_path *path,
5906 struct btrfs_log_ctx *ctx)
5907 {
5908 struct btrfs_key found_key;
5909
5910 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5911
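/*
 * found_key is a BTRFS_INODE_REF_KEY item of the inode @path points to; its
 * offset is the objectid of the parent directory. Walk up parent by parent,
 * logging with LOG_INODE_EXISTS each ancestor whose generation is newer than
 * the last committed transaction, until the subvolume root
 * (BTRFS_FIRST_FREE_OBJECTID) is reached.
 */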
5912 while (true) {
5913 struct btrfs_fs_info *fs_info = root->fs_info;
5914 const u64 last_committed = fs_info->last_trans_committed;
5915 struct extent_buffer *leaf = path->nodes[0];
5916 int slot = path->slots[0];
5917 struct btrfs_key search_key;
5918 struct inode *inode;
5919 int ret = 0;
5920
5921 btrfs_release_path(path);
5922
5923 search_key.objectid = found_key.offset;
5924 search_key.type = BTRFS_INODE_ITEM_KEY;
5925 search_key.offset = 0;
5926 inode = btrfs_iget(fs_info->sb, &search_key, root, NULL);
5927 if (IS_ERR(inode))
5928 return PTR_ERR(inode);
5929
5930 if (BTRFS_I(inode)->generation > last_committed)
5931 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5932 LOG_INODE_EXISTS,
5933 0, LLONG_MAX, ctx);
5934 btrfs_add_delayed_iput(inode);
5935 if (ret)
5936 return ret;
5937
5938 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
5939 break;
5940
5941 search_key.type = BTRFS_INODE_REF_KEY;
5942 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5943 if (ret < 0)
5944 return ret;
5945
5946 leaf = path->nodes[0];
5947 slot = path->slots[0];
5948 if (slot >= btrfs_header_nritems(leaf)) {
5949 ret = btrfs_next_leaf(root, path);
5950 if (ret < 0)
5951 return ret;
5952 else if (ret > 0)
5953 return -ENOENT;
5954 leaf = path->nodes[0];
5955 slot = path->slots[0];
5956 }
5957
5958 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5959 if (found_key.objectid != search_key.objectid ||
5960 found_key.type != BTRFS_INODE_REF_KEY)
5961 return -ENOENT;
5962 }
5963 return 0;
5964 }
5965
5966 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
5967 struct btrfs_inode *inode,
5968 struct dentry *parent,
5969 struct btrfs_log_ctx *ctx)
5970 {
5971 struct btrfs_root *root = inode->root;
5972 struct btrfs_fs_info *fs_info = root->fs_info;
5973 struct dentry *old_parent = NULL;
5974 struct super_block *sb = inode->vfs_inode.i_sb;
5975 int ret = 0;
5976
5977 while (true) {
5978 if (!parent || d_really_is_negative(parent) ||
5979 sb != parent->d_sb)
5980 break;
5981
5982 inode = BTRFS_I(d_inode(parent));
5983 if (root != inode->root)
5984 break;
5985
5986 if (inode->generation > fs_info->last_trans_committed) {
5987 ret = btrfs_log_inode(trans, root, inode,
5988 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5989 if (ret)
5990 break;
5991 }
5992 if (IS_ROOT(parent))
5993 break;
5994
5995 parent = dget_parent(parent);
5996 dput(old_parent);
5997 old_parent = parent;
5998 }
5999 dput(old_parent);
6000
6001 return ret;
6002 }
6003
6004 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
6005 struct btrfs_inode *inode,
6006 struct dentry *parent,
6007 struct btrfs_log_ctx *ctx)
6008 {
6009 struct btrfs_root *root = inode->root;
6010 const u64 ino = btrfs_ino(inode);
6011 struct btrfs_path *path;
6012 struct btrfs_key search_key;
6013 int ret;
6014
6015 /*
6016 * For a single hard link case, go through a fast path that does not
6017 * need to iterate the fs/subvolume tree.
6018 */
6019 if (inode->vfs_inode.i_nlink < 2)
6020 return log_new_ancestors_fast(trans, inode, parent, ctx);
6021
6022 path = btrfs_alloc_path();
6023 if (!path)
6024 return -ENOMEM;
6025
6026 search_key.objectid = ino;
6027 search_key.type = BTRFS_INODE_REF_KEY;
6028 search_key.offset = 0;
6029 again:
6030 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6031 if (ret < 0)
6032 goto out;
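/*
 * An exact key match means we are resuming at the last INODE_REF key
 * processed in a previous iteration (the search key is reused on the
 * 'goto again' further below), so skip to the next slot.
 */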
6033 if (ret == 0)
6034 path->slots[0]++;
6035
6036 while (true) {
6037 struct extent_buffer *leaf = path->nodes[0];
6038 int slot = path->slots[0];
6039 struct btrfs_key found_key;
6040
6041 if (slot >= btrfs_header_nritems(leaf)) {
6042 ret = btrfs_next_leaf(root, path);
6043 if (ret < 0)
6044 goto out;
6045 else if (ret > 0)
6046 break;
6047 continue;
6048 }
6049
6050 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6051 if (found_key.objectid != ino ||
6052 found_key.type > BTRFS_INODE_EXTREF_KEY)
6053 break;
6054
6055 /*
6056 * Don't deal with extended references because they are rare
6057 * cases and too complex to deal with (we would need to keep
6058 * track of which subitem we are processing for each item in
6059 * this loop, etc). So just return some error to fallback to
6060 * a transaction commit.
6061 */
6062 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
6063 ret = -EMLINK;
6064 goto out;
6065 }
6066
6067 /*
6068 * Logging ancestors needs to do more searches on the fs/subvol
6069 * tree, so it releases the path as needed to avoid deadlocks.
6070 * Keep track of the last inode ref key and resume from that key
6071 * after logging all new ancestors for the current hard link.
6072 */
6073 memcpy(&search_key, &found_key, sizeof(search_key));
6074
6075 ret = log_new_ancestors(trans, root, path, ctx);
6076 if (ret)
6077 goto out;
6078 btrfs_release_path(path);
6079 goto again;
6080 }
6081 ret = 0;
6082 out:
6083 btrfs_free_path(path);
6084 return ret;
6085 }
6086
6087 /*
6088 * helper function around btrfs_log_inode to make sure newly created
6089 * parent directories also end up in the log. Minimal logging (inode item and
6090 * backrefs only) is done for any parent directories that are older than
6091 * the last committed transaction.
6092 */
6093 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6094 struct btrfs_inode *inode,
6095 struct dentry *parent,
6096 const loff_t start,
6097 const loff_t end,
6098 int inode_only,
6099 struct btrfs_log_ctx *ctx)
6100 {
6101 struct btrfs_root *root = inode->root;
6102 struct btrfs_fs_info *fs_info = root->fs_info;
6103 struct super_block *sb;
6104 int ret = 0;
6105 u64 last_committed = fs_info->last_trans_committed;
6106 bool log_dentries = false;
6107
6108 sb = inode->vfs_inode.i_sb;
6109
6110 if (btrfs_test_opt(fs_info, NOTREELOG)) {
6111 ret = 1;
6112 goto end_no_trans;
6113 }
6114
6115 /*
6116 * If the previous transaction's commit didn't complete, we have to do a
6117 * full commit ourselves.
6118 */
6119 if (fs_info->last_trans_log_full_commit >
6120 fs_info->last_trans_committed) {
6121 ret = 1;
6122 goto end_no_trans;
6123 }
6124
6125 if (btrfs_root_refs(&root->root_item) == 0) {
6126 ret = 1;
6127 goto end_no_trans;
6128 }
6129
6130 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
6131 last_committed);
6132 if (ret)
6133 goto end_no_trans;
6134
6135 /*
6136 * Skip already logged inodes or inodes corresponding to tmpfiles
6137 * (since logging them is pointless, a link count of 0 means they
6138 * will never be accessible).
6139 */
6140 if (btrfs_inode_in_log(inode, trans->transid) ||
6141 inode->vfs_inode.i_nlink == 0) {
6142 ret = BTRFS_NO_LOG_SYNC;
6143 goto end_no_trans;
6144 }
6145
6146 ret = start_log_trans(trans, root, ctx);
6147 if (ret)
6148 goto end_no_trans;
6149
6150 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
6151 if (ret)
6152 goto end_trans;
6153
6154 /*
6155 * for regular files, if its inode is already on disk, we don't
6156 * have to worry about the parents at all. This is because
6157 * we can use the last_unlink_trans field to record renames
6158 * and other fun in this file.
6159 */
6160 if (S_ISREG(inode->vfs_inode.i_mode) &&
6161 inode->generation <= last_committed &&
6162 inode->last_unlink_trans <= last_committed) {
6163 ret = 0;
6164 goto end_trans;
6165 }
6166
6167 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
6168 log_dentries = true;
6169
6170 /*
6171 * On unlink we must make sure all our current and old parent directory
6172 * inodes are fully logged. This is to prevent leaving dangling
6173 * directory index entries in directories that were our parents but are
6174 * not anymore. Not doing this results in old parent directory being
6175 * impossible to delete after log replay (rmdir will always fail with
6176 * error -ENOTEMPTY).
6177 *
6178 * Example 1:
6179 *
6180 * mkdir testdir
6181 * touch testdir/foo
6182 * ln testdir/foo testdir/bar
6183 * sync
6184 * unlink testdir/bar
6185 * xfs_io -c fsync testdir/foo
6186 * <power failure>
6187 * mount fs, triggers log replay
6188 *
6189 * If we don't log the parent directory (testdir), after log replay the
6190 * directory still has an entry pointing to the file inode using the bar
6191 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6192 * the file inode has a link count of 1.
6193 *
6194 * Example 2:
6195 *
6196 * mkdir testdir
6197 * touch foo
6198 * ln foo testdir/foo2
6199 * ln foo testdir/foo3
6200 * sync
6201 * unlink testdir/foo3
6202 * xfs_io -c fsync foo
6203 * <power failure>
6204 * mount fs, triggers log replay
6205 *
6206 * Similar as the first example, after log replay the parent directory
6207 * testdir still has an entry pointing to the inode file with name foo3
6208 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6209 * and has a link count of 2.
6210 */
6211 if (inode->last_unlink_trans > last_committed) {
6212 ret = btrfs_log_all_parents(trans, inode, ctx);
6213 if (ret)
6214 goto end_trans;
6215 }
6216
6217 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6218 if (ret)
6219 goto end_trans;
6220
6221 if (log_dentries)
6222 ret = log_new_dir_dentries(trans, root, inode, ctx);
6223 else
6224 ret = 0;
6225 end_trans:
6226 if (ret < 0) {
6227 btrfs_set_log_full_commit(trans);
6228 ret = 1;
6229 }
6230
6231 if (ret)
6232 btrfs_remove_log_ctx(root, ctx);
6233 btrfs_end_log_trans(root);
6234 end_no_trans:
6235 return ret;
6236 }
6237
6238 /*
6239 * it is not safe to log a dentry if the chunk root has added new
6240 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
6241 * If this returns 1, you must commit the transaction to safely get your
6242 * data on disk.
6243 */
6244 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6245 struct dentry *dentry,
6246 const loff_t start,
6247 const loff_t end,
6248 struct btrfs_log_ctx *ctx)
6249 {
6250 struct dentry *parent = dget_parent(dentry);
6251 int ret;
6252
6253 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6254 start, end, LOG_INODE_ALL, ctx);
6255 dput(parent);
6256
6257 return ret;
6258 }
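/*
 * Illustrative caller sketch (a simplified version of what the fsync path
 * does with the return value):
 *
 *	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root, &ctx);    (write out the log tree)
 *	if (ret && ret != BTRFS_NO_LOG_SYNC)
 *		ret = btrfs_commit_transaction(trans);      (fall back to a full commit)
 */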
6259
6260 /*
6261 * should be called during mount to replay any log trees
6262 * from the FS
6263 */
6264 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6265 {
6266 int ret;
6267 struct btrfs_path *path;
6268 struct btrfs_trans_handle *trans;
6269 struct btrfs_key key;
6270 struct btrfs_key found_key;
6271 struct btrfs_key tmp_key;
6272 struct btrfs_root *log;
6273 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6274 struct walk_control wc = {
6275 .process_func = process_one_buffer,
6276 .stage = LOG_WALK_PIN_ONLY,
6277 };
6278
6279 path = btrfs_alloc_path();
6280 if (!path)
6281 return -ENOMEM;
6282
6283 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6284
6285 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6286 if (IS_ERR(trans)) {
6287 ret = PTR_ERR(trans);
6288 goto error;
6289 }
6290
6291 wc.trans = trans;
6292 wc.pin = 1;
6293
6294 ret = walk_log_tree(trans, log_root_tree, &wc);
6295 if (ret) {
6296 btrfs_handle_fs_error(fs_info, ret,
6297 "Failed to pin buffers while recovering log root tree.");
6298 goto error;
6299 }
6300
6301 again:
6302 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6303 key.offset = (u64)-1;
6304 key.type = BTRFS_ROOT_ITEM_KEY;
6305
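/*
 * The log root tree stores one root item per log tree, keyed as
 * (BTRFS_TREE_LOG_OBJECTID, BTRFS_ROOT_ITEM_KEY, <objectid of the
 * subvolume the log belongs to>). Walk these items from the highest
 * offset downwards and replay each log tree into its subvolume.
 */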
6306 while (1) {
6307 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6308
6309 if (ret < 0) {
6310 btrfs_handle_fs_error(fs_info, ret,
6311 "Couldn't find tree log root.");
6312 goto error;
6313 }
6314 if (ret > 0) {
6315 if (path->slots[0] == 0)
6316 break;
6317 path->slots[0]--;
6318 }
6319 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6320 path->slots[0]);
6321 btrfs_release_path(path);
6322 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6323 break;
6324
6325 log = btrfs_read_fs_root(log_root_tree, &found_key);
6326 if (IS_ERR(log)) {
6327 ret = PTR_ERR(log);
6328 btrfs_handle_fs_error(fs_info, ret,
6329 "Couldn't read tree log root.");
6330 goto error;
6331 }
6332
6333 tmp_key.objectid = found_key.offset;
6334 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
6335 tmp_key.offset = (u64)-1;
6336
6337 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
6338 if (IS_ERR(wc.replay_dest)) {
6339 ret = PTR_ERR(wc.replay_dest);
6340
6341 /*
6342 * We didn't find the subvol, likely because it was
6343 * deleted. This is ok, simply skip this log and go to
6344 * the next one.
6345 *
6346 * We need to exclude the root because we can't have
6347 * other log replays overwriting this log as we'll read
6348 * it back in a few more times. This will keep our
6349 * block from being modified, and we'll just bail for
6350 * each subsequent pass.
6351 */
6352 if (ret == -ENOENT)
6353 ret = btrfs_pin_extent_for_log_replay(fs_info,
6354 log->node->start,
6355 log->node->len);
6356 free_extent_buffer(log->node);
6357 free_extent_buffer(log->commit_root);
6358 kfree(log);
6359
6360 if (!ret)
6361 goto next;
6362 btrfs_handle_fs_error(fs_info, ret,
6363 "Couldn't read target root for tree log recovery.");
6364 goto error;
6365 }
6366
6367 wc.replay_dest->log_root = log;
6368 btrfs_record_root_in_trans(trans, wc.replay_dest);
6369 ret = walk_log_tree(trans, log, &wc);
6370
6371 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6372 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6373 path);
6374 }
6375
6376 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6377 struct btrfs_root *root = wc.replay_dest;
6378
6379 btrfs_release_path(path);
6380
6381 /*
6382 * We have just replayed everything, and the highest
6383 * objectid of fs roots probably has changed in case
6384 * some inode items got replayed.
6385 *
6386 * root->objectid_mutex is not acquired as log replay
6387 * could only happen during mount.
6388 */
6389 ret = btrfs_find_highest_objectid(root,
6390 &root->highest_objectid);
6391 }
6392
6393 wc.replay_dest->log_root = NULL;
6394 free_extent_buffer(log->node);
6395 free_extent_buffer(log->commit_root);
6396 kfree(log);
6397
6398 if (ret)
6399 goto error;
6400 next:
6401 if (found_key.offset == 0)
6402 break;
6403 key.offset = found_key.offset - 1;
6404 }
6405 btrfs_release_path(path);
6406
6407 /* step one is to pin it all, step two is to replay just inodes */
6408 if (wc.pin) {
6409 wc.pin = 0;
6410 wc.process_func = replay_one_buffer;
6411 wc.stage = LOG_WALK_REPLAY_INODES;
6412 goto again;
6413 }
6414 /* step three is to replay everything */
6415 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6416 wc.stage++;
6417 goto again;
6418 }
6419
6420 btrfs_free_path(path);
6421
6422 /* step 4: commit the transaction, which also unpins the blocks */
6423 ret = btrfs_commit_transaction(trans);
6424 if (ret)
6425 return ret;
6426
6427 free_extent_buffer(log_root_tree->node);
6428 log_root_tree->log_root = NULL;
6429 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6430 kfree(log_root_tree);
6431
6432 return 0;
6433 error:
6434 if (wc.trans)
6435 btrfs_end_transaction(wc.trans);
6436 btrfs_free_path(path);
6437 return ret;
6438 }
6439
6440 /*
6441 * there are some corner cases where we want to force a full
6442 * commit instead of allowing a directory to be logged.
6443 *
6444 * They revolve around files that were unlinked from the directory, and
6445 * this function updates the parent directory so that a full commit is
6446 * properly done if it is fsync'd later after the unlinks are done.
6447 *
6448 * Must be called before the unlink operations (updates to the subvolume tree,
6449 * inodes, etc) are done.
6450 */
6451 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6452 struct btrfs_inode *dir, struct btrfs_inode *inode,
6453 int for_rename)
6454 {
6455 /*
6456 * when we're logging a file, if it hasn't been renamed
6457 * or unlinked, and its inode is fully committed on disk,
6458 * we don't have to worry about walking up the directory chain
6459 * to log its parents.
6460 *
6461 * So, we use the last_unlink_trans field to put this transid
6462 * into the file. When the file is logged we check it and
6463 * don't log the parents if the file is fully on disk.
6464 */
6465 mutex_lock(&inode->log_mutex);
6466 inode->last_unlink_trans = trans->transid;
6467 mutex_unlock(&inode->log_mutex);
6468
6469 /*
6470 * if this directory was already logged any new
6471 * names for this file/dir will get recorded
6472 */
6473 if (dir->logged_trans == trans->transid)
6474 return;
6475
6476 /*
6477 * if the inode we're about to unlink was logged,
6478 * the log will be properly updated for any new names
6479 */
6480 if (inode->logged_trans == trans->transid)
6481 return;
6482
6483 /*
6484 * when renaming files across directories, if the directory
6485 * we're unlinking from gets fsync'd later on, there's
6486 * no way to find the destination directory later and fsync it
6487 * properly. So, we have to be conservative and force commits
6488 * so the new name gets discovered.
6489 */
6490 if (for_rename)
6491 goto record;
6492
6493 /* we can safely do the unlink without any special recording */
6494 return;
6495
6496 record:
6497 mutex_lock(&dir->log_mutex);
6498 dir->last_unlink_trans = trans->transid;
6499 mutex_unlock(&dir->log_mutex);
6500 }
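/*
 * Illustrative call ordering (a sketch only; the real call sites are the
 * unlink and rename paths):
 *
 *	btrfs_record_unlink_dir(trans, dir, inode, for_rename);
 *	...then do the actual unlink/rename updates to the subvolume tree...
 */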
6501
6502 /*
6503 * Make sure that if someone attempts to fsync the parent directory of a deleted
6504 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6505 * that after replaying the log tree of the parent directory's root we will not
6506 * see the snapshot anymore and at log replay time we will not see any log tree
6507 * corresponding to the deleted snapshot's root, which could lead to replaying
6508 * it after replaying the log tree of the parent directory (which would replay
6509 * the snapshot delete operation).
6510 *
6511 * Must be called before the actual snapshot destroy operation (updates to the
6512 * parent root and tree of tree roots trees, etc) are done.
6513 */
6514 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6515 struct btrfs_inode *dir)
6516 {
6517 mutex_lock(&dir->log_mutex);
6518 dir->last_unlink_trans = trans->transid;
6519 mutex_unlock(&dir->log_mutex);
6520 }
6521
6522 /*
6523 * Call this after adding a new name for a file and it will properly
6524 * update the log to reflect the new name.
6525 *
6526 * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
6527 * true (because it's not used).
6528 *
6529 * Return value depends on whether @sync_log is true or false.
6530 * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6531 * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
6532 * otherwise.
6533 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
6534 * sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
6535 * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6536 * committed (without attempting to sync the log).
6537 */
6538 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
6539 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6540 struct dentry *parent,
6541 bool sync_log, struct btrfs_log_ctx *ctx)
6542 {
6543 struct btrfs_fs_info *fs_info = trans->fs_info;
6544 int ret;
6545
6546 /*
6547 * this will force the logging code to walk the dentry chain
6548 * up for the file
6549 */
6550 if (!S_ISDIR(inode->vfs_inode.i_mode))
6551 inode->last_unlink_trans = trans->transid;
6552
6553 /*
6554 * if this inode hasn't been logged and directory we're renaming it
6555 * from hasn't been logged, we don't need to log it
6556 */
6557 if (inode->logged_trans <= fs_info->last_trans_committed &&
6558 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
6559 return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
6560 BTRFS_DONT_NEED_LOG_SYNC;
6561
6562 if (sync_log) {
6563 struct btrfs_log_ctx ctx2;
6564
6565 btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
6566 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6567 LOG_INODE_EXISTS, &ctx2);
6568 if (ret == BTRFS_NO_LOG_SYNC)
6569 return BTRFS_DONT_NEED_TRANS_COMMIT;
6570 else if (ret)
6571 return BTRFS_NEED_TRANS_COMMIT;
6572
6573 ret = btrfs_sync_log(trans, inode->root, &ctx2);
6574 if (ret)
6575 return BTRFS_NEED_TRANS_COMMIT;
6576 return BTRFS_DONT_NEED_TRANS_COMMIT;
6577 }
6578
6579 ASSERT(ctx);
6580 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6581 LOG_INODE_EXISTS, ctx);
6582 if (ret == BTRFS_NO_LOG_SYNC)
6583 return BTRFS_DONT_NEED_LOG_SYNC;
6584 else if (ret)
6585 return BTRFS_NEED_TRANS_COMMIT;
6586
6587 return BTRFS_NEED_LOG_SYNC;
6588 }
6589
6590