1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/blkdev.h>
9 #include <linux/list_sort.h>
10 #include <linux/iversion.h>
11 #include "misc.h"
12 #include "ctree.h"
13 #include "tree-log.h"
14 #include "disk-io.h"
15 #include "locking.h"
16 #include "print-tree.h"
17 #include "backref.h"
18 #include "compression.h"
19 #include "qgroup.h"
20 #include "inode-map.h"
21
22 /* magic values for the inode_only field in btrfs_log_inode:
23 *
24 * LOG_INODE_ALL means to log everything
25 * LOG_INODE_EXISTS means to log just enough to recreate the inode
26 * during log replay
27 */
28 enum {
29 LOG_INODE_ALL,
30 LOG_INODE_EXISTS,
31 LOG_OTHER_INODE,
32 LOG_OTHER_INODE_ALL,
33 };
34
35 /*
36 * directory trouble cases
37 *
38 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
39 * log, we must force a full commit before doing an fsync of the directory
40 * where the unlink was done.
41 * ---> record transid of last unlink/rename per directory
42 *
43 * mkdir foo/some_dir
44 * normal commit
45 * rename foo/some_dir foo2/some_dir
46 * mkdir foo/some_dir
47 * fsync foo/some_dir/some_file
48 *
49 * The fsync above will unlink the original some_dir without recording
50 * it in its new location (foo2). After a crash, some_dir will be gone
51 * unless the fsync of some_file forces a full commit
52 *
53 * 2) we must log any new names for any file or dir that is in the fsync
54 * log. ---> check inode while renaming/linking.
55 *
56 * 2a) we must log any new names for any file or dir during rename
57 * when the directory they are being removed from was logged.
58 * ---> check inode and old parent dir during rename
59 *
60 * 2a is actually the more important variant. With the extra logging
61 * a crash might unlink the old name without recreating the new one
62 *
63 * 3) after a crash, we must go through any directories with a link count
64 * of zero and redo the rm -rf
65 *
66 * mkdir f1/foo
67 * normal commit
68 * rm -rf f1/foo
69 * fsync(f1)
70 *
71 * The directory f1 was fully removed from the FS, but fsync was never
72 * called on f1, only its parent dir. After a crash the rm -rf must
73 * be replayed. This must be able to recurse down the entire
74 * directory tree. The inode link count fixup code takes care of the
75 * ugly details.
76 */
77
78 /*
79 * stages for the tree walking. The first
80 * stage (0) is to only pin down the blocks we find.
81 * The second stage (1) is to make sure that all the inodes
82 * we find in the log are created in the subvolume.
83 *
84 * The last stage is to deal with directories and links and extents
85 * and all the other fun semantics
86 */
87 enum {
88 LOG_WALK_PIN_ONLY,
89 LOG_WALK_REPLAY_INODES,
90 LOG_WALK_REPLAY_DIR_INDEX,
91 LOG_WALK_REPLAY_ALL,
92 };
93
94 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
95 struct btrfs_root *root, struct btrfs_inode *inode,
96 int inode_only,
97 const loff_t start,
98 const loff_t end,
99 struct btrfs_log_ctx *ctx);
100 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
101 struct btrfs_root *root,
102 struct btrfs_path *path, u64 objectid);
103 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
104 struct btrfs_root *root,
105 struct btrfs_root *log,
106 struct btrfs_path *path,
107 u64 dirid, int del_all);
108
109 /*
110 * tree logging is a special write ahead log used to make sure that
111 * fsyncs and O_SYNCs can happen without doing full tree commits.
112 *
113 * Full tree commits are expensive because they require commonly
114 * modified blocks to be recowed, creating many dirty pages in the
115 * extent tree and a 4x-6x higher write load than ext3.
116 *
117 * Instead of doing a tree commit on every fsync, we use the
118 * key ranges and transaction ids to find items for a given file or directory
119 * that have changed in this transaction. Those items are copied into
120 * a special tree (one per subvolume root), that tree is written to disk
121 * and then the fsync is considered complete.
122 *
123 * After a crash, items are copied out of the log-tree back into the
124 * subvolume tree. Any file data extents found are recorded in the extent
125 * allocation tree, and the log-tree freed.
126 *
127 * The log tree is read three times: once to pin down all the extents it is
128 * using in ram, once to create all the inodes logged in the tree
129 * and once to do all the other items.
130 */
131
132 /*
133 * start a sub transaction and setup the log tree
134 * this increments the log tree writer count to make the people
135 * syncing the tree wait for us to finish
136 */
137 static int start_log_trans(struct btrfs_trans_handle *trans,
138 struct btrfs_root *root,
139 struct btrfs_log_ctx *ctx)
140 {
141 struct btrfs_fs_info *fs_info = root->fs_info;
142 int ret = 0;
143
144 mutex_lock(&root->log_mutex);
145
146 if (root->log_root) {
147 if (btrfs_need_log_full_commit(trans)) {
148 ret = -EAGAIN;
149 goto out;
150 }
151
152 if (!root->log_start_pid) {
153 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
154 root->log_start_pid = current->pid;
155 } else if (root->log_start_pid != current->pid) {
156 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
157 }
158 } else {
159 mutex_lock(&fs_info->tree_log_mutex);
160 if (!fs_info->log_root_tree)
161 ret = btrfs_init_log_root_tree(trans, fs_info);
162 mutex_unlock(&fs_info->tree_log_mutex);
163 if (ret)
164 goto out;
165
166 ret = btrfs_add_log_tree(trans, root);
167 if (ret)
168 goto out;
169
170 set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
171 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
172 root->log_start_pid = current->pid;
173 }
174
175 atomic_inc(&root->log_batch);
176 atomic_inc(&root->log_writers);
177 if (ctx && !ctx->logging_new_name) {
178 int index = root->log_transid % 2;
179 list_add_tail(&ctx->list, &root->log_ctxs[index]);
180 ctx->log_transid = root->log_transid;
181 }
182
183 out:
184 mutex_unlock(&root->log_mutex);
185 return ret;
186 }
187
188 /*
189 * returns 0 if there was a log transaction running and we were able
190 * to join, or returns -ENOENT if there was no transaction
191 * in progress
192 */
193 static int join_running_log_trans(struct btrfs_root *root)
194 {
195 int ret = -ENOENT;
196
197 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
198 return ret;
199
200 mutex_lock(&root->log_mutex);
201 if (root->log_root) {
202 ret = 0;
203 atomic_inc(&root->log_writers);
204 }
205 mutex_unlock(&root->log_mutex);
206 return ret;
207 }
208
209 /*
210 * This either makes the current running log transaction wait
211 * until you call btrfs_end_log_trans() or it makes any future
212 * log transactions wait until you call btrfs_end_log_trans()
213 */
214 void btrfs_pin_log_trans(struct btrfs_root *root)
215 {
216 mutex_lock(&root->log_mutex);
217 atomic_inc(&root->log_writers);
218 mutex_unlock(&root->log_mutex);
219 }
220
221 /*
222 * indicate we're done making changes to the log tree
223 * and wake up anyone waiting to do a sync
224 */
225 void btrfs_end_log_trans(struct btrfs_root *root)
226 {
227 if (atomic_dec_and_test(&root->log_writers)) {
228 /* atomic_dec_and_test implies a barrier */
229 cond_wake_up_nomb(&root->log_writer_wait);
230 }
231 }
232
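/*
 * Helpers used while flushing a log tree to disk during an fsync: write out
 * the pages backing a single extent buffer and wait for that writeback to
 * finish (see the wc->write and wc->wait stages in process_one_buffer()).
 */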
233 static int btrfs_write_tree_block(struct extent_buffer *buf)
234 {
235 return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
236 buf->start + buf->len - 1);
237 }
238
239 static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
240 {
241 filemap_fdatawait_range(buf->pages[0]->mapping,
242 buf->start, buf->start + buf->len - 1);
243 }
244
245 /*
246 * the walk control struct is used to pass state down the chain when
247 * processing the log tree. The stage field tells us which part
248 * of the log tree processing we are currently doing. The others
249 * are state fields used for that specific part
250 */
251 struct walk_control {
252 /* should we free the extent on disk when done? This is used
253 * at transaction commit time while freeing a log tree
254 */
255 int free;
256
257 /* should we write out the extent buffer? This is used
258 * while flushing the log tree to disk during a sync
259 */
260 int write;
261
262 /* should we wait for the extent buffer io to finish? Also used
263 * while flushing the log tree to disk for a sync
264 */
265 int wait;
266
267 /* pin only walk, we record which extents on disk belong to the
268 * log trees
269 */
270 int pin;
271
272 /* what stage of the replay code we're currently in */
273 int stage;
274
275 /*
276 * Ignore any items from the inode currently being processed. Needs
277 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
278 * the LOG_WALK_REPLAY_INODES stage.
279 */
280 bool ignore_cur_inode;
281
282 /* the root we are currently replaying */
283 struct btrfs_root *replay_dest;
284
285 /* the trans handle for the current replay */
286 struct btrfs_trans_handle *trans;
287
288 /* the function that gets used to process blocks we find in the
289 * tree. Note the extent_buffer might not be up to date when it is
290 * passed in, and it must be checked or read if you need the data
291 * inside it
292 */
293 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
294 struct walk_control *wc, u64 gen, int level);
295 };
296
297 /*
298 * process_func used to pin down extents, write them or wait on them
299 */
300 static int process_one_buffer(struct btrfs_root *log,
301 struct extent_buffer *eb,
302 struct walk_control *wc, u64 gen, int level)
303 {
304 struct btrfs_fs_info *fs_info = log->fs_info;
305 int ret = 0;
306
307 /*
308 * If this fs is mixed then we need to be able to process the leaves to
309 * pin down any logged extents, so we have to read the block.
310 */
311 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
312 ret = btrfs_read_buffer(eb, gen, level, NULL);
313 if (ret)
314 return ret;
315 }
316
317 if (wc->pin)
318 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
319 eb->len);
320
321 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
322 if (wc->pin && btrfs_header_level(eb) == 0)
323 ret = btrfs_exclude_logged_extents(eb);
324 if (wc->write)
325 btrfs_write_tree_block(eb);
326 if (wc->wait)
327 btrfs_wait_tree_block_writeback(eb);
328 }
329 return ret;
330 }
331
332 /*
333 * Item overwrite used by replay and tree logging. eb, slot and key all refer
334 * to the src data we are copying out.
335 *
336 * root is the tree we are copying into, and path is a scratch
337 * path for use in this function (it should be released on entry and
338 * will be released on exit).
339 *
340 * If the key is already in the destination tree the existing item is
341 * overwritten. If the existing item isn't big enough, it is extended.
342 * If it is too large, it is truncated.
343 *
344 * If the key isn't in the destination yet, a new item is inserted.
345 */
346 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
347 struct btrfs_root *root,
348 struct btrfs_path *path,
349 struct extent_buffer *eb, int slot,
350 struct btrfs_key *key)
351 {
352 int ret;
353 u32 item_size;
354 u64 saved_i_size = 0;
355 int save_old_i_size = 0;
356 unsigned long src_ptr;
357 unsigned long dst_ptr;
358 int overwrite_root = 0;
359 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
360
361 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
362 overwrite_root = 1;
363
364 item_size = btrfs_item_size_nr(eb, slot);
365 src_ptr = btrfs_item_ptr_offset(eb, slot);
366
367 /* look for the key in the destination tree */
368 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
369 if (ret < 0)
370 return ret;
371
372 if (ret == 0) {
373 char *src_copy;
374 char *dst_copy;
375 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
376 path->slots[0]);
377 if (dst_size != item_size)
378 goto insert;
379
380 if (item_size == 0) {
381 btrfs_release_path(path);
382 return 0;
383 }
384 dst_copy = kmalloc(item_size, GFP_NOFS);
385 src_copy = kmalloc(item_size, GFP_NOFS);
386 if (!dst_copy || !src_copy) {
387 btrfs_release_path(path);
388 kfree(dst_copy);
389 kfree(src_copy);
390 return -ENOMEM;
391 }
392
393 read_extent_buffer(eb, src_copy, src_ptr, item_size);
394
395 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
396 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
397 item_size);
398 ret = memcmp(dst_copy, src_copy, item_size);
399
400 kfree(dst_copy);
401 kfree(src_copy);
402 /*
403 * they have the same contents, just return, this saves
404 * us from cowing blocks in the destination tree and doing
405 * extra writes that may not have been done by a previous
406 * sync
407 */
408 if (ret == 0) {
409 btrfs_release_path(path);
410 return 0;
411 }
412
413 /*
414 * We need to load the old nbytes into the inode so when we
415 * replay the extents we've logged we get the right nbytes.
416 */
417 if (inode_item) {
418 struct btrfs_inode_item *item;
419 u64 nbytes;
420 u32 mode;
421
422 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
423 struct btrfs_inode_item);
424 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
425 item = btrfs_item_ptr(eb, slot,
426 struct btrfs_inode_item);
427 btrfs_set_inode_nbytes(eb, item, nbytes);
428
429 /*
430 * If this is a directory we need to reset the i_size to
431 * 0 so that we can set it up properly when replaying
432 * the rest of the items in this log.
433 */
434 mode = btrfs_inode_mode(eb, item);
435 if (S_ISDIR(mode))
436 btrfs_set_inode_size(eb, item, 0);
437 }
438 } else if (inode_item) {
439 struct btrfs_inode_item *item;
440 u32 mode;
441
442 /*
443 * New inode, set nbytes to 0 so that the nbytes comes out
444 * properly when we replay the extents.
445 */
446 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
447 btrfs_set_inode_nbytes(eb, item, 0);
448
449 /*
450 * If this is a directory we need to reset the i_size to 0 so
451 * that we can set it up properly when replaying the rest of
452 * the items in this log.
453 */
454 mode = btrfs_inode_mode(eb, item);
455 if (S_ISDIR(mode))
456 btrfs_set_inode_size(eb, item, 0);
457 }
458 insert:
459 btrfs_release_path(path);
460 /* try to insert the key into the destination tree */
461 path->skip_release_on_error = 1;
462 ret = btrfs_insert_empty_item(trans, root, path,
463 key, item_size);
464 path->skip_release_on_error = 0;
465
466 /* make sure any existing item is the correct size */
467 if (ret == -EEXIST || ret == -EOVERFLOW) {
468 u32 found_size;
469 found_size = btrfs_item_size_nr(path->nodes[0],
470 path->slots[0]);
471 if (found_size > item_size)
472 btrfs_truncate_item(path, item_size, 1);
473 else if (found_size < item_size)
474 btrfs_extend_item(path, item_size - found_size);
475 } else if (ret) {
476 return ret;
477 }
478 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
479 path->slots[0]);
480
481 /* don't overwrite an existing inode if the generation number
482 * was logged as zero. This is done when the tree logging code
483 * is just logging an inode to make sure it exists after recovery.
484 *
485 * Also, don't overwrite i_size on directories during replay.
486 * log replay inserts and removes directory items based on the
487 * state of the tree found in the subvolume, and i_size is modified
488 * as it goes
489 */
490 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
491 struct btrfs_inode_item *src_item;
492 struct btrfs_inode_item *dst_item;
493
494 src_item = (struct btrfs_inode_item *)src_ptr;
495 dst_item = (struct btrfs_inode_item *)dst_ptr;
496
497 if (btrfs_inode_generation(eb, src_item) == 0) {
498 struct extent_buffer *dst_eb = path->nodes[0];
499 const u64 ino_size = btrfs_inode_size(eb, src_item);
500
501 /*
502 * For regular files an ino_size == 0 is used only when
503 * logging that an inode exists, as part of a directory
504 * fsync, and the inode wasn't fsynced before. In this
505 * case don't set the size of the inode in the fs/subvol
506 * tree, otherwise we would be throwing valid data away.
507 */
508 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
509 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
510 ino_size != 0) {
511 struct btrfs_map_token token;
512
513 btrfs_init_map_token(&token, dst_eb);
514 btrfs_set_token_inode_size(dst_eb, dst_item,
515 ino_size, &token);
516 }
517 goto no_copy;
518 }
519
520 if (overwrite_root &&
521 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
522 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
523 save_old_i_size = 1;
524 saved_i_size = btrfs_inode_size(path->nodes[0],
525 dst_item);
526 }
527 }
528
529 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
530 src_ptr, item_size);
531
532 if (save_old_i_size) {
533 struct btrfs_inode_item *dst_item;
534 dst_item = (struct btrfs_inode_item *)dst_ptr;
535 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
536 }
537
538 /* make sure the generation is filled in */
539 if (key->type == BTRFS_INODE_ITEM_KEY) {
540 struct btrfs_inode_item *dst_item;
541 dst_item = (struct btrfs_inode_item *)dst_ptr;
542 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
543 btrfs_set_inode_generation(path->nodes[0], dst_item,
544 trans->transid);
545 }
546 }
547 no_copy:
548 btrfs_mark_buffer_dirty(path->nodes[0]);
549 btrfs_release_path(path);
550 return 0;
551 }
552
553 /*
554 * simple helper to read an inode off the disk from a given root
555 * This can only be called for subvolume roots and not for the log
556 */
557 static noinline struct inode *read_one_inode(struct btrfs_root *root,
558 u64 objectid)
559 {
560 struct btrfs_key key;
561 struct inode *inode;
562
563 key.objectid = objectid;
564 key.type = BTRFS_INODE_ITEM_KEY;
565 key.offset = 0;
566 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
567 if (IS_ERR(inode))
568 inode = NULL;
569 return inode;
570 }
571
572 /* replays a single extent in 'eb' at 'slot' with 'key' into the
573 * subvolume 'root'. path is released on entry and should be released
574 * on exit.
575 *
576 * extents in the log tree have not been allocated out of the extent
577 * tree yet. So, this completes the allocation, taking a reference
578 * as required if the extent already exists or creating a new extent
579 * if it isn't in the extent allocation tree yet.
580 *
581 * The extent is inserted into the file, dropping any existing extents
582 * from the file that overlap the new one.
583 */
584 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
585 struct btrfs_root *root,
586 struct btrfs_path *path,
587 struct extent_buffer *eb, int slot,
588 struct btrfs_key *key)
589 {
590 struct btrfs_fs_info *fs_info = root->fs_info;
591 int found_type;
592 u64 extent_end;
593 u64 start = key->offset;
594 u64 nbytes = 0;
595 struct btrfs_file_extent_item *item;
596 struct inode *inode = NULL;
597 unsigned long size;
598 int ret = 0;
599
600 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
601 found_type = btrfs_file_extent_type(eb, item);
602
603 if (found_type == BTRFS_FILE_EXTENT_REG ||
604 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
605 nbytes = btrfs_file_extent_num_bytes(eb, item);
606 extent_end = start + nbytes;
607
608 /*
609 * We don't add to the inode's nbytes if this is a prealloc extent or a
610 * hole.
611 */
612 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
613 nbytes = 0;
614 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
615 size = btrfs_file_extent_ram_bytes(eb, item);
616 nbytes = btrfs_file_extent_ram_bytes(eb, item);
617 extent_end = ALIGN(start + size,
618 fs_info->sectorsize);
619 } else {
620 ret = 0;
621 goto out;
622 }
623
624 inode = read_one_inode(root, key->objectid);
625 if (!inode) {
626 ret = -EIO;
627 goto out;
628 }
629
630 /*
631 * first check to see if we already have this extent in the
632 * file. This must be done before the btrfs_drop_extents run
633 * so we don't try to drop this extent.
634 */
635 ret = btrfs_lookup_file_extent(trans, root, path,
636 btrfs_ino(BTRFS_I(inode)), start, 0);
637
638 if (ret == 0 &&
639 (found_type == BTRFS_FILE_EXTENT_REG ||
640 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
641 struct btrfs_file_extent_item cmp1;
642 struct btrfs_file_extent_item cmp2;
643 struct btrfs_file_extent_item *existing;
644 struct extent_buffer *leaf;
645
646 leaf = path->nodes[0];
647 existing = btrfs_item_ptr(leaf, path->slots[0],
648 struct btrfs_file_extent_item);
649
650 read_extent_buffer(eb, &cmp1, (unsigned long)item,
651 sizeof(cmp1));
652 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
653 sizeof(cmp2));
654
655 /*
656 * we already have a pointer to this exact extent,
657 * we don't have to do anything
658 */
659 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
660 btrfs_release_path(path);
661 goto out;
662 }
663 }
664 btrfs_release_path(path);
665
666 /* drop any overlapping extents */
667 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
668 if (ret)
669 goto out;
670
671 if (found_type == BTRFS_FILE_EXTENT_REG ||
672 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
673 u64 offset;
674 unsigned long dest_offset;
675 struct btrfs_key ins;
676
677 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
678 btrfs_fs_incompat(fs_info, NO_HOLES))
679 goto update_inode;
680
681 ret = btrfs_insert_empty_item(trans, root, path, key,
682 sizeof(*item));
683 if (ret)
684 goto out;
685 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
686 path->slots[0]);
687 copy_extent_buffer(path->nodes[0], eb, dest_offset,
688 (unsigned long)item, sizeof(*item));
689
690 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
691 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
692 ins.type = BTRFS_EXTENT_ITEM_KEY;
693 offset = key->offset - btrfs_file_extent_offset(eb, item);
694
695 /*
696 * Manually record the dirty extent: here we did a shallow
697 * file extent item copy and skipped the normal backref update,
698 * modifying the extent tree all by ourselves.
699 * So we need to manually record the dirty extent for qgroup,
700 * as the owner of the file extent changed from the log tree
701 * (doesn't affect qgroup) to the fs/file tree (affects qgroup).
702 */
703 ret = btrfs_qgroup_trace_extent(trans,
704 btrfs_file_extent_disk_bytenr(eb, item),
705 btrfs_file_extent_disk_num_bytes(eb, item),
706 GFP_NOFS);
707 if (ret < 0)
708 goto out;
709
710 if (ins.objectid > 0) {
711 struct btrfs_ref ref = { 0 };
712 u64 csum_start;
713 u64 csum_end;
714 LIST_HEAD(ordered_sums);
715
716 /*
717 * is this extent already allocated in the extent
718 * allocation tree? If so, just add a reference
719 */
720 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
721 ins.offset);
722 if (ret < 0) {
723 goto out;
724 } else if (ret == 0) {
725 btrfs_init_generic_ref(&ref,
726 BTRFS_ADD_DELAYED_REF,
727 ins.objectid, ins.offset, 0);
728 btrfs_init_data_ref(&ref,
729 root->root_key.objectid,
730 key->objectid, offset);
731 ret = btrfs_inc_extent_ref(trans, &ref);
732 if (ret)
733 goto out;
734 } else {
735 /*
736 * insert the extent pointer in the extent
737 * allocation tree
738 */
739 ret = btrfs_alloc_logged_file_extent(trans,
740 root->root_key.objectid,
741 key->objectid, offset, &ins);
742 if (ret)
743 goto out;
744 }
745 btrfs_release_path(path);
746
747 if (btrfs_file_extent_compression(eb, item)) {
748 csum_start = ins.objectid;
749 csum_end = csum_start + ins.offset;
750 } else {
751 csum_start = ins.objectid +
752 btrfs_file_extent_offset(eb, item);
753 csum_end = csum_start +
754 btrfs_file_extent_num_bytes(eb, item);
755 }
756
757 ret = btrfs_lookup_csums_range(root->log_root,
758 csum_start, csum_end - 1,
759 &ordered_sums, 0);
760 if (ret)
761 goto out;
762 /*
763 * Now delete all existing csums in the csum root that
764 * cover our range. We do this because we can have an
765 * extent that is completely referenced by one file
766 * extent item and partially referenced by another
767 * file extent item (like after using the clone or
768 * extent_same ioctls). In this case if we end up doing
769 * the replay of the one that partially references the
770 * extent first, and we do not do the csum deletion
771 * below, we can get 2 csum items in the csum tree that
772 * overlap each other. For example, imagine our log has
773 * the two following file extent items:
774 *
775 * key (257 EXTENT_DATA 409600)
776 * extent data disk byte 12845056 nr 102400
777 * extent data offset 20480 nr 20480 ram 102400
778 *
779 * key (257 EXTENT_DATA 819200)
780 * extent data disk byte 12845056 nr 102400
781 * extent data offset 0 nr 102400 ram 102400
782 *
783 * Where the second one fully references the 100K extent
784 * that starts at disk byte 12845056, and the log tree
785 * has a single csum item that covers the entire range
786 * of the extent:
787 *
788 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
789 *
790 * After the first file extent item is replayed, the
791 * csum tree gets the following csum item:
792 *
793 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
794 *
795 * Which covers the 20K sub-range starting at offset 20K
796 * of our extent. Now when we replay the second file
797 * extent item, if we do not delete existing csum items
798 * that cover any of its blocks, we end up getting two
799 * csum items in our csum tree that overlap each other:
800 *
801 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
802 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
803 *
804 * Which is a problem, because after this anyone trying
805 * to look up the checksum of any block of our
806 * extent starting at an offset of 40K or higher, will
807 * end up looking at the second csum item only, which
808 * does not contain the checksum for any block starting
809 * at offset 40K or higher of our extent.
810 */
811 while (!list_empty(&ordered_sums)) {
812 struct btrfs_ordered_sum *sums;
813 sums = list_entry(ordered_sums.next,
814 struct btrfs_ordered_sum,
815 list);
816 if (!ret)
817 ret = btrfs_del_csums(trans,
818 fs_info->csum_root,
819 sums->bytenr,
820 sums->len);
821 if (!ret)
822 ret = btrfs_csum_file_blocks(trans,
823 fs_info->csum_root, sums);
824 list_del(&sums->list);
825 kfree(sums);
826 }
827 if (ret)
828 goto out;
829 } else {
830 btrfs_release_path(path);
831 }
832 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
833 /* inline extents are easy, we just overwrite them */
834 ret = overwrite_item(trans, root, path, eb, slot, key);
835 if (ret)
836 goto out;
837 }
838
839 inode_add_bytes(inode, nbytes);
840 update_inode:
841 ret = btrfs_update_inode(trans, root, inode);
842 out:
843 if (inode)
844 iput(inode);
845 return ret;
846 }
847
848 /*
849 * when cleaning up conflicts between the directory names in the
850 * subvolume, directory names in the log and directory names in the
851 * inode back references, we may have to unlink inodes from directories.
852 *
853 * This is a helper function to do the unlink of a specific directory
854 * item
855 */
856 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
857 struct btrfs_root *root,
858 struct btrfs_path *path,
859 struct btrfs_inode *dir,
860 struct btrfs_dir_item *di)
861 {
862 struct inode *inode;
863 char *name;
864 int name_len;
865 struct extent_buffer *leaf;
866 struct btrfs_key location;
867 int ret;
868
869 leaf = path->nodes[0];
870
871 btrfs_dir_item_key_to_cpu(leaf, di, &location);
872 name_len = btrfs_dir_name_len(leaf, di);
873 name = kmalloc(name_len, GFP_NOFS);
874 if (!name)
875 return -ENOMEM;
876
877 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
878 btrfs_release_path(path);
879
880 inode = read_one_inode(root, location.objectid);
881 if (!inode) {
882 ret = -EIO;
883 goto out;
884 }
885
886 ret = link_to_fixup_dir(trans, root, path, location.objectid);
887 if (ret)
888 goto out;
889
890 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
891 name_len);
892 if (ret)
893 goto out;
894 else
895 ret = btrfs_run_delayed_items(trans);
896 out:
897 kfree(name);
898 iput(inode);
899 return ret;
900 }
901
902 /*
903 * See if a given name and sequence number found in an inode back reference are
904 * already in a directory and correctly point to this inode.
905 *
906 * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
907 * exists.
908 */
909 static noinline int inode_in_dir(struct btrfs_root *root,
910 struct btrfs_path *path,
911 u64 dirid, u64 objectid, u64 index,
912 const char *name, int name_len)
913 {
914 struct btrfs_dir_item *di;
915 struct btrfs_key location;
916 int ret = 0;
917
918 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
919 index, name, name_len, 0);
920 if (IS_ERR(di)) {
921 ret = PTR_ERR(di);
922 goto out;
923 } else if (di) {
924 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
925 if (location.objectid != objectid)
926 goto out;
927 } else {
928 goto out;
929 }
930
931 btrfs_release_path(path);
932 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
933 if (IS_ERR(di)) {
934 ret = PTR_ERR(di);
935 goto out;
936 } else if (di) {
937 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
938 if (location.objectid == objectid)
939 ret = 1;
940 }
941 out:
942 btrfs_release_path(path);
943 return ret;
944 }
945
946 /*
947 * helper function to check a log tree for a named back reference in
948 * an inode. This is used to decide if a back reference that is
949 * found in the subvolume conflicts with what we find in the log.
950 *
951 * inode backreferences may have multiple refs in a single item,
952 * during replay we process one reference at a time, and we don't
953 * want to delete valid links to a file from the subvolume if that
954 * link is also in the log.
955 */
956 static noinline int backref_in_log(struct btrfs_root *log,
957 struct btrfs_key *key,
958 u64 ref_objectid,
959 const char *name, int namelen)
960 {
961 struct btrfs_path *path;
962 struct btrfs_inode_ref *ref;
963 unsigned long ptr;
964 unsigned long ptr_end;
965 unsigned long name_ptr;
966 int found_name_len;
967 int item_size;
968 int ret;
969 int match = 0;
970
971 path = btrfs_alloc_path();
972 if (!path)
973 return -ENOMEM;
974
975 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
976 if (ret != 0)
977 goto out;
978
979 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
980
981 if (key->type == BTRFS_INODE_EXTREF_KEY) {
982 if (btrfs_find_name_in_ext_backref(path->nodes[0],
983 path->slots[0],
984 ref_objectid,
985 name, namelen))
986 match = 1;
987
988 goto out;
989 }
990
991 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
992 ptr_end = ptr + item_size;
993 while (ptr < ptr_end) {
994 ref = (struct btrfs_inode_ref *)ptr;
995 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
996 if (found_name_len == namelen) {
997 name_ptr = (unsigned long)(ref + 1);
998 ret = memcmp_extent_buffer(path->nodes[0], name,
999 name_ptr, namelen);
1000 if (ret == 0) {
1001 match = 1;
1002 goto out;
1003 }
1004 }
1005 ptr = (unsigned long)(ref + 1) + found_name_len;
1006 }
1007 out:
1008 btrfs_free_path(path);
1009 return match;
1010 }
1011
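/*
 * Helper for add_inode_ref(): resolve conflicts before a new name from the
 * log is linked in. Walk the old style inode refs and the extended refs for
 * the inode and unlink every name that is not present in the log, then drop
 * any dir item that conflicts with the new name on the index number or on
 * the name itself.
 *
 * Returns 1 if the back reference is for the root directory (nothing to do),
 * 0 on success and a negative errno on failure.
 */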
1012 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
1013 struct btrfs_root *root,
1014 struct btrfs_path *path,
1015 struct btrfs_root *log_root,
1016 struct btrfs_inode *dir,
1017 struct btrfs_inode *inode,
1018 u64 inode_objectid, u64 parent_objectid,
1019 u64 ref_index, char *name, int namelen,
1020 int *search_done)
1021 {
1022 int ret;
1023 char *victim_name;
1024 int victim_name_len;
1025 struct extent_buffer *leaf;
1026 struct btrfs_dir_item *di;
1027 struct btrfs_key search_key;
1028 struct btrfs_inode_extref *extref;
1029
1030 again:
1031 /* Search old style refs */
1032 search_key.objectid = inode_objectid;
1033 search_key.type = BTRFS_INODE_REF_KEY;
1034 search_key.offset = parent_objectid;
1035 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1036 if (ret == 0) {
1037 struct btrfs_inode_ref *victim_ref;
1038 unsigned long ptr;
1039 unsigned long ptr_end;
1040
1041 leaf = path->nodes[0];
1042
1043 /* are we trying to overwrite a back ref for the root directory?
1044 * If so, just jump out, we're done
1045 */
1046 if (search_key.objectid == search_key.offset)
1047 return 1;
1048
1049 /* check all the names in this back reference to see
1050 * if they are in the log. if so, we allow them to stay
1051 * otherwise they must be unlinked as a conflict
1052 */
1053 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1054 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1055 while (ptr < ptr_end) {
1056 victim_ref = (struct btrfs_inode_ref *)ptr;
1057 victim_name_len = btrfs_inode_ref_name_len(leaf,
1058 victim_ref);
1059 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1060 if (!victim_name)
1061 return -ENOMEM;
1062
1063 read_extent_buffer(leaf, victim_name,
1064 (unsigned long)(victim_ref + 1),
1065 victim_name_len);
1066
1067 if (!backref_in_log(log_root, &search_key,
1068 parent_objectid,
1069 victim_name,
1070 victim_name_len)) {
1071 inc_nlink(&inode->vfs_inode);
1072 btrfs_release_path(path);
1073
1074 ret = btrfs_unlink_inode(trans, root, dir, inode,
1075 victim_name, victim_name_len);
1076 kfree(victim_name);
1077 if (ret)
1078 return ret;
1079 ret = btrfs_run_delayed_items(trans);
1080 if (ret)
1081 return ret;
1082 *search_done = 1;
1083 goto again;
1084 }
1085 kfree(victim_name);
1086
1087 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1088 }
1089
1090 /*
1091 * NOTE: we have searched the root tree and checked the
1092 * corresponding ref, so it does not need to be checked again.
1093 */
1094 *search_done = 1;
1095 }
1096 btrfs_release_path(path);
1097
1098 /* Same search but for extended refs */
1099 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1100 inode_objectid, parent_objectid, 0,
1101 0);
1102 if (IS_ERR(extref)) {
1103 return PTR_ERR(extref);
1104 } else if (extref) {
1105 u32 item_size;
1106 u32 cur_offset = 0;
1107 unsigned long base;
1108 struct inode *victim_parent;
1109
1110 leaf = path->nodes[0];
1111
1112 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1113 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1114
1115 while (cur_offset < item_size) {
1116 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1117
1118 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1119
1120 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1121 goto next;
1122
1123 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1124 if (!victim_name)
1125 return -ENOMEM;
1126 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1127 victim_name_len);
1128
1129 search_key.objectid = inode_objectid;
1130 search_key.type = BTRFS_INODE_EXTREF_KEY;
1131 search_key.offset = btrfs_extref_hash(parent_objectid,
1132 victim_name,
1133 victim_name_len);
1134 ret = 0;
1135 if (!backref_in_log(log_root, &search_key,
1136 parent_objectid, victim_name,
1137 victim_name_len)) {
1138 ret = -ENOENT;
1139 victim_parent = read_one_inode(root,
1140 parent_objectid);
1141 if (victim_parent) {
1142 inc_nlink(&inode->vfs_inode);
1143 btrfs_release_path(path);
1144
1145 ret = btrfs_unlink_inode(trans, root,
1146 BTRFS_I(victim_parent),
1147 inode,
1148 victim_name,
1149 victim_name_len);
1150 if (!ret)
1151 ret = btrfs_run_delayed_items(
1152 trans);
1153 }
1154 iput(victim_parent);
1155 kfree(victim_name);
1156 if (ret)
1157 return ret;
1158 *search_done = 1;
1159 goto again;
1160 }
1161 kfree(victim_name);
1162 next:
1163 cur_offset += victim_name_len + sizeof(*extref);
1164 }
1165 *search_done = 1;
1166 }
1167 btrfs_release_path(path);
1168
1169 /* look for a conflicting sequence number */
1170 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1171 ref_index, name, namelen, 0);
1172 if (IS_ERR(di)) {
1173 return PTR_ERR(di);
1174 } else if (di) {
1175 ret = drop_one_dir_item(trans, root, path, dir, di);
1176 if (ret)
1177 return ret;
1178 }
1179 btrfs_release_path(path);
1180
1181 /* look for a conflicting name */
1182 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1183 name, namelen, 0);
1184 if (IS_ERR(di)) {
1185 return PTR_ERR(di);
1186 } else if (di) {
1187 ret = drop_one_dir_item(trans, root, path, dir, di);
1188 if (ret)
1189 return ret;
1190 }
1191 btrfs_release_path(path);
1192
1193 return 0;
1194 }
1195
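/*
 * Copy the name stored in an extended inode ref item into a freshly
 * allocated buffer and, if requested, also return the dir index and the
 * parent directory objectid recorded in the item.
 */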
1196 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1197 u32 *namelen, char **name, u64 *index,
1198 u64 *parent_objectid)
1199 {
1200 struct btrfs_inode_extref *extref;
1201
1202 extref = (struct btrfs_inode_extref *)ref_ptr;
1203
1204 *namelen = btrfs_inode_extref_name_len(eb, extref);
1205 *name = kmalloc(*namelen, GFP_NOFS);
1206 if (*name == NULL)
1207 return -ENOMEM;
1208
1209 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1210 *namelen);
1211
1212 if (index)
1213 *index = btrfs_inode_extref_index(eb, extref);
1214 if (parent_objectid)
1215 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1216
1217 return 0;
1218 }
1219
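/*
 * Same as extref_get_fields(), but for old style inode ref items, which
 * store the parent directory in the item key instead of in the item body.
 */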
1220 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1221 u32 *namelen, char **name, u64 *index)
1222 {
1223 struct btrfs_inode_ref *ref;
1224
1225 ref = (struct btrfs_inode_ref *)ref_ptr;
1226
1227 *namelen = btrfs_inode_ref_name_len(eb, ref);
1228 *name = kmalloc(*namelen, GFP_NOFS);
1229 if (*name == NULL)
1230 return -ENOMEM;
1231
1232 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1233
1234 if (index)
1235 *index = btrfs_inode_ref_index(eb, ref);
1236
1237 return 0;
1238 }
1239
1240 /*
1241 * Take an inode reference item from the log tree and iterate all names from the
1242 * inode reference item in the subvolume tree with the same key (if it exists).
1243 * For any name that is not in the inode reference item from the log tree, do a
1244 * proper unlink of that name (that is, remove its entry from the inode
1245 * reference item and both dir index keys).
1246 */
1247 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1248 struct btrfs_root *root,
1249 struct btrfs_path *path,
1250 struct btrfs_inode *inode,
1251 struct extent_buffer *log_eb,
1252 int log_slot,
1253 struct btrfs_key *key)
1254 {
1255 int ret;
1256 unsigned long ref_ptr;
1257 unsigned long ref_end;
1258 struct extent_buffer *eb;
1259
1260 again:
1261 btrfs_release_path(path);
1262 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1263 if (ret > 0) {
1264 ret = 0;
1265 goto out;
1266 }
1267 if (ret < 0)
1268 goto out;
1269
1270 eb = path->nodes[0];
1271 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1272 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
1273 while (ref_ptr < ref_end) {
1274 char *name = NULL;
1275 int namelen;
1276 u64 parent_id;
1277
1278 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1279 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1280 NULL, &parent_id);
1281 } else {
1282 parent_id = key->offset;
1283 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1284 NULL);
1285 }
1286 if (ret)
1287 goto out;
1288
1289 if (key->type == BTRFS_INODE_EXTREF_KEY)
1290 ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
1291 parent_id, name,
1292 namelen);
1293 else
1294 ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
1295 name, namelen);
1296
1297 if (!ret) {
1298 struct inode *dir;
1299
1300 btrfs_release_path(path);
1301 dir = read_one_inode(root, parent_id);
1302 if (!dir) {
1303 ret = -ENOENT;
1304 kfree(name);
1305 goto out;
1306 }
1307 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
1308 inode, name, namelen);
1309 kfree(name);
1310 iput(dir);
1311 /*
1312 * Whenever we need to check if a name exists or not, we
1313 * check the subvolume tree. So after an unlink we must
1314 * run delayed items, so that future checks for a name
1315 * during log replay see that the name does not exist
1316 * anymore.
1317 */
1318 if (!ret)
1319 ret = btrfs_run_delayed_items(trans);
1320 if (ret)
1321 goto out;
1322 goto again;
1323 }
1324
1325 kfree(name);
1326 ref_ptr += namelen;
1327 if (key->type == BTRFS_INODE_EXTREF_KEY)
1328 ref_ptr += sizeof(struct btrfs_inode_extref);
1329 else
1330 ref_ptr += sizeof(struct btrfs_inode_ref);
1331 }
1332 ret = 0;
1333 out:
1334 btrfs_release_path(path);
1335 return ret;
1336 }
1337
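/*
 * Check if the subvolume tree already has an inode ref (or extended ref)
 * with the given name connecting @inode to @dir. Returns 1 if it exists,
 * 0 if it does not and a negative errno on error.
 */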
1338 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
1339 const u8 ref_type, const char *name,
1340 const int namelen)
1341 {
1342 struct btrfs_key key;
1343 struct btrfs_path *path;
1344 const u64 parent_id = btrfs_ino(BTRFS_I(dir));
1345 int ret;
1346
1347 path = btrfs_alloc_path();
1348 if (!path)
1349 return -ENOMEM;
1350
1351 key.objectid = btrfs_ino(BTRFS_I(inode));
1352 key.type = ref_type;
1353 if (key.type == BTRFS_INODE_REF_KEY)
1354 key.offset = parent_id;
1355 else
1356 key.offset = btrfs_extref_hash(parent_id, name, namelen);
1357
1358 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
1359 if (ret < 0)
1360 goto out;
1361 if (ret > 0) {
1362 ret = 0;
1363 goto out;
1364 }
1365 if (key.type == BTRFS_INODE_EXTREF_KEY)
1366 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1367 path->slots[0], parent_id, name, namelen);
1368 else
1369 ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
1370 name, namelen);
1371
1372 out:
1373 btrfs_free_path(path);
1374 return ret;
1375 }
1376
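/*
 * Add a link during log replay. If the directory already has a dentry with
 * the same name pointing to a different inode (one that is in the log but
 * not replayed yet because it has a higher inode number), unlink that
 * dentry first so btrfs_add_link() below does not fail with -EEXIST.
 */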
1377 static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1378 struct inode *dir, struct inode *inode, const char *name,
1379 int namelen, u64 ref_index)
1380 {
1381 struct btrfs_dir_item *dir_item;
1382 struct btrfs_key key;
1383 struct btrfs_path *path;
1384 struct inode *other_inode = NULL;
1385 int ret;
1386
1387 path = btrfs_alloc_path();
1388 if (!path)
1389 return -ENOMEM;
1390
1391 dir_item = btrfs_lookup_dir_item(NULL, root, path,
1392 btrfs_ino(BTRFS_I(dir)),
1393 name, namelen, 0);
1394 if (!dir_item) {
1395 btrfs_release_path(path);
1396 goto add_link;
1397 } else if (IS_ERR(dir_item)) {
1398 ret = PTR_ERR(dir_item);
1399 goto out;
1400 }
1401
1402 /*
1403 * Our inode's dentry collides with the dentry of another inode which is
1404 * in the log but not yet processed since it has a higher inode number.
1405 * So delete that other dentry.
1406 */
1407 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
1408 btrfs_release_path(path);
1409 other_inode = read_one_inode(root, key.objectid);
1410 if (!other_inode) {
1411 ret = -ENOENT;
1412 goto out;
1413 }
1414 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
1415 name, namelen);
1416 if (ret)
1417 goto out;
1418 /*
1419 * If we dropped the link count to 0, bump it so that later the iput()
1420 * on the inode will not free it. We will fixup the link count later.
1421 */
1422 if (other_inode->i_nlink == 0)
1423 inc_nlink(other_inode);
1424
1425 ret = btrfs_run_delayed_items(trans);
1426 if (ret)
1427 goto out;
1428 add_link:
1429 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
1430 name, namelen, 0, ref_index);
1431 out:
1432 iput(other_inode);
1433 btrfs_free_path(path);
1434
1435 return ret;
1436 }
1437
1438 /*
1439 * replay one inode back reference item found in the log tree.
1440 * eb, slot and key refer to the buffer and key found in the log tree.
1441 * root is the destination we are replaying into, and path is for temp
1442 * use by this function. (it should be released on return).
1443 */
1444 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1445 struct btrfs_root *root,
1446 struct btrfs_root *log,
1447 struct btrfs_path *path,
1448 struct extent_buffer *eb, int slot,
1449 struct btrfs_key *key)
1450 {
1451 struct inode *dir = NULL;
1452 struct inode *inode = NULL;
1453 unsigned long ref_ptr;
1454 unsigned long ref_end;
1455 char *name = NULL;
1456 int namelen;
1457 int ret;
1458 int search_done = 0;
1459 int log_ref_ver = 0;
1460 u64 parent_objectid;
1461 u64 inode_objectid;
1462 u64 ref_index = 0;
1463 int ref_struct_size;
1464
1465 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1466 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1467
1468 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1469 struct btrfs_inode_extref *r;
1470
1471 ref_struct_size = sizeof(struct btrfs_inode_extref);
1472 log_ref_ver = 1;
1473 r = (struct btrfs_inode_extref *)ref_ptr;
1474 parent_objectid = btrfs_inode_extref_parent(eb, r);
1475 } else {
1476 ref_struct_size = sizeof(struct btrfs_inode_ref);
1477 parent_objectid = key->offset;
1478 }
1479 inode_objectid = key->objectid;
1480
1481 /*
1482 * it is possible that we didn't log all the parent directories
1483 * for a given inode. If we don't find the dir, just don't
1484 * copy the back ref in. The link count fixup code will take
1485 * care of the rest
1486 */
1487 dir = read_one_inode(root, parent_objectid);
1488 if (!dir) {
1489 ret = -ENOENT;
1490 goto out;
1491 }
1492
1493 inode = read_one_inode(root, inode_objectid);
1494 if (!inode) {
1495 ret = -EIO;
1496 goto out;
1497 }
1498
1499 while (ref_ptr < ref_end) {
1500 if (log_ref_ver) {
1501 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1502 &ref_index, &parent_objectid);
1503 /*
1504 * parent object can change from one array
1505 * item to another.
1506 */
1507 if (!dir)
1508 dir = read_one_inode(root, parent_objectid);
1509 if (!dir) {
1510 ret = -ENOENT;
1511 goto out;
1512 }
1513 } else {
1514 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1515 &ref_index);
1516 }
1517 if (ret)
1518 goto out;
1519
1520 ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1521 btrfs_ino(BTRFS_I(inode)), ref_index,
1522 name, namelen);
1523 if (ret < 0) {
1524 goto out;
1525 } else if (ret == 0) {
1526 /*
1527 * look for a conflicting back reference in the
1528 * metadata. if we find one we have to unlink that name
1529 * of the file before we add our new link. Later on, we
1530 * overwrite any existing back reference, and we don't
1531 * want to create dangling pointers in the directory.
1532 */
1533
1534 if (!search_done) {
1535 ret = __add_inode_ref(trans, root, path, log,
1536 BTRFS_I(dir),
1537 BTRFS_I(inode),
1538 inode_objectid,
1539 parent_objectid,
1540 ref_index, name, namelen,
1541 &search_done);
1542 if (ret) {
1543 if (ret == 1)
1544 ret = 0;
1545 goto out;
1546 }
1547 }
1548
1549 /*
1550 * If a reference item already exists for this inode
1551 * with the same parent and name, but different index,
1552 * drop it and the corresponding directory index entries
1553 * from the parent before adding the new reference item
1554 * and dir index entries, otherwise we would fail with
1555 * -EEXIST returned from btrfs_add_link() below.
1556 */
1557 ret = btrfs_inode_ref_exists(inode, dir, key->type,
1558 name, namelen);
1559 if (ret > 0) {
1560 ret = btrfs_unlink_inode(trans, root,
1561 BTRFS_I(dir),
1562 BTRFS_I(inode),
1563 name, namelen);
1564 /*
1565 * If we dropped the link count to 0, bump it so
1566 * that later the iput() on the inode will not
1567 * free it. We will fixup the link count later.
1568 */
1569 if (!ret && inode->i_nlink == 0)
1570 inc_nlink(inode);
1571 /*
1572 * Whenever we need to check if a name exists or
1573 * not, we check the subvolume tree. So after an
1574 * unlink we must run delayed items, so that future
1575 * checks for a name during log replay see that the
1576 * name does not exist anymore.
1577 */
1578 if (!ret)
1579 ret = btrfs_run_delayed_items(trans);
1580 }
1581 if (ret < 0)
1582 goto out;
1583
1584 /* insert our name */
1585 ret = add_link(trans, root, dir, inode, name, namelen,
1586 ref_index);
1587 if (ret)
1588 goto out;
1589
1590 btrfs_update_inode(trans, root, inode);
1591 }
1592 /* Else, ret == 1, we already have a perfect match, we're done. */
1593
1594 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1595 kfree(name);
1596 name = NULL;
1597 if (log_ref_ver) {
1598 iput(dir);
1599 dir = NULL;
1600 }
1601 }
1602
1603 /*
1604 * Before we overwrite the inode reference item in the subvolume tree
1605 * with the item from the log tree, we must unlink all names from the
1606 * parent directory that are in the subvolume's tree inode reference
1607 * item, otherwise we end up with an inconsistent subvolume tree where
1608 * dir index entries exist for a name but there is no inode reference
1609 * item with the same name.
1610 */
1611 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1612 key);
1613 if (ret)
1614 goto out;
1615
1616 /* finally write the back reference in the inode */
1617 ret = overwrite_item(trans, root, path, eb, slot, key);
1618 out:
1619 btrfs_release_path(path);
1620 kfree(name);
1621 iput(dir);
1622 iput(inode);
1623 return ret;
1624 }
1625
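/*
 * Insert an orphan item for the given inode, treating -EEXIST as success
 * so the insertion is idempotent during log replay.
 */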
1626 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1627 struct btrfs_root *root, u64 ino)
1628 {
1629 int ret;
1630
1631 ret = btrfs_insert_orphan_item(trans, root, ino);
1632 if (ret == -EEXIST)
1633 ret = 0;
1634
1635 return ret;
1636 }
1637
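/*
 * Count how many names the inode has in extended inode ref items by walking
 * all of its INODE_EXTREF items. Returns the number of names found or a
 * negative errno on failure.
 */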
1638 static int count_inode_extrefs(struct btrfs_root *root,
1639 struct btrfs_inode *inode, struct btrfs_path *path)
1640 {
1641 int ret = 0;
1642 int name_len;
1643 unsigned int nlink = 0;
1644 u32 item_size;
1645 u32 cur_offset = 0;
1646 u64 inode_objectid = btrfs_ino(inode);
1647 u64 offset = 0;
1648 unsigned long ptr;
1649 struct btrfs_inode_extref *extref;
1650 struct extent_buffer *leaf;
1651
1652 while (1) {
1653 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1654 &extref, &offset);
1655 if (ret)
1656 break;
1657
1658 leaf = path->nodes[0];
1659 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1660 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1661 cur_offset = 0;
1662
1663 while (cur_offset < item_size) {
1664 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1665 name_len = btrfs_inode_extref_name_len(leaf, extref);
1666
1667 nlink++;
1668
1669 cur_offset += name_len + sizeof(*extref);
1670 }
1671
1672 offset++;
1673 btrfs_release_path(path);
1674 }
1675 btrfs_release_path(path);
1676
1677 if (ret < 0 && ret != -ENOENT)
1678 return ret;
1679 return nlink;
1680 }
1681
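/*
 * Count how many names the inode has in old style inode ref items by walking
 * all of its INODE_REF items from the highest key offset down to zero.
 */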
1682 static int count_inode_refs(struct btrfs_root *root,
1683 struct btrfs_inode *inode, struct btrfs_path *path)
1684 {
1685 int ret;
1686 struct btrfs_key key;
1687 unsigned int nlink = 0;
1688 unsigned long ptr;
1689 unsigned long ptr_end;
1690 int name_len;
1691 u64 ino = btrfs_ino(inode);
1692
1693 key.objectid = ino;
1694 key.type = BTRFS_INODE_REF_KEY;
1695 key.offset = (u64)-1;
1696
1697 while (1) {
1698 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1699 if (ret < 0)
1700 break;
1701 if (ret > 0) {
1702 if (path->slots[0] == 0)
1703 break;
1704 path->slots[0]--;
1705 }
1706 process_slot:
1707 btrfs_item_key_to_cpu(path->nodes[0], &key,
1708 path->slots[0]);
1709 if (key.objectid != ino ||
1710 key.type != BTRFS_INODE_REF_KEY)
1711 break;
1712 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1713 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1714 path->slots[0]);
1715 while (ptr < ptr_end) {
1716 struct btrfs_inode_ref *ref;
1717
1718 ref = (struct btrfs_inode_ref *)ptr;
1719 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1720 ref);
1721 ptr = (unsigned long)(ref + 1) + name_len;
1722 nlink++;
1723 }
1724
1725 if (key.offset == 0)
1726 break;
1727 if (path->slots[0] > 0) {
1728 path->slots[0]--;
1729 goto process_slot;
1730 }
1731 key.offset--;
1732 btrfs_release_path(path);
1733 }
1734 btrfs_release_path(path);
1735
1736 return nlink;
1737 }
1738
1739 /*
1740 * There are a few corners where the link count of the file can't
1741 * be properly maintained during replay. So, instead of adding
1742 * lots of complexity to the log code, we just scan the backrefs
1743 * for any file that has been through replay.
1744 *
1745 * The scan will update the link count on the inode to reflect the
1746 * number of back refs found. If it goes down to zero, the iput
1747 * will free the inode.
1748 */
1749 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1750 struct btrfs_root *root,
1751 struct inode *inode)
1752 {
1753 struct btrfs_path *path;
1754 int ret;
1755 u64 nlink = 0;
1756 u64 ino = btrfs_ino(BTRFS_I(inode));
1757
1758 path = btrfs_alloc_path();
1759 if (!path)
1760 return -ENOMEM;
1761
1762 ret = count_inode_refs(root, BTRFS_I(inode), path);
1763 if (ret < 0)
1764 goto out;
1765
1766 nlink = ret;
1767
1768 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1769 if (ret < 0)
1770 goto out;
1771
1772 nlink += ret;
1773
1774 ret = 0;
1775
1776 if (nlink != inode->i_nlink) {
1777 set_nlink(inode, nlink);
1778 btrfs_update_inode(trans, root, inode);
1779 }
1780 BTRFS_I(inode)->index_cnt = (u64)-1;
1781
1782 if (inode->i_nlink == 0) {
1783 if (S_ISDIR(inode->i_mode)) {
1784 ret = replay_dir_deletes(trans, root, NULL, path,
1785 ino, 1);
1786 if (ret)
1787 goto out;
1788 }
1789 ret = insert_orphan_item(trans, root, ino);
1790 }
1791
1792 out:
1793 btrfs_free_path(path);
1794 return ret;
1795 }
1796
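/*
 * Walk all the fixup entries (orphan items keyed under
 * BTRFS_TREE_LOG_FIXUP_OBJECTID) recorded during replay, delete each entry
 * and correct the link count of the inode it refers to.
 */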
1797 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1798 struct btrfs_root *root,
1799 struct btrfs_path *path)
1800 {
1801 int ret;
1802 struct btrfs_key key;
1803 struct inode *inode;
1804
1805 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1806 key.type = BTRFS_ORPHAN_ITEM_KEY;
1807 key.offset = (u64)-1;
1808 while (1) {
1809 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1810 if (ret < 0)
1811 break;
1812
1813 if (ret == 1) {
1814 ret = 0;
1815 if (path->slots[0] == 0)
1816 break;
1817 path->slots[0]--;
1818 }
1819
1820 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1821 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1822 key.type != BTRFS_ORPHAN_ITEM_KEY)
1823 break;
1824
1825 ret = btrfs_del_item(trans, root, path);
1826 if (ret)
1827 break;
1828
1829 btrfs_release_path(path);
1830 inode = read_one_inode(root, key.offset);
1831 if (!inode) {
1832 ret = -EIO;
1833 break;
1834 }
1835
1836 ret = fixup_inode_link_count(trans, root, inode);
1837 iput(inode);
1838 if (ret)
1839 break;
1840
1841 /*
1842 * fixup on a directory may create new entries,
1843 * make sure we always look for the highest possible
1844 * offset
1845 */
1846 key.offset = (u64)-1;
1847 }
1848 btrfs_release_path(path);
1849 return ret;
1850 }
1851
1852
1853 /*
1854 * record a given inode in the fixup dir so we can check its link
1855 * count when replay is done. The link count is incremented here
1856 * so the inode won't go away until we check it
1857 */
1858 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1859 struct btrfs_root *root,
1860 struct btrfs_path *path,
1861 u64 objectid)
1862 {
1863 struct btrfs_key key;
1864 int ret = 0;
1865 struct inode *inode;
1866
1867 inode = read_one_inode(root, objectid);
1868 if (!inode)
1869 return -EIO;
1870
1871 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1872 key.type = BTRFS_ORPHAN_ITEM_KEY;
1873 key.offset = objectid;
1874
1875 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1876
1877 btrfs_release_path(path);
1878 if (ret == 0) {
1879 if (!inode->i_nlink)
1880 set_nlink(inode, 1);
1881 else
1882 inc_nlink(inode);
1883 ret = btrfs_update_inode(trans, root, inode);
1884 } else if (ret == -EEXIST) {
1885 ret = 0;
1886 }
1887 iput(inode);
1888
1889 return ret;
1890 }
1891
1892 /*
1893 * when replaying the log for a directory, we only insert names
1894 * for inodes that actually exist. This means an fsync on a directory
1895 * does not implicitly fsync all the new files in it
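 *
 * A hypothetical sequence showing why (file names invented for the
 * example):
 *
 *	mkdir dir
 *	sync
 *	touch dir/new_file
 *	fsync(dir)
 *	<crash, mount, log replay>
 *
 * Only the directory was fsynced, so new_file's inode item may not be
 * in the log; in that case read_one_inode() below fails with -ENOENT
 * and the name is simply not inserted.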
1896 */
1897 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1898 struct btrfs_root *root,
1899 u64 dirid, u64 index,
1900 char *name, int name_len,
1901 struct btrfs_key *location)
1902 {
1903 struct inode *inode;
1904 struct inode *dir;
1905 int ret;
1906
1907 inode = read_one_inode(root, location->objectid);
1908 if (!inode)
1909 return -ENOENT;
1910
1911 dir = read_one_inode(root, dirid);
1912 if (!dir) {
1913 iput(inode);
1914 return -EIO;
1915 }
1916
1917 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1918 name_len, 1, index);
1919
1920 /* FIXME, put inode into FIXUP list */
1921
1922 iput(inode);
1923 iput(dir);
1924 return ret;
1925 }
1926
1927 /*
1928 * Return true if an inode reference exists in the log for the given name,
1929 * inode and parent inode.
1930 */
1931 static bool name_in_log_ref(struct btrfs_root *log_root,
1932 const char *name, const int name_len,
1933 const u64 dirid, const u64 ino)
1934 {
1935 struct btrfs_key search_key;
1936
1937 search_key.objectid = ino;
1938 search_key.type = BTRFS_INODE_REF_KEY;
1939 search_key.offset = dirid;
1940 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1941 return true;
1942
1943 search_key.type = BTRFS_INODE_EXTREF_KEY;
1944 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1945 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1946 return true;
1947
1948 return false;
1949 }
1950
1951 /*
1952 * take a single entry in a log directory item and replay it into
1953 * the subvolume.
1954 *
1955 * if a conflicting item exists in the subdirectory already,
1956 * the inode it points to is unlinked and put into the link count
1957 * fix up tree.
1958 *
1959 * If a name from the log points to a file or directory that does
1960 * not exist in the FS, it is skipped. fsyncs on directories
1961 * do not force down inodes inside that directory, just changes to the
1962 * names or unlinks in a directory.
1963 *
1964 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1965 * non-existing inode) and 1 if the name was replayed.
1966 */
1967 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1968 struct btrfs_root *root,
1969 struct btrfs_path *path,
1970 struct extent_buffer *eb,
1971 struct btrfs_dir_item *di,
1972 struct btrfs_key *key)
1973 {
1974 char *name;
1975 int name_len;
1976 struct btrfs_dir_item *dst_di;
1977 struct btrfs_key found_key;
1978 struct btrfs_key log_key;
1979 struct inode *dir;
1980 u8 log_type;
1981 bool exists;
1982 int ret;
1983 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1984 bool name_added = false;
1985
1986 dir = read_one_inode(root, key->objectid);
1987 if (!dir)
1988 return -EIO;
1989
1990 name_len = btrfs_dir_name_len(eb, di);
1991 name = kmalloc(name_len, GFP_NOFS);
1992 if (!name) {
1993 ret = -ENOMEM;
1994 goto out;
1995 }
1996
1997 log_type = btrfs_dir_type(eb, di);
1998 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1999 name_len);
2000
2001 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
2002 ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
2003 btrfs_release_path(path);
2004 if (ret < 0)
2005 goto out;
2006 exists = (ret == 0);
2007 ret = 0;
2008
2009 if (key->type == BTRFS_DIR_ITEM_KEY) {
2010 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
2011 name, name_len, 1);
2012 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
2013 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
2014 key->objectid,
2015 key->offset, name,
2016 name_len, 1);
2017 } else {
2018 /* Corruption */
2019 ret = -EINVAL;
2020 goto out;
2021 }
2022
2023 if (IS_ERR(dst_di)) {
2024 ret = PTR_ERR(dst_di);
2025 goto out;
2026 } else if (!dst_di) {
2027 /* we need a sequence number to insert, so we only
2028 * do inserts for the BTRFS_DIR_INDEX_KEY types
2029 */
2030 if (key->type != BTRFS_DIR_INDEX_KEY)
2031 goto out;
2032 goto insert;
2033 }
2034
2035 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
2036 /* the existing item matches the logged item */
2037 if (found_key.objectid == log_key.objectid &&
2038 found_key.type == log_key.type &&
2039 found_key.offset == log_key.offset &&
2040 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
2041 update_size = false;
2042 goto out;
2043 }
2044
2045 /*
2046 * don't drop the conflicting directory entry if the inode
2047 * for the new entry doesn't exist
2048 */
2049 if (!exists)
2050 goto out;
2051
2052 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
2053 if (ret)
2054 goto out;
2055
2056 if (key->type == BTRFS_DIR_INDEX_KEY)
2057 goto insert;
2058 out:
2059 btrfs_release_path(path);
2060 if (!ret && update_size) {
2061 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2062 ret = btrfs_update_inode(trans, root, dir);
2063 }
2064 kfree(name);
2065 iput(dir);
2066 if (!ret && name_added)
2067 ret = 1;
2068 return ret;
2069
2070 insert:
2071 if (name_in_log_ref(root->log_root, name, name_len,
2072 key->objectid, log_key.objectid)) {
2073 /* The dentry will be added later. */
2074 ret = 0;
2075 update_size = false;
2076 goto out;
2077 }
2078 btrfs_release_path(path);
2079 ret = insert_one_name(trans, root, key->objectid, key->offset,
2080 name, name_len, &log_key);
2081 if (ret && ret != -ENOENT && ret != -EEXIST)
2082 goto out;
2083 if (!ret)
2084 name_added = true;
2085 update_size = false;
2086 ret = 0;
2087 goto out;
2088 }
2089
2090 /*
2091 * find all the names in a directory item and reconcile them into
2092 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
2093 * one name in a directory item, but the same code gets used for
2094 * both directory index types
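 *
 * Illustrative note (a general property of the on-disk format, not
 * specific to replay): if two names in the same directory hash to the
 * same value they share one BTRFS_DIR_ITEM_KEY key, and the item body
 * holds two packed btrfs_dir_item structs back to back, which the loop
 * below steps over one at a time. BTRFS_DIR_INDEX_KEY items are keyed
 * by a unique index and therefore hold exactly one name.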
2095 */
2096 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2097 struct btrfs_root *root,
2098 struct btrfs_path *path,
2099 struct extent_buffer *eb, int slot,
2100 struct btrfs_key *key)
2101 {
2102 int ret = 0;
2103 u32 item_size = btrfs_item_size_nr(eb, slot);
2104 struct btrfs_dir_item *di;
2105 int name_len;
2106 unsigned long ptr;
2107 unsigned long ptr_end;
2108 struct btrfs_path *fixup_path = NULL;
2109
2110 ptr = btrfs_item_ptr_offset(eb, slot);
2111 ptr_end = ptr + item_size;
2112 while (ptr < ptr_end) {
2113 di = (struct btrfs_dir_item *)ptr;
2114 name_len = btrfs_dir_name_len(eb, di);
2115 ret = replay_one_name(trans, root, path, eb, di, key);
2116 if (ret < 0)
2117 break;
2118 ptr = (unsigned long)(di + 1);
2119 ptr += name_len;
2120
2121 /*
2122 * If this entry refers to a non-directory (directories can not
2123 * have a link count > 1) and it was added in the transaction
2124 * that was not committed, make sure we fixup the link count of
2125 * the inode the entry points to. Otherwise something like
2126 * the following would result in a directory pointing to an
2127 * inode with a wrong link count that does not account for this dir
2128 * entry:
2129 *
2130 * mkdir testdir
2131 * touch testdir/foo
2132 * touch testdir/bar
2133 * sync
2134 *
2135 * ln testdir/bar testdir/bar_link
2136 * ln testdir/foo testdir/foo_link
2137 * xfs_io -c "fsync" testdir/bar
2138 *
2139 * <power failure>
2140 *
2141 * mount fs, log replay happens
2142 *
2143 * File foo would remain with a link count of 1 when it has two
2144 * entries pointing to it in the directory testdir. This would
2145 * make it impossible to ever delete the parent directory as
2146 * it would result in stale dentries that can never be deleted.
2147 */
2148 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2149 struct btrfs_key di_key;
2150
2151 if (!fixup_path) {
2152 fixup_path = btrfs_alloc_path();
2153 if (!fixup_path) {
2154 ret = -ENOMEM;
2155 break;
2156 }
2157 }
2158
2159 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2160 ret = link_to_fixup_dir(trans, root, fixup_path,
2161 di_key.objectid);
2162 if (ret)
2163 break;
2164 }
2165 ret = 0;
2166 }
2167 btrfs_free_path(fixup_path);
2168 return ret;
2169 }
2170
2171 /*
2172 * directory replay has two parts. There are the standard directory
2173 * items in the log copied from the subvolume, and range items
2174 * created in the log while the subvolume was logged.
2175 *
2176 * The range items tell us which parts of the key space the log
2177 * is authoritative for. During replay, if a key in the subvolume
2178 * directory is in a logged range item, but not actually in the log
2179 * that means it was deleted from the directory before the fsync
2180 * and should be removed.
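 *
 * A small worked example (numbers invented): a log item with key
 * (256 DIR_LOG_INDEX 3) whose btrfs_dir_log_end is 7 says the log is
 * authoritative for index offsets 3..7 of directory 256. During replay,
 * any DIR_INDEX key of that directory in the range [3, 7] that has no
 * matching entry in the log was removed before the fsync and gets
 * unlinked by check_item_in_log().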
2181 */
2182 static noinline int find_dir_range(struct btrfs_root *root,
2183 struct btrfs_path *path,
2184 u64 dirid, int key_type,
2185 u64 *start_ret, u64 *end_ret)
2186 {
2187 struct btrfs_key key;
2188 u64 found_end;
2189 struct btrfs_dir_log_item *item;
2190 int ret;
2191 int nritems;
2192
2193 if (*start_ret == (u64)-1)
2194 return 1;
2195
2196 key.objectid = dirid;
2197 key.type = key_type;
2198 key.offset = *start_ret;
2199
2200 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2201 if (ret < 0)
2202 goto out;
2203 if (ret > 0) {
2204 if (path->slots[0] == 0)
2205 goto out;
2206 path->slots[0]--;
2207 }
2208 if (ret != 0)
2209 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2210
2211 if (key.type != key_type || key.objectid != dirid) {
2212 ret = 1;
2213 goto next;
2214 }
2215 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2216 struct btrfs_dir_log_item);
2217 found_end = btrfs_dir_log_end(path->nodes[0], item);
2218
2219 if (*start_ret >= key.offset && *start_ret <= found_end) {
2220 ret = 0;
2221 *start_ret = key.offset;
2222 *end_ret = found_end;
2223 goto out;
2224 }
2225 ret = 1;
2226 next:
2227 /* check the next slot in the tree to see if it is a valid item */
2228 nritems = btrfs_header_nritems(path->nodes[0]);
2229 path->slots[0]++;
2230 if (path->slots[0] >= nritems) {
2231 ret = btrfs_next_leaf(root, path);
2232 if (ret)
2233 goto out;
2234 }
2235
2236 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2237
2238 if (key.type != key_type || key.objectid != dirid) {
2239 ret = 1;
2240 goto out;
2241 }
2242 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2243 struct btrfs_dir_log_item);
2244 found_end = btrfs_dir_log_end(path->nodes[0], item);
2245 *start_ret = key.offset;
2246 *end_ret = found_end;
2247 ret = 0;
2248 out:
2249 btrfs_release_path(path);
2250 return ret;
2251 }
2252
2253 /*
2254 * this looks for a given directory item in the log. If the directory
2255 * item is not in the log, the item is removed and the inode it points
2256 * to is unlinked
2257 */
2258 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2259 struct btrfs_root *root,
2260 struct btrfs_root *log,
2261 struct btrfs_path *path,
2262 struct btrfs_path *log_path,
2263 struct inode *dir,
2264 struct btrfs_key *dir_key)
2265 {
2266 int ret;
2267 struct extent_buffer *eb;
2268 int slot;
2269 u32 item_size;
2270 struct btrfs_dir_item *di;
2271 struct btrfs_dir_item *log_di;
2272 int name_len;
2273 unsigned long ptr;
2274 unsigned long ptr_end;
2275 char *name;
2276 struct inode *inode;
2277 struct btrfs_key location;
2278
2279 again:
2280 eb = path->nodes[0];
2281 slot = path->slots[0];
2282 item_size = btrfs_item_size_nr(eb, slot);
2283 ptr = btrfs_item_ptr_offset(eb, slot);
2284 ptr_end = ptr + item_size;
2285 while (ptr < ptr_end) {
2286 di = (struct btrfs_dir_item *)ptr;
2287 name_len = btrfs_dir_name_len(eb, di);
2288 name = kmalloc(name_len, GFP_NOFS);
2289 if (!name) {
2290 ret = -ENOMEM;
2291 goto out;
2292 }
2293 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2294 name_len);
2295 log_di = NULL;
2296 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2297 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2298 dir_key->objectid,
2299 name, name_len, 0);
2300 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2301 log_di = btrfs_lookup_dir_index_item(trans, log,
2302 log_path,
2303 dir_key->objectid,
2304 dir_key->offset,
2305 name, name_len, 0);
2306 }
2307 if (!log_di) {
2308 btrfs_dir_item_key_to_cpu(eb, di, &location);
2309 btrfs_release_path(path);
2310 btrfs_release_path(log_path);
2311 inode = read_one_inode(root, location.objectid);
2312 if (!inode) {
2313 kfree(name);
2314 return -EIO;
2315 }
2316
2317 ret = link_to_fixup_dir(trans, root,
2318 path, location.objectid);
2319 if (ret) {
2320 kfree(name);
2321 iput(inode);
2322 goto out;
2323 }
2324
2325 inc_nlink(inode);
2326 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2327 BTRFS_I(inode), name, name_len);
2328 if (!ret)
2329 ret = btrfs_run_delayed_items(trans);
2330 kfree(name);
2331 iput(inode);
2332 if (ret)
2333 goto out;
2334
2335 /* there might still be more names under this key
2336 * check and repeat if required
2337 */
2338 ret = btrfs_search_slot(NULL, root, dir_key, path,
2339 0, 0);
2340 if (ret == 0)
2341 goto again;
2342 ret = 0;
2343 goto out;
2344 } else if (IS_ERR(log_di)) {
2345 kfree(name);
2346 return PTR_ERR(log_di);
2347 }
2348 btrfs_release_path(log_path);
2349 kfree(name);
2350
2351 ptr = (unsigned long)(di + 1);
2352 ptr += name_len;
2353 }
2354 ret = 0;
2355 out:
2356 btrfs_release_path(path);
2357 btrfs_release_path(log_path);
2358 return ret;
2359 }
2360
2361 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2362 struct btrfs_root *root,
2363 struct btrfs_root *log,
2364 struct btrfs_path *path,
2365 const u64 ino)
2366 {
2367 struct btrfs_key search_key;
2368 struct btrfs_path *log_path;
2369 int i;
2370 int nritems;
2371 int ret;
2372
2373 log_path = btrfs_alloc_path();
2374 if (!log_path)
2375 return -ENOMEM;
2376
2377 search_key.objectid = ino;
2378 search_key.type = BTRFS_XATTR_ITEM_KEY;
2379 search_key.offset = 0;
2380 again:
2381 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2382 if (ret < 0)
2383 goto out;
2384 process_leaf:
2385 nritems = btrfs_header_nritems(path->nodes[0]);
2386 for (i = path->slots[0]; i < nritems; i++) {
2387 struct btrfs_key key;
2388 struct btrfs_dir_item *di;
2389 struct btrfs_dir_item *log_di;
2390 u32 total_size;
2391 u32 cur;
2392
2393 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2394 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2395 ret = 0;
2396 goto out;
2397 }
2398
2399 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2400 total_size = btrfs_item_size_nr(path->nodes[0], i);
2401 cur = 0;
2402 while (cur < total_size) {
2403 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2404 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2405 u32 this_len = sizeof(*di) + name_len + data_len;
2406 char *name;
2407
2408 name = kmalloc(name_len, GFP_NOFS);
2409 if (!name) {
2410 ret = -ENOMEM;
2411 goto out;
2412 }
2413 read_extent_buffer(path->nodes[0], name,
2414 (unsigned long)(di + 1), name_len);
2415
2416 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2417 name, name_len, 0);
2418 btrfs_release_path(log_path);
2419 if (!log_di) {
2420 /* Doesn't exist in log tree, so delete it. */
2421 btrfs_release_path(path);
2422 di = btrfs_lookup_xattr(trans, root, path, ino,
2423 name, name_len, -1);
2424 kfree(name);
2425 if (IS_ERR(di)) {
2426 ret = PTR_ERR(di);
2427 goto out;
2428 }
2429 ASSERT(di);
2430 ret = btrfs_delete_one_dir_name(trans, root,
2431 path, di);
2432 if (ret)
2433 goto out;
2434 btrfs_release_path(path);
2435 search_key = key;
2436 goto again;
2437 }
2438 kfree(name);
2439 if (IS_ERR(log_di)) {
2440 ret = PTR_ERR(log_di);
2441 goto out;
2442 }
2443 cur += this_len;
2444 di = (struct btrfs_dir_item *)((char *)di + this_len);
2445 }
2446 }
2447 ret = btrfs_next_leaf(root, path);
2448 if (ret > 0)
2449 ret = 0;
2450 else if (ret == 0)
2451 goto process_leaf;
2452 out:
2453 btrfs_free_path(log_path);
2454 btrfs_release_path(path);
2455 return ret;
2456 }
2457
2458
2459 /*
2460 * deletion replay happens before we copy any new directory items
2461 * out of the log or out of backreferences from inodes. It
2462 * scans the log to find ranges of keys that the log is authoritative for,
2463 * and then scans the directory to find items in those ranges that are
2464 * not present in the log.
2465 *
2466 * Anything we don't find in the log is unlinked and removed from the
2467 * directory.
2468 */
2469 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2470 struct btrfs_root *root,
2471 struct btrfs_root *log,
2472 struct btrfs_path *path,
2473 u64 dirid, int del_all)
2474 {
2475 u64 range_start;
2476 u64 range_end;
2477 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2478 int ret = 0;
2479 struct btrfs_key dir_key;
2480 struct btrfs_key found_key;
2481 struct btrfs_path *log_path;
2482 struct inode *dir;
2483
2484 dir_key.objectid = dirid;
2485 dir_key.type = BTRFS_DIR_ITEM_KEY;
2486 log_path = btrfs_alloc_path();
2487 if (!log_path)
2488 return -ENOMEM;
2489
2490 dir = read_one_inode(root, dirid);
2491 /* it isn't an error if the inode isn't there, that can happen
2492 * because we replay the deletes before we copy in the inode item
2493 * from the log
2494 */
2495 if (!dir) {
2496 btrfs_free_path(log_path);
2497 return 0;
2498 }
2499 again:
2500 range_start = 0;
2501 range_end = 0;
2502 while (1) {
2503 if (del_all)
2504 range_end = (u64)-1;
2505 else {
2506 ret = find_dir_range(log, path, dirid, key_type,
2507 &range_start, &range_end);
2508 if (ret < 0)
2509 goto out;
2510 else if (ret > 0)
2511 break;
2512 }
2513
2514 dir_key.offset = range_start;
2515 while (1) {
2516 int nritems;
2517 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2518 0, 0);
2519 if (ret < 0)
2520 goto out;
2521
2522 nritems = btrfs_header_nritems(path->nodes[0]);
2523 if (path->slots[0] >= nritems) {
2524 ret = btrfs_next_leaf(root, path);
2525 if (ret == 1)
2526 break;
2527 else if (ret < 0)
2528 goto out;
2529 }
2530 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2531 path->slots[0]);
2532 if (found_key.objectid != dirid ||
2533 found_key.type != dir_key.type)
2534 goto next_type;
2535
2536 if (found_key.offset > range_end)
2537 break;
2538
2539 ret = check_item_in_log(trans, root, log, path,
2540 log_path, dir,
2541 &found_key);
2542 if (ret)
2543 goto out;
2544 if (found_key.offset == (u64)-1)
2545 break;
2546 dir_key.offset = found_key.offset + 1;
2547 }
2548 btrfs_release_path(path);
2549 if (range_end == (u64)-1)
2550 break;
2551 range_start = range_end + 1;
2552 }
2553
2554 next_type:
2555 ret = 0;
2556 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2557 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2558 dir_key.type = BTRFS_DIR_INDEX_KEY;
2559 btrfs_release_path(path);
2560 goto again;
2561 }
2562 out:
2563 btrfs_release_path(path);
2564 btrfs_free_path(log_path);
2565 iput(dir);
2566 return ret;
2567 }
2568
2569 /*
2570 * the process_func used to replay items from the log tree. This
2571 * gets called in two different stages. The first stage just looks
2572 * for inodes and makes sure they are all copied into the subvolume.
2573 *
2574 * The second stage copies all the other item types from the log into
2575 * the subvolume. The two stage approach is slower, but gets rid of
2576 * lots of complexity around inodes referencing other inodes that exist
2577 * only in the log (references come from either directory items or inode
2578 * back refs).
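 *
 * Roughly, the stages map to item types as follows (a summary of the
 * code in this function, not an exhaustive list):
 *
 *	LOG_WALK_REPLAY_INODES:     BTRFS_INODE_ITEM_KEY
 *	LOG_WALK_REPLAY_DIR_INDEX:  BTRFS_DIR_INDEX_KEY
 *	LOG_WALK_REPLAY_ALL:        xattrs, inode refs/extrefs, file
 *	                            extents and BTRFS_DIR_ITEM_KEY items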
2579 */
2580 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2581 struct walk_control *wc, u64 gen, int level)
2582 {
2583 int nritems;
2584 struct btrfs_path *path;
2585 struct btrfs_root *root = wc->replay_dest;
2586 struct btrfs_key key;
2587 int i;
2588 int ret;
2589
2590 ret = btrfs_read_buffer(eb, gen, level, NULL);
2591 if (ret)
2592 return ret;
2593
2594 level = btrfs_header_level(eb);
2595
2596 if (level != 0)
2597 return 0;
2598
2599 path = btrfs_alloc_path();
2600 if (!path)
2601 return -ENOMEM;
2602
2603 nritems = btrfs_header_nritems(eb);
2604 for (i = 0; i < nritems; i++) {
2605 btrfs_item_key_to_cpu(eb, &key, i);
2606
2607 /* inode keys are done during the first stage */
2608 if (key.type == BTRFS_INODE_ITEM_KEY &&
2609 wc->stage == LOG_WALK_REPLAY_INODES) {
2610 struct btrfs_inode_item *inode_item;
2611 u32 mode;
2612
2613 inode_item = btrfs_item_ptr(eb, i,
2614 struct btrfs_inode_item);
2615 /*
2616 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2617 * and never got linked before the fsync, skip it, as
2618 * replaying it is pointless since it would be deleted
2619 * later. We skip logging tmpfiles, but it's always
2620 * possible we are replaying a log created with a kernel
2621 * that used to log tmpfiles.
2622 */
2623 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2624 wc->ignore_cur_inode = true;
2625 continue;
2626 } else {
2627 wc->ignore_cur_inode = false;
2628 }
2629 ret = replay_xattr_deletes(wc->trans, root, log,
2630 path, key.objectid);
2631 if (ret)
2632 break;
2633 mode = btrfs_inode_mode(eb, inode_item);
2634 if (S_ISDIR(mode)) {
2635 ret = replay_dir_deletes(wc->trans,
2636 root, log, path, key.objectid, 0);
2637 if (ret)
2638 break;
2639 }
2640 ret = overwrite_item(wc->trans, root, path,
2641 eb, i, &key);
2642 if (ret)
2643 break;
2644
2645 /*
2646 * Before replaying extents, truncate the inode to its
2647 * size. We need to do it now and not after log replay
2648 * because before an fsync we can have prealloc extents
2649 * added beyond the inode's i_size. If we did it after,
2650 * through orphan cleanup for example, we would drop
2651 * those prealloc extents just after replaying them.
2652 */
2653 if (S_ISREG(mode)) {
2654 struct inode *inode;
2655 u64 from;
2656
2657 inode = read_one_inode(root, key.objectid);
2658 if (!inode) {
2659 ret = -EIO;
2660 break;
2661 }
2662 from = ALIGN(i_size_read(inode),
2663 root->fs_info->sectorsize);
2664 ret = btrfs_drop_extents(wc->trans, root, inode,
2665 from, (u64)-1, 1);
2666 if (!ret) {
2667 /* Update the inode's nbytes. */
2668 ret = btrfs_update_inode(wc->trans,
2669 root, inode);
2670 }
2671 iput(inode);
2672 if (ret)
2673 break;
2674 }
2675
2676 ret = link_to_fixup_dir(wc->trans, root,
2677 path, key.objectid);
2678 if (ret)
2679 break;
2680 }
2681
2682 if (wc->ignore_cur_inode)
2683 continue;
2684
2685 if (key.type == BTRFS_DIR_INDEX_KEY &&
2686 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2687 ret = replay_one_dir_item(wc->trans, root, path,
2688 eb, i, &key);
2689 if (ret)
2690 break;
2691 }
2692
2693 if (wc->stage < LOG_WALK_REPLAY_ALL)
2694 continue;
2695
2696 /* these keys are simply copied */
2697 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2698 ret = overwrite_item(wc->trans, root, path,
2699 eb, i, &key);
2700 if (ret)
2701 break;
2702 } else if (key.type == BTRFS_INODE_REF_KEY ||
2703 key.type == BTRFS_INODE_EXTREF_KEY) {
2704 ret = add_inode_ref(wc->trans, root, log, path,
2705 eb, i, &key);
2706 if (ret && ret != -ENOENT)
2707 break;
2708 ret = 0;
2709 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2710 ret = replay_one_extent(wc->trans, root, path,
2711 eb, i, &key);
2712 if (ret)
2713 break;
2714 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2715 ret = replay_one_dir_item(wc->trans, root, path,
2716 eb, i, &key);
2717 if (ret)
2718 break;
2719 }
2720 }
2721 btrfs_free_path(path);
2722 return ret;
2723 }
2724
2725 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2726 struct btrfs_root *root,
2727 struct btrfs_path *path, int *level,
2728 struct walk_control *wc)
2729 {
2730 struct btrfs_fs_info *fs_info = root->fs_info;
2731 u64 root_owner;
2732 u64 bytenr;
2733 u64 ptr_gen;
2734 struct extent_buffer *next;
2735 struct extent_buffer *cur;
2736 struct extent_buffer *parent;
2737 u32 blocksize;
2738 int ret = 0;
2739
2740 WARN_ON(*level < 0);
2741 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2742
2743 while (*level > 0) {
2744 struct btrfs_key first_key;
2745
2746 WARN_ON(*level < 0);
2747 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2748 cur = path->nodes[*level];
2749
2750 WARN_ON(btrfs_header_level(cur) != *level);
2751
2752 if (path->slots[*level] >=
2753 btrfs_header_nritems(cur))
2754 break;
2755
2756 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2757 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2758 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2759 blocksize = fs_info->nodesize;
2760
2761 parent = path->nodes[*level];
2762 root_owner = btrfs_header_owner(parent);
2763
2764 next = btrfs_find_create_tree_block(fs_info, bytenr);
2765 if (IS_ERR(next))
2766 return PTR_ERR(next);
2767
2768 if (*level == 1) {
2769 ret = wc->process_func(root, next, wc, ptr_gen,
2770 *level - 1);
2771 if (ret) {
2772 free_extent_buffer(next);
2773 return ret;
2774 }
2775
2776 path->slots[*level]++;
2777 if (wc->free) {
2778 ret = btrfs_read_buffer(next, ptr_gen,
2779 *level - 1, &first_key);
2780 if (ret) {
2781 free_extent_buffer(next);
2782 return ret;
2783 }
2784
2785 if (trans) {
2786 btrfs_tree_lock(next);
2787 btrfs_set_lock_blocking_write(next);
2788 btrfs_clean_tree_block(next);
2789 btrfs_wait_tree_block_writeback(next);
2790 btrfs_tree_unlock(next);
2791 } else {
2792 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2793 clear_extent_buffer_dirty(next);
2794 }
2795
2796 WARN_ON(root_owner !=
2797 BTRFS_TREE_LOG_OBJECTID);
2798 ret = btrfs_free_and_pin_reserved_extent(
2799 fs_info, bytenr,
2800 blocksize);
2801 if (ret) {
2802 free_extent_buffer(next);
2803 return ret;
2804 }
2805 }
2806 free_extent_buffer(next);
2807 continue;
2808 }
2809 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2810 if (ret) {
2811 free_extent_buffer(next);
2812 return ret;
2813 }
2814
2815 WARN_ON(*level <= 0);
2816 if (path->nodes[*level-1])
2817 free_extent_buffer(path->nodes[*level-1]);
2818 path->nodes[*level-1] = next;
2819 *level = btrfs_header_level(next);
2820 path->slots[*level] = 0;
2821 cond_resched();
2822 }
2823 WARN_ON(*level < 0);
2824 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2825
2826 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2827
2828 cond_resched();
2829 return 0;
2830 }
2831
2832 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2833 struct btrfs_root *root,
2834 struct btrfs_path *path, int *level,
2835 struct walk_control *wc)
2836 {
2837 struct btrfs_fs_info *fs_info = root->fs_info;
2838 u64 root_owner;
2839 int i;
2840 int slot;
2841 int ret;
2842
2843 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2844 slot = path->slots[i];
2845 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2846 path->slots[i]++;
2847 *level = i;
2848 WARN_ON(*level == 0);
2849 return 0;
2850 } else {
2851 struct extent_buffer *parent;
2852 if (path->nodes[*level] == root->node)
2853 parent = path->nodes[*level];
2854 else
2855 parent = path->nodes[*level + 1];
2856
2857 root_owner = btrfs_header_owner(parent);
2858 ret = wc->process_func(root, path->nodes[*level], wc,
2859 btrfs_header_generation(path->nodes[*level]),
2860 *level);
2861 if (ret)
2862 return ret;
2863
2864 if (wc->free) {
2865 struct extent_buffer *next;
2866
2867 next = path->nodes[*level];
2868
2869 if (trans) {
2870 btrfs_tree_lock(next);
2871 btrfs_set_lock_blocking_write(next);
2872 btrfs_clean_tree_block(next);
2873 btrfs_wait_tree_block_writeback(next);
2874 btrfs_tree_unlock(next);
2875 } else {
2876 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2877 clear_extent_buffer_dirty(next);
2878 }
2879
2880 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2881 ret = btrfs_free_and_pin_reserved_extent(
2882 fs_info,
2883 path->nodes[*level]->start,
2884 path->nodes[*level]->len);
2885 if (ret)
2886 return ret;
2887 }
2888 free_extent_buffer(path->nodes[*level]);
2889 path->nodes[*level] = NULL;
2890 *level = i + 1;
2891 }
2892 }
2893 return 1;
2894 }
2895
2896 /*
2897 * drop the reference count on the tree rooted at 'log'. This traverses
2898 * the tree freeing any blocks that have a ref count of zero after being
2899 * decremented.
2900 */
2901 static int walk_log_tree(struct btrfs_trans_handle *trans,
2902 struct btrfs_root *log, struct walk_control *wc)
2903 {
2904 struct btrfs_fs_info *fs_info = log->fs_info;
2905 int ret = 0;
2906 int wret;
2907 int level;
2908 struct btrfs_path *path;
2909 int orig_level;
2910
2911 path = btrfs_alloc_path();
2912 if (!path)
2913 return -ENOMEM;
2914
2915 level = btrfs_header_level(log->node);
2916 orig_level = level;
2917 path->nodes[level] = log->node;
2918 extent_buffer_get(log->node);
2919 path->slots[level] = 0;
2920
2921 while (1) {
2922 wret = walk_down_log_tree(trans, log, path, &level, wc);
2923 if (wret > 0)
2924 break;
2925 if (wret < 0) {
2926 ret = wret;
2927 goto out;
2928 }
2929
2930 wret = walk_up_log_tree(trans, log, path, &level, wc);
2931 if (wret > 0)
2932 break;
2933 if (wret < 0) {
2934 ret = wret;
2935 goto out;
2936 }
2937 }
2938
2939 /* was the root node processed? if not, catch it here */
2940 if (path->nodes[orig_level]) {
2941 ret = wc->process_func(log, path->nodes[orig_level], wc,
2942 btrfs_header_generation(path->nodes[orig_level]),
2943 orig_level);
2944 if (ret)
2945 goto out;
2946 if (wc->free) {
2947 struct extent_buffer *next;
2948
2949 next = path->nodes[orig_level];
2950
2951 if (trans) {
2952 btrfs_tree_lock(next);
2953 btrfs_set_lock_blocking_write(next);
2954 btrfs_clean_tree_block(next);
2955 btrfs_wait_tree_block_writeback(next);
2956 btrfs_tree_unlock(next);
2957 } else {
2958 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2959 clear_extent_buffer_dirty(next);
2960 }
2961
2962 WARN_ON(log->root_key.objectid !=
2963 BTRFS_TREE_LOG_OBJECTID);
2964 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2965 next->start, next->len);
2966 if (ret)
2967 goto out;
2968 }
2969 }
2970
2971 out:
2972 btrfs_free_path(path);
2973 return ret;
2974 }
2975
2976 /*
2977 * helper function to update the item for a given subvolumes log root
2978 * in the tree of log roots
2979 */
2980 static int update_log_root(struct btrfs_trans_handle *trans,
2981 struct btrfs_root *log,
2982 struct btrfs_root_item *root_item)
2983 {
2984 struct btrfs_fs_info *fs_info = log->fs_info;
2985 int ret;
2986
2987 if (log->log_transid == 1) {
2988 /* insert root item on the first sync */
2989 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2990 &log->root_key, root_item);
2991 } else {
2992 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2993 &log->root_key, root_item);
2994 }
2995 return ret;
2996 }
2997
2998 static void wait_log_commit(struct btrfs_root *root, int transid)
2999 {
3000 DEFINE_WAIT(wait);
3001 int index = transid % 2;
3002
3003 /*
3004 * we only allow two pending log transactions at a time,
3005 * so we know that if ours is more than 2 older than the
3006 * current transaction, we're done
3007 */
3008 for (;;) {
3009 prepare_to_wait(&root->log_commit_wait[index],
3010 &wait, TASK_UNINTERRUPTIBLE);
3011
3012 if (!(root->log_transid_committed < transid &&
3013 atomic_read(&root->log_commit[index])))
3014 break;
3015
3016 mutex_unlock(&root->log_mutex);
3017 schedule();
3018 mutex_lock(&root->log_mutex);
3019 }
3020 finish_wait(&root->log_commit_wait[index], &wait);
3021 }
3022
3023 static void wait_for_writer(struct btrfs_root *root)
3024 {
3025 DEFINE_WAIT(wait);
3026
3027 for (;;) {
3028 prepare_to_wait(&root->log_writer_wait, &wait,
3029 TASK_UNINTERRUPTIBLE);
3030 if (!atomic_read(&root->log_writers))
3031 break;
3032
3033 mutex_unlock(&root->log_mutex);
3034 schedule();
3035 mutex_lock(&root->log_mutex);
3036 }
3037 finish_wait(&root->log_writer_wait, &wait);
3038 }
3039
3040 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
3041 struct btrfs_log_ctx *ctx)
3042 {
3043 if (!ctx)
3044 return;
3045
3046 mutex_lock(&root->log_mutex);
3047 list_del_init(&ctx->list);
3048 mutex_unlock(&root->log_mutex);
3049 }
3050
3051 /*
3052 * Must be invoked with the log mutex held, or the caller must otherwise
3053 * ensure that no other task can access the list.
3054 */
3055 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3056 int index, int error)
3057 {
3058 struct btrfs_log_ctx *ctx;
3059 struct btrfs_log_ctx *safe;
3060
3061 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3062 list_del_init(&ctx->list);
3063 ctx->log_ret = error;
3064 }
3065
3066 INIT_LIST_HEAD(&root->log_ctxs[index]);
3067 }
3068
3069 /*
3070 * btrfs_sync_log sends a given tree log down to the disk and
3071 * updates the super blocks to record it. When this call is done,
3072 * you know that any inodes previously logged are safely on disk only
3073 * if it returns 0.
3074 *
3075 * Any other return value means you need to call btrfs_commit_transaction.
3076 * Some of the edge cases for fsyncing directories that have had unlinks
3077 * or renames done in the past mean that sometimes the only safe
3078 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3079 * that has happened.
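 *
 * A sketch of how a caller might react to the return value (simplified;
 * this is not the exact code used by the fsync path):
 *
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans);
 *	else
 *		ret = btrfs_commit_transaction(trans);
 *
 * i.e. on success the log alone makes the logged inodes safe, on any
 * other return the caller must fall back to a full transaction commit.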
3080 */
3081 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3082 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3083 {
3084 int index1;
3085 int index2;
3086 int mark;
3087 int ret;
3088 struct btrfs_fs_info *fs_info = root->fs_info;
3089 struct btrfs_root *log = root->log_root;
3090 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3091 struct btrfs_root_item new_root_item;
3092 int log_transid = 0;
3093 struct btrfs_log_ctx root_log_ctx;
3094 struct blk_plug plug;
3095
3096 mutex_lock(&root->log_mutex);
3097 log_transid = ctx->log_transid;
3098 if (root->log_transid_committed >= log_transid) {
3099 mutex_unlock(&root->log_mutex);
3100 return ctx->log_ret;
3101 }
3102
3103 index1 = log_transid % 2;
3104 if (atomic_read(&root->log_commit[index1])) {
3105 wait_log_commit(root, log_transid);
3106 mutex_unlock(&root->log_mutex);
3107 return ctx->log_ret;
3108 }
3109 ASSERT(log_transid == root->log_transid);
3110 atomic_set(&root->log_commit[index1], 1);
3111
3112 /* wait for previous tree log sync to complete */
3113 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3114 wait_log_commit(root, log_transid - 1);
3115
3116 while (1) {
3117 int batch = atomic_read(&root->log_batch);
3118 /* when we're on an ssd, just kick the log commit out */
3119 if (!btrfs_test_opt(fs_info, SSD) &&
3120 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3121 mutex_unlock(&root->log_mutex);
3122 schedule_timeout_uninterruptible(1);
3123 mutex_lock(&root->log_mutex);
3124 }
3125 wait_for_writer(root);
3126 if (batch == atomic_read(&root->log_batch))
3127 break;
3128 }
3129
3130 /* bail out if we need to do a full commit */
3131 if (btrfs_need_log_full_commit(trans)) {
3132 ret = -EAGAIN;
3133 mutex_unlock(&root->log_mutex);
3134 goto out;
3135 }
3136
3137 if (log_transid % 2 == 0)
3138 mark = EXTENT_DIRTY;
3139 else
3140 mark = EXTENT_NEW;
3141
3142 /* we start IO on all the marked extents here, but we don't actually
3143 * wait for them until later.
3144 */
3145 blk_start_plug(&plug);
3146 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3147 if (ret) {
3148 blk_finish_plug(&plug);
3149 btrfs_abort_transaction(trans, ret);
3150 btrfs_set_log_full_commit(trans);
3151 mutex_unlock(&root->log_mutex);
3152 goto out;
3153 }
3154
3155 /*
3156 * We _must_ update under the root->log_mutex in order to make sure we
3157 * have a consistent view of the log root we are trying to commit at
3158 * this moment.
3159 *
3160 * We _must_ copy this into a local copy, because we are not holding the
3161 * log_root_tree->log_mutex yet. This is important because when we
3162 * commit the log_root_tree we must have a consistent view of the
3163 * log_root_tree when we update the super block to point at the
3164 * log_root_tree bytenr. If we update the log_root_tree here we'll race
3165 * with the commit and possibly point at the new block which we may not
3166 * have written out.
3167 */
3168 btrfs_set_root_node(&log->root_item, log->node);
3169 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3170
3171 root->log_transid++;
3172 log->log_transid = root->log_transid;
3173 root->log_start_pid = 0;
3174 /*
3175 * IO has been started, blocks of the log tree have WRITTEN flag set
3176 * in their headers. New modifications of the log will be written to
3177 * new positions, so it's safe to allow log writers to go in.
3178 */
3179 mutex_unlock(&root->log_mutex);
3180
3181 btrfs_init_log_ctx(&root_log_ctx, NULL);
3182
3183 mutex_lock(&log_root_tree->log_mutex);
3184
3185 index2 = log_root_tree->log_transid % 2;
3186 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3187 root_log_ctx.log_transid = log_root_tree->log_transid;
3188
3189 /*
3190 * Now we are safe to update the log_root_tree because we're under the
3191 * log_mutex, and we're a current writer so we're holding the commit
3192 * open until we drop the log_mutex.
3193 */
3194 ret = update_log_root(trans, log, &new_root_item);
3195 if (ret) {
3196 if (!list_empty(&root_log_ctx.list))
3197 list_del_init(&root_log_ctx.list);
3198
3199 blk_finish_plug(&plug);
3200 btrfs_set_log_full_commit(trans);
3201
3202 if (ret != -ENOSPC) {
3203 btrfs_abort_transaction(trans, ret);
3204 mutex_unlock(&log_root_tree->log_mutex);
3205 goto out;
3206 }
3207 btrfs_wait_tree_log_extents(log, mark);
3208 mutex_unlock(&log_root_tree->log_mutex);
3209 ret = -EAGAIN;
3210 goto out;
3211 }
3212
3213 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3214 blk_finish_plug(&plug);
3215 list_del_init(&root_log_ctx.list);
3216 mutex_unlock(&log_root_tree->log_mutex);
3217 ret = root_log_ctx.log_ret;
3218 goto out;
3219 }
3220
3221 index2 = root_log_ctx.log_transid % 2;
3222 if (atomic_read(&log_root_tree->log_commit[index2])) {
3223 blk_finish_plug(&plug);
3224 ret = btrfs_wait_tree_log_extents(log, mark);
3225 wait_log_commit(log_root_tree,
3226 root_log_ctx.log_transid);
3227 mutex_unlock(&log_root_tree->log_mutex);
3228 if (!ret)
3229 ret = root_log_ctx.log_ret;
3230 goto out;
3231 }
3232 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3233 atomic_set(&log_root_tree->log_commit[index2], 1);
3234
3235 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3236 wait_log_commit(log_root_tree,
3237 root_log_ctx.log_transid - 1);
3238 }
3239
3240 /*
3241 * now that we've moved on to the tree of log tree roots,
3242 * check the full commit flag again
3243 */
3244 if (btrfs_need_log_full_commit(trans)) {
3245 blk_finish_plug(&plug);
3246 btrfs_wait_tree_log_extents(log, mark);
3247 mutex_unlock(&log_root_tree->log_mutex);
3248 ret = -EAGAIN;
3249 goto out_wake_log_root;
3250 }
3251
3252 ret = btrfs_write_marked_extents(fs_info,
3253 &log_root_tree->dirty_log_pages,
3254 EXTENT_DIRTY | EXTENT_NEW);
3255 blk_finish_plug(&plug);
3256 if (ret) {
3257 btrfs_set_log_full_commit(trans);
3258 btrfs_abort_transaction(trans, ret);
3259 mutex_unlock(&log_root_tree->log_mutex);
3260 goto out_wake_log_root;
3261 }
3262 ret = btrfs_wait_tree_log_extents(log, mark);
3263 if (!ret)
3264 ret = btrfs_wait_tree_log_extents(log_root_tree,
3265 EXTENT_NEW | EXTENT_DIRTY);
3266 if (ret) {
3267 btrfs_set_log_full_commit(trans);
3268 mutex_unlock(&log_root_tree->log_mutex);
3269 goto out_wake_log_root;
3270 }
3271
3272 btrfs_set_super_log_root(fs_info->super_for_commit,
3273 log_root_tree->node->start);
3274 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3275 btrfs_header_level(log_root_tree->node));
3276
3277 log_root_tree->log_transid++;
3278 mutex_unlock(&log_root_tree->log_mutex);
3279
3280 /*
3281 * Nobody else is going to jump in and write the ctree
3282 * super here because the log_commit atomic below is protecting
3283 * us. We must be called with a transaction handle pinning
3284 * the running transaction open, so a full commit can't hop
3285 * in and cause problems either.
3286 */
3287 ret = write_all_supers(fs_info, 1);
3288 if (ret) {
3289 btrfs_set_log_full_commit(trans);
3290 btrfs_abort_transaction(trans, ret);
3291 goto out_wake_log_root;
3292 }
3293
3294 mutex_lock(&root->log_mutex);
3295 if (root->last_log_commit < log_transid)
3296 root->last_log_commit = log_transid;
3297 mutex_unlock(&root->log_mutex);
3298
3299 out_wake_log_root:
3300 mutex_lock(&log_root_tree->log_mutex);
3301 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3302
3303 log_root_tree->log_transid_committed++;
3304 atomic_set(&log_root_tree->log_commit[index2], 0);
3305 mutex_unlock(&log_root_tree->log_mutex);
3306
3307 /*
3308 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3309 * all the updates above are seen by the woken threads. It might not be
3310 * necessary, but proving that seems to be hard.
3311 */
3312 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3313 out:
3314 mutex_lock(&root->log_mutex);
3315 btrfs_remove_all_log_ctxs(root, index1, ret);
3316 root->log_transid_committed++;
3317 atomic_set(&root->log_commit[index1], 0);
3318 mutex_unlock(&root->log_mutex);
3319
3320 /*
3321 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3322 * all the updates above are seen by the woken threads. It might not be
3323 * necessary, but proving that seems to be hard.
3324 */
3325 cond_wake_up(&root->log_commit_wait[index1]);
3326 return ret;
3327 }
3328
3329 static void free_log_tree(struct btrfs_trans_handle *trans,
3330 struct btrfs_root *log)
3331 {
3332 int ret;
3333 struct walk_control wc = {
3334 .free = 1,
3335 .process_func = process_one_buffer
3336 };
3337
3338 ret = walk_log_tree(trans, log, &wc);
3339 if (ret) {
3340 if (trans)
3341 btrfs_abort_transaction(trans, ret);
3342 else
3343 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3344 }
3345
3346 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3347 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3348 free_extent_buffer(log->node);
3349 kfree(log);
3350 }
3351
3352 /*
3353 * free all the extents used by the tree log. This should be called
3354 * at commit time of the full transaction
3355 */
3356 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3357 {
3358 if (root->log_root) {
3359 free_log_tree(trans, root->log_root);
3360 root->log_root = NULL;
3361 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3362 }
3363 return 0;
3364 }
3365
3366 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3367 struct btrfs_fs_info *fs_info)
3368 {
3369 if (fs_info->log_root_tree) {
3370 free_log_tree(trans, fs_info->log_root_tree);
3371 fs_info->log_root_tree = NULL;
3372 }
3373 return 0;
3374 }
3375
3376 /*
3377 * Check if an inode was logged in the current transaction. We can't always rely
3378 * on an inode's logged_trans value, because it's an in-memory only field and
3379 * therefore not persisted. This means that its value is lost if the inode gets
3380 * evicted and loaded again from disk (in which case it has a value of 0, and
3381 * certainly it is smaller than any possible transaction ID), when that happens
3382 * the full_sync flag is set in the inode's runtime flags, so in that case we
3383 * assume eviction happened and ignore the logged_trans value, assuming the
3384 * worst case, that the inode was logged before in the current transaction.
3385 */
3386 static bool inode_logged(struct btrfs_trans_handle *trans,
3387 struct btrfs_inode *inode)
3388 {
3389 if (inode->logged_trans == trans->transid)
3390 return true;
3391
3392 if (inode->last_trans == trans->transid &&
3393 test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
3394 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3395 return true;
3396
3397 return false;
3398 }
3399
3400 /*
3401 * If both a file and directory are logged, and unlinks or renames are
3402 * mixed in, we have a few interesting corners:
3403 *
3404 * create file X in dir Y
3405 * link file X to X.link in dir Y
3406 * fsync file X
3407 * unlink file X but leave X.link
3408 * fsync dir Y
3409 *
3410 * After a crash we would expect only X.link to exist. But file X
3411 * didn't get fsync'd again so the log has back refs for X and X.link.
3412 *
3413 * We solve this by removing directory entries and inode backrefs from the
3414 * log when a file that was logged in the current transaction is
3415 * unlinked. Any later fsync will include the updated log entries, and
3416 * we'll be able to reconstruct the proper directory items from backrefs.
3417 *
3418 * This optimization allows us to avoid relogging the entire inode
3419 * or the entire directory.
3420 */
3421 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3422 struct btrfs_root *root,
3423 const char *name, int name_len,
3424 struct btrfs_inode *dir, u64 index)
3425 {
3426 struct btrfs_root *log;
3427 struct btrfs_dir_item *di;
3428 struct btrfs_path *path;
3429 int ret;
3430 int err = 0;
3431 int bytes_del = 0;
3432 u64 dir_ino = btrfs_ino(dir);
3433
3434 if (!inode_logged(trans, dir))
3435 return 0;
3436
3437 ret = join_running_log_trans(root);
3438 if (ret)
3439 return 0;
3440
3441 mutex_lock(&dir->log_mutex);
3442
3443 log = root->log_root;
3444 path = btrfs_alloc_path();
3445 if (!path) {
3446 err = -ENOMEM;
3447 goto out_unlock;
3448 }
3449
3450 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3451 name, name_len, -1);
3452 if (IS_ERR(di)) {
3453 err = PTR_ERR(di);
3454 goto fail;
3455 }
3456 if (di) {
3457 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3458 bytes_del += name_len;
3459 if (ret) {
3460 err = ret;
3461 goto fail;
3462 }
3463 }
3464 btrfs_release_path(path);
3465 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3466 index, name, name_len, -1);
3467 if (IS_ERR(di)) {
3468 err = PTR_ERR(di);
3469 goto fail;
3470 }
3471 if (di) {
3472 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3473 bytes_del += name_len;
3474 if (ret) {
3475 err = ret;
3476 goto fail;
3477 }
3478 }
3479
3480 /* update the directory size in the log to reflect the names
3481 * we have removed
3482 */
3483 if (bytes_del) {
3484 struct btrfs_key key;
3485
3486 key.objectid = dir_ino;
3487 key.offset = 0;
3488 key.type = BTRFS_INODE_ITEM_KEY;
3489 btrfs_release_path(path);
3490
3491 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3492 if (ret < 0) {
3493 err = ret;
3494 goto fail;
3495 }
3496 if (ret == 0) {
3497 struct btrfs_inode_item *item;
3498 u64 i_size;
3499
3500 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3501 struct btrfs_inode_item);
3502 i_size = btrfs_inode_size(path->nodes[0], item);
3503 if (i_size > bytes_del)
3504 i_size -= bytes_del;
3505 else
3506 i_size = 0;
3507 btrfs_set_inode_size(path->nodes[0], item, i_size);
3508 btrfs_mark_buffer_dirty(path->nodes[0]);
3509 } else
3510 ret = 0;
3511 btrfs_release_path(path);
3512 }
3513 fail:
3514 btrfs_free_path(path);
3515 out_unlock:
3516 mutex_unlock(&dir->log_mutex);
3517 if (err == -ENOSPC) {
3518 btrfs_set_log_full_commit(trans);
3519 err = 0;
3520 } else if (err < 0) {
3521 btrfs_abort_transaction(trans, err);
3522 }
3523
3524 btrfs_end_log_trans(root);
3525
3526 return err;
3527 }
3528
3529 /* see comments for btrfs_del_dir_entries_in_log */
3530 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3531 struct btrfs_root *root,
3532 const char *name, int name_len,
3533 struct btrfs_inode *inode, u64 dirid)
3534 {
3535 struct btrfs_root *log;
3536 u64 index;
3537 int ret;
3538
3539 if (!inode_logged(trans, inode))
3540 return 0;
3541
3542 ret = join_running_log_trans(root);
3543 if (ret)
3544 return 0;
3545 log = root->log_root;
3546 mutex_lock(&inode->log_mutex);
3547
3548 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3549 dirid, &index);
3550 mutex_unlock(&inode->log_mutex);
3551 if (ret == -ENOSPC) {
3552 btrfs_set_log_full_commit(trans);
3553 ret = 0;
3554 } else if (ret < 0 && ret != -ENOENT)
3555 btrfs_abort_transaction(trans, ret);
3556 btrfs_end_log_trans(root);
3557
3558 return ret;
3559 }
3560
3561 /*
3562 * creates a range item in the log for 'dirid'. first_offset and
3563 * last_offset tell us which parts of the key space the log should
3564 * be considered authoritative for.
3565 */
3566 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3567 struct btrfs_root *log,
3568 struct btrfs_path *path,
3569 int key_type, u64 dirid,
3570 u64 first_offset, u64 last_offset)
3571 {
3572 int ret;
3573 struct btrfs_key key;
3574 struct btrfs_dir_log_item *item;
3575
3576 key.objectid = dirid;
3577 key.offset = first_offset;
3578 if (key_type == BTRFS_DIR_ITEM_KEY)
3579 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3580 else
3581 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3582 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3583 if (ret)
3584 return ret;
3585
3586 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3587 struct btrfs_dir_log_item);
3588 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3589 btrfs_mark_buffer_dirty(path->nodes[0]);
3590 btrfs_release_path(path);
3591 return 0;
3592 }
3593
3594 /*
3595 * log all the items included in the current transaction for a given
3596 * directory. This also creates the range items in the log tree required
3597 * to replay anything deleted before the fsync
3598 */
3599 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3600 struct btrfs_root *root, struct btrfs_inode *inode,
3601 struct btrfs_path *path,
3602 struct btrfs_path *dst_path, int key_type,
3603 struct btrfs_log_ctx *ctx,
3604 u64 min_offset, u64 *last_offset_ret)
3605 {
3606 struct btrfs_key min_key;
3607 struct btrfs_root *log = root->log_root;
3608 struct extent_buffer *src;
3609 int err = 0;
3610 int ret;
3611 int i;
3612 int nritems;
3613 u64 first_offset = min_offset;
3614 u64 last_offset = (u64)-1;
3615 u64 ino = btrfs_ino(inode);
3616
3617 log = root->log_root;
3618
3619 min_key.objectid = ino;
3620 min_key.type = key_type;
3621 min_key.offset = min_offset;
3622
3623 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3624
3625 /*
3626 * we didn't find anything from this transaction, see if there
3627 * is anything at all
3628 */
3629 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3630 min_key.objectid = ino;
3631 min_key.type = key_type;
3632 min_key.offset = (u64)-1;
3633 btrfs_release_path(path);
3634 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3635 if (ret < 0) {
3636 btrfs_release_path(path);
3637 return ret;
3638 }
3639 ret = btrfs_previous_item(root, path, ino, key_type);
3640
3641 /* if ret == 0 there are items for this type,
3642 * create a range to tell us the last key of this type.
3643 * otherwise, there are no items in this directory after
3644 * *min_offset, and we create a range to indicate that.
3645 */
3646 if (ret == 0) {
3647 struct btrfs_key tmp;
3648 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3649 path->slots[0]);
3650 if (key_type == tmp.type)
3651 first_offset = max(min_offset, tmp.offset) + 1;
3652 }
3653 goto done;
3654 }
3655
3656 /* go backward to find any previous key */
3657 ret = btrfs_previous_item(root, path, ino, key_type);
3658 if (ret == 0) {
3659 struct btrfs_key tmp;
3660 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3661 if (key_type == tmp.type) {
3662 first_offset = tmp.offset;
3663 ret = overwrite_item(trans, log, dst_path,
3664 path->nodes[0], path->slots[0],
3665 &tmp);
3666 if (ret) {
3667 err = ret;
3668 goto done;
3669 }
3670 }
3671 }
3672 btrfs_release_path(path);
3673
3674 /*
3675 * Find the first key from this transaction again. See the note for
3676 * log_new_dir_dentries: if we're logging a directory recursively we
3677 * won't be holding its i_mutex, which means we can modify the directory
3678 * while we're logging it. If we remove an entry between our first
3679 * search and this search we'll not find the key again and can just
3680 * bail.
3681 */
3682 search:
3683 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3684 if (ret != 0)
3685 goto done;
3686
3687 /*
3688 * we have a block from this transaction, log every item in it
3689 * from our directory
3690 */
3691 while (1) {
3692 struct btrfs_key tmp;
3693 src = path->nodes[0];
3694 nritems = btrfs_header_nritems(src);
3695 for (i = path->slots[0]; i < nritems; i++) {
3696 struct btrfs_dir_item *di;
3697
3698 btrfs_item_key_to_cpu(src, &min_key, i);
3699
3700 if (min_key.objectid != ino || min_key.type != key_type)
3701 goto done;
3702
3703 if (need_resched()) {
3704 btrfs_release_path(path);
3705 cond_resched();
3706 goto search;
3707 }
3708
3709 ret = overwrite_item(trans, log, dst_path, src, i,
3710 &min_key);
3711 if (ret) {
3712 err = ret;
3713 goto done;
3714 }
3715
3716 /*
3717 * We must make sure that when we log a directory entry,
3718 * the corresponding inode, after log replay, has a
3719 * matching link count. For example:
3720 *
3721 * touch foo
3722 * mkdir mydir
3723 * sync
3724 * ln foo mydir/bar
3725 * xfs_io -c "fsync" mydir
3726 * <crash>
3727 * <mount fs and log replay>
3728 *
3729 * Would result in an fsync log that, when replayed,
3730 * leaves our file inode with a link count of 1 while
3731 * two directory entries point to the same inode.
3732 * After removing one of the names, it would not be
3733 * possible to remove the other name, which always
3734 * resulted in stale file handle errors, and it would
3735 * not be possible to rmdir the parent directory, since
3736 * its i_size could never decrement to the value
3737 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3738 */
3739 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3740 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3741 if (ctx &&
3742 (btrfs_dir_transid(src, di) == trans->transid ||
3743 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3744 tmp.type != BTRFS_ROOT_ITEM_KEY)
3745 ctx->log_new_dentries = true;
3746 }
3747 path->slots[0] = nritems;
3748
3749 /*
3750 * look ahead to the next item and see if it is also
3751 * from this directory and from this transaction
3752 */
3753 ret = btrfs_next_leaf(root, path);
3754 if (ret) {
3755 if (ret == 1)
3756 last_offset = (u64)-1;
3757 else
3758 err = ret;
3759 goto done;
3760 }
3761 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3762 if (tmp.objectid != ino || tmp.type != key_type) {
3763 last_offset = (u64)-1;
3764 goto done;
3765 }
3766 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3767 ret = overwrite_item(trans, log, dst_path,
3768 path->nodes[0], path->slots[0],
3769 &tmp);
3770 if (ret)
3771 err = ret;
3772 else
3773 last_offset = tmp.offset;
3774 goto done;
3775 }
3776 }
3777 done:
3778 btrfs_release_path(path);
3779 btrfs_release_path(dst_path);
3780
3781 if (err == 0) {
3782 *last_offset_ret = last_offset;
3783 /*
3784 * insert the log range keys to indicate where the log
3785 * is valid
3786 */
3787 ret = insert_dir_log_key(trans, log, path, key_type,
3788 ino, first_offset, last_offset);
3789 if (ret)
3790 err = ret;
3791 }
3792 return err;
3793 }
3794
3795 /*
3796 * logging directories is very similar to logging inodes. We find all the items
3797 * from the current transaction and write them to the log.
3798 *
3799 * The recovery code scans the directory in the subvolume, and if it finds a
3800 * key in the range logged that is not present in the log tree, then it means
3801 * that dir entry was unlinked during the transaction.
3802 *
3803 * In order for that scan to work, we must include one key smaller than
3804 * the smallest logged by this transaction and one key larger than the largest
3805 * key logged by this transaction.
3806 */
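/*
 * Editor's illustrative sketch (not part of the original comments), showing
 * how the bracketing range keys are used; names and sizes are made up:
 *
 *     mkdir dir ; touch dir/a dir/b
 *     sync
 *     rm dir/b ; touch dir/c
 *     xfs_io -c "fsync" dir
 *     <power fail>
 *     <mount fs and log replay>
 *
 * The log contains the entries that still exist in the modified leaves plus
 * dir log range items covering the offsets touched in this transaction.
 * During replay, an entry found in the subvolume that falls inside a logged
 * range but has no matching entry in the log (here "b") is deleted, while
 * entries outside every logged range are left untouched.
 */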
3807 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3808 struct btrfs_root *root, struct btrfs_inode *inode,
3809 struct btrfs_path *path,
3810 struct btrfs_path *dst_path,
3811 struct btrfs_log_ctx *ctx)
3812 {
3813 u64 min_key;
3814 u64 max_key;
3815 int ret;
3816 int key_type = BTRFS_DIR_ITEM_KEY;
3817
3818 again:
3819 min_key = 0;
3820 max_key = 0;
3821 while (1) {
3822 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3823 ctx, min_key, &max_key);
3824 if (ret)
3825 return ret;
3826 if (max_key == (u64)-1)
3827 break;
3828 min_key = max_key + 1;
3829 }
3830
3831 if (key_type == BTRFS_DIR_ITEM_KEY) {
3832 key_type = BTRFS_DIR_INDEX_KEY;
3833 goto again;
3834 }
3835 return 0;
3836 }
3837
3838 /*
3839 * a helper function to drop items from the log before we relog an
3840 * inode. max_key_type indicates the highest item type to remove.
3841 * This cannot be run for file data extents because it does not
3842 * free the extents they point to.
3843 */
3844 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3845 struct btrfs_root *log,
3846 struct btrfs_path *path,
3847 u64 objectid, int max_key_type)
3848 {
3849 int ret;
3850 struct btrfs_key key;
3851 struct btrfs_key found_key;
3852 int start_slot;
3853
3854 key.objectid = objectid;
3855 key.type = max_key_type;
3856 key.offset = (u64)-1;
3857
3858 while (1) {
3859 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3860 BUG_ON(ret == 0); /* Logic error */
3861 if (ret < 0)
3862 break;
3863
3864 if (path->slots[0] == 0)
3865 break;
3866
3867 path->slots[0]--;
3868 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3869 path->slots[0]);
3870
3871 if (found_key.objectid != objectid)
3872 break;
3873
3874 found_key.offset = 0;
3875 found_key.type = 0;
3876 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3877 &start_slot);
3878 if (ret < 0)
3879 break;
3880
3881 ret = btrfs_del_items(trans, log, path, start_slot,
3882 path->slots[0] - start_slot + 1);
3883 /*
3884 * If start slot isn't 0 then we don't need to re-search, we've
3885 * found the last guy with the objectid in this tree.
3886 */
3887 if (ret || start_slot != 0)
3888 break;
3889 btrfs_release_path(path);
3890 }
3891 btrfs_release_path(path);
3892 if (ret > 0)
3893 ret = 0;
3894 return ret;
3895 }
3896
3897 static void fill_inode_item(struct btrfs_trans_handle *trans,
3898 struct extent_buffer *leaf,
3899 struct btrfs_inode_item *item,
3900 struct inode *inode, int log_inode_only,
3901 u64 logged_isize)
3902 {
3903 struct btrfs_map_token token;
3904
3905 btrfs_init_map_token(&token, leaf);
3906
3907 if (log_inode_only) {
3908 /* set the generation to zero so the recovery code
3909 * can tell the difference between logging an inode
3910 * just to say 'this inode exists' and logging it
3911 * to say 'update this inode with these values'
3912 */
3913 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3914 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3915 } else {
3916 btrfs_set_token_inode_generation(leaf, item,
3917 BTRFS_I(inode)->generation,
3918 &token);
3919 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3920 }
3921
3922 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3923 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3924 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3925 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3926
3927 btrfs_set_token_timespec_sec(leaf, &item->atime,
3928 inode->i_atime.tv_sec, &token);
3929 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3930 inode->i_atime.tv_nsec, &token);
3931
3932 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3933 inode->i_mtime.tv_sec, &token);
3934 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3935 inode->i_mtime.tv_nsec, &token);
3936
3937 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3938 inode->i_ctime.tv_sec, &token);
3939 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3940 inode->i_ctime.tv_nsec, &token);
3941
3942 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3943 &token);
3944
3945 btrfs_set_token_inode_sequence(leaf, item,
3946 inode_peek_iversion(inode), &token);
3947 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3948 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3949 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3950 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3951 }
3952
3953 static int log_inode_item(struct btrfs_trans_handle *trans,
3954 struct btrfs_root *log, struct btrfs_path *path,
3955 struct btrfs_inode *inode)
3956 {
3957 struct btrfs_inode_item *inode_item;
3958 int ret;
3959
3960 ret = btrfs_insert_empty_item(trans, log, path,
3961 &inode->location, sizeof(*inode_item));
3962 if (ret && ret != -EEXIST)
3963 return ret;
3964 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3965 struct btrfs_inode_item);
3966 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3967 0, 0);
3968 btrfs_release_path(path);
3969 return 0;
3970 }
3971
3972 static int log_csums(struct btrfs_trans_handle *trans,
3973 struct btrfs_root *log_root,
3974 struct btrfs_ordered_sum *sums)
3975 {
3976 int ret;
3977
3978 /*
3979 * Due to extent cloning, we might have logged a csum item that covers a
3980 * subrange of a cloned extent, and later we can end up logging a csum
3981 * item for a larger subrange of the same extent or the entire range.
3982 * This would leave csum items in the log tree that cover the same range
3983 * and break the searches for checksums in the log tree, resulting in
3984 * some checksums missing in the fs/subvolume tree. So just delete (or
3985 * trim and adjust) any existing csum items in the log for this range.
3986 */
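/*
 * Hypothetical illustration of the overlap (editor's example): file A has
 * a 128K extent and file B reflinks its first 64K; fsyncing B logs a csum
 * item for that 64K subrange, and fsyncing A later in the same transaction
 * logs a csum item for the full 128K of the same extent. Without the
 * deletion below, both items would coexist in the log and overlap.
 */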
3987 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
3988 if (ret)
3989 return ret;
3990
3991 return btrfs_csum_file_blocks(trans, log_root, sums);
3992 }
3993
3994 static noinline int copy_items(struct btrfs_trans_handle *trans,
3995 struct btrfs_inode *inode,
3996 struct btrfs_path *dst_path,
3997 struct btrfs_path *src_path,
3998 int start_slot, int nr, int inode_only,
3999 u64 logged_isize)
4000 {
4001 struct btrfs_fs_info *fs_info = trans->fs_info;
4002 unsigned long src_offset;
4003 unsigned long dst_offset;
4004 struct btrfs_root *log = inode->root->log_root;
4005 struct btrfs_file_extent_item *extent;
4006 struct btrfs_inode_item *inode_item;
4007 struct extent_buffer *src = src_path->nodes[0];
4008 int ret;
4009 struct btrfs_key *ins_keys;
4010 u32 *ins_sizes;
4011 char *ins_data;
4012 int i;
4013 struct list_head ordered_sums;
4014 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
4015
4016 INIT_LIST_HEAD(&ordered_sums);
4017
4018 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
4019 nr * sizeof(u32), GFP_NOFS);
4020 if (!ins_data)
4021 return -ENOMEM;
4022
4023 ins_sizes = (u32 *)ins_data;
4024 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
4025
4026 for (i = 0; i < nr; i++) {
4027 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
4028 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
4029 }
4030 ret = btrfs_insert_empty_items(trans, log, dst_path,
4031 ins_keys, ins_sizes, nr);
4032 if (ret) {
4033 kfree(ins_data);
4034 return ret;
4035 }
4036
4037 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
4038 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
4039 dst_path->slots[0]);
4040
4041 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
4042
4043 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
4044 inode_item = btrfs_item_ptr(dst_path->nodes[0],
4045 dst_path->slots[0],
4046 struct btrfs_inode_item);
4047 fill_inode_item(trans, dst_path->nodes[0], inode_item,
4048 &inode->vfs_inode,
4049 inode_only == LOG_INODE_EXISTS,
4050 logged_isize);
4051 } else {
4052 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4053 src_offset, ins_sizes[i]);
4054 }
4055
4056 /* take a reference on file data extents so that truncates
4057 * or deletes of this inode don't have to relog the inode
4058 * again
4059 */
4060 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4061 !skip_csum) {
4062 int found_type;
4063 extent = btrfs_item_ptr(src, start_slot + i,
4064 struct btrfs_file_extent_item);
4065
4066 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4067 continue;
4068
4069 found_type = btrfs_file_extent_type(src, extent);
4070 if (found_type == BTRFS_FILE_EXTENT_REG) {
4071 u64 ds, dl, cs, cl;
4072 ds = btrfs_file_extent_disk_bytenr(src,
4073 extent);
4074 /* ds == 0 is a hole */
4075 if (ds == 0)
4076 continue;
4077
4078 dl = btrfs_file_extent_disk_num_bytes(src,
4079 extent);
4080 cs = btrfs_file_extent_offset(src, extent);
4081 cl = btrfs_file_extent_num_bytes(src,
4082 extent);
4083 if (btrfs_file_extent_compression(src,
4084 extent)) {
4085 cs = 0;
4086 cl = dl;
4087 }
4088
4089 ret = btrfs_lookup_csums_range(
4090 fs_info->csum_root,
4091 ds + cs, ds + cs + cl - 1,
4092 &ordered_sums, 0);
4093 if (ret)
4094 break;
4095 }
4096 }
4097 }
4098
4099 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4100 btrfs_release_path(dst_path);
4101 kfree(ins_data);
4102
4103 /*
4104 * we have to do this after the loop above to avoid changing the
4105 * log tree while trying to change the log tree.
4106 */
4107 while (!list_empty(&ordered_sums)) {
4108 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4109 struct btrfs_ordered_sum,
4110 list);
4111 if (!ret)
4112 ret = log_csums(trans, log, sums);
4113 list_del(&sums->list);
4114 kfree(sums);
4115 }
4116
4117 return ret;
4118 }
4119
4120 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4121 {
4122 struct extent_map *em1, *em2;
4123
4124 em1 = list_entry(a, struct extent_map, list);
4125 em2 = list_entry(b, struct extent_map, list);
4126
4127 if (em1->start < em2->start)
4128 return -1;
4129 else if (em1->start > em2->start)
4130 return 1;
4131 return 0;
4132 }
4133
4134 static int log_extent_csums(struct btrfs_trans_handle *trans,
4135 struct btrfs_inode *inode,
4136 struct btrfs_root *log_root,
4137 const struct extent_map *em)
4138 {
4139 u64 csum_offset;
4140 u64 csum_len;
4141 LIST_HEAD(ordered_sums);
4142 int ret = 0;
4143
4144 if (inode->flags & BTRFS_INODE_NODATASUM ||
4145 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4146 em->block_start == EXTENT_MAP_HOLE)
4147 return 0;
4148
4149 /* If we're compressed we have to save the entire range of csums. */
4150 if (em->compress_type) {
4151 csum_offset = 0;
4152 csum_len = max(em->block_len, em->orig_block_len);
4153 } else {
4154 csum_offset = em->mod_start - em->start;
4155 csum_len = em->mod_len;
4156 }
4157
4158 /* block start is already adjusted for the file extent offset. */
4159 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4160 em->block_start + csum_offset,
4161 em->block_start + csum_offset +
4162 csum_len - 1, &ordered_sums, 0);
4163 if (ret)
4164 return ret;
4165
4166 while (!list_empty(&ordered_sums)) {
4167 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4168 struct btrfs_ordered_sum,
4169 list);
4170 if (!ret)
4171 ret = log_csums(trans, log_root, sums);
4172 list_del(&sums->list);
4173 kfree(sums);
4174 }
4175
4176 return ret;
4177 }
4178
4179 static int log_one_extent(struct btrfs_trans_handle *trans,
4180 struct btrfs_inode *inode, struct btrfs_root *root,
4181 const struct extent_map *em,
4182 struct btrfs_path *path,
4183 struct btrfs_log_ctx *ctx)
4184 {
4185 struct btrfs_root *log = root->log_root;
4186 struct btrfs_file_extent_item *fi;
4187 struct extent_buffer *leaf;
4188 struct btrfs_map_token token;
4189 struct btrfs_key key;
4190 u64 extent_offset = em->start - em->orig_start;
4191 u64 block_len;
4192 int ret;
4193 int extent_inserted = 0;
4194
4195 ret = log_extent_csums(trans, inode, log, em);
4196 if (ret)
4197 return ret;
4198
4199 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4200 em->start + em->len, NULL, 0, 1,
4201 sizeof(*fi), &extent_inserted);
4202 if (ret)
4203 return ret;
4204
4205 if (!extent_inserted) {
4206 key.objectid = btrfs_ino(inode);
4207 key.type = BTRFS_EXTENT_DATA_KEY;
4208 key.offset = em->start;
4209
4210 ret = btrfs_insert_empty_item(trans, log, path, &key,
4211 sizeof(*fi));
4212 if (ret)
4213 return ret;
4214 }
4215 leaf = path->nodes[0];
4216 btrfs_init_map_token(&token, leaf);
4217 fi = btrfs_item_ptr(leaf, path->slots[0],
4218 struct btrfs_file_extent_item);
4219
4220 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4221 &token);
4222 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4223 btrfs_set_token_file_extent_type(leaf, fi,
4224 BTRFS_FILE_EXTENT_PREALLOC,
4225 &token);
4226 else
4227 btrfs_set_token_file_extent_type(leaf, fi,
4228 BTRFS_FILE_EXTENT_REG,
4229 &token);
4230
4231 block_len = max(em->block_len, em->orig_block_len);
4232 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4233 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4234 em->block_start,
4235 &token);
4236 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4237 &token);
4238 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4239 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4240 em->block_start -
4241 extent_offset, &token);
4242 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4243 &token);
4244 } else {
4245 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4246 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4247 &token);
4248 }
4249
4250 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4251 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4252 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4253 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4254 &token);
4255 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4256 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4257 btrfs_mark_buffer_dirty(leaf);
4258
4259 btrfs_release_path(path);
4260
4261 return ret;
4262 }
4263
4264 /*
4265 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4266 * lose them after doing a full/fast fsync and replaying the log. We scan the
4267 * subvolume's root instead of iterating the inode's extent map tree because
4268 * otherwise we can log incorrect extent items based on extent map conversion.
4269 * That can happen due to the fact that extent maps are merged when they
4270 * are not in the extent map tree's list of modified extents.
4271 */
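/*
 * Editor's illustrative sequence for the problem described above (the
 * file name and sizes are made up):
 *
 *     xfs_io -f -c "falloc -k 0 1M" foo      # prealloc beyond EOF
 *     xfs_io -c "pwrite 0 4K" -c "fsync" foo
 *     <power fail>
 *     <mount fs and log replay>
 *
 * Without logging the prealloc extents that lie beyond i_size, the
 * replayed inode could lose the preallocated range past EOF (or end up
 * with an implicit hole), which is what the scan below prevents.
 */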
4272 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4273 struct btrfs_inode *inode,
4274 struct btrfs_path *path)
4275 {
4276 struct btrfs_root *root = inode->root;
4277 struct btrfs_key key;
4278 const u64 i_size = i_size_read(&inode->vfs_inode);
4279 const u64 ino = btrfs_ino(inode);
4280 struct btrfs_path *dst_path = NULL;
4281 bool dropped_extents = false;
4282 u64 truncate_offset = i_size;
4283 struct extent_buffer *leaf;
4284 int slot;
4285 int ins_nr = 0;
4286 int start_slot = 0;
4287 int ret;
4288
4289 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4290 return 0;
4291
4292 key.objectid = ino;
4293 key.type = BTRFS_EXTENT_DATA_KEY;
4294 key.offset = i_size;
4295 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4296 if (ret < 0)
4297 goto out;
4298
4299 /*
4300 * We must check if there is a prealloc extent that starts before the
4301 * i_size and crosses the i_size boundary. This is to ensure later we
4302 * truncate down to the end of that extent and not to the i_size, as
4303 * otherwise we end up losing part of the prealloc extent after a log
4304 * replay and with an implicit hole if there is another prealloc extent
4305 * that starts at an offset beyond i_size.
4306 */
4307 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4308 if (ret < 0)
4309 goto out;
4310
4311 if (ret == 0) {
4312 struct btrfs_file_extent_item *ei;
4313
4314 leaf = path->nodes[0];
4315 slot = path->slots[0];
4316 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4317
4318 if (btrfs_file_extent_type(leaf, ei) ==
4319 BTRFS_FILE_EXTENT_PREALLOC) {
4320 u64 extent_end;
4321
4322 btrfs_item_key_to_cpu(leaf, &key, slot);
4323 extent_end = key.offset +
4324 btrfs_file_extent_num_bytes(leaf, ei);
4325
4326 if (extent_end > i_size)
4327 truncate_offset = extent_end;
4328 }
4329 } else {
4330 ret = 0;
4331 }
4332
4333 while (true) {
4334 leaf = path->nodes[0];
4335 slot = path->slots[0];
4336
4337 if (slot >= btrfs_header_nritems(leaf)) {
4338 if (ins_nr > 0) {
4339 ret = copy_items(trans, inode, dst_path, path,
4340 start_slot, ins_nr, 1, 0);
4341 if (ret < 0)
4342 goto out;
4343 ins_nr = 0;
4344 }
4345 ret = btrfs_next_leaf(root, path);
4346 if (ret < 0)
4347 goto out;
4348 if (ret > 0) {
4349 ret = 0;
4350 break;
4351 }
4352 continue;
4353 }
4354
4355 btrfs_item_key_to_cpu(leaf, &key, slot);
4356 if (key.objectid > ino)
4357 break;
4358 if (WARN_ON_ONCE(key.objectid < ino) ||
4359 key.type < BTRFS_EXTENT_DATA_KEY ||
4360 key.offset < i_size) {
4361 path->slots[0]++;
4362 continue;
4363 }
4364 if (!dropped_extents) {
4365 /*
4366 * Avoid logging extent items that were already logged in past
4367 * fsync calls, which would lead to duplicate keys in the log tree.
4368 */
4369 do {
4370 ret = btrfs_truncate_inode_items(trans,
4371 root->log_root,
4372 &inode->vfs_inode,
4373 truncate_offset,
4374 BTRFS_EXTENT_DATA_KEY);
4375 } while (ret == -EAGAIN);
4376 if (ret)
4377 goto out;
4378 dropped_extents = true;
4379 }
4380 if (ins_nr == 0)
4381 start_slot = slot;
4382 ins_nr++;
4383 path->slots[0]++;
4384 if (!dst_path) {
4385 dst_path = btrfs_alloc_path();
4386 if (!dst_path) {
4387 ret = -ENOMEM;
4388 goto out;
4389 }
4390 }
4391 }
4392 if (ins_nr > 0) {
4393 ret = copy_items(trans, inode, dst_path, path,
4394 start_slot, ins_nr, 1, 0);
4395 if (ret > 0)
4396 ret = 0;
4397 }
4398 out:
4399 btrfs_release_path(path);
4400 btrfs_free_path(dst_path);
4401 return ret;
4402 }
4403
4404 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4405 struct btrfs_root *root,
4406 struct btrfs_inode *inode,
4407 struct btrfs_path *path,
4408 struct btrfs_log_ctx *ctx,
4409 const u64 start,
4410 const u64 end)
4411 {
4412 struct extent_map *em, *n;
4413 struct list_head extents;
4414 struct extent_map_tree *tree = &inode->extent_tree;
4415 u64 test_gen;
4416 int ret = 0;
4417 int num = 0;
4418
4419 INIT_LIST_HEAD(&extents);
4420
4421 write_lock(&tree->lock);
4422 test_gen = root->fs_info->last_trans_committed;
4423
4424 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4425 /*
4426 * Skip extents outside our logging range. It's important to do
4427 * it for correctness because if we don't ignore them, we may
4428 * log them before their ordered extent completes, and therefore
4429 * we could log them without logging their respective checksums
4430 * (the checksum items are added to the csum tree at the very
4431 * end of btrfs_finish_ordered_io()). Also leave such extents
4432 * outside of our range in the list, since we may have another
4433 * ranged fsync in the near future that needs them. If an extent
4434 * outside our range corresponds to a hole, log it to avoid
4435 * leaving gaps between extents (fsck will complain when we are
4436 * not using the NO_HOLES feature).
4437 */
4438 if ((em->start > end || em->start + em->len <= start) &&
4439 em->block_start != EXTENT_MAP_HOLE)
4440 continue;
4441
4442 list_del_init(&em->list);
4443 /*
4444 * Just an arbitrary number: logging can get really CPU
4445 * intensive once we start handling a lot of extents, and once
4446 * we have that many we are better off committing the
4447 * transaction since it will be faster.
4448 */
4449 if (++num > 32768) {
4450 list_del_init(&tree->modified_extents);
4451 ret = -EFBIG;
4452 goto process;
4453 }
4454
4455 if (em->generation <= test_gen)
4456 continue;
4457
4458 /* We log prealloc extents beyond eof later. */
4459 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4460 em->start >= i_size_read(&inode->vfs_inode))
4461 continue;
4462
4463 /* Need a ref to keep it from getting evicted from cache */
4464 refcount_inc(&em->refs);
4465 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4466 list_add_tail(&em->list, &extents);
4467 num++;
4468 }
4469
4470 list_sort(NULL, &extents, extent_cmp);
4471 process:
4472 while (!list_empty(&extents)) {
4473 em = list_entry(extents.next, struct extent_map, list);
4474
4475 list_del_init(&em->list);
4476
4477 /*
4478 * If we had an error we just need to delete everybody from our
4479 * private list.
4480 */
4481 if (ret) {
4482 clear_em_logging(tree, em);
4483 free_extent_map(em);
4484 continue;
4485 }
4486
4487 write_unlock(&tree->lock);
4488
4489 ret = log_one_extent(trans, inode, root, em, path, ctx);
4490 write_lock(&tree->lock);
4491 clear_em_logging(tree, em);
4492 free_extent_map(em);
4493 }
4494 WARN_ON(!list_empty(&extents));
4495 write_unlock(&tree->lock);
4496
4497 btrfs_release_path(path);
4498 if (!ret)
4499 ret = btrfs_log_prealloc_extents(trans, inode, path);
4500
4501 return ret;
4502 }
4503
4504 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4505 struct btrfs_path *path, u64 *size_ret)
4506 {
4507 struct btrfs_key key;
4508 int ret;
4509
4510 key.objectid = btrfs_ino(inode);
4511 key.type = BTRFS_INODE_ITEM_KEY;
4512 key.offset = 0;
4513
4514 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4515 if (ret < 0) {
4516 return ret;
4517 } else if (ret > 0) {
4518 *size_ret = 0;
4519 } else {
4520 struct btrfs_inode_item *item;
4521
4522 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4523 struct btrfs_inode_item);
4524 *size_ret = btrfs_inode_size(path->nodes[0], item);
4525 /*
4526 * If the in-memory inode's i_size is smaller than the inode
4527 * size stored in the btree, return the inode's i_size, so
4528 * that we get a correct inode size after replaying the log
4529 * when before a power failure we had a shrinking truncate
4530 * followed by addition of a new name (rename / new hard link).
4531 * Otherwise return the inode size from the btree, to avoid
4532 * data loss when replaying a log due to previously doing a
4533 * write that expands the inode's size and logging a new name
4534 * immediately after.
4535 */
4536 if (*size_ret > inode->vfs_inode.i_size)
4537 *size_ret = inode->vfs_inode.i_size;
4538 }
4539
4540 btrfs_release_path(path);
4541 return 0;
4542 }
4543
4544 /*
4545 * At the moment we always log all xattrs. This is to figure out at log replay
4546 * time which xattrs must have their deletion replayed. If an xattr is missing
4547 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4548 * because if an xattr is deleted, the inode is fsynced and a power failure
4549 * happens, causing the log to be replayed the next time the fs is mounted,
4550 * we want the xattr to not exist anymore (same behaviour as other filesystems
4551 * with a journal, ext3/4, xfs, f2fs, etc).
4552 */
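/*
 * Hypothetical reproduction of the deletion case above (editor's example,
 * names are made up):
 *
 *     setfattr -n user.test -v hello foo
 *     sync
 *     setfattr -x user.test foo
 *     xfs_io -c "fsync" foo
 *     <power fail>
 *     <mount fs and log replay>
 *
 * Because every remaining xattr of foo is copied to the log, replay sees
 * that user.test exists in the subvolume but not in the log and removes
 * it, matching the behaviour of journalling filesystems.
 */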
4553 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4554 struct btrfs_root *root,
4555 struct btrfs_inode *inode,
4556 struct btrfs_path *path,
4557 struct btrfs_path *dst_path)
4558 {
4559 int ret;
4560 struct btrfs_key key;
4561 const u64 ino = btrfs_ino(inode);
4562 int ins_nr = 0;
4563 int start_slot = 0;
4564
4565 key.objectid = ino;
4566 key.type = BTRFS_XATTR_ITEM_KEY;
4567 key.offset = 0;
4568
4569 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4570 if (ret < 0)
4571 return ret;
4572
4573 while (true) {
4574 int slot = path->slots[0];
4575 struct extent_buffer *leaf = path->nodes[0];
4576 int nritems = btrfs_header_nritems(leaf);
4577
4578 if (slot >= nritems) {
4579 if (ins_nr > 0) {
4580 ret = copy_items(trans, inode, dst_path, path,
4581 start_slot, ins_nr, 1, 0);
4582 if (ret < 0)
4583 return ret;
4584 ins_nr = 0;
4585 }
4586 ret = btrfs_next_leaf(root, path);
4587 if (ret < 0)
4588 return ret;
4589 else if (ret > 0)
4590 break;
4591 continue;
4592 }
4593
4594 btrfs_item_key_to_cpu(leaf, &key, slot);
4595 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4596 break;
4597
4598 if (ins_nr == 0)
4599 start_slot = slot;
4600 ins_nr++;
4601 path->slots[0]++;
4602 cond_resched();
4603 }
4604 if (ins_nr > 0) {
4605 ret = copy_items(trans, inode, dst_path, path,
4606 start_slot, ins_nr, 1, 0);
4607 if (ret < 0)
4608 return ret;
4609 }
4610
4611 return 0;
4612 }
4613
4614 /*
4615 * When using the NO_HOLES feature if we punched a hole that causes the
4616 * deletion of entire leafs or all the extent items of the first leaf (the one
4617 * that contains the inode item and references), we may end up not processing
4618 * any extents, because there are no leafs with a generation matching the
4619 * current transaction that have extent items for our inode. So we need to find
4620 * if any holes exist and then log them. We also need to log holes after any
4621 * truncate operation that changes the inode's size.
4622 */
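/*
 * Editor's illustrative case (assumes a filesystem with the NO_HOLES
 * feature; file name and sizes are made up):
 *
 *     xfs_io -f -c "pwrite 0 256K" foo
 *     sync
 *     xfs_io -c "fpunch 0 256K" -c "fsync" foo
 *     <power fail>
 *     <mount fs and log replay>
 *
 * The punch may delete every extent item of foo, so the scan for items
 * changed in this transaction finds no extents to copy; explicitly
 * logging the holes keeps replay from leaving the old extents behind
 * where the hole should be.
 */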
4623 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
4624 struct btrfs_root *root,
4625 struct btrfs_inode *inode,
4626 struct btrfs_path *path)
4627 {
4628 struct btrfs_fs_info *fs_info = root->fs_info;
4629 struct btrfs_key key;
4630 const u64 ino = btrfs_ino(inode);
4631 const u64 i_size = i_size_read(&inode->vfs_inode);
4632 u64 prev_extent_end = 0;
4633 int ret;
4634
4635 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
4636 return 0;
4637
4638 key.objectid = ino;
4639 key.type = BTRFS_EXTENT_DATA_KEY;
4640 key.offset = 0;
4641
4642 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4643 if (ret < 0)
4644 return ret;
4645
4646 while (true) {
4647 struct btrfs_file_extent_item *extent;
4648 struct extent_buffer *leaf = path->nodes[0];
4649 u64 len;
4650
4651 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4652 ret = btrfs_next_leaf(root, path);
4653 if (ret < 0)
4654 return ret;
4655 if (ret > 0) {
4656 ret = 0;
4657 break;
4658 }
4659 leaf = path->nodes[0];
4660 }
4661
4662 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4663 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
4664 break;
4665
4666 /* We have a hole, log it. */
4667 if (prev_extent_end < key.offset) {
4668 const u64 hole_len = key.offset - prev_extent_end;
4669
4670 /*
4671 * Release the path to avoid deadlocks with other code
4672 * paths that search the root while holding locks on
4673 * leafs from the log root.
4674 */
4675 btrfs_release_path(path);
4676 ret = btrfs_insert_file_extent(trans, root->log_root,
4677 ino, prev_extent_end, 0,
4678 0, hole_len, 0, hole_len,
4679 0, 0, 0);
4680 if (ret < 0)
4681 return ret;
4682
4683 /*
4684 * Search for the same key again in the root. Since it's
4685 * an extent item and we are holding the inode lock, the
4686 * key must still exist. If it doesn't, just emit a warning
4687 * and return an error to fall back to a transaction
4688 * commit.
4689 */
4690 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4691 if (ret < 0)
4692 return ret;
4693 if (WARN_ON(ret > 0))
4694 return -ENOENT;
4695 leaf = path->nodes[0];
4696 }
4697
4698 extent = btrfs_item_ptr(leaf, path->slots[0],
4699 struct btrfs_file_extent_item);
4700 if (btrfs_file_extent_type(leaf, extent) ==
4701 BTRFS_FILE_EXTENT_INLINE) {
4702 len = btrfs_file_extent_ram_bytes(leaf, extent);
4703 prev_extent_end = ALIGN(key.offset + len,
4704 fs_info->sectorsize);
4705 } else {
4706 len = btrfs_file_extent_num_bytes(leaf, extent);
4707 prev_extent_end = key.offset + len;
4708 }
4709
4710 path->slots[0]++;
4711 cond_resched();
4712 }
4713
4714 if (prev_extent_end < i_size) {
4715 u64 hole_len;
4716
4717 btrfs_release_path(path);
4718 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
4719 ret = btrfs_insert_file_extent(trans, root->log_root,
4720 ino, prev_extent_end, 0, 0,
4721 hole_len, 0, hole_len,
4722 0, 0, 0);
4723 if (ret < 0)
4724 return ret;
4725 }
4726
4727 return 0;
4728 }
4729
4730 /*
4731 * When we are logging a new inode X, check if it has a reference that
4732 * matches a reference of some other inode Y created in a past transaction
4733 * and that was renamed in the current transaction. If we don't do this, then at
4734 * log replay time we can lose inode Y (and all its files if it's a directory):
4735 *
4736 * mkdir /mnt/x
4737 * echo "hello world" > /mnt/x/foobar
4738 * sync
4739 * mv /mnt/x /mnt/y
4740 * mkdir /mnt/x # or touch /mnt/x
4741 * xfs_io -c fsync /mnt/x
4742 * <power fail>
4743 * mount fs, trigger log replay
4744 *
4745 * After the log replay procedure, we would lose the first directory and all its
4746 * files (file foobar).
4747 * For the case where inode Y is not a directory we simply end up losing it:
4748 *
4749 * echo "123" > /mnt/foo
4750 * sync
4751 * mv /mnt/foo /mnt/bar
4752 * echo "abc" > /mnt/foo
4753 * xfs_io -c fsync /mnt/foo
4754 * <power fail>
4755 *
4756 * We also need this for cases where a snapshot entry is replaced by some other
4757 * entry (file or directory) otherwise we end up with an unreplayable log due to
4758 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4759 * if it were a regular entry:
4760 *
4761 * mkdir /mnt/x
4762 * btrfs subvolume snapshot /mnt /mnt/x/snap
4763 * btrfs subvolume delete /mnt/x/snap
4764 * rmdir /mnt/x
4765 * mkdir /mnt/x
4766 * fsync /mnt/x or fsync some new file inside it
4767 * <power fail>
4768 *
4769 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4770 * the same transaction.
4771 */
4772 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4773 const int slot,
4774 const struct btrfs_key *key,
4775 struct btrfs_inode *inode,
4776 u64 *other_ino, u64 *other_parent)
4777 {
4778 int ret;
4779 struct btrfs_path *search_path;
4780 char *name = NULL;
4781 u32 name_len = 0;
4782 u32 item_size = btrfs_item_size_nr(eb, slot);
4783 u32 cur_offset = 0;
4784 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4785
4786 search_path = btrfs_alloc_path();
4787 if (!search_path)
4788 return -ENOMEM;
4789 search_path->search_commit_root = 1;
4790 search_path->skip_locking = 1;
4791
4792 while (cur_offset < item_size) {
4793 u64 parent;
4794 u32 this_name_len;
4795 u32 this_len;
4796 unsigned long name_ptr;
4797 struct btrfs_dir_item *di;
4798
4799 if (key->type == BTRFS_INODE_REF_KEY) {
4800 struct btrfs_inode_ref *iref;
4801
4802 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4803 parent = key->offset;
4804 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4805 name_ptr = (unsigned long)(iref + 1);
4806 this_len = sizeof(*iref) + this_name_len;
4807 } else {
4808 struct btrfs_inode_extref *extref;
4809
4810 extref = (struct btrfs_inode_extref *)(ptr +
4811 cur_offset);
4812 parent = btrfs_inode_extref_parent(eb, extref);
4813 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4814 name_ptr = (unsigned long)&extref->name;
4815 this_len = sizeof(*extref) + this_name_len;
4816 }
4817
4818 if (this_name_len > name_len) {
4819 char *new_name;
4820
4821 new_name = krealloc(name, this_name_len, GFP_NOFS);
4822 if (!new_name) {
4823 ret = -ENOMEM;
4824 goto out;
4825 }
4826 name_len = this_name_len;
4827 name = new_name;
4828 }
4829
4830 read_extent_buffer(eb, name, name_ptr, this_name_len);
4831 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4832 parent, name, this_name_len, 0);
4833 if (di && !IS_ERR(di)) {
4834 struct btrfs_key di_key;
4835
4836 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4837 di, &di_key);
4838 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4839 if (di_key.objectid != key->objectid) {
4840 ret = 1;
4841 *other_ino = di_key.objectid;
4842 *other_parent = parent;
4843 } else {
4844 ret = 0;
4845 }
4846 } else {
4847 ret = -EAGAIN;
4848 }
4849 goto out;
4850 } else if (IS_ERR(di)) {
4851 ret = PTR_ERR(di);
4852 goto out;
4853 }
4854 btrfs_release_path(search_path);
4855
4856 cur_offset += this_len;
4857 }
4858 ret = 0;
4859 out:
4860 btrfs_free_path(search_path);
4861 kfree(name);
4862 return ret;
4863 }
4864
4865 struct btrfs_ino_list {
4866 u64 ino;
4867 u64 parent;
4868 struct list_head list;
4869 };
4870
4871 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4872 struct btrfs_root *root,
4873 struct btrfs_path *path,
4874 struct btrfs_log_ctx *ctx,
4875 u64 ino, u64 parent)
4876 {
4877 struct btrfs_ino_list *ino_elem;
4878 LIST_HEAD(inode_list);
4879 int ret = 0;
4880
4881 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4882 if (!ino_elem)
4883 return -ENOMEM;
4884 ino_elem->ino = ino;
4885 ino_elem->parent = parent;
4886 list_add_tail(&ino_elem->list, &inode_list);
4887
4888 while (!list_empty(&inode_list)) {
4889 struct btrfs_fs_info *fs_info = root->fs_info;
4890 struct btrfs_key key;
4891 struct inode *inode;
4892
4893 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
4894 list);
4895 ino = ino_elem->ino;
4896 parent = ino_elem->parent;
4897 list_del(&ino_elem->list);
4898 kfree(ino_elem);
4899 if (ret)
4900 continue;
4901
4902 btrfs_release_path(path);
4903
4904 key.objectid = ino;
4905 key.type = BTRFS_INODE_ITEM_KEY;
4906 key.offset = 0;
4907 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4908 /*
4909 * If the other inode that had a conflicting dir entry was
4910 * deleted in the current transaction, we need to log its parent
4911 * directory.
4912 */
4913 if (IS_ERR(inode)) {
4914 ret = PTR_ERR(inode);
4915 if (ret == -ENOENT) {
4916 key.objectid = parent;
4917 inode = btrfs_iget(fs_info->sb, &key, root,
4918 NULL);
4919 if (IS_ERR(inode)) {
4920 ret = PTR_ERR(inode);
4921 } else {
4922 ret = btrfs_log_inode(trans, root,
4923 BTRFS_I(inode),
4924 LOG_OTHER_INODE_ALL,
4925 0, LLONG_MAX, ctx);
4926 btrfs_add_delayed_iput(inode);
4927 }
4928 }
4929 continue;
4930 }
4931 /*
4932 * If the inode was already logged skip it - otherwise we can
4933 * hit an infinite loop. Example:
4934 *
4935 * From the commit root (previous transaction) we have the
4936 * following inodes:
4937 *
4938 * inode 257 a directory
4939 * inode 258 with references "zz" and "zz_link" on inode 257
4940 * inode 259 with reference "a" on inode 257
4941 *
4942 * And in the current (uncommitted) transaction we have:
4943 *
4944 * inode 257 a directory, unchanged
4945 * inode 258 with references "a" and "a2" on inode 257
4946 * inode 259 with reference "zz_link" on inode 257
4947 * inode 261 with reference "zz" on inode 257
4948 *
4949 * When logging inode 261 the following infinite loop could
4950 * happen if we don't skip already logged inodes:
4951 *
4952 * - we detect inode 258 as a conflicting inode, with inode 261
4953 * on reference "zz", and log it;
4954 *
4955 * - we detect inode 259 as a conflicting inode, with inode 258
4956 * on reference "a", and log it;
4957 *
4958 * - we detect inode 258 as a conflicting inode, with inode 259
4959 * on reference "zz_link", and log it - again! After this we
4960 * repeat the above steps forever.
4961 */
4962 spin_lock(&BTRFS_I(inode)->lock);
4963 /*
4964 * Check the inode's logged_trans only instead of
4965 * btrfs_inode_in_log(). This is because the last_log_commit of
4966 * the inode is not updated when we only log that it exists and
4967 * it has the full sync bit set (see btrfs_log_inode()).
4968 */
4969 if (BTRFS_I(inode)->logged_trans == trans->transid) {
4970 spin_unlock(&BTRFS_I(inode)->lock);
4971 btrfs_add_delayed_iput(inode);
4972 continue;
4973 }
4974 spin_unlock(&BTRFS_I(inode)->lock);
4975 /*
4976 * We are safe logging the other inode without acquiring its
4977 * lock as long as we log with the LOG_INODE_EXISTS mode. We
4978 * are safe against concurrent renames of the other inode as
4979 * well because during a rename we pin the log and update the
4980 * log with the new name before we unpin it.
4981 */
4982 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
4983 LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
4984 if (ret) {
4985 btrfs_add_delayed_iput(inode);
4986 continue;
4987 }
4988
4989 key.objectid = ino;
4990 key.type = BTRFS_INODE_REF_KEY;
4991 key.offset = 0;
4992 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4993 if (ret < 0) {
4994 btrfs_add_delayed_iput(inode);
4995 continue;
4996 }
4997
4998 while (true) {
4999 struct extent_buffer *leaf = path->nodes[0];
5000 int slot = path->slots[0];
5001 u64 other_ino = 0;
5002 u64 other_parent = 0;
5003
5004 if (slot >= btrfs_header_nritems(leaf)) {
5005 ret = btrfs_next_leaf(root, path);
5006 if (ret < 0) {
5007 break;
5008 } else if (ret > 0) {
5009 ret = 0;
5010 break;
5011 }
5012 continue;
5013 }
5014
5015 btrfs_item_key_to_cpu(leaf, &key, slot);
5016 if (key.objectid != ino ||
5017 (key.type != BTRFS_INODE_REF_KEY &&
5018 key.type != BTRFS_INODE_EXTREF_KEY)) {
5019 ret = 0;
5020 break;
5021 }
5022
5023 ret = btrfs_check_ref_name_override(leaf, slot, &key,
5024 BTRFS_I(inode), &other_ino,
5025 &other_parent);
5026 if (ret < 0)
5027 break;
5028 if (ret > 0) {
5029 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5030 if (!ino_elem) {
5031 ret = -ENOMEM;
5032 break;
5033 }
5034 ino_elem->ino = other_ino;
5035 ino_elem->parent = other_parent;
5036 list_add_tail(&ino_elem->list, &inode_list);
5037 ret = 0;
5038 }
5039 path->slots[0]++;
5040 }
5041 btrfs_add_delayed_iput(inode);
5042 }
5043
5044 return ret;
5045 }
5046
5047 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
5048 struct btrfs_inode *inode,
5049 struct btrfs_key *min_key,
5050 const struct btrfs_key *max_key,
5051 struct btrfs_path *path,
5052 struct btrfs_path *dst_path,
5053 const u64 logged_isize,
5054 const bool recursive_logging,
5055 const int inode_only,
5056 struct btrfs_log_ctx *ctx,
5057 bool *need_log_inode_item)
5058 {
5059 const u64 i_size = i_size_read(&inode->vfs_inode);
5060 struct btrfs_root *root = inode->root;
5061 int ins_start_slot = 0;
5062 int ins_nr = 0;
5063 int ret;
5064
5065 while (1) {
5066 ret = btrfs_search_forward(root, min_key, path, trans->transid);
5067 if (ret < 0)
5068 return ret;
5069 if (ret > 0) {
5070 ret = 0;
5071 break;
5072 }
5073 again:
5074 /* Note, ins_nr might be > 0 here, cleanup outside the loop */
5075 if (min_key->objectid != max_key->objectid)
5076 break;
5077 if (min_key->type > max_key->type)
5078 break;
5079
5080 if (min_key->type == BTRFS_INODE_ITEM_KEY) {
5081 *need_log_inode_item = false;
5082 } else if (min_key->type == BTRFS_EXTENT_DATA_KEY &&
5083 min_key->offset >= i_size) {
5084 /*
5085 * Extents at and beyond eof are logged with
5086 * btrfs_log_prealloc_extents().
5087 * Only regular files have BTRFS_EXTENT_DATA_KEY keys,
5088 * and no keys greater than that, so bail out.
5089 */
5090 break;
5091 } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
5092 min_key->type == BTRFS_INODE_EXTREF_KEY) &&
5093 inode->generation == trans->transid &&
5094 !recursive_logging) {
5095 u64 other_ino = 0;
5096 u64 other_parent = 0;
5097
5098 ret = btrfs_check_ref_name_override(path->nodes[0],
5099 path->slots[0], min_key, inode,
5100 &other_ino, &other_parent);
5101 if (ret < 0) {
5102 return ret;
5103 } else if (ret > 0 && ctx &&
5104 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5105 if (ins_nr > 0) {
5106 ins_nr++;
5107 } else {
5108 ins_nr = 1;
5109 ins_start_slot = path->slots[0];
5110 }
5111 ret = copy_items(trans, inode, dst_path, path,
5112 ins_start_slot, ins_nr,
5113 inode_only, logged_isize);
5114 if (ret < 0)
5115 return ret;
5116 ins_nr = 0;
5117
5118 ret = log_conflicting_inodes(trans, root, path,
5119 ctx, other_ino, other_parent);
5120 if (ret)
5121 return ret;
5122 btrfs_release_path(path);
5123 goto next_key;
5124 }
5125 } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5126 /* Skip xattrs, logged later with btrfs_log_all_xattrs() */
5127 if (ins_nr == 0)
5128 goto next_slot;
5129 ret = copy_items(trans, inode, dst_path, path,
5130 ins_start_slot,
5131 ins_nr, inode_only, logged_isize);
5132 if (ret < 0)
5133 return ret;
5134 ins_nr = 0;
5135 goto next_slot;
5136 }
5137
5138 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5139 ins_nr++;
5140 goto next_slot;
5141 } else if (!ins_nr) {
5142 ins_start_slot = path->slots[0];
5143 ins_nr = 1;
5144 goto next_slot;
5145 }
5146
5147 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5148 ins_nr, inode_only, logged_isize);
5149 if (ret < 0)
5150 return ret;
5151 ins_nr = 1;
5152 ins_start_slot = path->slots[0];
5153 next_slot:
5154 path->slots[0]++;
5155 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5156 btrfs_item_key_to_cpu(path->nodes[0], min_key,
5157 path->slots[0]);
5158 goto again;
5159 }
5160 if (ins_nr) {
5161 ret = copy_items(trans, inode, dst_path, path,
5162 ins_start_slot, ins_nr, inode_only,
5163 logged_isize);
5164 if (ret < 0)
5165 return ret;
5166 ins_nr = 0;
5167 }
5168 btrfs_release_path(path);
5169 next_key:
5170 if (min_key->offset < (u64)-1) {
5171 min_key->offset++;
5172 } else if (min_key->type < max_key->type) {
5173 min_key->type++;
5174 min_key->offset = 0;
5175 } else {
5176 break;
5177 }
5178 }
5179 if (ins_nr) {
5180 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5181 ins_nr, inode_only, logged_isize);
5182 if (ret)
5183 return ret;
5184 }
5185
5186 if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) {
5187 /*
5188 * Release the path because otherwise we might attempt to double
5189 * lock the same leaf with btrfs_log_prealloc_extents() below.
5190 */
5191 btrfs_release_path(path);
5192 ret = btrfs_log_prealloc_extents(trans, inode, dst_path);
5193 }
5194
5195 return ret;
5196 }
5197
5198 /* log a single inode in the tree log.
5199 * At least one parent directory for this inode must exist in the tree
5200 * or be logged already.
5201 *
5202 * Any items from this inode changed by the current transaction are copied
5203 * to the log tree. An extra reference is taken on any extents in this
5204 * file, allowing us to avoid a whole pile of corner cases around logging
5205 * blocks that have been removed from the tree.
5206 *
5207 * See LOG_INODE_ALL and related defines for a description of what inode_only
5208 * does.
5209 *
5210 * This handles both files and directories.
5211 */
5212 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5213 struct btrfs_root *root, struct btrfs_inode *inode,
5214 int inode_only,
5215 const loff_t start,
5216 const loff_t end,
5217 struct btrfs_log_ctx *ctx)
5218 {
5219 struct btrfs_path *path;
5220 struct btrfs_path *dst_path;
5221 struct btrfs_key min_key;
5222 struct btrfs_key max_key;
5223 struct btrfs_root *log = root->log_root;
5224 int err = 0;
5225 int ret = 0;
5226 bool fast_search = false;
5227 u64 ino = btrfs_ino(inode);
5228 struct extent_map_tree *em_tree = &inode->extent_tree;
5229 u64 logged_isize = 0;
5230 bool need_log_inode_item = true;
5231 bool xattrs_logged = false;
5232 bool recursive_logging = false;
5233
5234 path = btrfs_alloc_path();
5235 if (!path)
5236 return -ENOMEM;
5237 dst_path = btrfs_alloc_path();
5238 if (!dst_path) {
5239 btrfs_free_path(path);
5240 return -ENOMEM;
5241 }
5242
5243 min_key.objectid = ino;
5244 min_key.type = BTRFS_INODE_ITEM_KEY;
5245 min_key.offset = 0;
5246
5247 max_key.objectid = ino;
5248
5249
5250 /* today the code can only do partial logging of directories */
5251 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5252 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5253 &inode->runtime_flags) &&
5254 inode_only >= LOG_INODE_EXISTS))
5255 max_key.type = BTRFS_XATTR_ITEM_KEY;
5256 else
5257 max_key.type = (u8)-1;
5258 max_key.offset = (u64)-1;
5259
5260 /*
5261 * Only run delayed items if we are a directory. We want to make sure
5262 * all directory indexes hit the fs/subvolume tree so we can find them
5263 * and figure out which index ranges have to be logged.
5264 *
5265 * Otherwise commit the delayed inode only if the full sync flag is set,
5266 * as we want to make sure an up to date version is in the subvolume
5267 * tree so copy_inode_items_to_log() / copy_items() can find it and copy
5268 * it to the log tree. For a non full sync, we always log the inode item
5269 * based on the in-memory struct btrfs_inode which is always up to date.
5270 */
5271 if (S_ISDIR(inode->vfs_inode.i_mode))
5272 ret = btrfs_commit_inode_delayed_items(trans, inode);
5273 else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5274 ret = btrfs_commit_inode_delayed_inode(inode);
5275
5276 if (ret) {
5277 btrfs_free_path(path);
5278 btrfs_free_path(dst_path);
5279 return ret;
5280 }
5281
5282 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5283 recursive_logging = true;
5284 if (inode_only == LOG_OTHER_INODE)
5285 inode_only = LOG_INODE_EXISTS;
5286 else
5287 inode_only = LOG_INODE_ALL;
5288 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5289 } else {
5290 mutex_lock(&inode->log_mutex);
5291 }
5292
5293 /*
5294 * For symlinks, we must always log their content, which is stored in an
5295 * inline extent, otherwise we could end up with an empty symlink after
5296 * log replay, which is invalid on linux (symlink(2) returns -ENOENT if
5297 * one attempts to create an empty symlink).
5298 * We don't need to worry about flushing delalloc, because we create
5299 * the inline extent at the time the symlink is created (we never have
5300 * delalloc for symlinks).
5301 */
5302 if (S_ISLNK(inode->vfs_inode.i_mode))
5303 inode_only = LOG_INODE_ALL;
5304
5305 /*
5306 * a brute force approach to making sure we get the most uptodate
5307 * copies of everything.
5308 */
5309 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5310 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5311
5312 if (inode_only == LOG_INODE_EXISTS)
5313 max_key_type = BTRFS_XATTR_ITEM_KEY;
5314 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5315 } else {
5316 if (inode_only == LOG_INODE_EXISTS) {
5317 /*
5318 * Make sure the new inode item we write to the log has
5319 * the same isize as the current one (if it exists).
5320 * This is necessary to prevent data loss after log
5321 * replay, and also to prevent doing a wrong expanding
5322 * truncate - e.g. create a file, write 4K into offset
5323 * 0, fsync, write 4K into offset 4096, add hard link,
5324 * fsync some other file (to sync log), power fail - if
5325 * we use the inode's current i_size, after log replay
5326 * we get an 8Kb file, with the last 4Kb extent as a hole
5327 * (zeroes), as if an expanding truncate happened,
5328 * instead of getting a file of 4Kb only.
5329 */
5330 err = logged_inode_size(log, inode, path, &logged_isize);
5331 if (err)
5332 goto out_unlock;
5333 }
5334 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5335 &inode->runtime_flags)) {
5336 if (inode_only == LOG_INODE_EXISTS) {
5337 max_key.type = BTRFS_XATTR_ITEM_KEY;
5338 ret = drop_objectid_items(trans, log, path, ino,
5339 max_key.type);
5340 } else {
5341 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5342 &inode->runtime_flags);
5343 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5344 &inode->runtime_flags);
5345 while (1) {
5346 ret = btrfs_truncate_inode_items(trans,
5347 log, &inode->vfs_inode, 0, 0);
5348 if (ret != -EAGAIN)
5349 break;
5350 }
5351 }
5352 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5353 &inode->runtime_flags) ||
5354 inode_only == LOG_INODE_EXISTS) {
5355 if (inode_only == LOG_INODE_ALL)
5356 fast_search = true;
5357 max_key.type = BTRFS_XATTR_ITEM_KEY;
5358 ret = drop_objectid_items(trans, log, path, ino,
5359 max_key.type);
5360 } else {
5361 if (inode_only == LOG_INODE_ALL)
5362 fast_search = true;
5363 goto log_extents;
5364 }
5365
5366 }
5367 if (ret) {
5368 err = ret;
5369 goto out_unlock;
5370 }
5371
5372 err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5373 path, dst_path, logged_isize,
5374 recursive_logging, inode_only, ctx,
5375 &need_log_inode_item);
5376 if (err)
5377 goto out_unlock;
5378
5379 btrfs_release_path(path);
5380 btrfs_release_path(dst_path);
5381 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5382 if (err)
5383 goto out_unlock;
5384 xattrs_logged = true;
5385 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5386 btrfs_release_path(path);
5387 btrfs_release_path(dst_path);
5388 err = btrfs_log_holes(trans, root, inode, path);
5389 if (err)
5390 goto out_unlock;
5391 }
5392 log_extents:
5393 btrfs_release_path(path);
5394 btrfs_release_path(dst_path);
5395 if (need_log_inode_item) {
5396 err = log_inode_item(trans, log, dst_path, inode);
5397 if (!err && !xattrs_logged) {
5398 err = btrfs_log_all_xattrs(trans, root, inode, path,
5399 dst_path);
5400 btrfs_release_path(path);
5401 }
5402 if (err)
5403 goto out_unlock;
5404 }
5405 if (fast_search) {
5406 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5407 ctx, start, end);
5408 if (ret) {
5409 err = ret;
5410 goto out_unlock;
5411 }
5412 } else if (inode_only == LOG_INODE_ALL) {
5413 struct extent_map *em, *n;
5414
5415 write_lock(&em_tree->lock);
5416 /*
5417 * We can't just remove every em if we're called for a ranged
5418 * fsync - that is, one that doesn't cover the whole possible
5419 * file range (0 to LLONG_MAX). This is because we can have
5420 * em's that fall outside the range we're logging and therefore
5421 * their ordered operations haven't completed yet
5422 * (btrfs_finish_ordered_io() not invoked yet). This means we
5423 * didn't get their respective file extent item in the fs/subvol
5424 * tree yet, and need to let the next fast fsync (one which
5425 * consults the list of modified extent maps) find the em so
5426 * that it logs a matching file extent item and waits for the
5427 * respective ordered operation to complete (if it's still
5428 * running).
5429 *
5430 * Removing every em outside the range we're logging would make
5431 * the next fast fsync not log their matching file extent items,
5432 * therefore making us lose data after a log replay.
5433 */
5434 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5435 list) {
5436 const u64 mod_end = em->mod_start + em->mod_len - 1;
5437
5438 if (em->mod_start >= start && mod_end <= end)
5439 list_del_init(&em->list);
5440 }
5441 write_unlock(&em_tree->lock);
5442 }
5443
5444 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5445 ret = log_directory_changes(trans, root, inode, path, dst_path,
5446 ctx);
5447 if (ret) {
5448 err = ret;
5449 goto out_unlock;
5450 }
5451 }
5452
5453 /*
5454 * If we are logging that an ancestor inode exists as part of logging a
5455 * new name from a link or rename operation, don't mark the inode as
5456 * logged - otherwise if an explicit fsync is made against an ancestor,
5457 * the fsync considers the inode in the log and doesn't sync the log,
5458 * resulting in the ancestor missing after a power failure unless the
5459 * log was synced as part of an fsync against any other unrelated inode.
5460 * So keep it simple for this case and just don't flag the ancestors as
5461 * logged.
5462 */
5463 if (!ctx ||
5464 !(S_ISDIR(inode->vfs_inode.i_mode) && ctx->logging_new_name &&
5465 &inode->vfs_inode != ctx->inode)) {
5466 spin_lock(&inode->lock);
5467 inode->logged_trans = trans->transid;
5468 /*
5469 * Don't update last_log_commit if we logged that an inode exists
5470 * after it was loaded to memory (full_sync bit set).
5471 * This is to prevent data loss when we do a write to the inode,
5472 * then the inode gets evicted after all delalloc was flushed,
5473 * then we log it exists (due to a rename for example) and then
5474 * fsync it. This last fsync would do nothing (not logging the
5475 * extents previously written).
5476 */
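/*
 * Illustrative sequence for the scenario above (paths are hypothetical):
 *
 *   write to /mnt/foo
 *   <delalloc flushed, inode evicted and later reloaded, which sets the
 *    full_sync bit>
 *   mv /mnt/foo /mnt/bar         # logs the inode with LOG_INODE_EXISTS
 *   xfs_io -c fsync /mnt/bar     # must not be a no-op, the extents
 *                                # written earlier are not in the log
 */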
5477 if (inode_only != LOG_INODE_EXISTS ||
5478 !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5479 inode->last_log_commit = inode->last_sub_trans;
5480 spin_unlock(&inode->lock);
5481 }
5482 out_unlock:
5483 mutex_unlock(&inode->log_mutex);
5484
5485 btrfs_free_path(path);
5486 btrfs_free_path(dst_path);
5487 return err;
5488 }
5489
5490 /*
5491 * Check if we must fallback to a transaction commit when logging an inode.
5492 * This must be called after logging the inode and is used only in the context
5493 * where fsyncing an inode requires logging some other inode - in which
5494 * case we can't lock the i_mutex of each other inode we need to log as that
5495 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5496 * log inodes up or down in the hierarchy) or rename operations for example. So
5497 * we take the log_mutex of the inode after we have logged it and then check for
5498 * its last_unlink_trans value - this is safe because any task setting
5499 * last_unlink_trans must take the log_mutex and it must do this before it does
5500 * the actual unlink operation, so if we do this check before a concurrent task
5501 * sets last_unlink_trans it means we've logged a consistent version/state of
5502 * all the inode items, otherwise we are not sure and must do a transaction
5503 * commit (the concurrent task might have only updated last_unlink_trans before
5504 * we logged the inode or it might have also done the unlink).
5505 */
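/*
 * A rough timeline of the ordering this relies on (illustrative only):
 *
 *   fsync task                          unlink/rename task
 *   ----------                          ------------------
 *   logs inode X
 *                                       lock X->log_mutex
 *                                       X->last_unlink_trans = transid
 *                                       unlock X->log_mutex
 *                                       do the actual unlink
 *   lock X->log_mutex
 *   last_unlink_trans > last committed
 *   transaction -> return true and
 *   force a full transaction commit
 *   unlock X->log_mutex
 *
 * If instead the fsync task takes X->log_mutex before the other task
 * sets last_unlink_trans, the logged state of X is consistent and no
 * full commit is needed.
 */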
5506 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5507 struct btrfs_inode *inode)
5508 {
5509 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5510 bool ret = false;
5511
5512 mutex_lock(&inode->log_mutex);
5513 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5514 /*
5515 * Make sure any commits to the log are forced to be full
5516 * commits.
5517 */
5518 btrfs_set_log_full_commit(trans);
5519 ret = true;
5520 }
5521 mutex_unlock(&inode->log_mutex);
5522
5523 return ret;
5524 }
5525
5526 /*
5527 * follow the dentry parent pointers up the chain and see if any
5528 * of the directories in it require a full commit before they can
5529 * be logged. Returns zero if nothing special needs to be done or 1 if
5530 * a full commit is required.
5531 */
5532 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5533 struct btrfs_inode *inode,
5534 struct dentry *parent,
5535 struct super_block *sb,
5536 u64 last_committed)
5537 {
5538 int ret = 0;
5539 struct dentry *old_parent = NULL;
5540
5541 /*
5542 * for a regular file, if its inode is already on disk, we don't
5543 * have to worry about the parents at all. This is because
5544 * we can use the last_unlink_trans field to record renames
5545 * and other fun in this file.
5546 */
5547 if (S_ISREG(inode->vfs_inode.i_mode) &&
5548 inode->generation <= last_committed &&
5549 inode->last_unlink_trans <= last_committed)
5550 goto out;
5551
5552 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5553 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5554 goto out;
5555 inode = BTRFS_I(d_inode(parent));
5556 }
5557
5558 while (1) {
5559 if (btrfs_must_commit_transaction(trans, inode)) {
5560 ret = 1;
5561 break;
5562 }
5563
5564 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5565 break;
5566
5567 if (IS_ROOT(parent)) {
5568 inode = BTRFS_I(d_inode(parent));
5569 if (btrfs_must_commit_transaction(trans, inode))
5570 ret = 1;
5571 break;
5572 }
5573
5574 parent = dget_parent(parent);
5575 dput(old_parent);
5576 old_parent = parent;
5577 inode = BTRFS_I(d_inode(parent));
5578
5579 }
5580 dput(old_parent);
5581 out:
5582 return ret;
5583 }
5584
5585 struct btrfs_dir_list {
5586 u64 ino;
5587 struct list_head list;
5588 };
5589
5590 /*
5591 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5592 * details about why it is needed.
5593 * This is a recursive operation - if an existing dentry corresponds to a
5594 * directory, that directory's new entries are logged too (same behaviour as
5595 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5596 * the dentries point to, we do not lock their i_mutex, otherwise lockdep
5597 * complains about the following circular lock dependency / possible deadlock:
5598 *
5599 * CPU0 CPU1
5600 * ---- ----
5601 * lock(&type->i_mutex_dir_key#3/2);
5602 * lock(sb_internal#2);
5603 * lock(&type->i_mutex_dir_key#3/2);
5604 * lock(&sb->s_type->i_mutex_key#14);
5605 *
5606 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5607 * sb_start_intwrite() in btrfs_start_transaction().
5608 * Not locking i_mutex of the inodes is still safe because:
5609 *
5610 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5611 * that while logging the inode new references (names) are added or removed
5612 * from the inode, leaving the logged inode item with a link count that does
5613 * not match the number of logged inode reference items. This is fine because
5614 * at log replay time we compute the real number of links and correct the
5615 * link count in the inode item (see replay_one_buffer() and
5616 * link_to_fixup_dir());
5617 *
5618 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5619 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5620 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5621 * has a size that doesn't match the sum of the lengths of all the logged
5622 * names. This does not result in a problem because if a dir_item key is
5623 * logged but its matching dir_index key is not logged, at log replay time we
5624 * don't use it to replay the respective name (see replay_one_name()). On the
5625 * other hand if only the dir_index key ends up being logged, the respective
5626 * name is added to the fs/subvol tree with both the dir_item and dir_index
5627 * keys created (see replay_one_name()).
5628 * The directory's inode item with a wrong i_size is not a problem as well,
5629 * since we don't use it at log replay time to set the i_size in the inode
5630 * item of the fs/subvol tree (see overwrite_item()).
5631 */
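/*
 * Illustrative case this handles (paths are hypothetical):
 *
 *   mkdir /mnt/dir
 *   mkdir /mnt/dir/subdir
 *   touch /mnt/dir/subdir/foo
 *   xfs_io -c fsync /mnt/dir
 *
 * Logging /mnt/dir records a dentry for "subdir" created in this
 * transaction, so subdir's inode (and, because it is a directory, its
 * own new dentries such as "foo") must be logged too, otherwise log
 * replay could end up with a dentry pointing to an inode that was never
 * copied into the subvolume tree.
 */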
5632 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5633 struct btrfs_root *root,
5634 struct btrfs_inode *start_inode,
5635 struct btrfs_log_ctx *ctx)
5636 {
5637 struct btrfs_fs_info *fs_info = root->fs_info;
5638 struct btrfs_root *log = root->log_root;
5639 struct btrfs_path *path;
5640 LIST_HEAD(dir_list);
5641 struct btrfs_dir_list *dir_elem;
5642 int ret = 0;
5643
5644 path = btrfs_alloc_path();
5645 if (!path)
5646 return -ENOMEM;
5647
5648 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5649 if (!dir_elem) {
5650 btrfs_free_path(path);
5651 return -ENOMEM;
5652 }
5653 dir_elem->ino = btrfs_ino(start_inode);
5654 list_add_tail(&dir_elem->list, &dir_list);
5655
5656 while (!list_empty(&dir_list)) {
5657 struct extent_buffer *leaf;
5658 struct btrfs_key min_key;
5659 int nritems;
5660 int i;
5661
5662 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5663 list);
5664 if (ret)
5665 goto next_dir_inode;
5666
5667 min_key.objectid = dir_elem->ino;
5668 min_key.type = BTRFS_DIR_ITEM_KEY;
5669 min_key.offset = 0;
5670 again:
5671 btrfs_release_path(path);
5672 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5673 if (ret < 0) {
5674 goto next_dir_inode;
5675 } else if (ret > 0) {
5676 ret = 0;
5677 goto next_dir_inode;
5678 }
5679
5680 process_leaf:
5681 leaf = path->nodes[0];
5682 nritems = btrfs_header_nritems(leaf);
5683 for (i = path->slots[0]; i < nritems; i++) {
5684 struct btrfs_dir_item *di;
5685 struct btrfs_key di_key;
5686 struct inode *di_inode;
5687 struct btrfs_dir_list *new_dir_elem;
5688 int log_mode = LOG_INODE_EXISTS;
5689 int type;
5690
5691 btrfs_item_key_to_cpu(leaf, &min_key, i);
5692 if (min_key.objectid != dir_elem->ino ||
5693 min_key.type != BTRFS_DIR_ITEM_KEY)
5694 goto next_dir_inode;
5695
5696 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5697 type = btrfs_dir_type(leaf, di);
5698 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5699 type != BTRFS_FT_DIR)
5700 continue;
5701 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5702 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5703 continue;
5704
5705 btrfs_release_path(path);
5706 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5707 if (IS_ERR(di_inode)) {
5708 ret = PTR_ERR(di_inode);
5709 goto next_dir_inode;
5710 }
5711
5712 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5713 btrfs_add_delayed_iput(di_inode);
5714 break;
5715 }
5716
5717 ctx->log_new_dentries = false;
5718 if (type == BTRFS_FT_DIR)
5719 log_mode = LOG_INODE_ALL;
5720 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5721 log_mode, 0, LLONG_MAX, ctx);
5722 if (!ret &&
5723 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5724 ret = 1;
5725 btrfs_add_delayed_iput(di_inode);
5726 if (ret)
5727 goto next_dir_inode;
5728 if (ctx->log_new_dentries) {
5729 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5730 GFP_NOFS);
5731 if (!new_dir_elem) {
5732 ret = -ENOMEM;
5733 goto next_dir_inode;
5734 }
5735 new_dir_elem->ino = di_key.objectid;
5736 list_add_tail(&new_dir_elem->list, &dir_list);
5737 }
5738 break;
5739 }
5740 if (i == nritems) {
5741 ret = btrfs_next_leaf(log, path);
5742 if (ret < 0) {
5743 goto next_dir_inode;
5744 } else if (ret > 0) {
5745 ret = 0;
5746 goto next_dir_inode;
5747 }
5748 goto process_leaf;
5749 }
5750 if (min_key.offset < (u64)-1) {
5751 min_key.offset++;
5752 goto again;
5753 }
5754 next_dir_inode:
5755 list_del(&dir_elem->list);
5756 kfree(dir_elem);
5757 }
5758
5759 btrfs_free_path(path);
5760 return ret;
5761 }
5762
5763 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5764 struct btrfs_inode *inode,
5765 struct btrfs_log_ctx *ctx)
5766 {
5767 struct btrfs_fs_info *fs_info = trans->fs_info;
5768 int ret;
5769 struct btrfs_path *path;
5770 struct btrfs_key key;
5771 struct btrfs_root *root = inode->root;
5772 const u64 ino = btrfs_ino(inode);
5773
5774 path = btrfs_alloc_path();
5775 if (!path)
5776 return -ENOMEM;
5777 path->skip_locking = 1;
5778 path->search_commit_root = 1;
5779
5780 key.objectid = ino;
5781 key.type = BTRFS_INODE_REF_KEY;
5782 key.offset = 0;
5783 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5784 if (ret < 0)
5785 goto out;
5786
5787 while (true) {
5788 struct extent_buffer *leaf = path->nodes[0];
5789 int slot = path->slots[0];
5790 u32 cur_offset = 0;
5791 u32 item_size;
5792 unsigned long ptr;
5793
5794 if (slot >= btrfs_header_nritems(leaf)) {
5795 ret = btrfs_next_leaf(root, path);
5796 if (ret < 0)
5797 goto out;
5798 else if (ret > 0)
5799 break;
5800 continue;
5801 }
5802
5803 btrfs_item_key_to_cpu(leaf, &key, slot);
5804 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5805 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5806 break;
5807
5808 item_size = btrfs_item_size_nr(leaf, slot);
5809 ptr = btrfs_item_ptr_offset(leaf, slot);
5810 while (cur_offset < item_size) {
5811 struct btrfs_key inode_key;
5812 struct inode *dir_inode;
5813
5814 inode_key.type = BTRFS_INODE_ITEM_KEY;
5815 inode_key.offset = 0;
5816
5817 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5818 struct btrfs_inode_extref *extref;
5819
5820 extref = (struct btrfs_inode_extref *)
5821 (ptr + cur_offset);
5822 inode_key.objectid = btrfs_inode_extref_parent(
5823 leaf, extref);
5824 cur_offset += sizeof(*extref);
5825 cur_offset += btrfs_inode_extref_name_len(leaf,
5826 extref);
5827 } else {
5828 inode_key.objectid = key.offset;
5829 cur_offset = item_size;
5830 }
5831
5832 dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5833 root, NULL);
5834 /*
5835 * If the parent inode was deleted, return an error to
5836 * fallback to a transaction commit. This is to prevent
5837 * getting an inode that was moved from one parent A to
5838 * a parent B, got its former parent A deleted and then
5839 * it got fsync'ed, from existing at both parents after
5840 * a log replay (and the old parent still existing).
5841 * Example:
5842 *
5843 * mkdir /mnt/A
5844 * mkdir /mnt/B
5845 * touch /mnt/B/bar
5846 * sync
5847 * mv /mnt/B/bar /mnt/A/bar
5848 * mv -T /mnt/A /mnt/B
5849 * fsync /mnt/B/bar
5850 * <power fail>
5851 *
5852 * If we ignore the old parent B which got deleted,
5853 * after a log replay we would have file bar linked
5854 * at both parents and the old parent B would still
5855 * exist.
5856 */
5857 if (IS_ERR(dir_inode)) {
5858 ret = PTR_ERR(dir_inode);
5859 goto out;
5860 }
5861
5862 if (ctx)
5863 ctx->log_new_dentries = false;
5864 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5865 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5866 if (!ret &&
5867 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5868 ret = 1;
5869 if (!ret && ctx && ctx->log_new_dentries)
5870 ret = log_new_dir_dentries(trans, root,
5871 BTRFS_I(dir_inode), ctx);
5872 btrfs_add_delayed_iput(dir_inode);
5873 if (ret)
5874 goto out;
5875 }
5876 path->slots[0]++;
5877 }
5878 ret = 0;
5879 out:
5880 btrfs_free_path(path);
5881 return ret;
5882 }
5883
5884 static int log_new_ancestors(struct btrfs_trans_handle *trans,
5885 struct btrfs_root *root,
5886 struct btrfs_path *path,
5887 struct btrfs_log_ctx *ctx)
5888 {
5889 struct btrfs_key found_key;
5890
5891 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5892
5893 while (true) {
5894 struct btrfs_fs_info *fs_info = root->fs_info;
5895 const u64 last_committed = fs_info->last_trans_committed;
5896 struct extent_buffer *leaf = path->nodes[0];
5897 int slot = path->slots[0];
5898 struct btrfs_key search_key;
5899 struct inode *inode;
5900 int ret = 0;
5901
5902 btrfs_release_path(path);
5903
5904 search_key.objectid = found_key.offset;
5905 search_key.type = BTRFS_INODE_ITEM_KEY;
5906 search_key.offset = 0;
5907 inode = btrfs_iget(fs_info->sb, &search_key, root, NULL);
5908 if (IS_ERR(inode))
5909 return PTR_ERR(inode);
5910
5911 if (BTRFS_I(inode)->generation > last_committed)
5912 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5913 LOG_INODE_EXISTS,
5914 0, LLONG_MAX, ctx);
5915 btrfs_add_delayed_iput(inode);
5916 if (ret)
5917 return ret;
5918
5919 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
5920 break;
5921
5922 search_key.type = BTRFS_INODE_REF_KEY;
5923 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5924 if (ret < 0)
5925 return ret;
5926
5927 leaf = path->nodes[0];
5928 slot = path->slots[0];
5929 if (slot >= btrfs_header_nritems(leaf)) {
5930 ret = btrfs_next_leaf(root, path);
5931 if (ret < 0)
5932 return ret;
5933 else if (ret > 0)
5934 return -ENOENT;
5935 leaf = path->nodes[0];
5936 slot = path->slots[0];
5937 }
5938
5939 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5940 if (found_key.objectid != search_key.objectid ||
5941 found_key.type != BTRFS_INODE_REF_KEY)
5942 return -ENOENT;
5943 }
5944 return 0;
5945 }
5946
5947 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
5948 struct btrfs_inode *inode,
5949 struct dentry *parent,
5950 struct btrfs_log_ctx *ctx)
5951 {
5952 struct btrfs_root *root = inode->root;
5953 struct btrfs_fs_info *fs_info = root->fs_info;
5954 struct dentry *old_parent = NULL;
5955 struct super_block *sb = inode->vfs_inode.i_sb;
5956 int ret = 0;
5957
5958 while (true) {
5959 if (!parent || d_really_is_negative(parent) ||
5960 sb != parent->d_sb)
5961 break;
5962
5963 inode = BTRFS_I(d_inode(parent));
5964 if (root != inode->root)
5965 break;
5966
5967 if (inode->generation > fs_info->last_trans_committed) {
5968 ret = btrfs_log_inode(trans, root, inode,
5969 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5970 if (ret)
5971 break;
5972 }
5973 if (IS_ROOT(parent))
5974 break;
5975
5976 parent = dget_parent(parent);
5977 dput(old_parent);
5978 old_parent = parent;
5979 }
5980 dput(old_parent);
5981
5982 return ret;
5983 }
5984
5985 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
5986 struct btrfs_inode *inode,
5987 struct dentry *parent,
5988 struct btrfs_log_ctx *ctx)
5989 {
5990 struct btrfs_root *root = inode->root;
5991 const u64 ino = btrfs_ino(inode);
5992 struct btrfs_path *path;
5993 struct btrfs_key search_key;
5994 int ret;
5995
5996 /*
5997 * For a single hard link case, go through a fast path that does not
5998 * need to iterate the fs/subvolume tree.
5999 */
6000 if (inode->vfs_inode.i_nlink < 2)
6001 return log_new_ancestors_fast(trans, inode, parent, ctx);
6002
6003 path = btrfs_alloc_path();
6004 if (!path)
6005 return -ENOMEM;
6006
6007 search_key.objectid = ino;
6008 search_key.type = BTRFS_INODE_REF_KEY;
6009 search_key.offset = 0;
6010 again:
6011 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6012 if (ret < 0)
6013 goto out;
6014 if (ret == 0)
6015 path->slots[0]++;
6016
6017 while (true) {
6018 struct extent_buffer *leaf = path->nodes[0];
6019 int slot = path->slots[0];
6020 struct btrfs_key found_key;
6021
6022 if (slot >= btrfs_header_nritems(leaf)) {
6023 ret = btrfs_next_leaf(root, path);
6024 if (ret < 0)
6025 goto out;
6026 else if (ret > 0)
6027 break;
6028 continue;
6029 }
6030
6031 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6032 if (found_key.objectid != ino ||
6033 found_key.type > BTRFS_INODE_EXTREF_KEY)
6034 break;
6035
6036 /*
6037 * Don't deal with extended references because they are rare
6038 * cases and too complex to deal with (we would need to keep
6039 * track of which subitem we are processing for each item in
6040 * this loop, etc). So just return some error to fallback to
6041 * a transaction commit.
6042 */
6043 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
6044 ret = -EMLINK;
6045 goto out;
6046 }
6047
6048 /*
6049 * Logging ancestors needs to do more searches on the fs/subvol
6050 * tree, so it releases the path as needed to avoid deadlocks.
6051 * Keep track of the last inode ref key and resume from that key
6052 * after logging all new ancestors for the current hard link.
6053 */
6054 memcpy(&search_key, &found_key, sizeof(search_key));
6055
6056 ret = log_new_ancestors(trans, root, path, ctx);
6057 if (ret)
6058 goto out;
6059 btrfs_release_path(path);
6060 goto again;
6061 }
6062 ret = 0;
6063 out:
6064 btrfs_free_path(path);
6065 return ret;
6066 }
6067
6068 /*
6069 * helper function around btrfs_log_inode to make sure newly created
6070 * parent directories also end up in the log. Only minimal logging (the
6071 * inode item and back references) is done for any parent directory that
6072 * is newer than the last committed transaction.
6073 */
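/*
 * For example (paths are hypothetical):
 *
 *   mkdir /mnt/dir               # dir created in the current transaction
 *   touch /mnt/dir/foo
 *   xfs_io -c fsync /mnt/dir/foo
 *
 * Besides foo itself, the log must contain at least an inode item and a
 * back reference for /mnt/dir, otherwise log replay could not recreate
 * the path leading to foo.
 */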
6074 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6075 struct btrfs_inode *inode,
6076 struct dentry *parent,
6077 const loff_t start,
6078 const loff_t end,
6079 int inode_only,
6080 struct btrfs_log_ctx *ctx)
6081 {
6082 struct btrfs_root *root = inode->root;
6083 struct btrfs_fs_info *fs_info = root->fs_info;
6084 struct super_block *sb;
6085 int ret = 0;
6086 u64 last_committed = fs_info->last_trans_committed;
6087 bool log_dentries = false;
6088
6089 sb = inode->vfs_inode.i_sb;
6090
6091 if (btrfs_test_opt(fs_info, NOTREELOG)) {
6092 ret = 1;
6093 goto end_no_trans;
6094 }
6095
6096 /*
6097 * The previous transaction commit didn't complete, so we need to do a
6098 * full commit ourselves.
6099 */
6100 if (fs_info->last_trans_log_full_commit >
6101 fs_info->last_trans_committed) {
6102 ret = 1;
6103 goto end_no_trans;
6104 }
6105
6106 if (btrfs_root_refs(&root->root_item) == 0) {
6107 ret = 1;
6108 goto end_no_trans;
6109 }
6110
6111 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
6112 last_committed);
6113 if (ret)
6114 goto end_no_trans;
6115
6116 /*
6117 * Skip already logged inodes or inodes corresponding to tmpfiles
6118 * (since logging them is pointless, a link count of 0 means they
6119 * will never be accessible).
6120 */
6121 if (btrfs_inode_in_log(inode, trans->transid) ||
6122 inode->vfs_inode.i_nlink == 0) {
6123 ret = BTRFS_NO_LOG_SYNC;
6124 goto end_no_trans;
6125 }
6126
6127 ret = start_log_trans(trans, root, ctx);
6128 if (ret)
6129 goto end_no_trans;
6130
6131 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
6132 if (ret)
6133 goto end_trans;
6134
6135 /*
6136 * for a regular file, if its inode is already on disk, we don't
6137 * have to worry about the parents at all. This is because
6138 * we can use the last_unlink_trans field to record renames
6139 * and other fun in this file.
6140 */
6141 if (S_ISREG(inode->vfs_inode.i_mode) &&
6142 inode->generation <= last_committed &&
6143 inode->last_unlink_trans <= last_committed) {
6144 ret = 0;
6145 goto end_trans;
6146 }
6147
6148 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
6149 log_dentries = true;
6150
6151 /*
6152 * On unlink we must make sure all our current and old parent directory
6153 * inodes are fully logged. This is to prevent leaving dangling
6154 * directory index entries in directories that were our parents but are
6155 * not anymore. Not doing this results in old parent directory being
6156 * impossible to delete after log replay (rmdir will always fail with
6157 * error -ENOTEMPTY).
6158 *
6159 * Example 1:
6160 *
6161 * mkdir testdir
6162 * touch testdir/foo
6163 * ln testdir/foo testdir/bar
6164 * sync
6165 * unlink testdir/bar
6166 * xfs_io -c fsync testdir/foo
6167 * <power failure>
6168 * mount fs, triggers log replay
6169 *
6170 * If we don't log the parent directory (testdir), after log replay the
6171 * directory still has an entry pointing to the file inode using the bar
6172 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6173 * the file inode has a link count of 1.
6174 *
6175 * Example 2:
6176 *
6177 * mkdir testdir
6178 * touch foo
6179 * ln foo testdir/foo2
6180 * ln foo testdir/foo3
6181 * sync
6182 * unlink testdir/foo3
6183 * xfs_io -c fsync foo
6184 * <power failure>
6185 * mount fs, triggers log replay
6186 *
6187 * Similar as the first example, after log replay the parent directory
6188 * testdir still has an entry pointing to the inode file with name foo3
6189 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6190 * and has a link count of 2.
6191 */
6192 if (inode->last_unlink_trans > last_committed) {
6193 ret = btrfs_log_all_parents(trans, inode, ctx);
6194 if (ret)
6195 goto end_trans;
6196 }
6197
6198 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6199 if (ret)
6200 goto end_trans;
6201
6202 if (log_dentries)
6203 ret = log_new_dir_dentries(trans, root, inode, ctx);
6204 else
6205 ret = 0;
6206 end_trans:
6207 if (ret < 0) {
6208 btrfs_set_log_full_commit(trans);
6209 ret = 1;
6210 }
6211
6212 if (ret)
6213 btrfs_remove_log_ctx(root, ctx);
6214 btrfs_end_log_trans(root);
6215 end_no_trans:
6216 return ret;
6217 }
6218
6219 /*
6220 * it is not safe to log dentry if the chunk root has added new
6221 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
6222 * If this returns 1, you must commit the transaction to safely get your
6223 * data on disk.
6224 */
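/*
 * Rough sketch of how a caller is expected to consume the return value
 * (simplified; see btrfs_sync_file() for the real handling of
 * BTRFS_NO_LOG_SYNC and of the error paths):
 *
 *   ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
 *   if (ret == 0)
 *           ret = btrfs_sync_log(trans, root, &ctx);
 *   if (ret == 0 || ret == BTRFS_NO_LOG_SYNC)
 *           btrfs_end_transaction(trans);
 *   else
 *           btrfs_commit_transaction(trans);
 */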
6225 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6226 struct dentry *dentry,
6227 const loff_t start,
6228 const loff_t end,
6229 struct btrfs_log_ctx *ctx)
6230 {
6231 struct dentry *parent = dget_parent(dentry);
6232 int ret;
6233
6234 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6235 start, end, LOG_INODE_ALL, ctx);
6236 dput(parent);
6237
6238 return ret;
6239 }
6240
6241 /*
6242 * should be called during mount to replay any log trees
6243 * from the FS
6244 */
6245 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6246 {
6247 int ret;
6248 struct btrfs_path *path;
6249 struct btrfs_trans_handle *trans;
6250 struct btrfs_key key;
6251 struct btrfs_key found_key;
6252 struct btrfs_key tmp_key;
6253 struct btrfs_root *log;
6254 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6255 struct walk_control wc = {
6256 .process_func = process_one_buffer,
6257 .stage = LOG_WALK_PIN_ONLY,
6258 };
6259
6260 path = btrfs_alloc_path();
6261 if (!path)
6262 return -ENOMEM;
6263
6264 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6265
6266 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6267 if (IS_ERR(trans)) {
6268 ret = PTR_ERR(trans);
6269 goto error;
6270 }
6271
6272 wc.trans = trans;
6273 wc.pin = 1;
6274
6275 ret = walk_log_tree(trans, log_root_tree, &wc);
6276 if (ret) {
6277 btrfs_handle_fs_error(fs_info, ret,
6278 "Failed to pin buffers while recovering log root tree.");
6279 goto error;
6280 }
6281
6282 again:
6283 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6284 key.offset = (u64)-1;
6285 key.type = BTRFS_ROOT_ITEM_KEY;
6286
6287 while (1) {
6288 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6289
6290 if (ret < 0) {
6291 btrfs_handle_fs_error(fs_info, ret,
6292 "Couldn't find tree log root.");
6293 goto error;
6294 }
6295 if (ret > 0) {
6296 if (path->slots[0] == 0)
6297 break;
6298 path->slots[0]--;
6299 }
6300 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6301 path->slots[0]);
6302 btrfs_release_path(path);
6303 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6304 break;
6305
6306 log = btrfs_read_fs_root(log_root_tree, &found_key);
6307 if (IS_ERR(log)) {
6308 ret = PTR_ERR(log);
6309 btrfs_handle_fs_error(fs_info, ret,
6310 "Couldn't read tree log root.");
6311 goto error;
6312 }
6313
6314 tmp_key.objectid = found_key.offset;
6315 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
6316 tmp_key.offset = (u64)-1;
6317
6318 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
6319 if (IS_ERR(wc.replay_dest)) {
6320 ret = PTR_ERR(wc.replay_dest);
6321
6322 /*
6323 * We didn't find the subvol, likely because it was
6324 * deleted. This is ok, simply skip this log and go to
6325 * the next one.
6326 *
6327 * We need to exclude the root because we can't have
6328 * other log replays overwriting this log as we'll read
6329 * it back in a few more times. This will keep our
6330 * block from being modified, and we'll just bail for
6331 * each subsequent pass.
6332 */
6333 if (ret == -ENOENT)
6334 ret = btrfs_pin_extent_for_log_replay(fs_info,
6335 log->node->start,
6336 log->node->len);
6337 free_extent_buffer(log->node);
6338 free_extent_buffer(log->commit_root);
6339 kfree(log);
6340
6341 if (!ret)
6342 goto next;
6343 btrfs_handle_fs_error(fs_info, ret,
6344 "Couldn't read target root for tree log recovery.");
6345 goto error;
6346 }
6347
6348 wc.replay_dest->log_root = log;
6349 btrfs_record_root_in_trans(trans, wc.replay_dest);
6350 ret = walk_log_tree(trans, log, &wc);
6351
6352 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6353 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6354 path);
6355 }
6356
6357 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6358 struct btrfs_root *root = wc.replay_dest;
6359
6360 btrfs_release_path(path);
6361
6362 /*
6363 * We have just replayed everything, and the highest
6364 * objectid of fs roots probably has changed in case
6365 * some inode items got replayed.
6366 *
6367 * root->objectid_mutex is not acquired as log replay
6368 * could only happen during mount.
6369 */
6370 ret = btrfs_find_highest_objectid(root,
6371 &root->highest_objectid);
6372 }
6373
6374 wc.replay_dest->log_root = NULL;
6375 free_extent_buffer(log->node);
6376 free_extent_buffer(log->commit_root);
6377 kfree(log);
6378
6379 if (ret)
6380 goto error;
6381 next:
6382 if (found_key.offset == 0)
6383 break;
6384 key.offset = found_key.offset - 1;
6385 }
6386 btrfs_release_path(path);
6387
6388 /* step one is to pin it all, step two is to replay just inodes */
6389 if (wc.pin) {
6390 wc.pin = 0;
6391 wc.process_func = replay_one_buffer;
6392 wc.stage = LOG_WALK_REPLAY_INODES;
6393 goto again;
6394 }
6395 /* step three is to replay everything */
6396 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6397 wc.stage++;
6398 goto again;
6399 }
6400
6401 btrfs_free_path(path);
6402
6403 /* step 4: commit the transaction, which also unpins the blocks */
6404 ret = btrfs_commit_transaction(trans);
6405 if (ret)
6406 return ret;
6407
6408 free_extent_buffer(log_root_tree->node);
6409 log_root_tree->log_root = NULL;
6410 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6411 kfree(log_root_tree);
6412
6413 return 0;
6414 error:
6415 if (wc.trans)
6416 btrfs_end_transaction(wc.trans);
6417 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6418 btrfs_free_path(path);
6419 return ret;
6420 }
6421
6422 /*
6423 * there are some corner cases where we want to force a full
6424 * commit instead of allowing a directory to be logged.
6425 *
6426 * They revolve around files that were unlinked from the directory, and
6427 * this function updates the parent directory so that a full commit is
6428 * properly done if it is fsync'd later after the unlinks are done.
6429 *
6430 * Must be called before the unlink operations (updates to the subvolume tree,
6431 * inodes, etc) are done.
6432 */
6433 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6434 struct btrfs_inode *dir, struct btrfs_inode *inode,
6435 int for_rename)
6436 {
6437 /*
6438 * when we're logging a file, if it hasn't been renamed
6439 * or unlinked, and its inode is fully committed on disk,
6440 * we don't have to worry about walking up the directory chain
6441 * to log its parents.
6442 *
6443 * So, we use the last_unlink_trans field to put this transid
6444 * into the file. When the file is logged we check it and
6445 * don't log the parents if the file is fully on disk.
6446 */
6447 mutex_lock(&inode->log_mutex);
6448 inode->last_unlink_trans = trans->transid;
6449 mutex_unlock(&inode->log_mutex);
6450
6451 /*
6452 * if this directory was already logged, any new
6453 * names for this file/dir will get recorded
6454 */
6455 if (dir->logged_trans == trans->transid)
6456 return;
6457
6458 /*
6459 * if the inode we're about to unlink was logged,
6460 * the log will be properly updated for any new names
6461 */
6462 if (inode->logged_trans == trans->transid)
6463 return;
6464
6465 /*
6466 * when renaming files across directories, if the directory
6467 * we're unlinking from gets fsync'd later on, there's
6468 * no way to find the destination directory later and fsync it
6469 * properly. So, we have to be conservative and force commits
6470 * so the new name gets discovered.
6471 */
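/*
 * Illustrative rename case (paths are hypothetical):
 *
 *   mkdir /mnt/A /mnt/B
 *   touch /mnt/A/foo
 *   sync
 *   mv /mnt/A/foo /mnt/B/foo
 *   xfs_io -c fsync /mnt/A       # the directory we unlinked from
 *   <power failure>
 *
 * Recording the transid in A's last_unlink_trans makes that fsync fall
 * back to a full transaction commit, so the new location of foo under
 * /mnt/B is persisted as well.
 */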
6472 if (for_rename)
6473 goto record;
6474
6475 /* we can safely do the unlink without any special recording */
6476 return;
6477
6478 record:
6479 mutex_lock(&dir->log_mutex);
6480 dir->last_unlink_trans = trans->transid;
6481 mutex_unlock(&dir->log_mutex);
6482 }
6483
6484 /*
6485 * Make sure that if someone attempts to fsync the parent directory of a deleted
6486 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6487 * that after replaying the log tree of the parent directory's root we will not
6488 * see the snapshot anymore and at log replay time we will not see any log tree
6489 * corresponding to the deleted snapshot's root, which could lead to replaying
6490 * it after replaying the log tree of the parent directory (which would replay
6491 * the snapshot delete operation).
6492 *
6493 * Must be called before the actual snapshot destroy operation (updates to the
6494 * parent root and tree of tree roots trees, etc) are done.
6495 */
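/*
 * Illustrative sequence this guards against (commands and paths are
 * approximate):
 *
 *   btrfs subvolume snapshot /mnt /mnt/snap
 *   sync
 *   btrfs subvolume delete /mnt/snap
 *   xfs_io -c fsync /mnt         # fsync of the snapshot's parent dir
 *   <power failure>
 *
 * Because last_unlink_trans is updated here, that fsync falls back to a
 * full transaction commit instead of a log tree sync, so log replay can
 * never bring the deleted snapshot back.
 */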
6496 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6497 struct btrfs_inode *dir)
6498 {
6499 mutex_lock(&dir->log_mutex);
6500 dir->last_unlink_trans = trans->transid;
6501 mutex_unlock(&dir->log_mutex);
6502 }
6503
6504 /*
6505 * Call this after adding a new name for a file and it will properly
6506 * update the log to reflect the new name.
6507 */
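/*
 * Illustrative trigger (paths are hypothetical):
 *
 *   touch /mnt/foo
 *   xfs_io -c fsync /mnt/foo     # the inode is now in the log
 *   mv /mnt/foo /mnt/bar         # rename calls this helper because the
 *                                # inode was logged in this transaction
 *
 * The new name is logged right away so that a later sync of the log
 * (for this inode or any other) never persists a log tree that lacks
 * the name "bar".
 */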
6508 void btrfs_log_new_name(struct btrfs_trans_handle *trans,
6509 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6510 struct dentry *parent)
6511 {
6512 struct btrfs_log_ctx ctx;
6513
6514 /*
6515 * this will force the logging code to walk the dentry chain
6516 * up for the file
6517 */
6518 if (!S_ISDIR(inode->vfs_inode.i_mode))
6519 inode->last_unlink_trans = trans->transid;
6520
6521 /*
6522 * if this inode hasn't been logged and directory we're renaming it
6523 * from hasn't been logged, we don't need to log it
6524 */
6525 if (!inode_logged(trans, inode) &&
6526 (!old_dir || !inode_logged(trans, old_dir)))
6527 return;
6528
6529 btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
6530 ctx.logging_new_name = true;
6531 /*
6532 * We don't care about the return value. If we fail to log the new name
6533 * then we know the next attempt to sync the log will fallback to a full
6534 * transaction commit (due to a call to btrfs_set_log_full_commit()), so
6535 * we don't need to worry about getting a log committed that has an
6536 * inconsistent state after a rename operation.
6537 */
6538 btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6539 LOG_INODE_EXISTS, &ctx);
6540 }
6541
6542