1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 */
5
6 #include <linux/bsearch.h>
7 #include <linux/fs.h>
8 #include <linux/file.h>
9 #include <linux/sort.h>
10 #include <linux/mount.h>
11 #include <linux/xattr.h>
12 #include <linux/posix_acl_xattr.h>
13 #include <linux/radix-tree.h>
14 #include <linux/vmalloc.h>
15 #include <linux/string.h>
16 #include <linux/compat.h>
17 #include <linux/crc32c.h>
18
19 #include "send.h"
20 #include "backref.h"
21 #include "locking.h"
22 #include "disk-io.h"
23 #include "btrfs_inode.h"
24 #include "transaction.h"
25 #include "compression.h"
26 #include "xattr.h"
27 #include "print-tree.h"
28
29 /*
30 * Maximum number of references an extent can have in order for us to attempt to
31 * issue clone operations instead of write operations. This currently exists to
32 * avoid hitting limitations of the backreference walking code (taking a lot of
33 * time and using too much memory for extents with a large number of references).
34 */
35 #define SEND_MAX_EXTENT_REFS 64
36
37 /*
38 * A fs_path is a helper to dynamically build path names with unknown size.
39 * It reallocates the internal buffer on demand.
40 * It allows fast adding of path elements on the right side (normal path) and
41 * fast adding to the left side (reversed path). A reversed path can also be
42 * unreversed if needed.
43 */
44 struct fs_path {
45 union {
46 struct {
47 char *start;
48 char *end;
49
50 char *buf;
51 unsigned short buf_len:15;
52 unsigned short reversed:1;
53 char inline_buf[];
54 };
55 /*
56 * The average path length does not exceed 200 bytes, so we'll have
57 * better packing in the slab and a higher chance to satisfy
58 * an allocation later during send.
59 */
60 char pad[256];
61 };
62 };
63 #define FS_PATH_INLINE_SIZE \
64 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
65
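/*
 * Illustrative note, not part of the original source: FS_PATH_INLINE_SIZE
 * is whatever room the 256 byte union leaves after the start/end/buf/
 * buf_len/reversed members, so short paths never need a second
 * allocation. fs_path_alloc() starts with p->buf = p->inline_buf and
 * fs_path_ensure_buf() only switches to a heap buffer once a path
 * outgrows that inline space.
 */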
66
67 /* reused for each extent */
68 struct clone_root {
69 struct btrfs_root *root;
70 u64 ino;
71 u64 offset;
72
73 u64 found_refs;
74 };
75
76 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
77 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
78
79 struct send_ctx {
80 struct file *send_filp;
81 loff_t send_off;
82 char *send_buf;
83 u32 send_size;
84 u32 send_max_size;
85 u64 total_send_size;
86 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
87 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
88
89 struct btrfs_root *send_root;
90 struct btrfs_root *parent_root;
91 struct clone_root *clone_roots;
92 int clone_roots_cnt;
93
94 /* current state of the compare_tree call */
95 struct btrfs_path *left_path;
96 struct btrfs_path *right_path;
97 struct btrfs_key *cmp_key;
98
99 /*
100 * Keep track of the generation of the last transaction that was used
101 * for relocating a block group. This is periodically checked in order
102 * to detect if a relocation happened since the last check, so that we
103 * don't operate on stale extent buffers for nodes (level >= 1) or on
104 * stale disk_bytenr values of file extent items.
105 */
106 u64 last_reloc_trans;
107
108 /*
109 * Info about the currently processed inode. In case of deleted inodes,
110 * these are the values from the deleted inode.
111 */
112 u64 cur_ino;
113 u64 cur_inode_gen;
114 int cur_inode_new;
115 int cur_inode_new_gen;
116 int cur_inode_deleted;
117 u64 cur_inode_size;
118 u64 cur_inode_mode;
119 u64 cur_inode_rdev;
120 u64 cur_inode_last_extent;
121 u64 cur_inode_next_write_offset;
122 bool ignore_cur_inode;
123
124 u64 send_progress;
125
126 struct list_head new_refs;
127 struct list_head deleted_refs;
128
129 struct radix_tree_root name_cache;
130 struct list_head name_cache_list;
131 int name_cache_size;
132
133 struct file_ra_state ra;
134
135 /*
136 * We process inodes in increasing order, so if before an
137 * incremental send we reverse the parent/child relationship of
138 * directories such that a directory with a lower inode number was
139 * the parent of a directory with a higher inode number, and the one
140 * becoming the new parent got renamed too, we can't rename/move the
141 * directory with lower inode number when we finish processing it - we
142 * must process the directory with higher inode number first, then
143 * rename/move it and then rename/move the directory with lower inode
144 * number. Example follows.
145 *
146 * Tree state when the first send was performed:
147 *
148 * .
149 * |-- a (ino 257)
150 * |-- b (ino 258)
151 * |
152 * |
153 * |-- c (ino 259)
154 * | |-- d (ino 260)
155 * |
156 * |-- c2 (ino 261)
157 *
158 * Tree state when the second (incremental) send is performed:
159 *
160 * .
161 * |-- a (ino 257)
162 * |-- b (ino 258)
163 * |-- c2 (ino 261)
164 * |-- d2 (ino 260)
165 * |-- cc (ino 259)
166 *
167 * The sequence of steps that led to the second state was:
168 *
169 * mv /a/b/c/d /a/b/c2/d2
170 * mv /a/b/c /a/b/c2/d2/cc
171 *
172 * "c" has lower inode number, but we can't move it (2nd mv operation)
173 * before we move "d", which has higher inode number.
174 *
175 * So we just memorize which move/rename operations must be performed
176 * later when their respective parent is processed and moved/renamed.
177 */
178
179 /* Indexed by parent directory inode number. */
180 struct rb_root pending_dir_moves;
181
182 /*
183 * Reverse index, indexed by the inode number of a directory that
184 * is waiting for the move/rename of its immediate parent before its
185 * own move/rename can be performed.
186 */
187 struct rb_root waiting_dir_moves;
188
189 /*
190 * A directory that is going to be rm'ed might have a child directory
191 * which is in the pending directory moves index above. In this case,
192 * the directory can only be removed after the move/rename of its child
193 * is performed. Example:
194 *
195 * Parent snapshot:
196 *
197 * . (ino 256)
198 * |-- a/ (ino 257)
199 * |-- b/ (ino 258)
200 * |-- c/ (ino 259)
201 * | |-- x/ (ino 260)
202 * |
203 * |-- y/ (ino 261)
204 *
205 * Send snapshot:
206 *
207 * . (ino 256)
208 * |-- a/ (ino 257)
209 * |-- b/ (ino 258)
210 * |-- YY/ (ino 261)
211 * |-- x/ (ino 260)
212 *
213 * Sequence of steps that led to the send snapshot:
214 * rm -f /a/b/c/foo.txt
215 * mv /a/b/y /a/b/YY
216 * mv /a/b/c/x /a/b/YY
217 * rmdir /a/b/c
218 *
219 * When the child is processed, its move/rename is delayed until its
220 * parent is processed (as explained above), but all other operations
221 * like update utimes, chown, chgrp, etc, are performed and the paths
222 * that it uses for those operations must use the orphanized name of
223 * its parent (the directory we're going to rm later), so we need to
224 * memorize that name.
225 *
226 * Indexed by the inode number of the directory to be deleted.
227 */
228 struct rb_root orphan_dirs;
229 };
230
231 struct pending_dir_move {
232 struct rb_node node;
233 struct list_head list;
234 u64 parent_ino;
235 u64 ino;
236 u64 gen;
237 struct list_head update_refs;
238 };
239
240 struct waiting_dir_move {
241 struct rb_node node;
242 u64 ino;
243 /*
244 * There might be some directory that could not be removed because it
245 * was waiting for this directory inode to be moved first. Therefore
246 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
247 */
248 u64 rmdir_ino;
249 u64 rmdir_gen;
250 bool orphanized;
251 };
252
253 struct orphan_dir_info {
254 struct rb_node node;
255 u64 ino;
256 u64 gen;
257 u64 last_dir_index_offset;
258 };
259
260 struct name_cache_entry {
261 struct list_head list;
262 /*
263 * radix_tree has only 32bit entries but we need to handle 64bit inums.
264 * We use the lower 32bit of the 64bit inum to store it in the tree. If
265 * more than one inum would fall into the same entry, we use radix_list
266 * to store the additional entries. radix_list is also used to store
267 * entries where two entries have the same inum but different
268 * generations.
269 */
270 struct list_head radix_list;
271 u64 ino;
272 u64 gen;
273 u64 parent_ino;
274 u64 parent_gen;
275 int ret;
276 int need_later_update;
277 int name_len;
278 char name[];
279 };
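
/*
 * Illustrative note, not from the original code: the cache is indexed by
 * (unsigned long)nce->ino, so on a 32-bit kernel only the low 32 bits of
 * the inode number select the radix tree slot. Inodes such as 0x1 and
 * 0x100000001 would then share one slot; the slot holds a list_head, both
 * entries hang off it via radix_list, and name_cache_search() walks that
 * list comparing the full ino and gen.
 */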
280
281 #define ADVANCE 1
282 #define ADVANCE_ONLY_NEXT -1
283
284 enum btrfs_compare_tree_result {
285 BTRFS_COMPARE_TREE_NEW,
286 BTRFS_COMPARE_TREE_DELETED,
287 BTRFS_COMPARE_TREE_CHANGED,
288 BTRFS_COMPARE_TREE_SAME,
289 };
290
291 __cold
292 static void inconsistent_snapshot_error(struct send_ctx *sctx,
293 enum btrfs_compare_tree_result result,
294 const char *what)
295 {
296 const char *result_string;
297
298 switch (result) {
299 case BTRFS_COMPARE_TREE_NEW:
300 result_string = "new";
301 break;
302 case BTRFS_COMPARE_TREE_DELETED:
303 result_string = "deleted";
304 break;
305 case BTRFS_COMPARE_TREE_CHANGED:
306 result_string = "updated";
307 break;
308 case BTRFS_COMPARE_TREE_SAME:
309 ASSERT(0);
310 result_string = "unchanged";
311 break;
312 default:
313 ASSERT(0);
314 result_string = "unexpected";
315 }
316
317 btrfs_err(sctx->send_root->fs_info,
318 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
319 result_string, what, sctx->cmp_key->objectid,
320 sctx->send_root->root_key.objectid,
321 (sctx->parent_root ?
322 sctx->parent_root->root_key.objectid : 0));
323 }
324
325 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
326
327 static struct waiting_dir_move *
328 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
329
330 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
331
332 static int need_send_hole(struct send_ctx *sctx)
333 {
334 return (sctx->parent_root && !sctx->cur_inode_new &&
335 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
336 S_ISREG(sctx->cur_inode_mode));
337 }
338
339 static void fs_path_reset(struct fs_path *p)
340 {
341 if (p->reversed) {
342 p->start = p->buf + p->buf_len - 1;
343 p->end = p->start;
344 *p->start = 0;
345 } else {
346 p->start = p->buf;
347 p->end = p->start;
348 *p->start = 0;
349 }
350 }
351
352 static struct fs_path *fs_path_alloc(void)
353 {
354 struct fs_path *p;
355
356 p = kmalloc(sizeof(*p), GFP_KERNEL);
357 if (!p)
358 return NULL;
359 p->reversed = 0;
360 p->buf = p->inline_buf;
361 p->buf_len = FS_PATH_INLINE_SIZE;
362 fs_path_reset(p);
363 return p;
364 }
365
366 static struct fs_path *fs_path_alloc_reversed(void)
367 {
368 struct fs_path *p;
369
370 p = fs_path_alloc();
371 if (!p)
372 return NULL;
373 p->reversed = 1;
374 fs_path_reset(p);
375 return p;
376 }
377
378 static void fs_path_free(struct fs_path *p)
379 {
380 if (!p)
381 return;
382 if (p->buf != p->inline_buf)
383 kfree(p->buf);
384 kfree(p);
385 }
386
387 static int fs_path_len(struct fs_path *p)
388 {
389 return p->end - p->start;
390 }
391
392 static int fs_path_ensure_buf(struct fs_path *p, int len)
393 {
394 char *tmp_buf;
395 int path_len;
396 int old_buf_len;
397
398 len++;
399
400 if (p->buf_len >= len)
401 return 0;
402
403 if (len > PATH_MAX) {
404 WARN_ON(1);
405 return -ENOMEM;
406 }
407
408 path_len = p->end - p->start;
409 old_buf_len = p->buf_len;
410
411 /*
412 * First time the inline_buf does not suffice
413 */
414 if (p->buf == p->inline_buf) {
415 tmp_buf = kmalloc(len, GFP_KERNEL);
416 if (tmp_buf)
417 memcpy(tmp_buf, p->buf, old_buf_len);
418 } else {
419 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
420 }
421 if (!tmp_buf)
422 return -ENOMEM;
423 p->buf = tmp_buf;
424 /*
425 * The real size of the buffer is bigger; this will let the fast path
426 * happen most of the time.
427 */
428 p->buf_len = ksize(p->buf);
429
430 if (p->reversed) {
431 tmp_buf = p->buf + old_buf_len - path_len - 1;
432 p->end = p->buf + p->buf_len - 1;
433 p->start = p->end - path_len;
434 memmove(p->start, tmp_buf, path_len + 1);
435 } else {
436 p->start = p->buf;
437 p->end = p->start + path_len;
438 }
439 return 0;
440 }
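
/*
 * Illustrative note, not in the original source: for a reversed path the
 * string is kept anchored at the end of the buffer, so after growing it
 * the existing bytes (including the trailing NUL) are moved to the new
 * end with memmove() and start/end recomputed; for a normal path only
 * start/end have to be rebased onto the possibly relocated buffer.
 */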
441
442 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
443 char **prepared)
444 {
445 int ret;
446 int new_len;
447
448 new_len = p->end - p->start + name_len;
449 if (p->start != p->end)
450 new_len++;
451 ret = fs_path_ensure_buf(p, new_len);
452 if (ret < 0)
453 goto out;
454
455 if (p->reversed) {
456 if (p->start != p->end)
457 *--p->start = '/';
458 p->start -= name_len;
459 *prepared = p->start;
460 } else {
461 if (p->start != p->end)
462 *p->end++ = '/';
463 *prepared = p->end;
464 p->end += name_len;
465 *p->end = 0;
466 }
467
468 out:
469 return ret;
470 }
471
472 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
473 {
474 int ret;
475 char *prepared;
476
477 ret = fs_path_prepare_for_add(p, name_len, &prepared);
478 if (ret < 0)
479 goto out;
480 memcpy(prepared, name, name_len);
481
482 out:
483 return ret;
484 }
485
486 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
487 {
488 int ret;
489 char *prepared;
490
491 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
492 if (ret < 0)
493 goto out;
494 memcpy(prepared, p2->start, p2->end - p2->start);
495
496 out:
497 return ret;
498 }
499
500 static int fs_path_add_from_extent_buffer(struct fs_path *p,
501 struct extent_buffer *eb,
502 unsigned long off, int len)
503 {
504 int ret;
505 char *prepared;
506
507 ret = fs_path_prepare_for_add(p, len, &prepared);
508 if (ret < 0)
509 goto out;
510
511 read_extent_buffer(eb, prepared, off, len);
512
513 out:
514 return ret;
515 }
516
517 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
518 {
519 int ret;
520
521 p->reversed = from->reversed;
522 fs_path_reset(p);
523
524 ret = fs_path_add_path(p, from);
525
526 return ret;
527 }
528
529
530 static void fs_path_unreverse(struct fs_path *p)
531 {
532 char *tmp;
533 int len;
534
535 if (!p->reversed)
536 return;
537
538 tmp = p->start;
539 len = p->end - p->start;
540 p->start = p->buf;
541 p->end = p->start + len;
542 memmove(p->start, tmp, len + 1);
543 p->reversed = 0;
544 }
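
/*
 * Usage sketch, illustrative only and not part of the original file: a
 * reversed fs_path prepends components, which matches building a path
 * while walking from an inode up to the subvolume root (error handling
 * omitted):
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	fs_path_add(p, "file", 4);	p->start is now "file"
 *	fs_path_add(p, "dir", 3);	p->start is now "dir/file"
 *	fs_path_add(p, "top", 3);	p->start is now "top/dir/file"
 *	fs_path_unreverse(p);		the string now begins at p->buf
 *	fs_path_free(p);
 */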
545
546 static struct btrfs_path *alloc_path_for_send(void)
547 {
548 struct btrfs_path *path;
549
550 path = btrfs_alloc_path();
551 if (!path)
552 return NULL;
553 path->search_commit_root = 1;
554 path->skip_locking = 1;
555 path->need_commit_sem = 1;
556 return path;
557 }
558
559 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
560 {
561 int ret;
562 u32 pos = 0;
563
564 while (pos < len) {
565 ret = kernel_write(filp, buf + pos, len - pos, off);
566 /* TODO handle that correctly */
567 /*if (ret == -ERESTARTSYS) {
568 continue;
569 }*/
570 if (ret < 0)
571 return ret;
572 if (ret == 0) {
573 return -EIO;
574 }
575 pos += ret;
576 }
577
578 return 0;
579 }
580
581 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
582 {
583 struct btrfs_tlv_header *hdr;
584 int total_len = sizeof(*hdr) + len;
585 int left = sctx->send_max_size - sctx->send_size;
586
587 if (unlikely(left < total_len))
588 return -EOVERFLOW;
589
590 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
591 put_unaligned_le16(attr, &hdr->tlv_type);
592 put_unaligned_le16(len, &hdr->tlv_len);
593 memcpy(hdr + 1, data, len);
594 sctx->send_size += total_len;
595
596 return 0;
597 }
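
/*
 * Illustrative layout note, not from the original source: each attribute
 * is appended to send_buf as a little-endian TLV record,
 *
 *	u16 tlv_type | u16 tlv_len | tlv_len bytes of data
 *
 * so a u64 attribute for example occupies 4 + 8 bytes. tlv_put() never
 * grows the buffer; on overflow it returns -EOVERFLOW and the TLV_PUT*
 * macros below turn that into a jump to tlv_put_failure.
 */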
598
599 #define TLV_PUT_DEFINE_INT(bits) \
600 static int tlv_put_u##bits(struct send_ctx *sctx, \
601 u##bits attr, u##bits value) \
602 { \
603 __le##bits __tmp = cpu_to_le##bits(value); \
604 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
605 }
606
607 TLV_PUT_DEFINE_INT(64)
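
/*
 * Illustrative expansion, not part of the original file: the invocation
 * above defines
 *
 *	static int tlv_put_u64(struct send_ctx *sctx, u64 attr, u64 value)
 *	{
 *		__le64 __tmp = cpu_to_le64(value);
 *		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));
 *	}
 *
 * which is the only width instantiated here; using TLV_PUT_U8/U16/U32
 * below would require corresponding TLV_PUT_DEFINE_INT() lines.
 */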
608
609 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
610 const char *str, int len)
611 {
612 if (len == -1)
613 len = strlen(str);
614 return tlv_put(sctx, attr, str, len);
615 }
616
617 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
618 const u8 *uuid)
619 {
620 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
621 }
622
623 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
624 struct extent_buffer *eb,
625 struct btrfs_timespec *ts)
626 {
627 struct btrfs_timespec bts;
628 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
629 return tlv_put(sctx, attr, &bts, sizeof(bts));
630 }
631
632
633 #define TLV_PUT(sctx, attrtype, data, attrlen) \
634 do { \
635 ret = tlv_put(sctx, attrtype, data, attrlen); \
636 if (ret < 0) \
637 goto tlv_put_failure; \
638 } while (0)
639
640 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
641 do { \
642 ret = tlv_put_u##bits(sctx, attrtype, value); \
643 if (ret < 0) \
644 goto tlv_put_failure; \
645 } while (0)
646
647 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
648 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
649 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
650 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
651 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
652 do { \
653 ret = tlv_put_string(sctx, attrtype, str, len); \
654 if (ret < 0) \
655 goto tlv_put_failure; \
656 } while (0)
657 #define TLV_PUT_PATH(sctx, attrtype, p) \
658 do { \
659 ret = tlv_put_string(sctx, attrtype, p->start, \
660 p->end - p->start); \
661 if (ret < 0) \
662 goto tlv_put_failure; \
663 } while(0)
664 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
665 do { \
666 ret = tlv_put_uuid(sctx, attrtype, uuid); \
667 if (ret < 0) \
668 goto tlv_put_failure; \
669 } while (0)
670 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
671 do { \
672 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
673 if (ret < 0) \
674 goto tlv_put_failure; \
675 } while (0)
676
677 static int send_header(struct send_ctx *sctx)
678 {
679 struct btrfs_stream_header hdr;
680
681 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
682 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
683
684 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
685 &sctx->send_off);
686 }
687
688 /*
689 * For each command/item we want to send to userspace, we call this function.
690 */
691 static int begin_cmd(struct send_ctx *sctx, int cmd)
692 {
693 struct btrfs_cmd_header *hdr;
694
695 if (WARN_ON(!sctx->send_buf))
696 return -EINVAL;
697
698 BUG_ON(sctx->send_size);
699
700 sctx->send_size += sizeof(*hdr);
701 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
702 put_unaligned_le16(cmd, &hdr->cmd);
703
704 return 0;
705 }
706
707 static int send_cmd(struct send_ctx *sctx)
708 {
709 int ret;
710 struct btrfs_cmd_header *hdr;
711 u32 crc;
712
713 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
714 put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
715 put_unaligned_le32(0, &hdr->crc);
716
717 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
718 put_unaligned_le32(crc, &hdr->crc);
719
720 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
721 &sctx->send_off);
722
723 sctx->total_send_size += sctx->send_size;
724 sctx->cmd_send_size[get_unaligned_le16(&hdr->cmd)] += sctx->send_size;
725 sctx->send_size = 0;
726
727 return ret;
728 }
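
/*
 * Illustrative framing note, not in the original source: each stream
 * command is built in send_buf as a btrfs_cmd_header (a packed len/cmd/crc
 * triple defined in send.h) followed by the TLV attributes added via the
 * TLV_PUT* macros. begin_cmd() reserves the header and records the command
 * id, send_cmd() fills in len (attribute bytes only, header excluded),
 * computes the crc32c over the whole buffer with the crc field zeroed, and
 * writes everything out through write_buf().
 */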
729
730 /*
731 * Sends a move instruction to user space
732 */
733 static int send_rename(struct send_ctx *sctx,
734 struct fs_path *from, struct fs_path *to)
735 {
736 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
737 int ret;
738
739 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
740
741 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
742 if (ret < 0)
743 goto out;
744
745 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
746 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
747
748 ret = send_cmd(sctx);
749
750 tlv_put_failure:
751 out:
752 return ret;
753 }
754
755 /*
756 * Sends a link instruction to user space
757 */
758 static int send_link(struct send_ctx *sctx,
759 struct fs_path *path, struct fs_path *lnk)
760 {
761 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
762 int ret;
763
764 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
765
766 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
767 if (ret < 0)
768 goto out;
769
770 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
771 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
772
773 ret = send_cmd(sctx);
774
775 tlv_put_failure:
776 out:
777 return ret;
778 }
779
780 /*
781 * Sends an unlink instruction to user space
782 */
783 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
784 {
785 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
786 int ret;
787
788 btrfs_debug(fs_info, "send_unlink %s", path->start);
789
790 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
791 if (ret < 0)
792 goto out;
793
794 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
795
796 ret = send_cmd(sctx);
797
798 tlv_put_failure:
799 out:
800 return ret;
801 }
802
803 /*
804 * Sends a rmdir instruction to user space
805 */
806 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
807 {
808 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
809 int ret;
810
811 btrfs_debug(fs_info, "send_rmdir %s", path->start);
812
813 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
814 if (ret < 0)
815 goto out;
816
817 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
818
819 ret = send_cmd(sctx);
820
821 tlv_put_failure:
822 out:
823 return ret;
824 }
825
826 /*
827 * Helper function to retrieve some fields from an inode item.
828 */
829 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
830 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
831 u64 *gid, u64 *rdev)
832 {
833 int ret;
834 struct btrfs_inode_item *ii;
835 struct btrfs_key key;
836
837 key.objectid = ino;
838 key.type = BTRFS_INODE_ITEM_KEY;
839 key.offset = 0;
840 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
841 if (ret) {
842 if (ret > 0)
843 ret = -ENOENT;
844 return ret;
845 }
846
847 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
848 struct btrfs_inode_item);
849 if (size)
850 *size = btrfs_inode_size(path->nodes[0], ii);
851 if (gen)
852 *gen = btrfs_inode_generation(path->nodes[0], ii);
853 if (mode)
854 *mode = btrfs_inode_mode(path->nodes[0], ii);
855 if (uid)
856 *uid = btrfs_inode_uid(path->nodes[0], ii);
857 if (gid)
858 *gid = btrfs_inode_gid(path->nodes[0], ii);
859 if (rdev)
860 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
861
862 return ret;
863 }
864
865 static int get_inode_info(struct btrfs_root *root,
866 u64 ino, u64 *size, u64 *gen,
867 u64 *mode, u64 *uid, u64 *gid,
868 u64 *rdev)
869 {
870 struct btrfs_path *path;
871 int ret;
872
873 path = alloc_path_for_send();
874 if (!path)
875 return -ENOMEM;
876 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
877 rdev);
878 btrfs_free_path(path);
879 return ret;
880 }
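
/*
 * Usage sketch, illustrative only: callers that need a single field pass
 * NULL for the rest, e.g. fetching just the generation of an inode:
 *
 *	u64 gen;
 *	int ret;
 *
 *	ret = get_inode_info(root, ino, NULL, &gen, NULL, NULL, NULL, NULL);
 *	if (ret < 0)
 *		return ret;	-ENOENT if the inode item is missing
 */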
881
882 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
883 struct fs_path *p,
884 void *ctx);
885
886 /*
887 * Helper function to iterate the entries in ONE btrfs_inode_ref or
888 * btrfs_inode_extref.
889 * The iterate callback may return a non-zero value to stop iteration. This can
890 * be a negative value for error codes or 1 to simply stop it.
891 *
892 * path must point to the INODE_REF or INODE_EXTREF when called.
893 */
894 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
895 struct btrfs_key *found_key, int resolve,
896 iterate_inode_ref_t iterate, void *ctx)
897 {
898 struct extent_buffer *eb = path->nodes[0];
899 struct btrfs_item *item;
900 struct btrfs_inode_ref *iref;
901 struct btrfs_inode_extref *extref;
902 struct btrfs_path *tmp_path;
903 struct fs_path *p;
904 u32 cur = 0;
905 u32 total;
906 int slot = path->slots[0];
907 u32 name_len;
908 char *start;
909 int ret = 0;
910 int num = 0;
911 int index;
912 u64 dir;
913 unsigned long name_off;
914 unsigned long elem_size;
915 unsigned long ptr;
916
917 p = fs_path_alloc_reversed();
918 if (!p)
919 return -ENOMEM;
920
921 tmp_path = alloc_path_for_send();
922 if (!tmp_path) {
923 fs_path_free(p);
924 return -ENOMEM;
925 }
926
927
928 if (found_key->type == BTRFS_INODE_REF_KEY) {
929 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
930 struct btrfs_inode_ref);
931 item = btrfs_item_nr(slot);
932 total = btrfs_item_size(eb, item);
933 elem_size = sizeof(*iref);
934 } else {
935 ptr = btrfs_item_ptr_offset(eb, slot);
936 total = btrfs_item_size_nr(eb, slot);
937 elem_size = sizeof(*extref);
938 }
939
940 while (cur < total) {
941 fs_path_reset(p);
942
943 if (found_key->type == BTRFS_INODE_REF_KEY) {
944 iref = (struct btrfs_inode_ref *)(ptr + cur);
945 name_len = btrfs_inode_ref_name_len(eb, iref);
946 name_off = (unsigned long)(iref + 1);
947 index = btrfs_inode_ref_index(eb, iref);
948 dir = found_key->offset;
949 } else {
950 extref = (struct btrfs_inode_extref *)(ptr + cur);
951 name_len = btrfs_inode_extref_name_len(eb, extref);
952 name_off = (unsigned long)&extref->name;
953 index = btrfs_inode_extref_index(eb, extref);
954 dir = btrfs_inode_extref_parent(eb, extref);
955 }
956
957 if (resolve) {
958 start = btrfs_ref_to_path(root, tmp_path, name_len,
959 name_off, eb, dir,
960 p->buf, p->buf_len);
961 if (IS_ERR(start)) {
962 ret = PTR_ERR(start);
963 goto out;
964 }
965 if (start < p->buf) {
966 /* overflow, try again with a larger buffer */
967 ret = fs_path_ensure_buf(p,
968 p->buf_len + p->buf - start);
969 if (ret < 0)
970 goto out;
971 start = btrfs_ref_to_path(root, tmp_path,
972 name_len, name_off,
973 eb, dir,
974 p->buf, p->buf_len);
975 if (IS_ERR(start)) {
976 ret = PTR_ERR(start);
977 goto out;
978 }
979 BUG_ON(start < p->buf);
980 }
981 p->start = start;
982 } else {
983 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
984 name_len);
985 if (ret < 0)
986 goto out;
987 }
988
989 cur += elem_size + name_len;
990 ret = iterate(num, dir, index, p, ctx);
991 if (ret)
992 goto out;
993 num++;
994 }
995
996 out:
997 btrfs_free_path(tmp_path);
998 fs_path_free(p);
999 return ret;
1000 }
1001
1002 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
1003 const char *name, int name_len,
1004 const char *data, int data_len,
1005 u8 type, void *ctx);
1006
1007 /*
1008 * Helper function to iterate the entries in ONE btrfs_dir_item.
1009 * The iterate callback may return a non-zero value to stop iteration. This can
1010 * be a negative value for error codes or 1 to simply stop it.
1011 *
1012 * path must point to the dir item when called.
1013 */
1014 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1015 iterate_dir_item_t iterate, void *ctx)
1016 {
1017 int ret = 0;
1018 struct extent_buffer *eb;
1019 struct btrfs_item *item;
1020 struct btrfs_dir_item *di;
1021 struct btrfs_key di_key;
1022 char *buf = NULL;
1023 int buf_len;
1024 u32 name_len;
1025 u32 data_len;
1026 u32 cur;
1027 u32 len;
1028 u32 total;
1029 int slot;
1030 int num;
1031 u8 type;
1032
1033 /*
1034 * Start with a small buffer (1 page). If later we end up needing more
1035 * space, which can happen for xattrs on a fs with a leaf size greater
1036 * than the page size, attempt to increase the buffer. Typically xattr
1037 * values are small.
1038 */
1039 buf_len = PATH_MAX;
1040 buf = kmalloc(buf_len, GFP_KERNEL);
1041 if (!buf) {
1042 ret = -ENOMEM;
1043 goto out;
1044 }
1045
1046 eb = path->nodes[0];
1047 slot = path->slots[0];
1048 item = btrfs_item_nr(slot);
1049 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1050 cur = 0;
1051 len = 0;
1052 total = btrfs_item_size(eb, item);
1053
1054 num = 0;
1055 while (cur < total) {
1056 name_len = btrfs_dir_name_len(eb, di);
1057 data_len = btrfs_dir_data_len(eb, di);
1058 type = btrfs_dir_type(eb, di);
1059 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1060
1061 if (type == BTRFS_FT_XATTR) {
1062 if (name_len > XATTR_NAME_MAX) {
1063 ret = -ENAMETOOLONG;
1064 goto out;
1065 }
1066 if (name_len + data_len >
1067 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1068 ret = -E2BIG;
1069 goto out;
1070 }
1071 } else {
1072 /*
1073 * Path too long
1074 */
1075 if (name_len + data_len > PATH_MAX) {
1076 ret = -ENAMETOOLONG;
1077 goto out;
1078 }
1079 }
1080
1081 if (name_len + data_len > buf_len) {
1082 buf_len = name_len + data_len;
1083 if (is_vmalloc_addr(buf)) {
1084 vfree(buf);
1085 buf = NULL;
1086 } else {
1087 char *tmp = krealloc(buf, buf_len,
1088 GFP_KERNEL | __GFP_NOWARN);
1089
1090 if (!tmp)
1091 kfree(buf);
1092 buf = tmp;
1093 }
1094 if (!buf) {
1095 buf = kvmalloc(buf_len, GFP_KERNEL);
1096 if (!buf) {
1097 ret = -ENOMEM;
1098 goto out;
1099 }
1100 }
1101 }
1102
1103 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1104 name_len + data_len);
1105
1106 len = sizeof(*di) + name_len + data_len;
1107 di = (struct btrfs_dir_item *)((char *)di + len);
1108 cur += len;
1109
1110 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1111 data_len, type, ctx);
1112 if (ret < 0)
1113 goto out;
1114 if (ret) {
1115 ret = 0;
1116 goto out;
1117 }
1118
1119 num++;
1120 }
1121
1122 out:
1123 kvfree(buf);
1124 return ret;
1125 }
1126
1127 static int __copy_first_ref(int num, u64 dir, int index,
1128 struct fs_path *p, void *ctx)
1129 {
1130 int ret;
1131 struct fs_path *pt = ctx;
1132
1133 ret = fs_path_copy(pt, p);
1134 if (ret < 0)
1135 return ret;
1136
1137 /* we want the first only */
1138 return 1;
1139 }
1140
1141 /*
1142 * Retrieve the first path of an inode. If an inode has more than one
1143 * ref/hardlink, this is ignored.
1144 */
1145 static int get_inode_path(struct btrfs_root *root,
1146 u64 ino, struct fs_path *path)
1147 {
1148 int ret;
1149 struct btrfs_key key, found_key;
1150 struct btrfs_path *p;
1151
1152 p = alloc_path_for_send();
1153 if (!p)
1154 return -ENOMEM;
1155
1156 fs_path_reset(path);
1157
1158 key.objectid = ino;
1159 key.type = BTRFS_INODE_REF_KEY;
1160 key.offset = 0;
1161
1162 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1163 if (ret < 0)
1164 goto out;
1165 if (ret) {
1166 ret = 1;
1167 goto out;
1168 }
1169 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1170 if (found_key.objectid != ino ||
1171 (found_key.type != BTRFS_INODE_REF_KEY &&
1172 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1173 ret = -ENOENT;
1174 goto out;
1175 }
1176
1177 ret = iterate_inode_ref(root, p, &found_key, 1,
1178 __copy_first_ref, path);
1179 if (ret < 0)
1180 goto out;
1181 ret = 0;
1182
1183 out:
1184 btrfs_free_path(p);
1185 return ret;
1186 }
1187
1188 struct backref_ctx {
1189 struct send_ctx *sctx;
1190
1191 /* number of total found references */
1192 u64 found;
1193
1194 /*
1195 * Used for clones found in send_root. Clones found behind cur_objectid
1196 * and cur_offset are not considered allowed clones.
1197 */
1198 u64 cur_objectid;
1199 u64 cur_offset;
1200
1201 /* may be truncated in case it's the last extent in a file */
1202 u64 extent_len;
1203
1204 /* Just to check for bugs in backref resolving */
1205 int found_itself;
1206 };
1207
1208 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1209 {
1210 u64 root = (u64)(uintptr_t)key;
1211 const struct clone_root *cr = elt;
1212
1213 if (root < cr->root->root_key.objectid)
1214 return -1;
1215 if (root > cr->root->root_key.objectid)
1216 return 1;
1217 return 0;
1218 }
1219
1220 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1221 {
1222 const struct clone_root *cr1 = e1;
1223 const struct clone_root *cr2 = e2;
1224
1225 if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
1226 return -1;
1227 if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
1228 return 1;
1229 return 0;
1230 }
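
/*
 * Illustrative note, not from the shown code: the clone_roots array is
 * expected to be sorted by root objectid before backref walking starts,
 * e.g. with the kernel's sort():
 *
 *	sort(sctx->clone_roots, sctx->clone_roots_cnt,
 *	     sizeof(*sctx->clone_roots), __clone_root_cmp_sort, NULL);
 *
 * so that __iterate_backrefs() can bsearch() a candidate root in
 * O(log n) for every reference the backref walker reports.
 */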
1231
1232 /*
1233 * Called for every backref that is found for the current extent.
1234 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1235 */
1236 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1237 {
1238 struct backref_ctx *bctx = ctx_;
1239 struct clone_root *found;
1240
1241 /* First check if the root is in the list of accepted clone sources */
1242 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1243 bctx->sctx->clone_roots_cnt,
1244 sizeof(struct clone_root),
1245 __clone_root_cmp_bsearch);
1246 if (!found)
1247 return 0;
1248
1249 if (found->root == bctx->sctx->send_root &&
1250 ino == bctx->cur_objectid &&
1251 offset == bctx->cur_offset) {
1252 bctx->found_itself = 1;
1253 }
1254
1255 /*
1256 * Make sure we don't consider clones from send_root that are
1257 * behind the current inode/offset.
1258 */
1259 if (found->root == bctx->sctx->send_root) {
1260 /*
1261 * If the source inode was not yet processed we can't issue a
1262 * clone operation, as the source extent does not exist yet at
1263 * the destination of the stream.
1264 */
1265 if (ino > bctx->cur_objectid)
1266 return 0;
1267 /*
1268 * We clone from the inode currently being sent as long as the
1269 * source extent is already processed, otherwise we could try
1270 * to clone from an extent that does not exist yet at the
1271 * destination of the stream.
1272 */
1273 if (ino == bctx->cur_objectid &&
1274 offset + bctx->extent_len >
1275 bctx->sctx->cur_inode_next_write_offset)
1276 return 0;
1277 }
1278
1279 bctx->found++;
1280 found->found_refs++;
1281 if (ino < found->ino) {
1282 found->ino = ino;
1283 found->offset = offset;
1284 } else if (found->ino == ino) {
1285 /*
1286 * same extent found more than once in the same file.
1287 */
1288 if (found->offset > offset + bctx->extent_len)
1289 found->offset = offset;
1290 }
1291
1292 return 0;
1293 }
1294
1295 /*
1296 * Given an inode, offset and extent item, it finds a good clone for a clone
1297 * instruction. Returns -ENOENT when none could be found. The function makes
1298 * sure that the returned clone is usable at the point where sending is at the
1299 * moment. This means that no clones are accepted which lie behind the current
1300 * inode+offset.
1301 *
1302 * path must point to the extent item when called.
1303 */
1304 static int find_extent_clone(struct send_ctx *sctx,
1305 struct btrfs_path *path,
1306 u64 ino, u64 data_offset,
1307 u64 ino_size,
1308 struct clone_root **found)
1309 {
1310 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1311 int ret;
1312 int extent_type;
1313 u64 logical;
1314 u64 disk_byte;
1315 u64 num_bytes;
1316 u64 extent_item_pos;
1317 u64 flags = 0;
1318 struct btrfs_file_extent_item *fi;
1319 struct extent_buffer *eb = path->nodes[0];
1320 struct backref_ctx backref_ctx = {0};
1321 struct clone_root *cur_clone_root;
1322 struct btrfs_key found_key;
1323 struct btrfs_path *tmp_path;
1324 struct btrfs_extent_item *ei;
1325 int compressed;
1326 u32 i;
1327
1328 tmp_path = alloc_path_for_send();
1329 if (!tmp_path)
1330 return -ENOMEM;
1331
1332 /* We only use this path under the commit sem */
1333 tmp_path->need_commit_sem = 0;
1334
1335 if (data_offset >= ino_size) {
1336 /*
1337 * There may be extents that lie behind the file's size.
1338 * I at least had this in combination with snapshotting while
1339 * writing large files.
1340 */
1341 ret = 0;
1342 goto out;
1343 }
1344
1345 fi = btrfs_item_ptr(eb, path->slots[0],
1346 struct btrfs_file_extent_item);
1347 extent_type = btrfs_file_extent_type(eb, fi);
1348 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1349 ret = -ENOENT;
1350 goto out;
1351 }
1352 compressed = btrfs_file_extent_compression(eb, fi);
1353
1354 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1355 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1356 if (disk_byte == 0) {
1357 ret = -ENOENT;
1358 goto out;
1359 }
1360 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1361
1362 down_read(&fs_info->commit_root_sem);
1363 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1364 &found_key, &flags);
1365 up_read(&fs_info->commit_root_sem);
1366
1367 if (ret < 0)
1368 goto out;
1369 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1370 ret = -EIO;
1371 goto out;
1372 }
1373
1374 ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
1375 struct btrfs_extent_item);
1376 /*
1377 * Backreference walking (iterate_extent_inodes() below) is currently
1378 * too expensive when an extent has a large number of references, both
1379 * in time spent and used memory. So for now just fallback to write
1380 * operations instead of clone operations when an extent has more than
1381 * a certain amount of references.
1382 */
1383 if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
1384 ret = -ENOENT;
1385 goto out;
1386 }
1387 btrfs_release_path(tmp_path);
1388
1389 /*
1390 * Setup the clone roots.
1391 */
1392 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1393 cur_clone_root = sctx->clone_roots + i;
1394 cur_clone_root->ino = (u64)-1;
1395 cur_clone_root->offset = 0;
1396 cur_clone_root->found_refs = 0;
1397 }
1398
1399 backref_ctx.sctx = sctx;
1400 backref_ctx.found = 0;
1401 backref_ctx.cur_objectid = ino;
1402 backref_ctx.cur_offset = data_offset;
1403 backref_ctx.found_itself = 0;
1404 backref_ctx.extent_len = num_bytes;
1405
1406 /*
1407 * The last extent of a file may be too large due to page alignment.
1408 * We need to adjust extent_len in this case so that the checks in
1409 * __iterate_backrefs work.
1410 */
1411 if (data_offset + num_bytes >= ino_size)
1412 backref_ctx.extent_len = ino_size - data_offset;
1413
1414 /*
1415 * Now collect all backrefs.
1416 */
1417 if (compressed == BTRFS_COMPRESS_NONE)
1418 extent_item_pos = logical - found_key.objectid;
1419 else
1420 extent_item_pos = 0;
1421 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1422 extent_item_pos, 1, __iterate_backrefs,
1423 &backref_ctx, false);
1424
1425 if (ret < 0)
1426 goto out;
1427
1428 down_read(&fs_info->commit_root_sem);
1429 if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
1430 /*
1431 * A transaction commit for a transaction in which block group
1432 * relocation was done just happened.
1433 * The disk_bytenr of the file extent item we processed is
1434 * possibly stale, referring to the extent's location before
1435 * relocation. So act as if we haven't found any clone sources
1436 * and fallback to write commands, which will read the correct
1437 * data from the new extent location. Otherwise we will fail
1438 * below because we haven't found our own back reference or we
1439 * could be getting incorrect sources in case the old extent
1440 * was already reallocated after the relocation.
1441 */
1442 up_read(&fs_info->commit_root_sem);
1443 ret = -ENOENT;
1444 goto out;
1445 }
1446 up_read(&fs_info->commit_root_sem);
1447
1448 if (!backref_ctx.found_itself) {
1449 /* found a bug in backref code? */
1450 ret = -EIO;
1451 btrfs_err(fs_info,
1452 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1453 ino, data_offset, disk_byte, found_key.objectid);
1454 goto out;
1455 }
1456
1457 btrfs_debug(fs_info,
1458 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1459 data_offset, ino, num_bytes, logical);
1460
1461 if (!backref_ctx.found)
1462 btrfs_debug(fs_info, "no clones found");
1463
1464 cur_clone_root = NULL;
1465 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1466 if (sctx->clone_roots[i].found_refs) {
1467 if (!cur_clone_root)
1468 cur_clone_root = sctx->clone_roots + i;
1469 else if (sctx->clone_roots[i].root == sctx->send_root)
1470 /* prefer clones from send_root over others */
1471 cur_clone_root = sctx->clone_roots + i;
1472 }
1473
1474 }
1475
1476 if (cur_clone_root) {
1477 *found = cur_clone_root;
1478 ret = 0;
1479 } else {
1480 ret = -ENOENT;
1481 }
1482
1483 out:
1484 btrfs_free_path(tmp_path);
1485 return ret;
1486 }
1487
1488 static int read_symlink(struct btrfs_root *root,
1489 u64 ino,
1490 struct fs_path *dest)
1491 {
1492 int ret;
1493 struct btrfs_path *path;
1494 struct btrfs_key key;
1495 struct btrfs_file_extent_item *ei;
1496 u8 type;
1497 u8 compression;
1498 unsigned long off;
1499 int len;
1500
1501 path = alloc_path_for_send();
1502 if (!path)
1503 return -ENOMEM;
1504
1505 key.objectid = ino;
1506 key.type = BTRFS_EXTENT_DATA_KEY;
1507 key.offset = 0;
1508 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1509 if (ret < 0)
1510 goto out;
1511 if (ret) {
1512 /*
1513 * An empty symlink inode. Can happen in rare error paths when
1514 * creating a symlink (transaction committed before the inode
1515 * eviction handler removed the symlink inode items and a crash
1516 * happened in between or the subvol was snapshotted in between).
1517 * Print an informative message to dmesg/syslog so that the user
1518 * can delete the symlink.
1519 */
1520 btrfs_err(root->fs_info,
1521 "Found empty symlink inode %llu at root %llu",
1522 ino, root->root_key.objectid);
1523 ret = -EIO;
1524 goto out;
1525 }
1526
1527 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1528 struct btrfs_file_extent_item);
1529 type = btrfs_file_extent_type(path->nodes[0], ei);
1530 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1531 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1532 BUG_ON(compression);
1533
1534 off = btrfs_file_extent_inline_start(ei);
1535 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1536
1537 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1538
1539 out:
1540 btrfs_free_path(path);
1541 return ret;
1542 }
1543
1544 /*
1545 * Helper function to generate a file name that is unique in the root of
1546 * send_root and parent_root. This is used to generate names for orphan inodes.
1547 */
1548 static int gen_unique_name(struct send_ctx *sctx,
1549 u64 ino, u64 gen,
1550 struct fs_path *dest)
1551 {
1552 int ret = 0;
1553 struct btrfs_path *path;
1554 struct btrfs_dir_item *di;
1555 char tmp[64];
1556 int len;
1557 u64 idx = 0;
1558
1559 path = alloc_path_for_send();
1560 if (!path)
1561 return -ENOMEM;
1562
1563 while (1) {
1564 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1565 ino, gen, idx);
1566 ASSERT(len < sizeof(tmp));
1567
1568 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1569 path, BTRFS_FIRST_FREE_OBJECTID,
1570 tmp, strlen(tmp), 0);
1571 btrfs_release_path(path);
1572 if (IS_ERR(di)) {
1573 ret = PTR_ERR(di);
1574 goto out;
1575 }
1576 if (di) {
1577 /* not unique, try again */
1578 idx++;
1579 continue;
1580 }
1581
1582 if (!sctx->parent_root) {
1583 /* unique */
1584 ret = 0;
1585 break;
1586 }
1587
1588 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1589 path, BTRFS_FIRST_FREE_OBJECTID,
1590 tmp, strlen(tmp), 0);
1591 btrfs_release_path(path);
1592 if (IS_ERR(di)) {
1593 ret = PTR_ERR(di);
1594 goto out;
1595 }
1596 if (di) {
1597 /* not unique, try again */
1598 idx++;
1599 continue;
1600 }
1601 /* unique */
1602 break;
1603 }
1604
1605 ret = fs_path_add(dest, tmp, strlen(tmp));
1606
1607 out:
1608 btrfs_free_path(path);
1609 return ret;
1610 }
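
/*
 * Example, illustrative only: for ino 261 and generation 7 this produces
 * orphan names of the form
 *
 *	o261-7-0, o261-7-1, o261-7-2, ...
 *
 * incrementing idx until a name is free in the top level directory of
 * both the send root and, if present, the parent root.
 */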
1611
1612 enum inode_state {
1613 inode_state_no_change,
1614 inode_state_will_create,
1615 inode_state_did_create,
1616 inode_state_will_delete,
1617 inode_state_did_delete,
1618 };
1619
1620 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1621 {
1622 int ret;
1623 int left_ret;
1624 int right_ret;
1625 u64 left_gen;
1626 u64 right_gen;
1627
1628 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1629 NULL, NULL);
1630 if (ret < 0 && ret != -ENOENT)
1631 goto out;
1632 left_ret = ret;
1633
1634 if (!sctx->parent_root) {
1635 right_ret = -ENOENT;
1636 } else {
1637 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1638 NULL, NULL, NULL, NULL);
1639 if (ret < 0 && ret != -ENOENT)
1640 goto out;
1641 right_ret = ret;
1642 }
1643
1644 if (!left_ret && !right_ret) {
1645 if (left_gen == gen && right_gen == gen) {
1646 ret = inode_state_no_change;
1647 } else if (left_gen == gen) {
1648 if (ino < sctx->send_progress)
1649 ret = inode_state_did_create;
1650 else
1651 ret = inode_state_will_create;
1652 } else if (right_gen == gen) {
1653 if (ino < sctx->send_progress)
1654 ret = inode_state_did_delete;
1655 else
1656 ret = inode_state_will_delete;
1657 } else {
1658 ret = -ENOENT;
1659 }
1660 } else if (!left_ret) {
1661 if (left_gen == gen) {
1662 if (ino < sctx->send_progress)
1663 ret = inode_state_did_create;
1664 else
1665 ret = inode_state_will_create;
1666 } else {
1667 ret = -ENOENT;
1668 }
1669 } else if (!right_ret) {
1670 if (right_gen == gen) {
1671 if (ino < sctx->send_progress)
1672 ret = inode_state_did_delete;
1673 else
1674 ret = inode_state_will_delete;
1675 } else {
1676 ret = -ENOENT;
1677 }
1678 } else {
1679 ret = -ENOENT;
1680 }
1681
1682 out:
1683 return ret;
1684 }
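
/*
 * Illustrative summary of the decision above, not part of the original
 * source, with "left" = send root and "right" = parent root:
 *
 *	gen matches left and right  -> inode_state_no_change
 *	gen matches left only       -> did_create if ino < send_progress,
 *	                               else will_create
 *	gen matches right only      -> did_delete if ino < send_progress,
 *	                               else will_delete
 *	no match                    -> -ENOENT
 */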
1685
1686 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1687 {
1688 int ret;
1689
1690 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1691 return 1;
1692
1693 ret = get_cur_inode_state(sctx, ino, gen);
1694 if (ret < 0)
1695 goto out;
1696
1697 if (ret == inode_state_no_change ||
1698 ret == inode_state_did_create ||
1699 ret == inode_state_will_delete)
1700 ret = 1;
1701 else
1702 ret = 0;
1703
1704 out:
1705 return ret;
1706 }
1707
1708 /*
1709 * Helper function to lookup a dir item in a dir.
1710 */
1711 static int lookup_dir_item_inode(struct btrfs_root *root,
1712 u64 dir, const char *name, int name_len,
1713 u64 *found_inode,
1714 u8 *found_type)
1715 {
1716 int ret = 0;
1717 struct btrfs_dir_item *di;
1718 struct btrfs_key key;
1719 struct btrfs_path *path;
1720
1721 path = alloc_path_for_send();
1722 if (!path)
1723 return -ENOMEM;
1724
1725 di = btrfs_lookup_dir_item(NULL, root, path,
1726 dir, name, name_len, 0);
1727 if (IS_ERR_OR_NULL(di)) {
1728 ret = di ? PTR_ERR(di) : -ENOENT;
1729 goto out;
1730 }
1731 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1732 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1733 ret = -ENOENT;
1734 goto out;
1735 }
1736 *found_inode = key.objectid;
1737 *found_type = btrfs_dir_type(path->nodes[0], di);
1738
1739 out:
1740 btrfs_free_path(path);
1741 return ret;
1742 }
1743
1744 /*
1745 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1746 * generation of the parent dir and the name of the dir entry.
1747 */
1748 static int get_first_ref(struct btrfs_root *root, u64 ino,
1749 u64 *dir, u64 *dir_gen, struct fs_path *name)
1750 {
1751 int ret;
1752 struct btrfs_key key;
1753 struct btrfs_key found_key;
1754 struct btrfs_path *path;
1755 int len;
1756 u64 parent_dir;
1757
1758 path = alloc_path_for_send();
1759 if (!path)
1760 return -ENOMEM;
1761
1762 key.objectid = ino;
1763 key.type = BTRFS_INODE_REF_KEY;
1764 key.offset = 0;
1765
1766 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1767 if (ret < 0)
1768 goto out;
1769 if (!ret)
1770 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1771 path->slots[0]);
1772 if (ret || found_key.objectid != ino ||
1773 (found_key.type != BTRFS_INODE_REF_KEY &&
1774 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1775 ret = -ENOENT;
1776 goto out;
1777 }
1778
1779 if (found_key.type == BTRFS_INODE_REF_KEY) {
1780 struct btrfs_inode_ref *iref;
1781 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1782 struct btrfs_inode_ref);
1783 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1784 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1785 (unsigned long)(iref + 1),
1786 len);
1787 parent_dir = found_key.offset;
1788 } else {
1789 struct btrfs_inode_extref *extref;
1790 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1791 struct btrfs_inode_extref);
1792 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1793 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1794 (unsigned long)&extref->name, len);
1795 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1796 }
1797 if (ret < 0)
1798 goto out;
1799 btrfs_release_path(path);
1800
1801 if (dir_gen) {
1802 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1803 NULL, NULL, NULL);
1804 if (ret < 0)
1805 goto out;
1806 }
1807
1808 *dir = parent_dir;
1809
1810 out:
1811 btrfs_free_path(path);
1812 return ret;
1813 }
1814
1815 static int is_first_ref(struct btrfs_root *root,
1816 u64 ino, u64 dir,
1817 const char *name, int name_len)
1818 {
1819 int ret;
1820 struct fs_path *tmp_name;
1821 u64 tmp_dir;
1822
1823 tmp_name = fs_path_alloc();
1824 if (!tmp_name)
1825 return -ENOMEM;
1826
1827 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1828 if (ret < 0)
1829 goto out;
1830
1831 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1832 ret = 0;
1833 goto out;
1834 }
1835
1836 ret = !memcmp(tmp_name->start, name, name_len);
1837
1838 out:
1839 fs_path_free(tmp_name);
1840 return ret;
1841 }
1842
1843 /*
1844 * Used by process_recorded_refs to determine if a new ref would overwrite an
1845 * already existing ref. In case it detects an overwrite, it returns the
1846 * inode/gen in who_ino/who_gen.
1847 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1848 * to make sure later references to the overwritten inode are possible.
1849 * Orphanizing is however only required for the first ref of an inode.
1850 * process_recorded_refs does an additional is_first_ref check to see if
1851 * orphanizing is really required.
1852 */
1853 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1854 const char *name, int name_len,
1855 u64 *who_ino, u64 *who_gen, u64 *who_mode)
1856 {
1857 int ret = 0;
1858 u64 gen;
1859 u64 other_inode = 0;
1860 u8 other_type = 0;
1861
1862 if (!sctx->parent_root)
1863 goto out;
1864
1865 ret = is_inode_existent(sctx, dir, dir_gen);
1866 if (ret <= 0)
1867 goto out;
1868
1869 /*
1870 * If we have a parent root we need to verify that the parent dir was
1871 * not deleted and then re-created; if it was, then we have no overwrite
1872 * and we can just unlink this entry.
1873 */
1874 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1875 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1876 NULL, NULL, NULL);
1877 if (ret < 0 && ret != -ENOENT)
1878 goto out;
1879 if (ret) {
1880 ret = 0;
1881 goto out;
1882 }
1883 if (gen != dir_gen)
1884 goto out;
1885 }
1886
1887 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1888 &other_inode, &other_type);
1889 if (ret < 0 && ret != -ENOENT)
1890 goto out;
1891 if (ret) {
1892 ret = 0;
1893 goto out;
1894 }
1895
1896 /*
1897 * Check if the overwritten ref was already processed. If yes, the ref
1898 * was already unlinked/moved, so we can safely assume that we will not
1899 * overwrite anything at this point in time.
1900 */
1901 if (other_inode > sctx->send_progress ||
1902 is_waiting_for_move(sctx, other_inode)) {
1903 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1904 who_gen, who_mode, NULL, NULL, NULL);
1905 if (ret < 0)
1906 goto out;
1907
1908 ret = 1;
1909 *who_ino = other_inode;
1910 } else {
1911 ret = 0;
1912 }
1913
1914 out:
1915 return ret;
1916 }
1917
1918 /*
1919 * Checks if the ref was overwritten by an already processed inode. This is
1920 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1921 * thus the orphan name needs to be used.
1922 * process_recorded_refs also uses it to avoid unlinking of refs that were
1923 * overwritten.
1924 */
1925 static int did_overwrite_ref(struct send_ctx *sctx,
1926 u64 dir, u64 dir_gen,
1927 u64 ino, u64 ino_gen,
1928 const char *name, int name_len)
1929 {
1930 int ret = 0;
1931 u64 gen;
1932 u64 ow_inode;
1933 u8 other_type;
1934
1935 if (!sctx->parent_root)
1936 goto out;
1937
1938 ret = is_inode_existent(sctx, dir, dir_gen);
1939 if (ret <= 0)
1940 goto out;
1941
1942 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1943 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1944 NULL, NULL, NULL);
1945 if (ret < 0 && ret != -ENOENT)
1946 goto out;
1947 if (ret) {
1948 ret = 0;
1949 goto out;
1950 }
1951 if (gen != dir_gen)
1952 goto out;
1953 }
1954
1955 /* check if the ref was overwritten by another ref */
1956 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1957 &ow_inode, &other_type);
1958 if (ret < 0 && ret != -ENOENT)
1959 goto out;
1960 if (ret) {
1961 /* was never and will never be overwritten */
1962 ret = 0;
1963 goto out;
1964 }
1965
1966 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1967 NULL, NULL);
1968 if (ret < 0)
1969 goto out;
1970
1971 if (ow_inode == ino && gen == ino_gen) {
1972 ret = 0;
1973 goto out;
1974 }
1975
1976 /*
1977 * We know that it is or will be overwritten. Check this now.
1978 * The current inode being processed might have been the one that caused
1979 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1980 * the current inode being processed.
1981 */
1982 if ((ow_inode < sctx->send_progress) ||
1983 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1984 gen == sctx->cur_inode_gen))
1985 ret = 1;
1986 else
1987 ret = 0;
1988
1989 out:
1990 return ret;
1991 }
1992
1993 /*
1994 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1995 * that got overwritten. This is used by process_recorded_refs to determine
1996 * if it has to use the path as returned by get_cur_path or the orphan name.
1997 */
1998 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1999 {
2000 int ret = 0;
2001 struct fs_path *name = NULL;
2002 u64 dir;
2003 u64 dir_gen;
2004
2005 if (!sctx->parent_root)
2006 goto out;
2007
2008 name = fs_path_alloc();
2009 if (!name)
2010 return -ENOMEM;
2011
2012 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
2013 if (ret < 0)
2014 goto out;
2015
2016 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
2017 name->start, fs_path_len(name));
2018
2019 out:
2020 fs_path_free(name);
2021 return ret;
2022 }
2023
2024 /*
2025 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
2026 * so we need to do some special handling in case we have clashes. This function
2027 * takes care of this with the help of name_cache_entry::radix_list.
2028 * In case of error, nce is kfreed.
2029 */
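/*
 * Illustrative example (made-up inode numbers, not taken from a real
 * filesystem): on a 32-bit kernel the radix tree index is an unsigned long,
 * so the inodes 0x100000005 and 0x200000005 both map to index 0x5. Both of
 * their entries end up on the same nce_head list and are later told apart
 * by name_cache_search(), which compares the full ino and gen stored in
 * each list entry.
 */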
2030 static int name_cache_insert(struct send_ctx *sctx,
2031 struct name_cache_entry *nce)
2032 {
2033 int ret = 0;
2034 struct list_head *nce_head;
2035
2036 nce_head = radix_tree_lookup(&sctx->name_cache,
2037 (unsigned long)nce->ino);
2038 if (!nce_head) {
2039 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2040 if (!nce_head) {
2041 kfree(nce);
2042 return -ENOMEM;
2043 }
2044 INIT_LIST_HEAD(nce_head);
2045
2046 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2047 if (ret < 0) {
2048 kfree(nce_head);
2049 kfree(nce);
2050 return ret;
2051 }
2052 }
2053 list_add_tail(&nce->radix_list, nce_head);
2054 list_add_tail(&nce->list, &sctx->name_cache_list);
2055 sctx->name_cache_size++;
2056
2057 return ret;
2058 }
2059
2060 static void name_cache_delete(struct send_ctx *sctx,
2061 struct name_cache_entry *nce)
2062 {
2063 struct list_head *nce_head;
2064
2065 nce_head = radix_tree_lookup(&sctx->name_cache,
2066 (unsigned long)nce->ino);
2067 if (!nce_head) {
2068 btrfs_err(sctx->send_root->fs_info,
2069 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2070 nce->ino, sctx->name_cache_size);
2071 }
2072
2073 list_del(&nce->radix_list);
2074 list_del(&nce->list);
2075 sctx->name_cache_size--;
2076
2077 /*
2078 * We may not get to the final release of nce_head if the lookup fails
2079 */
2080 if (nce_head && list_empty(nce_head)) {
2081 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2082 kfree(nce_head);
2083 }
2084 }
2085
2086 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2087 u64 ino, u64 gen)
2088 {
2089 struct list_head *nce_head;
2090 struct name_cache_entry *cur;
2091
2092 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2093 if (!nce_head)
2094 return NULL;
2095
2096 list_for_each_entry(cur, nce_head, radix_list) {
2097 if (cur->ino == ino && cur->gen == gen)
2098 return cur;
2099 }
2100 return NULL;
2101 }
2102
2103 /*
2104 * Remove some entries from the beginning of name_cache_list.
2105 */
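/*
 * Rough sketch of the intended LRU behaviour: entries are appended to
 * name_cache_list on insert and moved back to the tail on every cache hit
 * (see __get_cur_name_and_parent()), so the head of the list holds the
 * least recently used entries. Once the cache has grown to
 * SEND_CTX_NAME_CACHE_CLEAN_SIZE entries, the loop below drops entries from
 * the head until the size is back down to SEND_CTX_MAX_NAME_CACHE_SIZE.
 */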
2106 static void name_cache_clean_unused(struct send_ctx *sctx)
2107 {
2108 struct name_cache_entry *nce;
2109
2110 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2111 return;
2112
2113 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2114 nce = list_entry(sctx->name_cache_list.next,
2115 struct name_cache_entry, list);
2116 name_cache_delete(sctx, nce);
2117 kfree(nce);
2118 }
2119 }
2120
2121 static void name_cache_free(struct send_ctx *sctx)
2122 {
2123 struct name_cache_entry *nce;
2124
2125 while (!list_empty(&sctx->name_cache_list)) {
2126 nce = list_entry(sctx->name_cache_list.next,
2127 struct name_cache_entry, list);
2128 name_cache_delete(sctx, nce);
2129 kfree(nce);
2130 }
2131 }
2132
2133 /*
2134 * Used by get_cur_path for each ref up to the root.
2135 * Returns 0 if it succeeded.
2136 * Returns 1 if the inode is not existent or got overwritten. In that case, the
2137 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2138 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2139 * Returns <0 in case of error.
2140 */
2141 static int __get_cur_name_and_parent(struct send_ctx *sctx,
2142 u64 ino, u64 gen,
2143 u64 *parent_ino,
2144 u64 *parent_gen,
2145 struct fs_path *dest)
2146 {
2147 int ret;
2148 int nce_ret;
2149 struct name_cache_entry *nce = NULL;
2150
2151 /*
2152 * First check if we already did a call to this function with the same
2153 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2154 * return the cached result.
2155 */
2156 nce = name_cache_search(sctx, ino, gen);
2157 if (nce) {
2158 if (ino < sctx->send_progress && nce->need_later_update) {
2159 name_cache_delete(sctx, nce);
2160 kfree(nce);
2161 nce = NULL;
2162 } else {
2163 /*
2164 * Removes the entry from the list and adds it back to
2165 * the end. This marks the entry as recently used so
2166 * that name_cache_clean_unused does not remove it.
2167 */
2168 list_move_tail(&nce->list, &sctx->name_cache_list);
2169
2170 *parent_ino = nce->parent_ino;
2171 *parent_gen = nce->parent_gen;
2172 ret = fs_path_add(dest, nce->name, nce->name_len);
2173 if (ret < 0)
2174 goto out;
2175 ret = nce->ret;
2176 goto out;
2177 }
2178 }
2179
2180 /*
2181 * If the inode is not existent yet, add the orphan name and return 1.
2182 * This should only happen for the parent dir that we determine in
2183 * __record_new_ref
2184 */
2185 ret = is_inode_existent(sctx, ino, gen);
2186 if (ret < 0)
2187 goto out;
2188
2189 if (!ret) {
2190 ret = gen_unique_name(sctx, ino, gen, dest);
2191 if (ret < 0)
2192 goto out;
2193 ret = 1;
2194 goto out_cache;
2195 }
2196
2197 /*
2198 * Depending on whether the inode was already processed or not, use
2199 * send_root or parent_root for ref lookup.
2200 */
2201 if (ino < sctx->send_progress)
2202 ret = get_first_ref(sctx->send_root, ino,
2203 parent_ino, parent_gen, dest);
2204 else
2205 ret = get_first_ref(sctx->parent_root, ino,
2206 parent_ino, parent_gen, dest);
2207 if (ret < 0)
2208 goto out;
2209
2210 /*
2211 * Check if the ref was overwritten by an inode's ref that was processed
2212 * earlier. If yes, treat as orphan and return 1.
2213 */
2214 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2215 dest->start, dest->end - dest->start);
2216 if (ret < 0)
2217 goto out;
2218 if (ret) {
2219 fs_path_reset(dest);
2220 ret = gen_unique_name(sctx, ino, gen, dest);
2221 if (ret < 0)
2222 goto out;
2223 ret = 1;
2224 }
2225
2226 out_cache:
2227 /*
2228 * Store the result of the lookup in the name cache.
2229 */
2230 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2231 if (!nce) {
2232 ret = -ENOMEM;
2233 goto out;
2234 }
2235
2236 nce->ino = ino;
2237 nce->gen = gen;
2238 nce->parent_ino = *parent_ino;
2239 nce->parent_gen = *parent_gen;
2240 nce->name_len = fs_path_len(dest);
2241 nce->ret = ret;
2242 strcpy(nce->name, dest->start);
2243
2244 if (ino < sctx->send_progress)
2245 nce->need_later_update = 0;
2246 else
2247 nce->need_later_update = 1;
2248
2249 nce_ret = name_cache_insert(sctx, nce);
2250 if (nce_ret < 0)
2251 ret = nce_ret;
2252 name_cache_clean_unused(sctx);
2253
2254 out:
2255 return ret;
2256 }
2257
2258 /*
2259 * Magic happens here. This function returns the first ref to an inode as it
2260 * would look like while receiving the stream at this point in time.
2261 * We walk the path up to the root. For every inode in between, we check if it
2262 * was already processed/sent. If yes, we continue with the parent as found
2263 * in send_root. If not, we continue with the parent as found in parent_root.
2264 * If we encounter an inode that was deleted at this point in time, we use the
2265 * inode's "orphan" name instead of the real name and stop. Same with new inodes
2266 * that were not created yet and overwritten inodes/refs.
2267 *
2268 * When do we have orphan inodes:
2269 * 1. When an inode is freshly created and thus no valid refs are available yet
2270 * 2. When a directory lost all its refs (deleted) but still has dir items
2271 * inside which were not processed yet (pending for move/delete). If anyone
2272 * tried to get the path to the dir items, it would get a path inside that
2273 * orphan directory.
2274 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2275 * of an unprocessed inode. If in that case the first ref would be
2276 * overwritten, the overwritten inode gets "orphanized". Later when we
2277 * process this overwritten inode, it is restored at a new place by moving
2278 * the orphan inode.
2279 *
2280 * sctx->send_progress tells this function at which point in time receiving
2281 * would be.
2282 */
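/*
 * Illustrative walk (hypothetical names): for an inode currently reachable
 * at "a/b/c", the loop below resolves "c" first, then "b", then "a", each
 * time prepending the component to 'dest', which is switched to reversed
 * mode first. Once the subvolume root (BTRFS_FIRST_FREE_OBJECTID) is
 * reached, fs_path_unreverse() finalizes the buffer so the caller gets the
 * usual left-to-right path "a/b/c".
 */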
2283 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2284 struct fs_path *dest)
2285 {
2286 int ret = 0;
2287 struct fs_path *name = NULL;
2288 u64 parent_inode = 0;
2289 u64 parent_gen = 0;
2290 int stop = 0;
2291
2292 name = fs_path_alloc();
2293 if (!name) {
2294 ret = -ENOMEM;
2295 goto out;
2296 }
2297
2298 dest->reversed = 1;
2299 fs_path_reset(dest);
2300
2301 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2302 struct waiting_dir_move *wdm;
2303
2304 fs_path_reset(name);
2305
2306 if (is_waiting_for_rm(sctx, ino, gen)) {
2307 ret = gen_unique_name(sctx, ino, gen, name);
2308 if (ret < 0)
2309 goto out;
2310 ret = fs_path_add_path(dest, name);
2311 break;
2312 }
2313
2314 wdm = get_waiting_dir_move(sctx, ino);
2315 if (wdm && wdm->orphanized) {
2316 ret = gen_unique_name(sctx, ino, gen, name);
2317 stop = 1;
2318 } else if (wdm) {
2319 ret = get_first_ref(sctx->parent_root, ino,
2320 &parent_inode, &parent_gen, name);
2321 } else {
2322 ret = __get_cur_name_and_parent(sctx, ino, gen,
2323 &parent_inode,
2324 &parent_gen, name);
2325 if (ret)
2326 stop = 1;
2327 }
2328
2329 if (ret < 0)
2330 goto out;
2331
2332 ret = fs_path_add_path(dest, name);
2333 if (ret < 0)
2334 goto out;
2335
2336 ino = parent_inode;
2337 gen = parent_gen;
2338 }
2339
2340 out:
2341 fs_path_free(name);
2342 if (!ret)
2343 fs_path_unreverse(dest);
2344 return ret;
2345 }
2346
2347 /*
2348 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2349 */
2350 static int send_subvol_begin(struct send_ctx *sctx)
2351 {
2352 int ret;
2353 struct btrfs_root *send_root = sctx->send_root;
2354 struct btrfs_root *parent_root = sctx->parent_root;
2355 struct btrfs_path *path;
2356 struct btrfs_key key;
2357 struct btrfs_root_ref *ref;
2358 struct extent_buffer *leaf;
2359 char *name = NULL;
2360 int namelen;
2361
2362 path = btrfs_alloc_path();
2363 if (!path)
2364 return -ENOMEM;
2365
2366 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2367 if (!name) {
2368 btrfs_free_path(path);
2369 return -ENOMEM;
2370 }
2371
2372 key.objectid = send_root->root_key.objectid;
2373 key.type = BTRFS_ROOT_BACKREF_KEY;
2374 key.offset = 0;
2375
2376 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2377 &key, path, 1, 0);
2378 if (ret < 0)
2379 goto out;
2380 if (ret) {
2381 ret = -ENOENT;
2382 goto out;
2383 }
2384
2385 leaf = path->nodes[0];
2386 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2387 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2388 key.objectid != send_root->root_key.objectid) {
2389 ret = -ENOENT;
2390 goto out;
2391 }
2392 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2393 namelen = btrfs_root_ref_name_len(leaf, ref);
2394 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2395 btrfs_release_path(path);
2396
2397 if (parent_root) {
2398 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2399 if (ret < 0)
2400 goto out;
2401 } else {
2402 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2403 if (ret < 0)
2404 goto out;
2405 }
2406
2407 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2408
2409 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2410 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2411 sctx->send_root->root_item.received_uuid);
2412 else
2413 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2414 sctx->send_root->root_item.uuid);
2415
2416 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2417 btrfs_root_ctransid(&sctx->send_root->root_item));
2418 if (parent_root) {
2419 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2420 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2421 parent_root->root_item.received_uuid);
2422 else
2423 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2424 parent_root->root_item.uuid);
2425 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2426 btrfs_root_ctransid(&sctx->parent_root->root_item));
2427 }
2428
2429 ret = send_cmd(sctx);
2430
2431 tlv_put_failure:
2432 out:
2433 btrfs_free_path(path);
2434 kfree(name);
2435 return ret;
2436 }
2437
2438 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2439 {
2440 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2441 int ret = 0;
2442 struct fs_path *p;
2443
2444 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2445
2446 p = fs_path_alloc();
2447 if (!p)
2448 return -ENOMEM;
2449
2450 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2451 if (ret < 0)
2452 goto out;
2453
2454 ret = get_cur_path(sctx, ino, gen, p);
2455 if (ret < 0)
2456 goto out;
2457 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2458 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2459
2460 ret = send_cmd(sctx);
2461
2462 tlv_put_failure:
2463 out:
2464 fs_path_free(p);
2465 return ret;
2466 }
2467
2468 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2469 {
2470 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2471 int ret = 0;
2472 struct fs_path *p;
2473
2474 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2475
2476 p = fs_path_alloc();
2477 if (!p)
2478 return -ENOMEM;
2479
2480 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2481 if (ret < 0)
2482 goto out;
2483
2484 ret = get_cur_path(sctx, ino, gen, p);
2485 if (ret < 0)
2486 goto out;
2487 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2488 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2489
2490 ret = send_cmd(sctx);
2491
2492 tlv_put_failure:
2493 out:
2494 fs_path_free(p);
2495 return ret;
2496 }
2497
2498 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2499 {
2500 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2501 int ret = 0;
2502 struct fs_path *p;
2503
2504 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2505 ino, uid, gid);
2506
2507 p = fs_path_alloc();
2508 if (!p)
2509 return -ENOMEM;
2510
2511 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2512 if (ret < 0)
2513 goto out;
2514
2515 ret = get_cur_path(sctx, ino, gen, p);
2516 if (ret < 0)
2517 goto out;
2518 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2519 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2520 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2521
2522 ret = send_cmd(sctx);
2523
2524 tlv_put_failure:
2525 out:
2526 fs_path_free(p);
2527 return ret;
2528 }
2529
2530 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2531 {
2532 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2533 int ret = 0;
2534 struct fs_path *p = NULL;
2535 struct btrfs_inode_item *ii;
2536 struct btrfs_path *path = NULL;
2537 struct extent_buffer *eb;
2538 struct btrfs_key key;
2539 int slot;
2540
2541 btrfs_debug(fs_info, "send_utimes %llu", ino);
2542
2543 p = fs_path_alloc();
2544 if (!p)
2545 return -ENOMEM;
2546
2547 path = alloc_path_for_send();
2548 if (!path) {
2549 ret = -ENOMEM;
2550 goto out;
2551 }
2552
2553 key.objectid = ino;
2554 key.type = BTRFS_INODE_ITEM_KEY;
2555 key.offset = 0;
2556 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2557 if (ret > 0)
2558 ret = -ENOENT;
2559 if (ret < 0)
2560 goto out;
2561
2562 eb = path->nodes[0];
2563 slot = path->slots[0];
2564 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2565
2566 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2567 if (ret < 0)
2568 goto out;
2569
2570 ret = get_cur_path(sctx, ino, gen, p);
2571 if (ret < 0)
2572 goto out;
2573 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2574 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2575 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2576 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2577 /* TODO Add otime support when the otime patches get into upstream */
2578
2579 ret = send_cmd(sctx);
2580
2581 tlv_put_failure:
2582 out:
2583 fs_path_free(p);
2584 btrfs_free_path(path);
2585 return ret;
2586 }
2587
2588 /*
2589 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2590 * a valid path yet because we did not process the refs yet. So, the inode
2591 * is created as an orphan.
2592 */
2593 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2594 {
2595 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2596 int ret = 0;
2597 struct fs_path *p;
2598 int cmd;
2599 u64 gen;
2600 u64 mode;
2601 u64 rdev;
2602
2603 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2604
2605 p = fs_path_alloc();
2606 if (!p)
2607 return -ENOMEM;
2608
2609 if (ino != sctx->cur_ino) {
2610 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2611 NULL, NULL, &rdev);
2612 if (ret < 0)
2613 goto out;
2614 } else {
2615 gen = sctx->cur_inode_gen;
2616 mode = sctx->cur_inode_mode;
2617 rdev = sctx->cur_inode_rdev;
2618 }
2619
2620 if (S_ISREG(mode)) {
2621 cmd = BTRFS_SEND_C_MKFILE;
2622 } else if (S_ISDIR(mode)) {
2623 cmd = BTRFS_SEND_C_MKDIR;
2624 } else if (S_ISLNK(mode)) {
2625 cmd = BTRFS_SEND_C_SYMLINK;
2626 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2627 cmd = BTRFS_SEND_C_MKNOD;
2628 } else if (S_ISFIFO(mode)) {
2629 cmd = BTRFS_SEND_C_MKFIFO;
2630 } else if (S_ISSOCK(mode)) {
2631 cmd = BTRFS_SEND_C_MKSOCK;
2632 } else {
2633 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2634 (int)(mode & S_IFMT));
2635 ret = -EOPNOTSUPP;
2636 goto out;
2637 }
2638
2639 ret = begin_cmd(sctx, cmd);
2640 if (ret < 0)
2641 goto out;
2642
2643 ret = gen_unique_name(sctx, ino, gen, p);
2644 if (ret < 0)
2645 goto out;
2646
2647 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2648 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2649
2650 if (S_ISLNK(mode)) {
2651 fs_path_reset(p);
2652 ret = read_symlink(sctx->send_root, ino, p);
2653 if (ret < 0)
2654 goto out;
2655 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2656 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2657 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2658 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2659 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2660 }
2661
2662 ret = send_cmd(sctx);
2663 if (ret < 0)
2664 goto out;
2665
2666
2667 tlv_put_failure:
2668 out:
2669 fs_path_free(p);
2670 return ret;
2671 }
2672
2673 /*
2674 * We need some special handling for inodes that get processed before the parent
2675 * directory is created. See process_recorded_refs for details.
2676 * This function checks if we already created the dir out of order.
2677 */
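/*
 * Hypothetical example: while processing inode 257 we may record a new ref
 * that lives inside a directory with inode number 260. Since 260 > 257 that
 * directory was not processed yet, so process_recorded_refs() creates it
 * out of order. When inode 260 itself is processed later, this check is
 * what prevents sending a second, duplicate mkdir for it.
 */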
2678 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2679 {
2680 int ret = 0;
2681 struct btrfs_path *path = NULL;
2682 struct btrfs_key key;
2683 struct btrfs_key found_key;
2684 struct btrfs_key di_key;
2685 struct extent_buffer *eb;
2686 struct btrfs_dir_item *di;
2687 int slot;
2688
2689 path = alloc_path_for_send();
2690 if (!path) {
2691 ret = -ENOMEM;
2692 goto out;
2693 }
2694
2695 key.objectid = dir;
2696 key.type = BTRFS_DIR_INDEX_KEY;
2697 key.offset = 0;
2698 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2699 if (ret < 0)
2700 goto out;
2701
2702 while (1) {
2703 eb = path->nodes[0];
2704 slot = path->slots[0];
2705 if (slot >= btrfs_header_nritems(eb)) {
2706 ret = btrfs_next_leaf(sctx->send_root, path);
2707 if (ret < 0) {
2708 goto out;
2709 } else if (ret > 0) {
2710 ret = 0;
2711 break;
2712 }
2713 continue;
2714 }
2715
2716 btrfs_item_key_to_cpu(eb, &found_key, slot);
2717 if (found_key.objectid != key.objectid ||
2718 found_key.type != key.type) {
2719 ret = 0;
2720 goto out;
2721 }
2722
2723 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2724 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2725
2726 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2727 di_key.objectid < sctx->send_progress) {
2728 ret = 1;
2729 goto out;
2730 }
2731
2732 path->slots[0]++;
2733 }
2734
2735 out:
2736 btrfs_free_path(path);
2737 return ret;
2738 }
2739
2740 /*
2741 * Only creates the inode if it is:
2742 * 1. Not a directory
2743 * 2. A directory which was not already created due to out-of-order
2744 * directories. See did_create_dir and process_recorded_refs for details.
2745 */
2746 static int send_create_inode_if_needed(struct send_ctx *sctx)
2747 {
2748 int ret;
2749
2750 if (S_ISDIR(sctx->cur_inode_mode)) {
2751 ret = did_create_dir(sctx, sctx->cur_ino);
2752 if (ret < 0)
2753 goto out;
2754 if (ret) {
2755 ret = 0;
2756 goto out;
2757 }
2758 }
2759
2760 ret = send_create_inode(sctx, sctx->cur_ino);
2761 if (ret < 0)
2762 goto out;
2763
2764 out:
2765 return ret;
2766 }
2767
2768 struct recorded_ref {
2769 struct list_head list;
2770 char *name;
2771 struct fs_path *full_path;
2772 u64 dir;
2773 u64 dir_gen;
2774 int name_len;
2775 };
2776
2777 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2778 {
2779 ref->full_path = path;
2780 ref->name = (char *)kbasename(ref->full_path->start);
2781 ref->name_len = ref->full_path->end - ref->name;
2782 }
2783
2784 /*
2785 * We need to process new refs before deleted refs, but compare_tree gives us
2786 * everything mixed. So we first record all refs and later process them.
2787 * This function is a helper to record one ref.
2788 */
2789 static int __record_ref(struct list_head *head, u64 dir,
2790 u64 dir_gen, struct fs_path *path)
2791 {
2792 struct recorded_ref *ref;
2793
2794 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2795 if (!ref)
2796 return -ENOMEM;
2797
2798 ref->dir = dir;
2799 ref->dir_gen = dir_gen;
2800 set_ref_path(ref, path);
2801 list_add_tail(&ref->list, head);
2802 return 0;
2803 }
2804
2805 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2806 {
2807 struct recorded_ref *new;
2808
2809 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2810 if (!new)
2811 return -ENOMEM;
2812
2813 new->dir = ref->dir;
2814 new->dir_gen = ref->dir_gen;
2815 new->full_path = NULL;
2816 INIT_LIST_HEAD(&new->list);
2817 list_add_tail(&new->list, list);
2818 return 0;
2819 }
2820
2821 static void __free_recorded_refs(struct list_head *head)
2822 {
2823 struct recorded_ref *cur;
2824
2825 while (!list_empty(head)) {
2826 cur = list_entry(head->next, struct recorded_ref, list);
2827 fs_path_free(cur->full_path);
2828 list_del(&cur->list);
2829 kfree(cur);
2830 }
2831 }
2832
2833 static void free_recorded_refs(struct send_ctx *sctx)
2834 {
2835 __free_recorded_refs(&sctx->new_refs);
2836 __free_recorded_refs(&sctx->deleted_refs);
2837 }
2838
2839 /*
2840 * Renames/moves a file/dir to its orphan name. Used when the first
2841 * ref of an unprocessed inode gets overwritten and for all non-empty
2842 * directories.
2843 */
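/*
 * Example with made-up numbers: for inode 259 with generation 6,
 * gen_unique_name() below produces a name of the form "o259-6-0", so
 * orphanizing a ref "foo/bar" of that inode results in a rename from
 * "foo/bar" to "o259-6-0" in the send stream. The same naming scheme shows
 * up in the example in the comment above refresh_ref_path().
 */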
2844 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2845 struct fs_path *path)
2846 {
2847 int ret;
2848 struct fs_path *orphan;
2849
2850 orphan = fs_path_alloc();
2851 if (!orphan)
2852 return -ENOMEM;
2853
2854 ret = gen_unique_name(sctx, ino, gen, orphan);
2855 if (ret < 0)
2856 goto out;
2857
2858 ret = send_rename(sctx, path, orphan);
2859
2860 out:
2861 fs_path_free(orphan);
2862 return ret;
2863 }
2864
2865 static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
2866 u64 dir_ino, u64 dir_gen)
2867 {
2868 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2869 struct rb_node *parent = NULL;
2870 struct orphan_dir_info *entry, *odi;
2871
2872 while (*p) {
2873 parent = *p;
2874 entry = rb_entry(parent, struct orphan_dir_info, node);
2875 if (dir_ino < entry->ino)
2876 p = &(*p)->rb_left;
2877 else if (dir_ino > entry->ino)
2878 p = &(*p)->rb_right;
2879 else if (dir_gen < entry->gen)
2880 p = &(*p)->rb_left;
2881 else if (dir_gen > entry->gen)
2882 p = &(*p)->rb_right;
2883 else
2884 return entry;
2885 }
2886
2887 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2888 if (!odi)
2889 return ERR_PTR(-ENOMEM);
2890 odi->ino = dir_ino;
2891 odi->gen = dir_gen;
2892 odi->last_dir_index_offset = 0;
2893
2894 rb_link_node(&odi->node, parent, p);
2895 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2896 return odi;
2897 }
2898
2899 static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
2900 u64 dir_ino, u64 gen)
2901 {
2902 struct rb_node *n = sctx->orphan_dirs.rb_node;
2903 struct orphan_dir_info *entry;
2904
2905 while (n) {
2906 entry = rb_entry(n, struct orphan_dir_info, node);
2907 if (dir_ino < entry->ino)
2908 n = n->rb_left;
2909 else if (dir_ino > entry->ino)
2910 n = n->rb_right;
2911 else if (gen < entry->gen)
2912 n = n->rb_left;
2913 else if (gen > entry->gen)
2914 n = n->rb_right;
2915 else
2916 return entry;
2917 }
2918 return NULL;
2919 }
2920
2921 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
2922 {
2923 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
2924
2925 return odi != NULL;
2926 }
2927
2928 static void free_orphan_dir_info(struct send_ctx *sctx,
2929 struct orphan_dir_info *odi)
2930 {
2931 if (!odi)
2932 return;
2933 rb_erase(&odi->node, &sctx->orphan_dirs);
2934 kfree(odi);
2935 }
2936
2937 /*
2938 * Returns 1 if a directory can be removed at this point in time.
2939 * We check this by iterating all dir items and checking if the inode behind
2940 * the dir item was already processed.
2941 */
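/*
 * Sketch of the deferral handling below: if the scan of the dir index items
 * finds a child whose inode still has a pending move or was not processed
 * yet, the directory cannot be removed now. It is then recorded in an
 * orphan_dir_info, with the dir index offset we stopped at saved in
 * last_dir_index_offset, so that a later can_rmdir() call resumes the scan
 * from that offset instead of starting over from offset zero.
 */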
2942 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2943 u64 send_progress)
2944 {
2945 int ret = 0;
2946 struct btrfs_root *root = sctx->parent_root;
2947 struct btrfs_path *path;
2948 struct btrfs_key key;
2949 struct btrfs_key found_key;
2950 struct btrfs_key loc;
2951 struct btrfs_dir_item *di;
2952 struct orphan_dir_info *odi = NULL;
2953
2954 /*
2955 * Don't try to rmdir the top/root subvolume dir.
2956 */
2957 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2958 return 0;
2959
2960 path = alloc_path_for_send();
2961 if (!path)
2962 return -ENOMEM;
2963
2964 key.objectid = dir;
2965 key.type = BTRFS_DIR_INDEX_KEY;
2966 key.offset = 0;
2967
2968 odi = get_orphan_dir_info(sctx, dir, dir_gen);
2969 if (odi)
2970 key.offset = odi->last_dir_index_offset;
2971
2972 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2973 if (ret < 0)
2974 goto out;
2975
2976 while (1) {
2977 struct waiting_dir_move *dm;
2978
2979 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2980 ret = btrfs_next_leaf(root, path);
2981 if (ret < 0)
2982 goto out;
2983 else if (ret > 0)
2984 break;
2985 continue;
2986 }
2987 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2988 path->slots[0]);
2989 if (found_key.objectid != key.objectid ||
2990 found_key.type != key.type)
2991 break;
2992
2993 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2994 struct btrfs_dir_item);
2995 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2996
2997 dm = get_waiting_dir_move(sctx, loc.objectid);
2998 if (dm) {
2999 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3000 if (IS_ERR(odi)) {
3001 ret = PTR_ERR(odi);
3002 goto out;
3003 }
3004 odi->gen = dir_gen;
3005 odi->last_dir_index_offset = found_key.offset;
3006 dm->rmdir_ino = dir;
3007 dm->rmdir_gen = dir_gen;
3008 ret = 0;
3009 goto out;
3010 }
3011
3012 if (loc.objectid > send_progress) {
3013 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3014 if (IS_ERR(odi)) {
3015 ret = PTR_ERR(odi);
3016 goto out;
3017 }
3018 odi->gen = dir_gen;
3019 odi->last_dir_index_offset = found_key.offset;
3020 ret = 0;
3021 goto out;
3022 }
3023
3024 path->slots[0]++;
3025 }
3026 free_orphan_dir_info(sctx, odi);
3027
3028 ret = 1;
3029
3030 out:
3031 btrfs_free_path(path);
3032 return ret;
3033 }
3034
3035 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3036 {
3037 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3038
3039 return entry != NULL;
3040 }
3041
3042 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3043 {
3044 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3045 struct rb_node *parent = NULL;
3046 struct waiting_dir_move *entry, *dm;
3047
3048 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3049 if (!dm)
3050 return -ENOMEM;
3051 dm->ino = ino;
3052 dm->rmdir_ino = 0;
3053 dm->rmdir_gen = 0;
3054 dm->orphanized = orphanized;
3055
3056 while (*p) {
3057 parent = *p;
3058 entry = rb_entry(parent, struct waiting_dir_move, node);
3059 if (ino < entry->ino) {
3060 p = &(*p)->rb_left;
3061 } else if (ino > entry->ino) {
3062 p = &(*p)->rb_right;
3063 } else {
3064 kfree(dm);
3065 return -EEXIST;
3066 }
3067 }
3068
3069 rb_link_node(&dm->node, parent, p);
3070 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3071 return 0;
3072 }
3073
3074 static struct waiting_dir_move *
3075 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3076 {
3077 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3078 struct waiting_dir_move *entry;
3079
3080 while (n) {
3081 entry = rb_entry(n, struct waiting_dir_move, node);
3082 if (ino < entry->ino)
3083 n = n->rb_left;
3084 else if (ino > entry->ino)
3085 n = n->rb_right;
3086 else
3087 return entry;
3088 }
3089 return NULL;
3090 }
3091
3092 static void free_waiting_dir_move(struct send_ctx *sctx,
3093 struct waiting_dir_move *dm)
3094 {
3095 if (!dm)
3096 return;
3097 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3098 kfree(dm);
3099 }
3100
3101 static int add_pending_dir_move(struct send_ctx *sctx,
3102 u64 ino,
3103 u64 ino_gen,
3104 u64 parent_ino,
3105 struct list_head *new_refs,
3106 struct list_head *deleted_refs,
3107 const bool is_orphan)
3108 {
3109 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3110 struct rb_node *parent = NULL;
3111 struct pending_dir_move *entry = NULL, *pm;
3112 struct recorded_ref *cur;
3113 int exists = 0;
3114 int ret;
3115
3116 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3117 if (!pm)
3118 return -ENOMEM;
3119 pm->parent_ino = parent_ino;
3120 pm->ino = ino;
3121 pm->gen = ino_gen;
3122 INIT_LIST_HEAD(&pm->list);
3123 INIT_LIST_HEAD(&pm->update_refs);
3124 RB_CLEAR_NODE(&pm->node);
3125
3126 while (*p) {
3127 parent = *p;
3128 entry = rb_entry(parent, struct pending_dir_move, node);
3129 if (parent_ino < entry->parent_ino) {
3130 p = &(*p)->rb_left;
3131 } else if (parent_ino > entry->parent_ino) {
3132 p = &(*p)->rb_right;
3133 } else {
3134 exists = 1;
3135 break;
3136 }
3137 }
3138
3139 list_for_each_entry(cur, deleted_refs, list) {
3140 ret = dup_ref(cur, &pm->update_refs);
3141 if (ret < 0)
3142 goto out;
3143 }
3144 list_for_each_entry(cur, new_refs, list) {
3145 ret = dup_ref(cur, &pm->update_refs);
3146 if (ret < 0)
3147 goto out;
3148 }
3149
3150 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3151 if (ret)
3152 goto out;
3153
3154 if (exists) {
3155 list_add_tail(&pm->list, &entry->list);
3156 } else {
3157 rb_link_node(&pm->node, parent, p);
3158 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3159 }
3160 ret = 0;
3161 out:
3162 if (ret) {
3163 __free_recorded_refs(&pm->update_refs);
3164 kfree(pm);
3165 }
3166 return ret;
3167 }
3168
3169 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3170 u64 parent_ino)
3171 {
3172 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3173 struct pending_dir_move *entry;
3174
3175 while (n) {
3176 entry = rb_entry(n, struct pending_dir_move, node);
3177 if (parent_ino < entry->parent_ino)
3178 n = n->rb_left;
3179 else if (parent_ino > entry->parent_ino)
3180 n = n->rb_right;
3181 else
3182 return entry;
3183 }
3184 return NULL;
3185 }
3186
3187 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3188 u64 ino, u64 gen, u64 *ancestor_ino)
3189 {
3190 int ret = 0;
3191 u64 parent_inode = 0;
3192 u64 parent_gen = 0;
3193 u64 start_ino = ino;
3194
3195 *ancestor_ino = 0;
3196 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3197 fs_path_reset(name);
3198
3199 if (is_waiting_for_rm(sctx, ino, gen))
3200 break;
3201 if (is_waiting_for_move(sctx, ino)) {
3202 if (*ancestor_ino == 0)
3203 *ancestor_ino = ino;
3204 ret = get_first_ref(sctx->parent_root, ino,
3205 &parent_inode, &parent_gen, name);
3206 } else {
3207 ret = __get_cur_name_and_parent(sctx, ino, gen,
3208 &parent_inode,
3209 &parent_gen, name);
3210 if (ret > 0) {
3211 ret = 0;
3212 break;
3213 }
3214 }
3215 if (ret < 0)
3216 break;
3217 if (parent_inode == start_ino) {
3218 ret = 1;
3219 if (*ancestor_ino == 0)
3220 *ancestor_ino = ino;
3221 break;
3222 }
3223 ino = parent_inode;
3224 gen = parent_gen;
3225 }
3226 return ret;
3227 }
3228
3229 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3230 {
3231 struct fs_path *from_path = NULL;
3232 struct fs_path *to_path = NULL;
3233 struct fs_path *name = NULL;
3234 u64 orig_progress = sctx->send_progress;
3235 struct recorded_ref *cur;
3236 u64 parent_ino, parent_gen;
3237 struct waiting_dir_move *dm = NULL;
3238 u64 rmdir_ino = 0;
3239 u64 rmdir_gen;
3240 u64 ancestor;
3241 bool is_orphan;
3242 int ret;
3243
3244 name = fs_path_alloc();
3245 from_path = fs_path_alloc();
3246 if (!name || !from_path) {
3247 ret = -ENOMEM;
3248 goto out;
3249 }
3250
3251 dm = get_waiting_dir_move(sctx, pm->ino);
3252 ASSERT(dm);
3253 rmdir_ino = dm->rmdir_ino;
3254 rmdir_gen = dm->rmdir_gen;
3255 is_orphan = dm->orphanized;
3256 free_waiting_dir_move(sctx, dm);
3257
3258 if (is_orphan) {
3259 ret = gen_unique_name(sctx, pm->ino,
3260 pm->gen, from_path);
3261 } else {
3262 ret = get_first_ref(sctx->parent_root, pm->ino,
3263 &parent_ino, &parent_gen, name);
3264 if (ret < 0)
3265 goto out;
3266 ret = get_cur_path(sctx, parent_ino, parent_gen,
3267 from_path);
3268 if (ret < 0)
3269 goto out;
3270 ret = fs_path_add_path(from_path, name);
3271 }
3272 if (ret < 0)
3273 goto out;
3274
3275 sctx->send_progress = sctx->cur_ino + 1;
3276 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3277 if (ret < 0)
3278 goto out;
3279 if (ret) {
3280 LIST_HEAD(deleted_refs);
3281 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3282 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3283 &pm->update_refs, &deleted_refs,
3284 is_orphan);
3285 if (ret < 0)
3286 goto out;
3287 if (rmdir_ino) {
3288 dm = get_waiting_dir_move(sctx, pm->ino);
3289 ASSERT(dm);
3290 dm->rmdir_ino = rmdir_ino;
3291 dm->rmdir_gen = rmdir_gen;
3292 }
3293 goto out;
3294 }
3295 fs_path_reset(name);
3296 to_path = name;
3297 name = NULL;
3298 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3299 if (ret < 0)
3300 goto out;
3301
3302 ret = send_rename(sctx, from_path, to_path);
3303 if (ret < 0)
3304 goto out;
3305
3306 if (rmdir_ino) {
3307 struct orphan_dir_info *odi;
3308 u64 gen;
3309
3310 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3311 if (!odi) {
3312 /* already deleted */
3313 goto finish;
3314 }
3315 gen = odi->gen;
3316
3317 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3318 if (ret < 0)
3319 goto out;
3320 if (!ret)
3321 goto finish;
3322
3323 name = fs_path_alloc();
3324 if (!name) {
3325 ret = -ENOMEM;
3326 goto out;
3327 }
3328 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3329 if (ret < 0)
3330 goto out;
3331 ret = send_rmdir(sctx, name);
3332 if (ret < 0)
3333 goto out;
3334 }
3335
3336 finish:
3337 ret = send_utimes(sctx, pm->ino, pm->gen);
3338 if (ret < 0)
3339 goto out;
3340
3341 /*
3342 * After rename/move, we need to update the utimes of both new parent(s)
3343 * and old parent(s).
3344 */
3345 list_for_each_entry(cur, &pm->update_refs, list) {
3346 /*
3347 * The parent inode might have been deleted in the send snapshot
3348 */
3349 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3350 NULL, NULL, NULL, NULL, NULL);
3351 if (ret == -ENOENT) {
3352 ret = 0;
3353 continue;
3354 }
3355 if (ret < 0)
3356 goto out;
3357
3358 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3359 if (ret < 0)
3360 goto out;
3361 }
3362
3363 out:
3364 fs_path_free(name);
3365 fs_path_free(from_path);
3366 fs_path_free(to_path);
3367 sctx->send_progress = orig_progress;
3368
3369 return ret;
3370 }
3371
3372 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3373 {
3374 if (!list_empty(&m->list))
3375 list_del(&m->list);
3376 if (!RB_EMPTY_NODE(&m->node))
3377 rb_erase(&m->node, &sctx->pending_dir_moves);
3378 __free_recorded_refs(&m->update_refs);
3379 kfree(m);
3380 }
3381
3382 static void tail_append_pending_moves(struct send_ctx *sctx,
3383 struct pending_dir_move *moves,
3384 struct list_head *stack)
3385 {
3386 if (list_empty(&moves->list)) {
3387 list_add_tail(&moves->list, stack);
3388 } else {
3389 LIST_HEAD(list);
3390 list_splice_init(&moves->list, &list);
3391 list_add_tail(&moves->list, stack);
3392 list_splice_tail(&list, stack);
3393 }
3394 if (!RB_EMPTY_NODE(&moves->node)) {
3395 rb_erase(&moves->node, &sctx->pending_dir_moves);
3396 RB_CLEAR_NODE(&moves->node);
3397 }
3398 }
3399
3400 static int apply_children_dir_moves(struct send_ctx *sctx)
3401 {
3402 struct pending_dir_move *pm;
3403 struct list_head stack;
3404 u64 parent_ino = sctx->cur_ino;
3405 int ret = 0;
3406
3407 pm = get_pending_dir_moves(sctx, parent_ino);
3408 if (!pm)
3409 return 0;
3410
3411 INIT_LIST_HEAD(&stack);
3412 tail_append_pending_moves(sctx, pm, &stack);
3413
3414 while (!list_empty(&stack)) {
3415 pm = list_first_entry(&stack, struct pending_dir_move, list);
3416 parent_ino = pm->ino;
3417 ret = apply_dir_move(sctx, pm);
3418 free_pending_move(sctx, pm);
3419 if (ret)
3420 goto out;
3421 pm = get_pending_dir_moves(sctx, parent_ino);
3422 if (pm)
3423 tail_append_pending_moves(sctx, pm, &stack);
3424 }
3425 return 0;
3426
3427 out:
3428 while (!list_empty(&stack)) {
3429 pm = list_first_entry(&stack, struct pending_dir_move, list);
3430 free_pending_move(sctx, pm);
3431 }
3432 return ret;
3433 }
3434
3435 /*
3436 * We might need to delay a directory rename even when no ancestor directory
3437 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3438 * renamed. This happens when we rename a directory to the old name (the name
3439 * in the parent root) of some other unrelated directory that got its rename
3440 * delayed due to some ancestor with a higher inode number that got renamed.
3441 *
3442 * Example:
3443 *
3444 * Parent snapshot:
3445 * . (ino 256)
3446 * |---- a/ (ino 257)
3447 * | |---- file (ino 260)
3448 * |
3449 * |---- b/ (ino 258)
3450 * |---- c/ (ino 259)
3451 *
3452 * Send snapshot:
3453 * . (ino 256)
3454 * |---- a/ (ino 258)
3455 * |---- x/ (ino 259)
3456 *       |---- y/ (ino 257)
3457 *             |----- file (ino 260)
3458 *
3459 * Here we cannot rename 258 from 'b' to 'a' without the rename of inode 257
3460 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3461 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3462 * must issue is:
3463 *
3464 * 1 - rename 259 from 'c' to 'x'
3465 * 2 - rename 257 from 'a' to 'x/y'
3466 * 3 - rename 258 from 'b' to 'a'
3467 *
3468 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3469 * be done right away and < 0 on error.
3470 */
3471 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3472 struct recorded_ref *parent_ref,
3473 const bool is_orphan)
3474 {
3475 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3476 struct btrfs_path *path;
3477 struct btrfs_key key;
3478 struct btrfs_key di_key;
3479 struct btrfs_dir_item *di;
3480 u64 left_gen;
3481 u64 right_gen;
3482 int ret = 0;
3483 struct waiting_dir_move *wdm;
3484
3485 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3486 return 0;
3487
3488 path = alloc_path_for_send();
3489 if (!path)
3490 return -ENOMEM;
3491
3492 key.objectid = parent_ref->dir;
3493 key.type = BTRFS_DIR_ITEM_KEY;
3494 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3495
3496 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3497 if (ret < 0) {
3498 goto out;
3499 } else if (ret > 0) {
3500 ret = 0;
3501 goto out;
3502 }
3503
3504 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3505 parent_ref->name_len);
3506 if (!di) {
3507 ret = 0;
3508 goto out;
3509 }
3510 /*
3511 * di_key.objectid has the number of the inode that has a dentry in the
3512 * parent directory with the same name that sctx->cur_ino is being
3513 * renamed to. We need to check if that inode is in the send root as
3514 * well and if it is currently marked as an inode with a pending rename,
3515 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3516 * that it happens after that other inode is renamed.
3517 */
3518 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3519 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3520 ret = 0;
3521 goto out;
3522 }
3523
3524 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3525 &left_gen, NULL, NULL, NULL, NULL);
3526 if (ret < 0)
3527 goto out;
3528 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3529 &right_gen, NULL, NULL, NULL, NULL);
3530 if (ret < 0) {
3531 if (ret == -ENOENT)
3532 ret = 0;
3533 goto out;
3534 }
3535
3536 /* Different inode, no need to delay the rename of sctx->cur_ino */
3537 if (right_gen != left_gen) {
3538 ret = 0;
3539 goto out;
3540 }
3541
3542 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3543 if (wdm && !wdm->orphanized) {
3544 ret = add_pending_dir_move(sctx,
3545 sctx->cur_ino,
3546 sctx->cur_inode_gen,
3547 di_key.objectid,
3548 &sctx->new_refs,
3549 &sctx->deleted_refs,
3550 is_orphan);
3551 if (!ret)
3552 ret = 1;
3553 }
3554 out:
3555 btrfs_free_path(path);
3556 return ret;
3557 }
3558
3559 /*
3560 * Check if inode ino2, or any of its ancestors, is inode ino1.
3561 * Return 1 if true, 0 if false and < 0 on error.
3562 */
3563 static int check_ino_in_path(struct btrfs_root *root,
3564 const u64 ino1,
3565 const u64 ino1_gen,
3566 const u64 ino2,
3567 const u64 ino2_gen,
3568 struct fs_path *fs_path)
3569 {
3570 u64 ino = ino2;
3571
3572 if (ino1 == ino2)
3573 return ino1_gen == ino2_gen;
3574
3575 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3576 u64 parent;
3577 u64 parent_gen;
3578 int ret;
3579
3580 fs_path_reset(fs_path);
3581 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3582 if (ret < 0)
3583 return ret;
3584 if (parent == ino1)
3585 return parent_gen == ino1_gen;
3586 ino = parent;
3587 }
3588 return 0;
3589 }
3590
3591 /*
3592 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
3593 * possible path (in case ino2 is not a directory and has multiple hard links).
3594 * Return 1 if true, 0 if false and < 0 on error.
3595 */
3596 static int is_ancestor(struct btrfs_root *root,
3597 const u64 ino1,
3598 const u64 ino1_gen,
3599 const u64 ino2,
3600 struct fs_path *fs_path)
3601 {
3602 bool free_fs_path = false;
3603 int ret = 0;
3604 struct btrfs_path *path = NULL;
3605 struct btrfs_key key;
3606
3607 if (!fs_path) {
3608 fs_path = fs_path_alloc();
3609 if (!fs_path)
3610 return -ENOMEM;
3611 free_fs_path = true;
3612 }
3613
3614 path = alloc_path_for_send();
3615 if (!path) {
3616 ret = -ENOMEM;
3617 goto out;
3618 }
3619
3620 key.objectid = ino2;
3621 key.type = BTRFS_INODE_REF_KEY;
3622 key.offset = 0;
3623
3624 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3625 if (ret < 0)
3626 goto out;
3627
3628 while (true) {
3629 struct extent_buffer *leaf = path->nodes[0];
3630 int slot = path->slots[0];
3631 u32 cur_offset = 0;
3632 u32 item_size;
3633
3634 if (slot >= btrfs_header_nritems(leaf)) {
3635 ret = btrfs_next_leaf(root, path);
3636 if (ret < 0)
3637 goto out;
3638 if (ret > 0)
3639 break;
3640 continue;
3641 }
3642
3643 btrfs_item_key_to_cpu(leaf, &key, slot);
3644 if (key.objectid != ino2)
3645 break;
3646 if (key.type != BTRFS_INODE_REF_KEY &&
3647 key.type != BTRFS_INODE_EXTREF_KEY)
3648 break;
3649
3650 item_size = btrfs_item_size_nr(leaf, slot);
3651 while (cur_offset < item_size) {
3652 u64 parent;
3653 u64 parent_gen;
3654
3655 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3656 unsigned long ptr;
3657 struct btrfs_inode_extref *extref;
3658
3659 ptr = btrfs_item_ptr_offset(leaf, slot);
3660 extref = (struct btrfs_inode_extref *)
3661 (ptr + cur_offset);
3662 parent = btrfs_inode_extref_parent(leaf,
3663 extref);
3664 cur_offset += sizeof(*extref);
3665 cur_offset += btrfs_inode_extref_name_len(leaf,
3666 extref);
3667 } else {
3668 parent = key.offset;
3669 cur_offset = item_size;
3670 }
3671
3672 ret = get_inode_info(root, parent, NULL, &parent_gen,
3673 NULL, NULL, NULL, NULL);
3674 if (ret < 0)
3675 goto out;
3676 ret = check_ino_in_path(root, ino1, ino1_gen,
3677 parent, parent_gen, fs_path);
3678 if (ret)
3679 goto out;
3680 }
3681 path->slots[0]++;
3682 }
3683 ret = 0;
3684 out:
3685 btrfs_free_path(path);
3686 if (free_fs_path)
3687 fs_path_free(fs_path);
3688 return ret;
3689 }
3690
3691 static int wait_for_parent_move(struct send_ctx *sctx,
3692 struct recorded_ref *parent_ref,
3693 const bool is_orphan)
3694 {
3695 int ret = 0;
3696 u64 ino = parent_ref->dir;
3697 u64 ino_gen = parent_ref->dir_gen;
3698 u64 parent_ino_before, parent_ino_after;
3699 struct fs_path *path_before = NULL;
3700 struct fs_path *path_after = NULL;
3701 int len1, len2;
3702
3703 path_after = fs_path_alloc();
3704 path_before = fs_path_alloc();
3705 if (!path_after || !path_before) {
3706 ret = -ENOMEM;
3707 goto out;
3708 }
3709
3710 /*
3711 * Our current directory inode may not yet be renamed/moved because some
3712 * ancestor (immediate or not) has to be renamed/moved first. So find if
3713 * such an ancestor exists and make sure our own rename/move happens after
3714 * that ancestor is processed to avoid path build infinite loops (done
3715 * at get_cur_path()).
3716 */
3717 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3718 u64 parent_ino_after_gen;
3719
3720 if (is_waiting_for_move(sctx, ino)) {
3721 /*
3722 * If the current inode is an ancestor of ino in the
3723 * parent root, we need to delay the rename of the
3724 * current inode, otherwise don't delay the rename
3725 * because we can end up with a circular dependency
3726 * of renames, resulting in some directories never
3727 * getting the respective rename operations issued in
3728 * the send stream or getting into infinite path build
3729 * loops.
3730 */
3731 ret = is_ancestor(sctx->parent_root,
3732 sctx->cur_ino, sctx->cur_inode_gen,
3733 ino, path_before);
3734 if (ret)
3735 break;
3736 }
3737
3738 fs_path_reset(path_before);
3739 fs_path_reset(path_after);
3740
3741 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3742 &parent_ino_after_gen, path_after);
3743 if (ret < 0)
3744 goto out;
3745 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3746 NULL, path_before);
3747 if (ret < 0 && ret != -ENOENT) {
3748 goto out;
3749 } else if (ret == -ENOENT) {
3750 ret = 0;
3751 break;
3752 }
3753
3754 len1 = fs_path_len(path_before);
3755 len2 = fs_path_len(path_after);
3756 if (ino > sctx->cur_ino &&
3757 (parent_ino_before != parent_ino_after || len1 != len2 ||
3758 memcmp(path_before->start, path_after->start, len1))) {
3759 u64 parent_ino_gen;
3760
3761 ret = get_inode_info(sctx->parent_root, ino, NULL,
3762 &parent_ino_gen, NULL, NULL, NULL,
3763 NULL);
3764 if (ret < 0)
3765 goto out;
3766 if (ino_gen == parent_ino_gen) {
3767 ret = 1;
3768 break;
3769 }
3770 }
3771 ino = parent_ino_after;
3772 ino_gen = parent_ino_after_gen;
3773 }
3774
3775 out:
3776 fs_path_free(path_before);
3777 fs_path_free(path_after);
3778
3779 if (ret == 1) {
3780 ret = add_pending_dir_move(sctx,
3781 sctx->cur_ino,
3782 sctx->cur_inode_gen,
3783 ino,
3784 &sctx->new_refs,
3785 &sctx->deleted_refs,
3786 is_orphan);
3787 if (!ret)
3788 ret = 1;
3789 }
3790
3791 return ret;
3792 }
3793
3794 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3795 {
3796 int ret;
3797 struct fs_path *new_path;
3798
3799 /*
3800 * Our reference's name member points to its full_path member string, so
3801 * we use a new path here.
3802 */
3803 new_path = fs_path_alloc();
3804 if (!new_path)
3805 return -ENOMEM;
3806
3807 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3808 if (ret < 0) {
3809 fs_path_free(new_path);
3810 return ret;
3811 }
3812 ret = fs_path_add(new_path, ref->name, ref->name_len);
3813 if (ret < 0) {
3814 fs_path_free(new_path);
3815 return ret;
3816 }
3817
3818 fs_path_free(ref->full_path);
3819 set_ref_path(ref, new_path);
3820
3821 return 0;
3822 }
3823
3824 /*
3825 * When processing the new references for an inode we may orphanize an existing
3826 * directory inode because its old name conflicts with one of the new references
3827 * of the current inode. Later, when processing another new reference of our
3828 * inode, we might need to orphanize another inode, but the path we have in the
3829 * reference reflects the pre-orphanization name of the directory we previously
3830 * orphanized. For example:
3831 *
3832 * parent snapshot looks like:
3833 *
3834 * . (ino 256)
3835 * |----- f1 (ino 257)
3836 * |----- f2 (ino 258)
3837 * |----- d1/ (ino 259)
3838 *        |----- d2/ (ino 260)
3839 *
3840 * send snapshot looks like:
3841 *
3842 * . (ino 256)
3843 * |----- d1 (ino 258)
3844 * |----- f2/ (ino 259)
3845 *        |----- f2_link/ (ino 260)
3846 *        |       |----- f1 (ino 257)
3847 *        |
3848 *        |----- d2 (ino 258)
3849 *
3850 * When processing inode 257 we compute the name for inode 259 as "d1", and we
3851 * cache it in the name cache. Later when we start processing inode 258, when
3852 * collecting all its new references we set a full path of "d1/d2" for its new
3853 * reference with name "d2". When we start processing the new references we
3854 * start by processing the new reference with name "d1", and this results in
3855 * orphanizing inode 259, since its old reference causes a conflict. Then we
3856 * move on to the next new reference, with name "d2", and we find out we must
3857 * orphanize inode 260, as its old reference conflicts with ours - but for the
3858 * orphanization we use a source path corresponding to the path we stored in the
3859 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
3860 * receiver fail since the path component "d1/" no longer exists, it was renamed
3861 * to "o259-6-0/" when processing the previous new reference. So in this case we
3862 * must recompute the path in the new reference and use it for the new
3863 * orphanization operation.
3864 */
3865 static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3866 {
3867 char *name;
3868 int ret;
3869
3870 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
3871 if (!name)
3872 return -ENOMEM;
3873
3874 fs_path_reset(ref->full_path);
3875 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
3876 if (ret < 0)
3877 goto out;
3878
3879 ret = fs_path_add(ref->full_path, name, ref->name_len);
3880 if (ret < 0)
3881 goto out;
3882
3883 /* Update the reference's base name pointer. */
3884 set_ref_path(ref, ref->full_path);
3885 out:
3886 kfree(name);
3887 return ret;
3888 }
3889
3890 /*
3891 * This does all the move/link/unlink/rmdir magic.
3892 */
3893 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3894 {
3895 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3896 int ret = 0;
3897 struct recorded_ref *cur;
3898 struct recorded_ref *cur2;
3899 struct list_head check_dirs;
3900 struct fs_path *valid_path = NULL;
3901 u64 ow_inode = 0;
3902 u64 ow_gen;
3903 u64 ow_mode;
3904 int did_overwrite = 0;
3905 int is_orphan = 0;
3906 u64 last_dir_ino_rm = 0;
3907 bool can_rename = true;
3908 bool orphanized_dir = false;
3909 bool orphanized_ancestor = false;
3910
3911 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3912
3913 /*
3914 * This should never happen as the root dir always has the same ref
3915 * which is always '..'
3916 */
3917 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3918 INIT_LIST_HEAD(&check_dirs);
3919
3920 valid_path = fs_path_alloc();
3921 if (!valid_path) {
3922 ret = -ENOMEM;
3923 goto out;
3924 }
3925
3926 /*
3927 * First, check if the first ref of the current inode was overwritten
3928 * before. If yes, we know that the current inode was already orphanized
3929 * and thus use the orphan name. If not, we can use get_cur_path to
3930 * get the path of the first ref as it would look like while receiving at
3931 * this point in time.
3932 * New inodes are always orphans at the beginning, so force the use of the
3933 * orphan name in this case.
3934 * The first ref is stored in valid_path and will be updated if it
3935 * gets moved around.
3936 */
3937 if (!sctx->cur_inode_new) {
3938 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3939 sctx->cur_inode_gen);
3940 if (ret < 0)
3941 goto out;
3942 if (ret)
3943 did_overwrite = 1;
3944 }
3945 if (sctx->cur_inode_new || did_overwrite) {
3946 ret = gen_unique_name(sctx, sctx->cur_ino,
3947 sctx->cur_inode_gen, valid_path);
3948 if (ret < 0)
3949 goto out;
3950 is_orphan = 1;
3951 } else {
3952 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3953 valid_path);
3954 if (ret < 0)
3955 goto out;
3956 }
3957
3958 /*
3959 * Before doing any rename and link operations, do a first pass on the
3960 * new references to orphanize any unprocessed inodes that may have a
3961 * reference that conflicts with one of the new references of the current
3962 * inode. This needs to happen first because a new reference may conflict
3963 * with the old reference of a parent directory, so we must make sure
3964 * that the path used for link and rename commands doesn't use an
3965 * orphanized name when an ancestor was not yet orphanized.
3966 *
3967 * Example:
3968 *
3969 * Parent snapshot:
3970 *
3971 * . (ino 256)
3972 * |----- testdir/ (ino 259)
3973 * | |----- a (ino 257)
3974 * |
3975 * |----- b (ino 258)
3976 *
3977 * Send snapshot:
3978 *
3979 * . (ino 256)
3980 * |----- testdir_2/ (ino 259)
3981 * | |----- a (ino 260)
3982 * |
3983 * |----- testdir (ino 257)
3984 * |----- b (ino 257)
3985 * |----- b2 (ino 258)
3986 *
3987 * Processing the new reference for inode 257 with name "b" may happen
3988 * before processing the new reference with name "testdir". If so, we
3989 * must make sure that by the time we send a link command to create the
3990 * hard link "b", inode 259 was already orphanized, since the generated
3991 * path in "valid_path" already contains the orphanized name for 259.
3992 * We are processing inode 257, so only later when processing 259 we do
3993 * the rename operation to change its temporary (orphanized) name to
3994 * "testdir_2".
3995 */
3996 list_for_each_entry(cur, &sctx->new_refs, list) {
3997 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3998 if (ret < 0)
3999 goto out;
4000 if (ret == inode_state_will_create)
4001 continue;
4002
4003 /*
4004 * Check if this new ref would overwrite the first ref of another
4005 * unprocessed inode. If yes, orphanize the overwritten inode.
4006 * If we find an overwritten ref that is not the first ref,
4007 * simply unlink it.
4008 */
4009 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4010 cur->name, cur->name_len,
4011 &ow_inode, &ow_gen, &ow_mode);
4012 if (ret < 0)
4013 goto out;
4014 if (ret) {
4015 ret = is_first_ref(sctx->parent_root,
4016 ow_inode, cur->dir, cur->name,
4017 cur->name_len);
4018 if (ret < 0)
4019 goto out;
4020 if (ret) {
4021 struct name_cache_entry *nce;
4022 struct waiting_dir_move *wdm;
4023
4024 if (orphanized_dir) {
4025 ret = refresh_ref_path(sctx, cur);
4026 if (ret < 0)
4027 goto out;
4028 }
4029
4030 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4031 cur->full_path);
4032 if (ret < 0)
4033 goto out;
4034 if (S_ISDIR(ow_mode))
4035 orphanized_dir = true;
4036
4037 /*
4038 * If ow_inode has its rename operation delayed,
4039 * make sure that its orphanized name is used in
4040 * the source path when performing its rename
4041 * operation.
4042 */
4043 if (is_waiting_for_move(sctx, ow_inode)) {
4044 wdm = get_waiting_dir_move(sctx,
4045 ow_inode);
4046 ASSERT(wdm);
4047 wdm->orphanized = true;
4048 }
4049
4050 /*
4051 * Make sure we clear our orphanized inode's
4052 * name from the name cache. This is because the
4053 * inode ow_inode might be an ancestor of some
4054 * other inode that will be orphanized as well
4055 * later and has an inode number greater than
4056 * sctx->send_progress. We need to prevent
4057 * future name lookups from using the old name
4058 * and instead get the orphan name.
4059 */
4060 nce = name_cache_search(sctx, ow_inode, ow_gen);
4061 if (nce) {
4062 name_cache_delete(sctx, nce);
4063 kfree(nce);
4064 }
4065
4066 /*
4067 * ow_inode might currently be an ancestor of
4068 * cur_ino, therefore compute valid_path (the
4069 * current path of cur_ino) again because it
4070 * might contain the pre-orphanization name of
4071 * ow_inode, which is no longer valid.
4072 */
4073 ret = is_ancestor(sctx->parent_root,
4074 ow_inode, ow_gen,
4075 sctx->cur_ino, NULL);
4076 if (ret > 0) {
4077 orphanized_ancestor = true;
4078 fs_path_reset(valid_path);
4079 ret = get_cur_path(sctx, sctx->cur_ino,
4080 sctx->cur_inode_gen,
4081 valid_path);
4082 }
4083 if (ret < 0)
4084 goto out;
4085 } else {
4086 /*
4087 * If we previously orphanized a directory that
4088 * collided with a new reference that we already
4089 * processed, recompute the current path because
4090 * that directory may be part of the path.
4091 */
4092 if (orphanized_dir) {
4093 ret = refresh_ref_path(sctx, cur);
4094 if (ret < 0)
4095 goto out;
4096 }
4097 ret = send_unlink(sctx, cur->full_path);
4098 if (ret < 0)
4099 goto out;
4100 }
4101 }
4102
4103 }
4104
4105 list_for_each_entry(cur, &sctx->new_refs, list) {
4106 /*
4107 * We may have refs where the parent directory does not exist
4108 * yet. This happens if the parent directory's inum is higher
4109 * than the current inum. To handle this case, we create the
4110 * parent directory out of order. But we need to check if this
4111 * did already happen before due to other refs in the same dir.
4112 */
4113 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4114 if (ret < 0)
4115 goto out;
4116 if (ret == inode_state_will_create) {
4117 ret = 0;
4118 /*
4119 * First check if any of the current inode's refs
4120 * already created the dir.
4121 */
4122 list_for_each_entry(cur2, &sctx->new_refs, list) {
4123 if (cur == cur2)
4124 break;
4125 if (cur2->dir == cur->dir) {
4126 ret = 1;
4127 break;
4128 }
4129 }
4130
4131 /*
4132 * If that did not happen, check if a previous inode
4133 * already created the dir.
4134 */
4135 if (!ret)
4136 ret = did_create_dir(sctx, cur->dir);
4137 if (ret < 0)
4138 goto out;
4139 if (!ret) {
4140 ret = send_create_inode(sctx, cur->dir);
4141 if (ret < 0)
4142 goto out;
4143 }
4144 }
4145
4146 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4147 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4148 if (ret < 0)
4149 goto out;
4150 if (ret == 1) {
4151 can_rename = false;
4152 *pending_move = 1;
4153 }
4154 }
4155
4156 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4157 can_rename) {
4158 ret = wait_for_parent_move(sctx, cur, is_orphan);
4159 if (ret < 0)
4160 goto out;
4161 if (ret == 1) {
4162 can_rename = false;
4163 *pending_move = 1;
4164 }
4165 }
4166
4167 /*
4168 * link/move the ref to the new place. If we have an orphan
4169 * inode, move it and update valid_path. If not, link or move
4170 * it depending on the inode mode.
4171 */
4172 if (is_orphan && can_rename) {
4173 ret = send_rename(sctx, valid_path, cur->full_path);
4174 if (ret < 0)
4175 goto out;
4176 is_orphan = 0;
4177 ret = fs_path_copy(valid_path, cur->full_path);
4178 if (ret < 0)
4179 goto out;
4180 } else if (can_rename) {
4181 if (S_ISDIR(sctx->cur_inode_mode)) {
4182 /*
4183 * Dirs can't be linked, so move it. For moved
4184 * dirs, we always have one new and one deleted
4185 * ref. The deleted ref is ignored later.
4186 */
4187 ret = send_rename(sctx, valid_path,
4188 cur->full_path);
4189 if (!ret)
4190 ret = fs_path_copy(valid_path,
4191 cur->full_path);
4192 if (ret < 0)
4193 goto out;
4194 } else {
4195 /*
4196 * We might have previously orphanized an inode
4197 * which is an ancestor of our current inode,
4198 * so our reference's full path, which was
4199 * computed before any such orphanizations, must
4200 * be updated.
4201 */
4202 if (orphanized_dir) {
4203 ret = update_ref_path(sctx, cur);
4204 if (ret < 0)
4205 goto out;
4206 }
4207 ret = send_link(sctx, cur->full_path,
4208 valid_path);
4209 if (ret < 0)
4210 goto out;
4211 }
4212 }
4213 ret = dup_ref(cur, &check_dirs);
4214 if (ret < 0)
4215 goto out;
4216 }
4217
4218 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4219 /*
4220 * Check if we can already rmdir the directory. If not,
4221 * orphanize it. For every dir item inside that gets deleted
4222 * later, we do this check again and rmdir it then if possible.
4223 * See the use of check_dirs for more details.
4224 */
4225 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4226 sctx->cur_ino);
4227 if (ret < 0)
4228 goto out;
4229 if (ret) {
4230 ret = send_rmdir(sctx, valid_path);
4231 if (ret < 0)
4232 goto out;
4233 } else if (!is_orphan) {
4234 ret = orphanize_inode(sctx, sctx->cur_ino,
4235 sctx->cur_inode_gen, valid_path);
4236 if (ret < 0)
4237 goto out;
4238 is_orphan = 1;
4239 }
4240
4241 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4242 ret = dup_ref(cur, &check_dirs);
4243 if (ret < 0)
4244 goto out;
4245 }
4246 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4247 !list_empty(&sctx->deleted_refs)) {
4248 /*
4249 * We have a moved dir. Add the old parent to check_dirs.
4250 */
4251 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4252 list);
4253 ret = dup_ref(cur, &check_dirs);
4254 if (ret < 0)
4255 goto out;
4256 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4257 /*
4258 * We have a non dir inode. Go through all deleted refs and
4259 * unlink them if they were not already overwritten by other
4260 * inodes.
4261 */
4262 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4263 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4264 sctx->cur_ino, sctx->cur_inode_gen,
4265 cur->name, cur->name_len);
4266 if (ret < 0)
4267 goto out;
4268 if (!ret) {
4269 /*
4270 * If we orphanized any ancestor before, we need
4271 * to recompute the full path for deleted names,
4272 * since any such path was computed before we
4273 * processed any references and orphanized any
4274 * ancestor inode.
4275 */
4276 if (orphanized_ancestor) {
4277 ret = update_ref_path(sctx, cur);
4278 if (ret < 0)
4279 goto out;
4280 }
4281 ret = send_unlink(sctx, cur->full_path);
4282 if (ret < 0)
4283 goto out;
4284 }
4285 ret = dup_ref(cur, &check_dirs);
4286 if (ret < 0)
4287 goto out;
4288 }
4289 /*
4290 * If the inode is still orphan, unlink the orphan. This may
4291 * happen when a previous inode did overwrite the first ref
4292 * of this inode and no new refs were added for the current
4293 * inode. Unlinking does not mean that the inode is deleted in
4294 * all cases. There may still be links to this inode in other
4295 * places.
4296 */
4297 if (is_orphan) {
4298 ret = send_unlink(sctx, valid_path);
4299 if (ret < 0)
4300 goto out;
4301 }
4302 }
4303
4304 /*
4305 * We have collected all parent dirs where cur_inode was once located. We
4306 * now go through all these dirs and check if they are pending for
4307 * deletion and if it's finally possible to perform the rmdir now.
4308 * We also update the utimes of the parent dirs here.
4309 */
4310 list_for_each_entry(cur, &check_dirs, list) {
4311 /*
4312 * In case we had refs into dirs that were not processed yet,
4313 * we don't need to do the utime and rmdir logic for these dirs.
4314 * The dir will be processed later.
4315 */
4316 if (cur->dir > sctx->cur_ino)
4317 continue;
4318
4319 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4320 if (ret < 0)
4321 goto out;
4322
4323 if (ret == inode_state_did_create ||
4324 ret == inode_state_no_change) {
4325 /* TODO delayed utimes */
4326 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4327 if (ret < 0)
4328 goto out;
4329 } else if (ret == inode_state_did_delete &&
4330 cur->dir != last_dir_ino_rm) {
4331 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4332 sctx->cur_ino);
4333 if (ret < 0)
4334 goto out;
4335 if (ret) {
4336 ret = get_cur_path(sctx, cur->dir,
4337 cur->dir_gen, valid_path);
4338 if (ret < 0)
4339 goto out;
4340 ret = send_rmdir(sctx, valid_path);
4341 if (ret < 0)
4342 goto out;
4343 last_dir_ino_rm = cur->dir;
4344 }
4345 }
4346 }
4347
4348 ret = 0;
4349
4350 out:
4351 __free_recorded_refs(&check_dirs);
4352 free_recorded_refs(sctx);
4353 fs_path_free(valid_path);
4354 return ret;
4355 }
4356
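/*
 * Record a single reference in the given list (new or deleted references).
 * The recorded path is the current path of the parent directory plus the
 * reference's name; it is handed over to the list entry by __record_ref(),
 * and freed here on failure.
 */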
4357 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4358 void *ctx, struct list_head *refs)
4359 {
4360 int ret = 0;
4361 struct send_ctx *sctx = ctx;
4362 struct fs_path *p;
4363 u64 gen;
4364
4365 p = fs_path_alloc();
4366 if (!p)
4367 return -ENOMEM;
4368
4369 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4370 NULL, NULL);
4371 if (ret < 0)
4372 goto out;
4373
4374 ret = get_cur_path(sctx, dir, gen, p);
4375 if (ret < 0)
4376 goto out;
4377 ret = fs_path_add_path(p, name);
4378 if (ret < 0)
4379 goto out;
4380
4381 ret = __record_ref(refs, dir, gen, p);
4382
4383 out:
4384 if (ret)
4385 fs_path_free(p);
4386 return ret;
4387 }
4388
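/*
 * Callbacks for iterate_inode_ref(): new references are resolved against the
 * send root and collected in sctx->new_refs, deleted references are resolved
 * against the parent root and collected in sctx->deleted_refs.
 */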
4389 static int __record_new_ref(int num, u64 dir, int index,
4390 struct fs_path *name,
4391 void *ctx)
4392 {
4393 struct send_ctx *sctx = ctx;
4394 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4395 }
4396
4397
4398 static int __record_deleted_ref(int num, u64 dir, int index,
4399 struct fs_path *name,
4400 void *ctx)
4401 {
4402 struct send_ctx *sctx = ctx;
4403 return record_ref(sctx->parent_root, dir, name, ctx,
4404 &sctx->deleted_refs);
4405 }
4406
4407 static int record_new_ref(struct send_ctx *sctx)
4408 {
4409 int ret;
4410
4411 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4412 sctx->cmp_key, 0, __record_new_ref, sctx);
4413 if (ret < 0)
4414 goto out;
4415 ret = 0;
4416
4417 out:
4418 return ret;
4419 }
4420
4421 static int record_deleted_ref(struct send_ctx *sctx)
4422 {
4423 int ret;
4424
4425 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4426 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4427 if (ret < 0)
4428 goto out;
4429 ret = 0;
4430
4431 out:
4432 return ret;
4433 }
4434
4435 struct find_ref_ctx {
4436 u64 dir;
4437 u64 dir_gen;
4438 struct btrfs_root *root;
4439 struct fs_path *name;
4440 int found_idx;
4441 };
4442
4443 static int __find_iref(int num, u64 dir, int index,
4444 struct fs_path *name,
4445 void *ctx_)
4446 {
4447 struct find_ref_ctx *ctx = ctx_;
4448 u64 dir_gen;
4449 int ret;
4450
4451 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4452 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4453 /*
4454 * To avoid doing extra lookups we'll only do this if everything
4455 * else matches.
4456 */
4457 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4458 NULL, NULL, NULL);
4459 if (ret)
4460 return ret;
4461 if (dir_gen != ctx->dir_gen)
4462 return 0;
4463 ctx->found_idx = num;
4464 return 1;
4465 }
4466 return 0;
4467 }
4468
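/*
 * Search the inode reference item at @path/@key for a reference with the
 * given parent directory, directory generation and name. Returns the index
 * of the matching reference within the item, or -ENOENT if none is found.
 */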
4469 static int find_iref(struct btrfs_root *root,
4470 struct btrfs_path *path,
4471 struct btrfs_key *key,
4472 u64 dir, u64 dir_gen, struct fs_path *name)
4473 {
4474 int ret;
4475 struct find_ref_ctx ctx;
4476
4477 ctx.dir = dir;
4478 ctx.name = name;
4479 ctx.dir_gen = dir_gen;
4480 ctx.found_idx = -1;
4481 ctx.root = root;
4482
4483 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4484 if (ret < 0)
4485 return ret;
4486
4487 if (ctx.found_idx == -1)
4488 return -ENOENT;
4489
4490 return ctx.found_idx;
4491 }
4492
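/*
 * For inode reference items that changed between the parent and send
 * snapshots, record a name as a new reference only if it does not exist in
 * the parent root, and (in the helper below) record a name as a deleted
 * reference only if it does not exist in the send root. Names present in
 * both snapshots are unchanged and need no commands.
 */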
4493 static int __record_changed_new_ref(int num, u64 dir, int index,
4494 struct fs_path *name,
4495 void *ctx)
4496 {
4497 u64 dir_gen;
4498 int ret;
4499 struct send_ctx *sctx = ctx;
4500
4501 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4502 NULL, NULL, NULL);
4503 if (ret)
4504 return ret;
4505
4506 ret = find_iref(sctx->parent_root, sctx->right_path,
4507 sctx->cmp_key, dir, dir_gen, name);
4508 if (ret == -ENOENT)
4509 ret = __record_new_ref(num, dir, index, name, sctx);
4510 else if (ret > 0)
4511 ret = 0;
4512
4513 return ret;
4514 }
4515
4516 static int __record_changed_deleted_ref(int num, u64 dir, int index,
4517 struct fs_path *name,
4518 void *ctx)
4519 {
4520 u64 dir_gen;
4521 int ret;
4522 struct send_ctx *sctx = ctx;
4523
4524 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4525 NULL, NULL, NULL);
4526 if (ret)
4527 return ret;
4528
4529 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4530 dir, dir_gen, name);
4531 if (ret == -ENOENT)
4532 ret = __record_deleted_ref(num, dir, index, name, sctx);
4533 else if (ret > 0)
4534 ret = 0;
4535
4536 return ret;
4537 }
4538
4539 static int record_changed_ref(struct send_ctx *sctx)
4540 {
4541 int ret = 0;
4542
4543 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4544 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4545 if (ret < 0)
4546 goto out;
4547 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4548 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4549 if (ret < 0)
4550 goto out;
4551 ret = 0;
4552
4553 out:
4554 return ret;
4555 }
4556
4557 /*
4558 * Record and process all refs at once. Needed when an inode changes the
4559 * generation number, which means that it was deleted and recreated.
4560 */
4561 static int process_all_refs(struct send_ctx *sctx,
4562 enum btrfs_compare_tree_result cmd)
4563 {
4564 int ret;
4565 struct btrfs_root *root;
4566 struct btrfs_path *path;
4567 struct btrfs_key key;
4568 struct btrfs_key found_key;
4569 struct extent_buffer *eb;
4570 int slot;
4571 iterate_inode_ref_t cb;
4572 int pending_move = 0;
4573
4574 path = alloc_path_for_send();
4575 if (!path)
4576 return -ENOMEM;
4577
4578 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4579 root = sctx->send_root;
4580 cb = __record_new_ref;
4581 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4582 root = sctx->parent_root;
4583 cb = __record_deleted_ref;
4584 } else {
4585 btrfs_err(sctx->send_root->fs_info,
4586 "Wrong command %d in process_all_refs", cmd);
4587 ret = -EINVAL;
4588 goto out;
4589 }
4590
4591 key.objectid = sctx->cmp_key->objectid;
4592 key.type = BTRFS_INODE_REF_KEY;
4593 key.offset = 0;
4594 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4595 if (ret < 0)
4596 goto out;
4597
4598 while (1) {
4599 eb = path->nodes[0];
4600 slot = path->slots[0];
4601 if (slot >= btrfs_header_nritems(eb)) {
4602 ret = btrfs_next_leaf(root, path);
4603 if (ret < 0)
4604 goto out;
4605 else if (ret > 0)
4606 break;
4607 continue;
4608 }
4609
4610 btrfs_item_key_to_cpu(eb, &found_key, slot);
4611
4612 if (found_key.objectid != key.objectid ||
4613 (found_key.type != BTRFS_INODE_REF_KEY &&
4614 found_key.type != BTRFS_INODE_EXTREF_KEY))
4615 break;
4616
4617 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4618 if (ret < 0)
4619 goto out;
4620
4621 path->slots[0]++;
4622 }
4623 btrfs_release_path(path);
4624
4625 /*
4626 * We don't actually care about pending_move as we are simply
4627 * re-creating this inode and will be renaming it into place once we
4628 * rename the parent directory.
4629 */
4630 ret = process_recorded_refs(sctx, &pending_move);
4631 out:
4632 btrfs_free_path(path);
4633 return ret;
4634 }
4635
4636 static int send_set_xattr(struct send_ctx *sctx,
4637 struct fs_path *path,
4638 const char *name, int name_len,
4639 const char *data, int data_len)
4640 {
4641 int ret = 0;
4642
4643 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4644 if (ret < 0)
4645 goto out;
4646
4647 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4648 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4649 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4650
4651 ret = send_cmd(sctx);
4652
4653 tlv_put_failure:
4654 out:
4655 return ret;
4656 }
4657
4658 static int send_remove_xattr(struct send_ctx *sctx,
4659 struct fs_path *path,
4660 const char *name, int name_len)
4661 {
4662 int ret = 0;
4663
4664 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4665 if (ret < 0)
4666 goto out;
4667
4668 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4669 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4670
4671 ret = send_cmd(sctx);
4672
4673 tlv_put_failure:
4674 out:
4675 return ret;
4676 }
4677
4678 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4679 const char *name, int name_len,
4680 const char *data, int data_len,
4681 u8 type, void *ctx)
4682 {
4683 int ret;
4684 struct send_ctx *sctx = ctx;
4685 struct fs_path *p;
4686 struct posix_acl_xattr_header dummy_acl;
4687
4688 /* Capabilities are emitted by finish_inode_if_needed */
4689 if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4690 return 0;
4691
4692 p = fs_path_alloc();
4693 if (!p)
4694 return -ENOMEM;
4695
4696 /*
4697 * This hack is needed because empty acls are stored as zero byte
4698 * data in xattrs. The problem with that is that receiving these zero
4699 * byte acls will fail later. To fix this, we send a dummy acl list that
4700 * only contains the version number and no entries.
4701 */
4702 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4703 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4704 if (data_len == 0) {
4705 dummy_acl.a_version =
4706 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4707 data = (char *)&dummy_acl;
4708 data_len = sizeof(dummy_acl);
4709 }
4710 }
4711
4712 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4713 if (ret < 0)
4714 goto out;
4715
4716 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4717
4718 out:
4719 fs_path_free(p);
4720 return ret;
4721 }
4722
4723 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4724 const char *name, int name_len,
4725 const char *data, int data_len,
4726 u8 type, void *ctx)
4727 {
4728 int ret;
4729 struct send_ctx *sctx = ctx;
4730 struct fs_path *p;
4731
4732 p = fs_path_alloc();
4733 if (!p)
4734 return -ENOMEM;
4735
4736 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4737 if (ret < 0)
4738 goto out;
4739
4740 ret = send_remove_xattr(sctx, p, name, name_len);
4741
4742 out:
4743 fs_path_free(p);
4744 return ret;
4745 }
4746
4747 static int process_new_xattr(struct send_ctx *sctx)
4748 {
4749 int ret = 0;
4750
4751 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4752 __process_new_xattr, sctx);
4753
4754 return ret;
4755 }
4756
4757 static int process_deleted_xattr(struct send_ctx *sctx)
4758 {
4759 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4760 __process_deleted_xattr, sctx);
4761 }
4762
4763 struct find_xattr_ctx {
4764 const char *name;
4765 int name_len;
4766 int found_idx;
4767 char *found_data;
4768 int found_data_len;
4769 };
4770
4771 static int __find_xattr(int num, struct btrfs_key *di_key,
4772 const char *name, int name_len,
4773 const char *data, int data_len,
4774 u8 type, void *vctx)
4775 {
4776 struct find_xattr_ctx *ctx = vctx;
4777
4778 if (name_len == ctx->name_len &&
4779 strncmp(name, ctx->name, name_len) == 0) {
4780 ctx->found_idx = num;
4781 ctx->found_data_len = data_len;
4782 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4783 if (!ctx->found_data)
4784 return -ENOMEM;
4785 return 1;
4786 }
4787 return 0;
4788 }
4789
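/*
 * Look up an xattr with the given name in the dir items at @path/@key.
 * Returns the index of the xattr within the item, or -ENOENT if it does not
 * exist. If @data is not NULL, a copy of the xattr's value (allocated with
 * kmemdup(), to be freed by the caller) and its length are returned through
 * @data and @data_len.
 */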
4790 static int find_xattr(struct btrfs_root *root,
4791 struct btrfs_path *path,
4792 struct btrfs_key *key,
4793 const char *name, int name_len,
4794 char **data, int *data_len)
4795 {
4796 int ret;
4797 struct find_xattr_ctx ctx;
4798
4799 ctx.name = name;
4800 ctx.name_len = name_len;
4801 ctx.found_idx = -1;
4802 ctx.found_data = NULL;
4803 ctx.found_data_len = 0;
4804
4805 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4806 if (ret < 0)
4807 return ret;
4808
4809 if (ctx.found_idx == -1)
4810 return -ENOENT;
4811 if (data) {
4812 *data = ctx.found_data;
4813 *data_len = ctx.found_data_len;
4814 } else {
4815 kfree(ctx.found_data);
4816 }
4817 return ctx.found_idx;
4818 }
4819
4820
4821 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4822 const char *name, int name_len,
4823 const char *data, int data_len,
4824 u8 type, void *ctx)
4825 {
4826 int ret;
4827 struct send_ctx *sctx = ctx;
4828 char *found_data = NULL;
4829 int found_data_len = 0;
4830
4831 ret = find_xattr(sctx->parent_root, sctx->right_path,
4832 sctx->cmp_key, name, name_len, &found_data,
4833 &found_data_len);
4834 if (ret == -ENOENT) {
4835 ret = __process_new_xattr(num, di_key, name, name_len, data,
4836 data_len, type, ctx);
4837 } else if (ret >= 0) {
4838 if (data_len != found_data_len ||
4839 memcmp(data, found_data, data_len)) {
4840 ret = __process_new_xattr(num, di_key, name, name_len,
4841 data, data_len, type, ctx);
4842 } else {
4843 ret = 0;
4844 }
4845 }
4846
4847 kfree(found_data);
4848 return ret;
4849 }
4850
4851 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4852 const char *name, int name_len,
4853 const char *data, int data_len,
4854 u8 type, void *ctx)
4855 {
4856 int ret;
4857 struct send_ctx *sctx = ctx;
4858
4859 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4860 name, name_len, NULL, NULL);
4861 if (ret == -ENOENT)
4862 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4863 data_len, type, ctx);
4864 else if (ret >= 0)
4865 ret = 0;
4866
4867 return ret;
4868 }
4869
4870 static int process_changed_xattr(struct send_ctx *sctx)
4871 {
4872 int ret = 0;
4873
4874 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4875 __process_changed_new_xattr, sctx);
4876 if (ret < 0)
4877 goto out;
4878 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4879 __process_changed_deleted_xattr, sctx);
4880
4881 out:
4882 return ret;
4883 }
4884
4885 static int process_all_new_xattrs(struct send_ctx *sctx)
4886 {
4887 int ret;
4888 struct btrfs_root *root;
4889 struct btrfs_path *path;
4890 struct btrfs_key key;
4891 struct btrfs_key found_key;
4892 struct extent_buffer *eb;
4893 int slot;
4894
4895 path = alloc_path_for_send();
4896 if (!path)
4897 return -ENOMEM;
4898
4899 root = sctx->send_root;
4900
4901 key.objectid = sctx->cmp_key->objectid;
4902 key.type = BTRFS_XATTR_ITEM_KEY;
4903 key.offset = 0;
4904 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4905 if (ret < 0)
4906 goto out;
4907
4908 while (1) {
4909 eb = path->nodes[0];
4910 slot = path->slots[0];
4911 if (slot >= btrfs_header_nritems(eb)) {
4912 ret = btrfs_next_leaf(root, path);
4913 if (ret < 0) {
4914 goto out;
4915 } else if (ret > 0) {
4916 ret = 0;
4917 break;
4918 }
4919 continue;
4920 }
4921
4922 btrfs_item_key_to_cpu(eb, &found_key, slot);
4923 if (found_key.objectid != key.objectid ||
4924 found_key.type != key.type) {
4925 ret = 0;
4926 goto out;
4927 }
4928
4929 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4930 if (ret < 0)
4931 goto out;
4932
4933 path->slots[0]++;
4934 }
4935
4936 out:
4937 btrfs_free_path(path);
4938 return ret;
4939 }
4940
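/*
 * Maximum amount of file data we copy into the send buffer for a single
 * write command. The remaining 16K of the buffer is assumed to be enough for
 * the command header and the other attributes (path and file offset).
 */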
4941 static inline u64 max_send_read_size(const struct send_ctx *sctx)
4942 {
4943 return sctx->send_max_size - SZ_16K;
4944 }
4945
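/*
 * Append the header of a BTRFS_SEND_A_DATA attribute (two little endian
 * 16 bit fields: type and length) to the send buffer; the caller copies the
 * payload right after it. Fails with -EOVERFLOW if the header plus @len
 * bytes would not fit in the remaining buffer space.
 */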
4946 static int put_data_header(struct send_ctx *sctx, u32 len)
4947 {
4948 struct btrfs_tlv_header *hdr;
4949
4950 if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
4951 return -EOVERFLOW;
4952 hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
4953 put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
4954 put_unaligned_le16(len, &hdr->tlv_len);
4955 sctx->send_size += sizeof(*hdr);
4956 return 0;
4957 }
4958
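/*
 * Copy @len bytes of the current inode's data, starting at @offset, into the
 * send buffer right after the data attribute header. The data is read
 * through the page cache, using readahead and reading pages synchronously
 * when they are not uptodate.
 */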
4959 static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
4960 {
4961 struct btrfs_root *root = sctx->send_root;
4962 struct btrfs_fs_info *fs_info = root->fs_info;
4963 struct inode *inode;
4964 struct page *page;
4965 pgoff_t index = offset >> PAGE_SHIFT;
4966 pgoff_t last_index;
4967 unsigned pg_offset = offset_in_page(offset);
4968 int ret;
4969
4970 ret = put_data_header(sctx, len);
4971 if (ret)
4972 return ret;
4973
4974 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
4975 if (IS_ERR(inode))
4976 return PTR_ERR(inode);
4977
4978 last_index = (offset + len - 1) >> PAGE_SHIFT;
4979
4980 /* initial readahead */
4981 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4982 file_ra_state_init(&sctx->ra, inode->i_mapping);
4983
4984 while (index <= last_index) {
4985 unsigned cur_len = min_t(unsigned, len,
4986 PAGE_SIZE - pg_offset);
4987
4988 page = find_lock_page(inode->i_mapping, index);
4989 if (!page) {
4990 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4991 NULL, index, last_index + 1 - index);
4992
4993 page = find_or_create_page(inode->i_mapping, index,
4994 GFP_KERNEL);
4995 if (!page) {
4996 ret = -ENOMEM;
4997 break;
4998 }
4999 }
5000
5001 if (PageReadahead(page)) {
5002 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
5003 NULL, page, index, last_index + 1 - index);
5004 }
5005
5006 if (!PageUptodate(page)) {
5007 btrfs_readpage(NULL, page);
5008 lock_page(page);
5009 if (!PageUptodate(page)) {
5010 unlock_page(page);
5011 btrfs_err(fs_info,
5012 "send: IO error at offset %llu for inode %llu root %llu",
5013 page_offset(page), sctx->cur_ino,
5014 sctx->send_root->root_key.objectid);
5015 put_page(page);
5016 ret = -EIO;
5017 break;
5018 }
5019 }
5020
5021 memcpy_from_page(sctx->send_buf + sctx->send_size, page,
5022 pg_offset, cur_len);
5023 unlock_page(page);
5024 put_page(page);
5025 index++;
5026 pg_offset = 0;
5027 len -= cur_len;
5028 sctx->send_size += cur_len;
5029 }
5030 iput(inode);
5031 return ret;
5032 }
5033
5034 /*
5035 * Read some bytes from the current inode/file and send a write command to
5036 * user space.
5037 */
5038 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
5039 {
5040 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5041 int ret = 0;
5042 struct fs_path *p;
5043
5044 p = fs_path_alloc();
5045 if (!p)
5046 return -ENOMEM;
5047
5048 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5049
5050 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5051 if (ret < 0)
5052 goto out;
5053
5054 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5055 if (ret < 0)
5056 goto out;
5057
5058 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5059 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5060 ret = put_file_data(sctx, offset, len);
5061 if (ret < 0)
5062 goto out;
5063
5064 ret = send_cmd(sctx);
5065
5066 tlv_put_failure:
5067 out:
5068 fs_path_free(p);
5069 return ret;
5070 }
5071
5072 /*
5073 * Send a clone command to user space.
5074 */
5075 static int send_clone(struct send_ctx *sctx,
5076 u64 offset, u32 len,
5077 struct clone_root *clone_root)
5078 {
5079 int ret = 0;
5080 struct fs_path *p;
5081 u64 gen;
5082
5083 btrfs_debug(sctx->send_root->fs_info,
5084 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5085 offset, len, clone_root->root->root_key.objectid,
5086 clone_root->ino, clone_root->offset);
5087
5088 p = fs_path_alloc();
5089 if (!p)
5090 return -ENOMEM;
5091
5092 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5093 if (ret < 0)
5094 goto out;
5095
5096 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5097 if (ret < 0)
5098 goto out;
5099
5100 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5101 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5102 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5103
5104 if (clone_root->root == sctx->send_root) {
5105 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
5106 &gen, NULL, NULL, NULL, NULL);
5107 if (ret < 0)
5108 goto out;
5109 ret = get_cur_path(sctx, clone_root->ino, gen, p);
5110 } else {
5111 ret = get_inode_path(clone_root->root, clone_root->ino, p);
5112 }
5113 if (ret < 0)
5114 goto out;
5115
5116 /*
5117 * If the parent we're using has a received_uuid set then use that as
5118 * our clone source as that is what we will look for when doing a
5119 * receive.
5120 *
5121 * This covers the case where we create a snapshot off of a received
5122 * subvolume and then use that as the parent and try to receive on a
5123 * different host.
5124 */
5125 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5126 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5127 clone_root->root->root_item.received_uuid);
5128 else
5129 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5130 clone_root->root->root_item.uuid);
5131 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5132 btrfs_root_ctransid(&clone_root->root->root_item));
5133 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5134 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5135 clone_root->offset);
5136
5137 ret = send_cmd(sctx);
5138
5139 tlv_put_failure:
5140 out:
5141 fs_path_free(p);
5142 return ret;
5143 }
5144
5145 /*
5146 * Send an update extent command to user space.
5147 */
5148 static int send_update_extent(struct send_ctx *sctx,
5149 u64 offset, u32 len)
5150 {
5151 int ret = 0;
5152 struct fs_path *p;
5153
5154 p = fs_path_alloc();
5155 if (!p)
5156 return -ENOMEM;
5157
5158 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5159 if (ret < 0)
5160 goto out;
5161
5162 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5163 if (ret < 0)
5164 goto out;
5165
5166 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5167 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5168 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5169
5170 ret = send_cmd(sctx);
5171
5172 tlv_put_failure:
5173 out:
5174 fs_path_free(p);
5175 return ret;
5176 }
5177
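/*
 * Send zeroes for the hole between sctx->cur_inode_last_extent and @end,
 * clamped to the inode's size. With BTRFS_SEND_FLAG_NO_FILE_DATA a single
 * update extent command is sent instead; otherwise the range is covered by
 * write commands of at most max_send_read_size() bytes each.
 */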
5178 static int send_hole(struct send_ctx *sctx, u64 end)
5179 {
5180 struct fs_path *p = NULL;
5181 u64 read_size = max_send_read_size(sctx);
5182 u64 offset = sctx->cur_inode_last_extent;
5183 int ret = 0;
5184
5185 /*
5186 * A hole that starts at EOF or beyond it. Since we do not yet support
5187 * fallocate (for extent preallocation and hole punching), sending a
5188 * write of zeroes starting at EOF or beyond would later require issuing
5189 * a truncate operation which would undo the write and achieve nothing.
5190 */
5191 if (offset >= sctx->cur_inode_size)
5192 return 0;
5193
5194 /*
5195 * Don't go beyond the inode's i_size due to prealloc extents that start
5196 * after the i_size.
5197 */
5198 end = min_t(u64, end, sctx->cur_inode_size);
5199
5200 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5201 return send_update_extent(sctx, offset, end - offset);
5202
5203 p = fs_path_alloc();
5204 if (!p)
5205 return -ENOMEM;
5206 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5207 if (ret < 0)
5208 goto tlv_put_failure;
5209 while (offset < end) {
5210 u64 len = min(end - offset, read_size);
5211
5212 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5213 if (ret < 0)
5214 break;
5215 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5216 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5217 ret = put_data_header(sctx, len);
5218 if (ret < 0)
5219 break;
5220 memset(sctx->send_buf + sctx->send_size, 0, len);
5221 sctx->send_size += len;
5222 ret = send_cmd(sctx);
5223 if (ret < 0)
5224 break;
5225 offset += len;
5226 }
5227 sctx->cur_inode_next_write_offset = offset;
5228 tlv_put_failure:
5229 fs_path_free(p);
5230 return ret;
5231 }
5232
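/*
 * Send the data of the range [@offset, @offset + @len) using one or more
 * write commands, each carrying at most max_send_read_size() bytes. With
 * BTRFS_SEND_FLAG_NO_FILE_DATA a single update extent command is sent
 * instead.
 */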
5233 static int send_extent_data(struct send_ctx *sctx,
5234 const u64 offset,
5235 const u64 len)
5236 {
5237 u64 read_size = max_send_read_size(sctx);
5238 u64 sent = 0;
5239
5240 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5241 return send_update_extent(sctx, offset, len);
5242
5243 while (sent < len) {
5244 u64 size = min(len - sent, read_size);
5245 int ret;
5246
5247 ret = send_write(sctx, offset + sent, size);
5248 if (ret < 0)
5249 return ret;
5250 sent += size;
5251 }
5252 return 0;
5253 }
5254
5255 /*
5256 * Search for a capability xattr related to sctx->cur_ino. If the capability is
5257 * found, call the send_set_xattr function to emit it.
5258 *
5259 * Return 0 if there isn't a capability, or when the capability was emitted
5260 * successfully, or < 0 if an error occurred.
5261 */
5262 static int send_capabilities(struct send_ctx *sctx)
5263 {
5264 struct fs_path *fspath = NULL;
5265 struct btrfs_path *path;
5266 struct btrfs_dir_item *di;
5267 struct extent_buffer *leaf;
5268 unsigned long data_ptr;
5269 char *buf = NULL;
5270 int buf_len;
5271 int ret = 0;
5272
5273 path = alloc_path_for_send();
5274 if (!path)
5275 return -ENOMEM;
5276
5277 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5278 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
5279 if (!di) {
5280 /* There is no xattr for this inode */
5281 goto out;
5282 } else if (IS_ERR(di)) {
5283 ret = PTR_ERR(di);
5284 goto out;
5285 }
5286
5287 leaf = path->nodes[0];
5288 buf_len = btrfs_dir_data_len(leaf, di);
5289
5290 fspath = fs_path_alloc();
5291 buf = kmalloc(buf_len, GFP_KERNEL);
5292 if (!fspath || !buf) {
5293 ret = -ENOMEM;
5294 goto out;
5295 }
5296
5297 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5298 if (ret < 0)
5299 goto out;
5300
5301 data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5302 read_extent_buffer(leaf, buf, data_ptr, buf_len);
5303
5304 ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5305 strlen(XATTR_NAME_CAPS), buf, buf_len);
5306 out:
5307 kfree(buf);
5308 fs_path_free(fspath);
5309 btrfs_free_path(path);
5310 return ret;
5311 }
5312
5313 static int clone_range(struct send_ctx *sctx,
5314 struct clone_root *clone_root,
5315 const u64 disk_byte,
5316 u64 data_offset,
5317 u64 offset,
5318 u64 len)
5319 {
5320 struct btrfs_path *path;
5321 struct btrfs_key key;
5322 int ret;
5323 u64 clone_src_i_size = 0;
5324
5325 /*
5326 * Prevent cloning from a zero offset with a length matching the sector
5327 * size because in some scenarios this will make the receiver fail.
5328 *
5329 * For example, if in the source filesystem the extent at offset 0
5330 * has a length of sectorsize and it was written using direct IO, then
5331 * it can never be an inline extent (even if compression is enabled).
5332 * Then this extent can be cloned in the original filesystem to a non
5333 * zero file offset, but it may not be possible to clone in the
5334 * destination filesystem because it can be inlined due to compression
5335 * on the destination filesystem (as the receiver's write operations are
5336 * always done using buffered IO). The same happens when the original
5337 * filesystem does not have compression enabled but the destination
5338 * filesystem has.
5339 */
5340 if (clone_root->offset == 0 &&
5341 len == sctx->send_root->fs_info->sectorsize)
5342 return send_extent_data(sctx, offset, len);
5343
5344 path = alloc_path_for_send();
5345 if (!path)
5346 return -ENOMEM;
5347
5348 /*
5349 * There are inodes that have extents that lie beyond their i_size. Don't
5350 * accept clones from these extents.
5351 */
5352 ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5353 &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
5354 btrfs_release_path(path);
5355 if (ret < 0)
5356 goto out;
5357
5358 /*
5359 * We can't send a clone operation for the entire range if we find
5360 * extent items in the respective range in the source file that
5361 * refer to different extents or if we find holes.
5362 * So check for that and do a mix of clone and regular write/copy
5363 * operations if needed.
5364 *
5365 * Example:
5366 *
5367 * mkfs.btrfs -f /dev/sda
5368 * mount /dev/sda /mnt
5369 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5370 * cp --reflink=always /mnt/foo /mnt/bar
5371 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5372 * btrfs subvolume snapshot -r /mnt /mnt/snap
5373 *
5374 * If when we send the snapshot and we are processing file bar (which
5375 * has a higher inode number than foo) we blindly send a clone operation
5376 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5377 * a file bar that matches the content of file foo - iow, doesn't match
5378 * the content from bar in the original filesystem.
5379 */
5380 key.objectid = clone_root->ino;
5381 key.type = BTRFS_EXTENT_DATA_KEY;
5382 key.offset = clone_root->offset;
5383 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5384 if (ret < 0)
5385 goto out;
5386 if (ret > 0 && path->slots[0] > 0) {
5387 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5388 if (key.objectid == clone_root->ino &&
5389 key.type == BTRFS_EXTENT_DATA_KEY)
5390 path->slots[0]--;
5391 }
5392
5393 while (true) {
5394 struct extent_buffer *leaf = path->nodes[0];
5395 int slot = path->slots[0];
5396 struct btrfs_file_extent_item *ei;
5397 u8 type;
5398 u64 ext_len;
5399 u64 clone_len;
5400 u64 clone_data_offset;
5401 bool crossed_src_i_size = false;
5402
5403 if (slot >= btrfs_header_nritems(leaf)) {
5404 ret = btrfs_next_leaf(clone_root->root, path);
5405 if (ret < 0)
5406 goto out;
5407 else if (ret > 0)
5408 break;
5409 continue;
5410 }
5411
5412 btrfs_item_key_to_cpu(leaf, &key, slot);
5413
5414 /*
5415 * We might have an implicit trailing hole (NO_HOLES feature
5416 * enabled). We deal with it after leaving this loop.
5417 */
5418 if (key.objectid != clone_root->ino ||
5419 key.type != BTRFS_EXTENT_DATA_KEY)
5420 break;
5421
5422 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5423 type = btrfs_file_extent_type(leaf, ei);
5424 if (type == BTRFS_FILE_EXTENT_INLINE) {
5425 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5426 ext_len = PAGE_ALIGN(ext_len);
5427 } else {
5428 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5429 }
5430
5431 if (key.offset + ext_len <= clone_root->offset)
5432 goto next;
5433
5434 if (key.offset > clone_root->offset) {
5435 /* Implicit hole, NO_HOLES feature enabled. */
5436 u64 hole_len = key.offset - clone_root->offset;
5437
5438 if (hole_len > len)
5439 hole_len = len;
5440 ret = send_extent_data(sctx, offset, hole_len);
5441 if (ret < 0)
5442 goto out;
5443
5444 len -= hole_len;
5445 if (len == 0)
5446 break;
5447 offset += hole_len;
5448 clone_root->offset += hole_len;
5449 data_offset += hole_len;
5450 }
5451
5452 if (key.offset >= clone_root->offset + len)
5453 break;
5454
5455 if (key.offset >= clone_src_i_size)
5456 break;
5457
5458 if (key.offset + ext_len > clone_src_i_size) {
5459 ext_len = clone_src_i_size - key.offset;
5460 crossed_src_i_size = true;
5461 }
5462
5463 clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5464 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5465 clone_root->offset = key.offset;
5466 if (clone_data_offset < data_offset &&
5467 clone_data_offset + ext_len > data_offset) {
5468 u64 extent_offset;
5469
5470 extent_offset = data_offset - clone_data_offset;
5471 ext_len -= extent_offset;
5472 clone_data_offset += extent_offset;
5473 clone_root->offset += extent_offset;
5474 }
5475 }
5476
5477 clone_len = min_t(u64, ext_len, len);
5478
5479 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5480 clone_data_offset == data_offset) {
5481 const u64 src_end = clone_root->offset + clone_len;
5482 const u64 sectorsize = SZ_64K;
5483
5484 /*
5485 * We can't clone the last block, when its size is not
5486 * sector size aligned, into the middle of a file. If we
5487 * do so, the receiver will get a failure (-EINVAL) when
5488 * trying to clone or will silently corrupt the data in
5489 * the destination file if it's on a kernel without the
5490 * fix introduced by commit ac765f83f1397646
5491 * ("Btrfs: fix data corruption due to cloning of eof
5492 * block").
5493 *
5494 * So issue a clone of the aligned down range plus a
5495 * regular write for the eof block, if we hit that case.
5496 *
5497 * Also, we use the maximum possible sector size, 64K,
5498 * because we don't know what's the sector size of the
5499 * filesystem that receives the stream, so we have to
5500 * assume the largest possible sector size.
5501 */
5502 if (src_end == clone_src_i_size &&
5503 !IS_ALIGNED(src_end, sectorsize) &&
5504 offset + clone_len < sctx->cur_inode_size) {
5505 u64 slen;
5506
5507 slen = ALIGN_DOWN(src_end - clone_root->offset,
5508 sectorsize);
5509 if (slen > 0) {
5510 ret = send_clone(sctx, offset, slen,
5511 clone_root);
5512 if (ret < 0)
5513 goto out;
5514 }
5515 ret = send_extent_data(sctx, offset + slen,
5516 clone_len - slen);
5517 } else {
5518 ret = send_clone(sctx, offset, clone_len,
5519 clone_root);
5520 }
5521 } else if (crossed_src_i_size && clone_len < len) {
5522 /*
5523 * If we are at i_size of the clone source inode and we
5524 * can not clone from it, terminate the loop. This is
5525 * to avoid sending two write operations, one with a
5526 * length matching clone_len and the final one after
5527 * this loop with a length of len - clone_len.
5528 *
5529 * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
5530 * was passed to the send ioctl), this helps avoid
5531 * sending an encoded write for an offset that is not
5532 * sector size aligned, in case the i_size of the source
5533 * inode is not sector size aligned. That will make the
5534 * receiver fallback to decompression of the data and
5535 * writing it using regular buffered IO, therefore while
5536 * not incorrect, it's not optimal due to decompression and
5537 * possible re-compression at the receiver.
5538 */
5539 break;
5540 } else {
5541 ret = send_extent_data(sctx, offset, clone_len);
5542 }
5543
5544 if (ret < 0)
5545 goto out;
5546
5547 len -= clone_len;
5548 if (len == 0)
5549 break;
5550 offset += clone_len;
5551 clone_root->offset += clone_len;
5552
5553 /*
5554 * If we are cloning from the file we are currently processing,
5555 * and using the send root as the clone root, we must stop once
5556 * the current clone offset reaches the current eof of the file
5557 * at the receiver, otherwise we would issue an invalid clone
5558 * operation (source range going beyond eof) and cause the
5559 * receiver to fail. So if we reach the current eof, bail out
5560 * and fallback to a regular write.
5561 */
5562 if (clone_root->root == sctx->send_root &&
5563 clone_root->ino == sctx->cur_ino &&
5564 clone_root->offset >= sctx->cur_inode_next_write_offset)
5565 break;
5566
5567 data_offset += clone_len;
5568 next:
5569 path->slots[0]++;
5570 }
5571
5572 if (len > 0)
5573 ret = send_extent_data(sctx, offset, len);
5574 else
5575 ret = 0;
5576 out:
5577 btrfs_free_path(path);
5578 return ret;
5579 }
5580
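/*
 * For the file extent item at @path/@key, either clone from @clone_root or
 * send the data with write commands. The range is clamped to the inode's
 * size, and cloning is only attempted when a clone source was found and the
 * end of the range is aligned to the filesystem's block size.
 */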
5581 static int send_write_or_clone(struct send_ctx *sctx,
5582 struct btrfs_path *path,
5583 struct btrfs_key *key,
5584 struct clone_root *clone_root)
5585 {
5586 int ret = 0;
5587 u64 offset = key->offset;
5588 u64 end;
5589 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5590
5591 end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
5592 if (offset >= end)
5593 return 0;
5594
5595 if (clone_root && IS_ALIGNED(end, bs)) {
5596 struct btrfs_file_extent_item *ei;
5597 u64 disk_byte;
5598 u64 data_offset;
5599
5600 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5601 struct btrfs_file_extent_item);
5602 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5603 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5604 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5605 offset, end - offset);
5606 } else {
5607 ret = send_extent_data(sctx, offset, end - offset);
5608 }
5609 sctx->cur_inode_next_write_offset = end;
5610 return ret;
5611 }
5612
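/*
 * Check if the extent described by @ekey in the send root covers the same
 * data (same disk extents, offsets and generations) in the parent root.
 * Returns 1 if the extent is unchanged and nothing needs to be sent for it,
 * 0 if it changed, and a negative errno on error.
 */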
5613 static int is_extent_unchanged(struct send_ctx *sctx,
5614 struct btrfs_path *left_path,
5615 struct btrfs_key *ekey)
5616 {
5617 int ret = 0;
5618 struct btrfs_key key;
5619 struct btrfs_path *path = NULL;
5620 struct extent_buffer *eb;
5621 int slot;
5622 struct btrfs_key found_key;
5623 struct btrfs_file_extent_item *ei;
5624 u64 left_disknr;
5625 u64 right_disknr;
5626 u64 left_offset;
5627 u64 right_offset;
5628 u64 left_offset_fixed;
5629 u64 left_len;
5630 u64 right_len;
5631 u64 left_gen;
5632 u64 right_gen;
5633 u8 left_type;
5634 u8 right_type;
5635
5636 path = alloc_path_for_send();
5637 if (!path)
5638 return -ENOMEM;
5639
5640 eb = left_path->nodes[0];
5641 slot = left_path->slots[0];
5642 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5643 left_type = btrfs_file_extent_type(eb, ei);
5644
5645 if (left_type != BTRFS_FILE_EXTENT_REG) {
5646 ret = 0;
5647 goto out;
5648 }
5649 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5650 left_len = btrfs_file_extent_num_bytes(eb, ei);
5651 left_offset = btrfs_file_extent_offset(eb, ei);
5652 left_gen = btrfs_file_extent_generation(eb, ei);
5653
5654 /*
5655 * Following comments will refer to these graphics. L is the left
5656 * extents which we are checking at the moment. 1-8 are the right
5657 * extents that we iterate.
5658 *
5659 * |-----L-----|
5660 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5661 *
5662 * |-----L-----|
5663 * |--1--|-2b-|...(same as above)
5664 *
5665 * Alternative situation. Happens on files where extents got split.
5666 * |-----L-----|
5667 * |-----------7-----------|-6-|
5668 *
5669 * Alternative situation. Happens on files which got larger.
5670 * |-----L-----|
5671 * |-8-|
5672 * Nothing follows after 8.
5673 */
5674
5675 key.objectid = ekey->objectid;
5676 key.type = BTRFS_EXTENT_DATA_KEY;
5677 key.offset = ekey->offset;
5678 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5679 if (ret < 0)
5680 goto out;
5681 if (ret) {
5682 ret = 0;
5683 goto out;
5684 }
5685
5686 /*
5687 * Handle special case where the right side has no extents at all.
5688 */
5689 eb = path->nodes[0];
5690 slot = path->slots[0];
5691 btrfs_item_key_to_cpu(eb, &found_key, slot);
5692 if (found_key.objectid != key.objectid ||
5693 found_key.type != key.type) {
5694 /* If we're a hole then just pretend nothing changed */
5695 ret = (left_disknr) ? 0 : 1;
5696 goto out;
5697 }
5698
5699 /*
5700 * We're now on 2a, 2b or 7.
5701 */
5702 key = found_key;
5703 while (key.offset < ekey->offset + left_len) {
5704 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5705 right_type = btrfs_file_extent_type(eb, ei);
5706 if (right_type != BTRFS_FILE_EXTENT_REG &&
5707 right_type != BTRFS_FILE_EXTENT_INLINE) {
5708 ret = 0;
5709 goto out;
5710 }
5711
5712 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5713 right_len = btrfs_file_extent_ram_bytes(eb, ei);
5714 right_len = PAGE_ALIGN(right_len);
5715 } else {
5716 right_len = btrfs_file_extent_num_bytes(eb, ei);
5717 }
5718
5719 /*
5720 * Are we at extent 8? If yes, we know the extent is changed.
5721 * This may only happen on the first iteration.
5722 */
5723 if (found_key.offset + right_len <= ekey->offset) {
5724 /* If we're a hole just pretend nothing changed */
5725 ret = (left_disknr) ? 0 : 1;
5726 goto out;
5727 }
5728
5729 /*
5730 * We just wanted to see if, when we have an inline extent, what
5731 * follows it is a regular extent (we wanted to check the above
5732 * condition for inline extents too). This should normally not
5733 * happen but it's possible for example when we have an inline
5734 * compressed extent representing data with a size matching
5735 * the page size (currently the same as sector size).
5736 */
5737 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5738 ret = 0;
5739 goto out;
5740 }
5741
5742 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5743 right_offset = btrfs_file_extent_offset(eb, ei);
5744 right_gen = btrfs_file_extent_generation(eb, ei);
5745
5746 left_offset_fixed = left_offset;
5747 if (key.offset < ekey->offset) {
5748 /* Fix the right offset for 2a and 7. */
5749 right_offset += ekey->offset - key.offset;
5750 } else {
5751 /* Fix the left offset for all behind 2a and 2b */
5752 left_offset_fixed += key.offset - ekey->offset;
5753 }
5754
5755 /*
5756 * Check if we have the same extent.
5757 */
5758 if (left_disknr != right_disknr ||
5759 left_offset_fixed != right_offset ||
5760 left_gen != right_gen) {
5761 ret = 0;
5762 goto out;
5763 }
5764
5765 /*
5766 * Go to the next extent.
5767 */
5768 ret = btrfs_next_item(sctx->parent_root, path);
5769 if (ret < 0)
5770 goto out;
5771 if (!ret) {
5772 eb = path->nodes[0];
5773 slot = path->slots[0];
5774 btrfs_item_key_to_cpu(eb, &found_key, slot);
5775 }
5776 if (ret || found_key.objectid != key.objectid ||
5777 found_key.type != key.type) {
5778 key.offset += right_len;
5779 break;
5780 }
5781 if (found_key.offset != key.offset + right_len) {
5782 ret = 0;
5783 goto out;
5784 }
5785 key = found_key;
5786 }
5787
5788 /*
5789 * We're now behind the left extent (treat as unchanged) or at the end
5790 * of the right side (treat as changed).
5791 */
5792 if (key.offset >= ekey->offset + left_len)
5793 ret = 1;
5794 else
5795 ret = 0;
5796
5797
5798 out:
5799 btrfs_free_path(path);
5800 return ret;
5801 }
5802
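/*
 * Find the file extent item of the current inode located at or before
 * @offset in the send root and record its end offset in
 * sctx->cur_inode_last_extent (left at 0 if no such item is found).
 */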
5803 static int get_last_extent(struct send_ctx *sctx, u64 offset)
5804 {
5805 struct btrfs_path *path;
5806 struct btrfs_root *root = sctx->send_root;
5807 struct btrfs_key key;
5808 int ret;
5809
5810 path = alloc_path_for_send();
5811 if (!path)
5812 return -ENOMEM;
5813
5814 sctx->cur_inode_last_extent = 0;
5815
5816 key.objectid = sctx->cur_ino;
5817 key.type = BTRFS_EXTENT_DATA_KEY;
5818 key.offset = offset;
5819 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5820 if (ret < 0)
5821 goto out;
5822 ret = 0;
5823 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5824 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5825 goto out;
5826
5827 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5828 out:
5829 btrfs_free_path(path);
5830 return ret;
5831 }
5832
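/*
 * Check whether the file range [@start, @end) of the current inode is a hole
 * in the parent snapshot. Returns 1 if the whole range is a hole (no extent
 * with a real disk bytenr overlaps it), 0 if some data exists in the range,
 * and a negative errno on error.
 */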
5833 static int range_is_hole_in_parent(struct send_ctx *sctx,
5834 const u64 start,
5835 const u64 end)
5836 {
5837 struct btrfs_path *path;
5838 struct btrfs_key key;
5839 struct btrfs_root *root = sctx->parent_root;
5840 u64 search_start = start;
5841 int ret;
5842
5843 path = alloc_path_for_send();
5844 if (!path)
5845 return -ENOMEM;
5846
5847 key.objectid = sctx->cur_ino;
5848 key.type = BTRFS_EXTENT_DATA_KEY;
5849 key.offset = search_start;
5850 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5851 if (ret < 0)
5852 goto out;
5853 if (ret > 0 && path->slots[0] > 0)
5854 path->slots[0]--;
5855
5856 while (search_start < end) {
5857 struct extent_buffer *leaf = path->nodes[0];
5858 int slot = path->slots[0];
5859 struct btrfs_file_extent_item *fi;
5860 u64 extent_end;
5861
5862 if (slot >= btrfs_header_nritems(leaf)) {
5863 ret = btrfs_next_leaf(root, path);
5864 if (ret < 0)
5865 goto out;
5866 else if (ret > 0)
5867 break;
5868 continue;
5869 }
5870
5871 btrfs_item_key_to_cpu(leaf, &key, slot);
5872 if (key.objectid < sctx->cur_ino ||
5873 key.type < BTRFS_EXTENT_DATA_KEY)
5874 goto next;
5875 if (key.objectid > sctx->cur_ino ||
5876 key.type > BTRFS_EXTENT_DATA_KEY ||
5877 key.offset >= end)
5878 break;
5879
5880 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5881 extent_end = btrfs_file_extent_end(path);
5882 if (extent_end <= start)
5883 goto next;
5884 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5885 search_start = extent_end;
5886 goto next;
5887 }
5888 ret = 0;
5889 goto out;
5890 next:
5891 path->slots[0]++;
5892 }
5893 ret = 1;
5894 out:
5895 btrfs_free_path(path);
5896 return ret;
5897 }
5898
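/*
 * If there is a gap between the end of the last processed extent and the
 * start of the current one, and that gap is not already a hole in the
 * parent snapshot, fill it by sending writes of zeroes (see send_hole()).
 */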
5899 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5900 struct btrfs_key *key)
5901 {
5902 int ret = 0;
5903
5904 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5905 return 0;
5906
5907 if (sctx->cur_inode_last_extent == (u64)-1) {
5908 ret = get_last_extent(sctx, key->offset - 1);
5909 if (ret)
5910 return ret;
5911 }
5912
5913 if (path->slots[0] == 0 &&
5914 sctx->cur_inode_last_extent < key->offset) {
5915 /*
5916 * We might have skipped entire leafs that contained only
5917 * file extent items for our current inode. These leafs have
5918 * a generation number smaller (older) than the one in the
5919 * current leaf and the leaf our last extent came from, and
5920 * are located between these 2 leafs.
5921 */
5922 ret = get_last_extent(sctx, key->offset - 1);
5923 if (ret)
5924 return ret;
5925 }
5926
5927 if (sctx->cur_inode_last_extent < key->offset) {
5928 ret = range_is_hole_in_parent(sctx,
5929 sctx->cur_inode_last_extent,
5930 key->offset);
5931 if (ret < 0)
5932 return ret;
5933 else if (ret == 0)
5934 ret = send_hole(sctx, key->offset);
5935 else
5936 ret = 0;
5937 }
5938 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5939 return ret;
5940 }
5941
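/*
 * Decide how to transfer a single file extent item: extents unchanged
 * relative to the parent snapshot, holes and preallocated extents (the
 * send stream has no prealloc command yet) are skipped, everything else
 * is sent either as a clone operation or as write commands, followed by
 * a check for a hole between the previous extent and this one (see
 * maybe_send_hole()).
 */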
5942 static int process_extent(struct send_ctx *sctx,
5943 struct btrfs_path *path,
5944 struct btrfs_key *key)
5945 {
5946 struct clone_root *found_clone = NULL;
5947 int ret = 0;
5948
5949 if (S_ISLNK(sctx->cur_inode_mode))
5950 return 0;
5951
5952 if (sctx->parent_root && !sctx->cur_inode_new) {
5953 ret = is_extent_unchanged(sctx, path, key);
5954 if (ret < 0)
5955 goto out;
5956 if (ret) {
5957 ret = 0;
5958 goto out_hole;
5959 }
5960 } else {
5961 struct btrfs_file_extent_item *ei;
5962 u8 type;
5963
5964 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5965 struct btrfs_file_extent_item);
5966 type = btrfs_file_extent_type(path->nodes[0], ei);
5967 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5968 type == BTRFS_FILE_EXTENT_REG) {
5969 /*
5970 * The send spec does not have a prealloc command yet,
5971 * so just leave a hole for prealloc'ed extents until
5972 * we have enough commands queued up to justify rev'ing
5973 * the send spec.
5974 */
5975 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5976 ret = 0;
5977 goto out;
5978 }
5979
5980 /* Have a hole, just skip it. */
5981 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5982 ret = 0;
5983 goto out;
5984 }
5985 }
5986 }
5987
5988 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5989 sctx->cur_inode_size, &found_clone);
5990 if (ret != -ENOENT && ret < 0)
5991 goto out;
5992
5993 ret = send_write_or_clone(sctx, path, key, found_clone);
5994 if (ret)
5995 goto out;
5996 out_hole:
5997 ret = maybe_send_hole(sctx, path, key);
5998 out:
5999 return ret;
6000 }
6001
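/*
 * Process every file extent item of the current inode in the send root as
 * if it was new. Used by changed_inode() when an inode number was reused
 * (new generation), since in that case all extents must be sent again.
 */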
6002 static int process_all_extents(struct send_ctx *sctx)
6003 {
6004 int ret;
6005 struct btrfs_root *root;
6006 struct btrfs_path *path;
6007 struct btrfs_key key;
6008 struct btrfs_key found_key;
6009 struct extent_buffer *eb;
6010 int slot;
6011
6012 root = sctx->send_root;
6013 path = alloc_path_for_send();
6014 if (!path)
6015 return -ENOMEM;
6016
6017 key.objectid = sctx->cmp_key->objectid;
6018 key.type = BTRFS_EXTENT_DATA_KEY;
6019 key.offset = 0;
6020 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6021 if (ret < 0)
6022 goto out;
6023
6024 while (1) {
6025 eb = path->nodes[0];
6026 slot = path->slots[0];
6027
6028 if (slot >= btrfs_header_nritems(eb)) {
6029 ret = btrfs_next_leaf(root, path);
6030 if (ret < 0) {
6031 goto out;
6032 } else if (ret > 0) {
6033 ret = 0;
6034 break;
6035 }
6036 continue;
6037 }
6038
6039 btrfs_item_key_to_cpu(eb, &found_key, slot);
6040
6041 if (found_key.objectid != key.objectid ||
6042 found_key.type != key.type) {
6043 ret = 0;
6044 goto out;
6045 }
6046
6047 ret = process_extent(sctx, path, &found_key);
6048 if (ret < 0)
6049 goto out;
6050
6051 path->slots[0]++;
6052 }
6053
6054 out:
6055 btrfs_free_path(path);
6056 return ret;
6057 }
6058
6059 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
6060 int *pending_move,
6061 int *refs_processed)
6062 {
6063 int ret = 0;
6064
6065 if (sctx->cur_ino == 0)
6066 goto out;
6067 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
6068 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
6069 goto out;
6070 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
6071 goto out;
6072
6073 ret = process_recorded_refs(sctx, pending_move);
6074 if (ret < 0)
6075 goto out;
6076
6077 *refs_processed = 1;
6078 out:
6079 return ret;
6080 }
6081
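/*
 * Called when all items of the current inode have been processed (or when
 * moving on to the next inode). Processes any recorded references, and
 * emits the truncate, chown, chmod, capabilities and utimes commands that
 * must come after the inode's data has been sent.
 */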
6082 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
6083 {
6084 int ret = 0;
6085 u64 left_mode;
6086 u64 left_uid;
6087 u64 left_gid;
6088 u64 right_mode;
6089 u64 right_uid;
6090 u64 right_gid;
6091 int need_chmod = 0;
6092 int need_chown = 0;
6093 int need_truncate = 1;
6094 int pending_move = 0;
6095 int refs_processed = 0;
6096
6097 if (sctx->ignore_cur_inode)
6098 return 0;
6099
6100 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
6101 &refs_processed);
6102 if (ret < 0)
6103 goto out;
6104
6105 /*
6106 * We have processed the refs and thus need to advance send_progress.
6107 * Now, calls to get_cur_xxx will take the updated refs of the current
6108 * inode into account.
6109 *
6110 * On the other hand, if our current inode is a directory and couldn't
6111 * be moved/renamed because its parent was renamed/moved too and it has
6112 * a higher inode number, we can only move/rename our current inode
6113 * after we moved/renamed its parent. Therefore in this case operate on
6114 * the old path (pre move/rename) of our current inode, and the
6115 * move/rename will be performed later.
6116 */
6117 if (refs_processed && !pending_move)
6118 sctx->send_progress = sctx->cur_ino + 1;
6119
6120 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
6121 goto out;
6122 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
6123 goto out;
6124
6125 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
6126 &left_mode, &left_uid, &left_gid, NULL);
6127 if (ret < 0)
6128 goto out;
6129
6130 if (!sctx->parent_root || sctx->cur_inode_new) {
6131 need_chown = 1;
6132 if (!S_ISLNK(sctx->cur_inode_mode))
6133 need_chmod = 1;
6134 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
6135 need_truncate = 0;
6136 } else {
6137 u64 old_size;
6138
6139 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
6140 &old_size, NULL, &right_mode, &right_uid,
6141 &right_gid, NULL);
6142 if (ret < 0)
6143 goto out;
6144
6145 if (left_uid != right_uid || left_gid != right_gid)
6146 need_chown = 1;
6147 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
6148 need_chmod = 1;
6149 if ((old_size == sctx->cur_inode_size) ||
6150 (sctx->cur_inode_size > old_size &&
6151 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
6152 need_truncate = 0;
6153 }
6154
6155 if (S_ISREG(sctx->cur_inode_mode)) {
6156 if (need_send_hole(sctx)) {
6157 if (sctx->cur_inode_last_extent == (u64)-1 ||
6158 sctx->cur_inode_last_extent <
6159 sctx->cur_inode_size) {
6160 ret = get_last_extent(sctx, (u64)-1);
6161 if (ret)
6162 goto out;
6163 }
6164 if (sctx->cur_inode_last_extent <
6165 sctx->cur_inode_size) {
6166 ret = send_hole(sctx, sctx->cur_inode_size);
6167 if (ret)
6168 goto out;
6169 }
6170 }
6171 if (need_truncate) {
6172 ret = send_truncate(sctx, sctx->cur_ino,
6173 sctx->cur_inode_gen,
6174 sctx->cur_inode_size);
6175 if (ret < 0)
6176 goto out;
6177 }
6178 }
6179
6180 if (need_chown) {
6181 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6182 left_uid, left_gid);
6183 if (ret < 0)
6184 goto out;
6185 }
6186 if (need_chmod) {
6187 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6188 left_mode);
6189 if (ret < 0)
6190 goto out;
6191 }
6192
6193 ret = send_capabilities(sctx);
6194 if (ret < 0)
6195 goto out;
6196
6197 /*
6198 * If other directory inodes depended on our current directory
6199 * inode's move/rename, now do their move/rename operations.
6200 */
6201 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6202 ret = apply_children_dir_moves(sctx);
6203 if (ret)
6204 goto out;
6205 /*
6206 * We need to send the utimes every time, regardless of whether they
6207 * actually changed between the two trees, as we have made changes to
6208 * the inode before. If our inode is a directory and it's
6209 * waiting to be moved/renamed, we will send its utimes when
6210 * it's moved/renamed, therefore we don't need to do it here.
6211 */
6212 sctx->send_progress = sctx->cur_ino + 1;
6213 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6214 if (ret < 0)
6215 goto out;
6216 }
6217
6218 out:
6219 return ret;
6220 }
6221
6222 struct parent_paths_ctx {
6223 struct list_head *refs;
6224 struct send_ctx *sctx;
6225 };
6226
6227 static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6228 void *ctx)
6229 {
6230 struct parent_paths_ctx *ppctx = ctx;
6231
6232 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6233 ppctx->refs);
6234 }
6235
6236 /*
6237 * Issue unlink operations for all paths of the current inode found in the
6238 * parent snapshot.
6239 */
6240 static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6241 {
6242 LIST_HEAD(deleted_refs);
6243 struct btrfs_path *path;
6244 struct btrfs_key key;
6245 struct parent_paths_ctx ctx;
6246 int ret;
6247
6248 path = alloc_path_for_send();
6249 if (!path)
6250 return -ENOMEM;
6251
6252 key.objectid = sctx->cur_ino;
6253 key.type = BTRFS_INODE_REF_KEY;
6254 key.offset = 0;
6255 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6256 if (ret < 0)
6257 goto out;
6258
6259 ctx.refs = &deleted_refs;
6260 ctx.sctx = sctx;
6261
6262 while (true) {
6263 struct extent_buffer *eb = path->nodes[0];
6264 int slot = path->slots[0];
6265
6266 if (slot >= btrfs_header_nritems(eb)) {
6267 ret = btrfs_next_leaf(sctx->parent_root, path);
6268 if (ret < 0)
6269 goto out;
6270 else if (ret > 0)
6271 break;
6272 continue;
6273 }
6274
6275 btrfs_item_key_to_cpu(eb, &key, slot);
6276 if (key.objectid != sctx->cur_ino)
6277 break;
6278 if (key.type != BTRFS_INODE_REF_KEY &&
6279 key.type != BTRFS_INODE_EXTREF_KEY)
6280 break;
6281
6282 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6283 record_parent_ref, &ctx);
6284 if (ret < 0)
6285 goto out;
6286
6287 path->slots[0]++;
6288 }
6289
6290 while (!list_empty(&deleted_refs)) {
6291 struct recorded_ref *ref;
6292
6293 ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6294 ret = send_unlink(sctx, ref->full_path);
6295 if (ret < 0)
6296 goto out;
6297 fs_path_free(ref->full_path);
6298 list_del(&ref->list);
6299 kfree(ref);
6300 }
6301 ret = 0;
6302 out:
6303 btrfs_free_path(path);
6304 if (ret)
6305 __free_recorded_refs(&deleted_refs);
6306 return ret;
6307 }
6308
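/*
 * Handle a new, deleted or changed inode item: set up the cur_inode_*
 * state used while processing the inode's other items. If the inode
 * number was reused (the generation changed), process the old inode as
 * deleted and the new one as new right away, including all of its refs,
 * extents and xattrs.
 */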
6309 static int changed_inode(struct send_ctx *sctx,
6310 enum btrfs_compare_tree_result result)
6311 {
6312 int ret = 0;
6313 struct btrfs_key *key = sctx->cmp_key;
6314 struct btrfs_inode_item *left_ii = NULL;
6315 struct btrfs_inode_item *right_ii = NULL;
6316 u64 left_gen = 0;
6317 u64 right_gen = 0;
6318
6319 sctx->cur_ino = key->objectid;
6320 sctx->cur_inode_new_gen = 0;
6321 sctx->cur_inode_last_extent = (u64)-1;
6322 sctx->cur_inode_next_write_offset = 0;
6323 sctx->ignore_cur_inode = false;
6324
6325 /*
6326 * Set send_progress to current inode. This will tell all get_cur_xxx
6327 * functions that the current inode's refs are not updated yet. Later,
6328 * when process_recorded_refs is finished, it is set to cur_ino + 1.
6329 */
6330 sctx->send_progress = sctx->cur_ino;
6331
6332 if (result == BTRFS_COMPARE_TREE_NEW ||
6333 result == BTRFS_COMPARE_TREE_CHANGED) {
6334 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6335 sctx->left_path->slots[0],
6336 struct btrfs_inode_item);
6337 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6338 left_ii);
6339 } else {
6340 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6341 sctx->right_path->slots[0],
6342 struct btrfs_inode_item);
6343 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6344 right_ii);
6345 }
6346 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6347 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6348 sctx->right_path->slots[0],
6349 struct btrfs_inode_item);
6350
6351 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6352 right_ii);
6353
6354 /*
6355 * The cur_ino = root dir case is special here. We can't treat
6356 * the inode as deleted+reused because it would generate a
6357 * stream that tries to delete/mkdir the root dir.
6358 */
6359 if (left_gen != right_gen &&
6360 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6361 sctx->cur_inode_new_gen = 1;
6362 }
6363
6364 /*
6365 * Normally we do not find inodes with a link count of zero (orphans)
6366 * because the most common case is to create a snapshot and use it
6367 * for a send operation. However, other less common use cases involve
6368 * using a subvolume and sending it after turning it to RO mode just
6369 * after deleting all hard links of a file while holding an open
6370 * file descriptor against it, or turning a RO snapshot into RW mode,
6371 * keeping an open file descriptor against a file, deleting it and then
6372 * turning the snapshot back to RO mode before using it for a send
6373 * operation. So if we find such cases, ignore the inode and all its
6374 * items completely if it's a new inode, or, if it's a changed inode,
6375 * make sure all its previous paths (from the parent snapshot) are
6376 * unlinked and all its other items are ignored.
6377 */
6378 if (result == BTRFS_COMPARE_TREE_NEW ||
6379 result == BTRFS_COMPARE_TREE_CHANGED) {
6380 u32 nlinks;
6381
6382 nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6383 if (nlinks == 0) {
6384 sctx->ignore_cur_inode = true;
6385 if (result == BTRFS_COMPARE_TREE_CHANGED)
6386 ret = btrfs_unlink_all_paths(sctx);
6387 goto out;
6388 }
6389 }
6390
6391 if (result == BTRFS_COMPARE_TREE_NEW) {
6392 sctx->cur_inode_gen = left_gen;
6393 sctx->cur_inode_new = 1;
6394 sctx->cur_inode_deleted = 0;
6395 sctx->cur_inode_size = btrfs_inode_size(
6396 sctx->left_path->nodes[0], left_ii);
6397 sctx->cur_inode_mode = btrfs_inode_mode(
6398 sctx->left_path->nodes[0], left_ii);
6399 sctx->cur_inode_rdev = btrfs_inode_rdev(
6400 sctx->left_path->nodes[0], left_ii);
6401 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6402 ret = send_create_inode_if_needed(sctx);
6403 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6404 sctx->cur_inode_gen = right_gen;
6405 sctx->cur_inode_new = 0;
6406 sctx->cur_inode_deleted = 1;
6407 sctx->cur_inode_size = btrfs_inode_size(
6408 sctx->right_path->nodes[0], right_ii);
6409 sctx->cur_inode_mode = btrfs_inode_mode(
6410 sctx->right_path->nodes[0], right_ii);
6411 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6412 /*
6413 * We need to do some special handling in case the inode was
6414 * reported as changed with a changed generation number. This
6415 * means that the original inode was deleted and a new inode
6416 * reused the same inum. So we have to treat the old inode as
6417 * deleted and the new one as new.
6418 */
6419 if (sctx->cur_inode_new_gen) {
6420 /*
6421 * First, process the inode as if it was deleted.
6422 */
6423 sctx->cur_inode_gen = right_gen;
6424 sctx->cur_inode_new = 0;
6425 sctx->cur_inode_deleted = 1;
6426 sctx->cur_inode_size = btrfs_inode_size(
6427 sctx->right_path->nodes[0], right_ii);
6428 sctx->cur_inode_mode = btrfs_inode_mode(
6429 sctx->right_path->nodes[0], right_ii);
6430 ret = process_all_refs(sctx,
6431 BTRFS_COMPARE_TREE_DELETED);
6432 if (ret < 0)
6433 goto out;
6434
6435 /*
6436 * Now process the inode as if it was new.
6437 */
6438 sctx->cur_inode_gen = left_gen;
6439 sctx->cur_inode_new = 1;
6440 sctx->cur_inode_deleted = 0;
6441 sctx->cur_inode_size = btrfs_inode_size(
6442 sctx->left_path->nodes[0], left_ii);
6443 sctx->cur_inode_mode = btrfs_inode_mode(
6444 sctx->left_path->nodes[0], left_ii);
6445 sctx->cur_inode_rdev = btrfs_inode_rdev(
6446 sctx->left_path->nodes[0], left_ii);
6447 ret = send_create_inode_if_needed(sctx);
6448 if (ret < 0)
6449 goto out;
6450
6451 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6452 if (ret < 0)
6453 goto out;
6454 /*
6455 * Advance send_progress now as we did not get into
6456 * process_recorded_refs_if_needed in the new_gen case.
6457 */
6458 sctx->send_progress = sctx->cur_ino + 1;
6459
6460 /*
6461 * Now process all extents and xattrs of the inode as if
6462 * they were all new.
6463 */
6464 ret = process_all_extents(sctx);
6465 if (ret < 0)
6466 goto out;
6467 ret = process_all_new_xattrs(sctx);
6468 if (ret < 0)
6469 goto out;
6470 } else {
6471 sctx->cur_inode_gen = left_gen;
6472 sctx->cur_inode_new = 0;
6473 sctx->cur_inode_new_gen = 0;
6474 sctx->cur_inode_deleted = 0;
6475 sctx->cur_inode_size = btrfs_inode_size(
6476 sctx->left_path->nodes[0], left_ii);
6477 sctx->cur_inode_mode = btrfs_inode_mode(
6478 sctx->left_path->nodes[0], left_ii);
6479 }
6480 }
6481
6482 out:
6483 return ret;
6484 }
6485
6486 /*
6487 * We have to process new refs before deleted refs, but compare_trees gives us
6488 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6489 * first and later process them in process_recorded_refs.
6490 * For the cur_inode_new_gen case, we skip recording completely because
6491 * changed_inode has already initiated processing of refs. The reason for this is
6492 * that in this case, compare_tree actually compares the refs of 2 different
6493 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
6494 * refs of the right tree as deleted and all refs of the left tree as new.
6495 */
6496 static int changed_ref(struct send_ctx *sctx,
6497 enum btrfs_compare_tree_result result)
6498 {
6499 int ret = 0;
6500
6501 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6502 inconsistent_snapshot_error(sctx, result, "reference");
6503 return -EIO;
6504 }
6505
6506 if (!sctx->cur_inode_new_gen &&
6507 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6508 if (result == BTRFS_COMPARE_TREE_NEW)
6509 ret = record_new_ref(sctx);
6510 else if (result == BTRFS_COMPARE_TREE_DELETED)
6511 ret = record_deleted_ref(sctx);
6512 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6513 ret = record_changed_ref(sctx);
6514 }
6515
6516 return ret;
6517 }
6518
6519 /*
6520 * Process new/deleted/changed xattrs. We skip processing in the
6521 * cur_inode_new_gen case because changed_inode has already initiated
6522 * processing of xattrs. The reason is the same as in changed_ref.
6523 */
6524 static int changed_xattr(struct send_ctx *sctx,
6525 enum btrfs_compare_tree_result result)
6526 {
6527 int ret = 0;
6528
6529 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6530 inconsistent_snapshot_error(sctx, result, "xattr");
6531 return -EIO;
6532 }
6533
6534 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6535 if (result == BTRFS_COMPARE_TREE_NEW)
6536 ret = process_new_xattr(sctx);
6537 else if (result == BTRFS_COMPARE_TREE_DELETED)
6538 ret = process_deleted_xattr(sctx);
6539 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6540 ret = process_changed_xattr(sctx);
6541 }
6542
6543 return ret;
6544 }
6545
6546 /*
6547 * Process new/deleted/changed extents. We skip processing in the
6548 * cur_inode_new_gen case because changed_inode has already initiated
6549 * processing of extents. The reason is the same as in changed_ref.
6550 */
6551 static int changed_extent(struct send_ctx *sctx,
6552 enum btrfs_compare_tree_result result)
6553 {
6554 int ret = 0;
6555
6556 /*
6557 * We have found an extent item that changed without the inode item
6558 * having changed. This can happen either after relocation (where the
6559 * disk_bytenr of an extent item is replaced at
6560 * relocation.c:replace_file_extents()) or after deduplication into a
6561 * file in both the parent and send snapshots (where an extent item can
6562 * get modified or replaced with a new one). Note that deduplication
6563 * updates the inode item, but it only changes the iversion (sequence
6564 * field in the inode item) of the inode, so if a file is deduplicated
6565 * the same number of times in both the parent and send snapshots, its
6566 * iversion becomes the same in both snapshots, and thus the inode item is
6567 * the same on both snapshots.
6568 */
6569 if (sctx->cur_ino != sctx->cmp_key->objectid)
6570 return 0;
6571
6572 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6573 if (result != BTRFS_COMPARE_TREE_DELETED)
6574 ret = process_extent(sctx, sctx->left_path,
6575 sctx->cmp_key);
6576 }
6577
6578 return ret;
6579 }
6580
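/*
 * Return 1 if the directory inode 'dir' has a different generation in the
 * send root than in the parent root, 0 if it is the same, or a negative
 * errno on failure.
 */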
6581 static int dir_changed(struct send_ctx *sctx, u64 dir)
6582 {
6583 u64 orig_gen, new_gen;
6584 int ret;
6585
6586 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6587 NULL, NULL);
6588 if (ret)
6589 return ret;
6590
6591 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6592 NULL, NULL, NULL);
6593 if (ret)
6594 return ret;
6595
6596 return (orig_gen != new_gen) ? 1 : 0;
6597 }
6598
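/*
 * For an inode ref/extref item that compares equal in both trees, check
 * whether any of the parent directories it points to changed generation
 * (was deleted and recreated), in which case the ref still needs to be
 * processed as changed.
 */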
6599 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6600 struct btrfs_key *key)
6601 {
6602 struct btrfs_inode_extref *extref;
6603 struct extent_buffer *leaf;
6604 u64 dirid = 0, last_dirid = 0;
6605 unsigned long ptr;
6606 u32 item_size;
6607 u32 cur_offset = 0;
6608 int ref_name_len;
6609 int ret = 0;
6610
6611 /* Easy case, just check this one dirid */
6612 if (key->type == BTRFS_INODE_REF_KEY) {
6613 dirid = key->offset;
6614
6615 ret = dir_changed(sctx, dirid);
6616 goto out;
6617 }
6618
6619 leaf = path->nodes[0];
6620 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6621 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6622 while (cur_offset < item_size) {
6623 extref = (struct btrfs_inode_extref *)(ptr +
6624 cur_offset);
6625 dirid = btrfs_inode_extref_parent(leaf, extref);
6626 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6627 cur_offset += ref_name_len + sizeof(*extref);
6628 if (dirid == last_dirid)
6629 continue;
6630 ret = dir_changed(sctx, dirid);
6631 if (ret)
6632 break;
6633 last_dirid = dirid;
6634 }
6635 out:
6636 return ret;
6637 }
6638
6639 /*
6640 * Updates compare related fields in sctx and simply forwards to the actual
6641 * changed_xxx functions.
6642 */
6643 static int changed_cb(struct btrfs_path *left_path,
6644 struct btrfs_path *right_path,
6645 struct btrfs_key *key,
6646 enum btrfs_compare_tree_result result,
6647 struct send_ctx *sctx)
6648 {
6649 int ret = 0;
6650
6651 /*
6652 * We cannot hold the commit root semaphore here. This is because, in
6653 * the case of sending and receiving to the same filesystem using a
6654 * pipe, doing so could result in a deadlock:
6655 *
6656 * 1) The task running send blocks on the pipe because it's full;
6657 *
6658 * 2) The task running receive, which is the only consumer of the pipe,
6659 * is waiting for a transaction commit (for example due to a space
6660 * reservation when doing a write or triggering a transaction commit
6661 * when creating a subvolume);
6662 *
6663 * 3) The transaction is waiting to write lock the commit root semaphore,
6664 * but can not acquire it since it's being held at 1).
6665 *
6666 * Down this call chain we write to the pipe through kernel_write().
6667 * The same type of problem can also happen when sending to a file that
6668 * is stored in the same filesystem - when reserving space for a write
6669 * into the file, we can trigger a transaction commit.
6670 *
6671 * Our caller has supplied us with clones of leaves from the send and
6672 * parent roots, so we're safe here from a concurrent relocation and
6673 * further reallocation of metadata extents while we are here. Below we
6674 * also assert that the leaves are clones.
6675 */
6676 lockdep_assert_not_held(&sctx->send_root->fs_info->commit_root_sem);
6677
6678 /*
6679 * We always have a send root, so left_path is never NULL. We will not
6680 * have a leaf when we have reached the end of the send root but have
6681 * not yet reached the end of the parent root.
6682 */
6683 if (left_path->nodes[0])
6684 ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
6685 &left_path->nodes[0]->bflags));
6686 /*
6687 * When doing a full send we don't have a parent root, so right_path is
6688 * NULL. When doing an incremental send, we may have reached the end of
6689 * the parent root already, so we don't have a leaf at right_path.
6690 */
6691 if (right_path && right_path->nodes[0])
6692 ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
6693 &right_path->nodes[0]->bflags));
6694
6695 if (result == BTRFS_COMPARE_TREE_SAME) {
6696 if (key->type == BTRFS_INODE_REF_KEY ||
6697 key->type == BTRFS_INODE_EXTREF_KEY) {
6698 ret = compare_refs(sctx, left_path, key);
6699 if (!ret)
6700 return 0;
6701 if (ret < 0)
6702 return ret;
6703 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6704 return maybe_send_hole(sctx, left_path, key);
6705 } else {
6706 return 0;
6707 }
6708 result = BTRFS_COMPARE_TREE_CHANGED;
6709 ret = 0;
6710 }
6711
6712 sctx->left_path = left_path;
6713 sctx->right_path = right_path;
6714 sctx->cmp_key = key;
6715
6716 ret = finish_inode_if_needed(sctx, 0);
6717 if (ret < 0)
6718 goto out;
6719
6720 /* Ignore non-FS objects */
6721 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6722 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6723 goto out;
6724
6725 if (key->type == BTRFS_INODE_ITEM_KEY) {
6726 ret = changed_inode(sctx, result);
6727 } else if (!sctx->ignore_cur_inode) {
6728 if (key->type == BTRFS_INODE_REF_KEY ||
6729 key->type == BTRFS_INODE_EXTREF_KEY)
6730 ret = changed_ref(sctx, result);
6731 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6732 ret = changed_xattr(sctx, result);
6733 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6734 ret = changed_extent(sctx, result);
6735 }
6736
6737 out:
6738 return ret;
6739 }
6740
6741 static int search_key_again(const struct send_ctx *sctx,
6742 struct btrfs_root *root,
6743 struct btrfs_path *path,
6744 const struct btrfs_key *key)
6745 {
6746 int ret;
6747
6748 if (!path->need_commit_sem)
6749 lockdep_assert_held_read(&root->fs_info->commit_root_sem);
6750
6751 /*
6752 * Roots used for send operations are readonly and no one can add,
6753 * update or remove keys from them, so we should be able to find our
6754 * key again. The only exception is deduplication, which can operate on
6755 * readonly roots and add, update or remove keys to/from them - but at
6756 * the moment we don't allow it to run in parallel with send.
6757 */
6758 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6759 ASSERT(ret <= 0);
6760 if (ret > 0) {
6761 btrfs_print_tree(path->nodes[path->lowest_level], false);
6762 btrfs_err(root->fs_info,
6763 "send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d",
6764 key->objectid, key->type, key->offset,
6765 (root == sctx->parent_root ? "parent" : "send"),
6766 root->root_key.objectid, path->lowest_level,
6767 path->slots[path->lowest_level]);
6768 return -EUCLEAN;
6769 }
6770
6771 return ret;
6772 }
6773
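/*
 * Full send (no parent snapshot): iterate over every item of the send
 * root and feed it to changed_cb() as a new item. If a transaction that
 * relocated a block group committed meanwhile, release the path and
 * search again so we do not use stale leaves.
 */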
6774 static int full_send_tree(struct send_ctx *sctx)
6775 {
6776 int ret;
6777 struct btrfs_root *send_root = sctx->send_root;
6778 struct btrfs_key key;
6779 struct btrfs_fs_info *fs_info = send_root->fs_info;
6780 struct btrfs_path *path;
6781
6782 path = alloc_path_for_send();
6783 if (!path)
6784 return -ENOMEM;
6785 path->reada = READA_FORWARD_ALWAYS;
6786
6787 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6788 key.type = BTRFS_INODE_ITEM_KEY;
6789 key.offset = 0;
6790
6791 down_read(&fs_info->commit_root_sem);
6792 sctx->last_reloc_trans = fs_info->last_reloc_trans;
6793 up_read(&fs_info->commit_root_sem);
6794
6795 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6796 if (ret < 0)
6797 goto out;
6798 if (ret)
6799 goto out_finish;
6800
6801 while (1) {
6802 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
6803
6804 ret = changed_cb(path, NULL, &key,
6805 BTRFS_COMPARE_TREE_NEW, sctx);
6806 if (ret < 0)
6807 goto out;
6808
6809 down_read(&fs_info->commit_root_sem);
6810 if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
6811 sctx->last_reloc_trans = fs_info->last_reloc_trans;
6812 up_read(&fs_info->commit_root_sem);
6813 /*
6814 * A transaction used for relocating a block group was
6815 * committed or is about to finish its commit. Release
6816 * our path (leaf) and restart the search, so that we
6817 * avoid operating on any file extent items that are
6818 * stale, with a disk_bytenr that reflects a pre
6819 * relocation value. This way we avoid, as much as
6820 * possible, falling back to regular writes when checking
6821 * if we can clone file ranges.
6822 */
6823 btrfs_release_path(path);
6824 ret = search_key_again(sctx, send_root, path, &key);
6825 if (ret < 0)
6826 goto out;
6827 } else {
6828 up_read(&fs_info->commit_root_sem);
6829 }
6830
6831 ret = btrfs_next_item(send_root, path);
6832 if (ret < 0)
6833 goto out;
6834 if (ret) {
6835 ret = 0;
6836 break;
6837 }
6838 }
6839
6840 out_finish:
6841 ret = finish_inode_if_needed(sctx, 1);
6842
6843 out:
6844 btrfs_free_path(path);
6845 return ret;
6846 }
6847
6848 static int replace_node_with_clone(struct btrfs_path *path, int level)
6849 {
6850 struct extent_buffer *clone;
6851
6852 clone = btrfs_clone_extent_buffer(path->nodes[level]);
6853 if (!clone)
6854 return -ENOMEM;
6855
6856 free_extent_buffer(path->nodes[level]);
6857 path->nodes[level] = clone;
6858
6859 return 0;
6860 }
6861
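/*
 * Descend one level in the tree from the current slot. Readahead is
 * triggered for the following siblings that are newer than the parent
 * snapshot, and when a leaf is reached it is replaced with a private
 * clone so it can later be used outside the commit root semaphore.
 */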
6862 static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
6863 {
6864 struct extent_buffer *eb;
6865 struct extent_buffer *parent = path->nodes[*level];
6866 int slot = path->slots[*level];
6867 const int nritems = btrfs_header_nritems(parent);
6868 u64 reada_max;
6869 u64 reada_done = 0;
6870
6871 lockdep_assert_held_read(&parent->fs_info->commit_root_sem);
6872
6873 BUG_ON(*level == 0);
6874 eb = btrfs_read_node_slot(parent, slot);
6875 if (IS_ERR(eb))
6876 return PTR_ERR(eb);
6877
6878 /*
6879 * Trigger readahead for the next leaves we will process, so that it is
6880 * very likely that when we need them they are already in memory and we
6881 * will not block on disk IO. For nodes we only do readahead for one,
6882 * since the time window between processing nodes is typically larger.
6883 */
6884 reada_max = (*level == 1 ? SZ_128K : eb->fs_info->nodesize);
6885
6886 for (slot++; slot < nritems && reada_done < reada_max; slot++) {
6887 if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) {
6888 btrfs_readahead_node_child(parent, slot);
6889 reada_done += eb->fs_info->nodesize;
6890 }
6891 }
6892
6893 path->nodes[*level - 1] = eb;
6894 path->slots[*level - 1] = 0;
6895 (*level)--;
6896
6897 if (*level == 0)
6898 return replace_node_with_clone(path, 0);
6899
6900 return 0;
6901 }
6902
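/*
 * Move to the next slot at the current level, going up as many levels as
 * needed when the end of a node is reached. Returns 0 if it only moved to
 * the next slot, 1 if it had to go up, and -1 if the root level was
 * exhausted.
 */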
6903 static int tree_move_next_or_upnext(struct btrfs_path *path,
6904 int *level, int root_level)
6905 {
6906 int ret = 0;
6907 int nritems;
6908 nritems = btrfs_header_nritems(path->nodes[*level]);
6909
6910 path->slots[*level]++;
6911
6912 while (path->slots[*level] >= nritems) {
6913 if (*level == root_level) {
6914 path->slots[*level] = nritems - 1;
6915 return -1;
6916 }
6917
6918 /* move upnext */
6919 path->slots[*level] = 0;
6920 free_extent_buffer(path->nodes[*level]);
6921 path->nodes[*level] = NULL;
6922 (*level)++;
6923 path->slots[*level]++;
6924
6925 nritems = btrfs_header_nritems(path->nodes[*level]);
6926 ret = 1;
6927 }
6928 return ret;
6929 }
6930
6931 /*
6932 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
6933 * or down.
6934 */
6935 static int tree_advance(struct btrfs_path *path,
6936 int *level, int root_level,
6937 int allow_down,
6938 struct btrfs_key *key,
6939 u64 reada_min_gen)
6940 {
6941 int ret;
6942
6943 if (*level == 0 || !allow_down) {
6944 ret = tree_move_next_or_upnext(path, level, root_level);
6945 } else {
6946 ret = tree_move_down(path, level, reada_min_gen);
6947 }
6948
6949 /*
6950 * Even if we have reached the end of a tree (ret is -1), update the key
6951 * anyway, so that in case we need to restart due to a block group
6952 * relocation, we can assert that the last key of the root node still
6953 * exists in the tree.
6954 */
6955 if (*level == 0)
6956 btrfs_item_key_to_cpu(path->nodes[*level], key,
6957 path->slots[*level]);
6958 else
6959 btrfs_node_key_to_cpu(path->nodes[*level], key,
6960 path->slots[*level]);
6961
6962 return ret;
6963 }
6964
6965 static int tree_compare_item(struct btrfs_path *left_path,
6966 struct btrfs_path *right_path,
6967 char *tmp_buf)
6968 {
6969 int cmp;
6970 int len1, len2;
6971 unsigned long off1, off2;
6972
6973 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
6974 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
6975 if (len1 != len2)
6976 return 1;
6977
6978 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
6979 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
6980 right_path->slots[0]);
6981
6982 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
6983
6984 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
6985 if (cmp)
6986 return 1;
6987 return 0;
6988 }
6989
6990 /*
6991 * A transaction used for relocating a block group was committed or is about to
6992 * finish its commit. Release our paths and restart the search, so that we are
6993 * not using stale extent buffers:
6994 *
6995 * 1) For levels > 0, we are only holding references of extent buffers, without
6996 * any locks on them, which does not prevent them from having been relocated
6997 * and reallocated after the last time we released the commit root semaphore.
6998 * The exceptions are the root nodes, for which we always have a clone; see
6999 * the comment at btrfs_compare_trees();
7000 *
7001 * 2) For leaves, level 0, we are holding copies (clones) of extent buffers, so
7002 * we are safe from concurrent relocation and reallocation. However they
7003 * can have file extent items with a pre relocation disk_bytenr value, so we
7004 * restart the search from the current commit roots and clone the new leaves so
7005 * that we get the post relocation disk_bytenr values. Not doing so could
7006 * make us clone the wrong data in case there are new extents using the old
7007 * disk_bytenr that happen to be shared.
7008 */
7009 static int restart_after_relocation(struct btrfs_path *left_path,
7010 struct btrfs_path *right_path,
7011 const struct btrfs_key *left_key,
7012 const struct btrfs_key *right_key,
7013 int left_level,
7014 int right_level,
7015 const struct send_ctx *sctx)
7016 {
7017 int root_level;
7018 int ret;
7019
7020 lockdep_assert_held_read(&sctx->send_root->fs_info->commit_root_sem);
7021
7022 btrfs_release_path(left_path);
7023 btrfs_release_path(right_path);
7024
7025 /*
7026 * Since keys cannot be added to or removed from our roots because they
7027 * are readonly and we do not allow deduplication to run in parallel
7028 * (which can add, remove or change keys), the layout of the trees should
7029 * not change.
7030 */
7031 left_path->lowest_level = left_level;
7032 ret = search_key_again(sctx, sctx->send_root, left_path, left_key);
7033 if (ret < 0)
7034 return ret;
7035
7036 right_path->lowest_level = right_level;
7037 ret = search_key_again(sctx, sctx->parent_root, right_path, right_key);
7038 if (ret < 0)
7039 return ret;
7040
7041 /*
7042 * If the lowest level nodes are leaves, clone them so that they can be
7043 * safely used by changed_cb() while not under the protection of the
7044 * commit root semaphore, even if relocation and reallocation happen in
7045 * parallel.
7046 */
7047 if (left_level == 0) {
7048 ret = replace_node_with_clone(left_path, 0);
7049 if (ret < 0)
7050 return ret;
7051 }
7052
7053 if (right_level == 0) {
7054 ret = replace_node_with_clone(right_path, 0);
7055 if (ret < 0)
7056 return ret;
7057 }
7058
7059 /*
7060 * Now clone the root nodes (unless they happen to be the leaves we have
7061 * already cloned). This is to protect against concurrent snapshotting of
7062 * the send and parent roots (see the comment at btrfs_compare_trees()).
7063 */
7064 root_level = btrfs_header_level(sctx->send_root->commit_root);
7065 if (root_level > 0) {
7066 ret = replace_node_with_clone(left_path, root_level);
7067 if (ret < 0)
7068 return ret;
7069 }
7070
7071 root_level = btrfs_header_level(sctx->parent_root->commit_root);
7072 if (root_level > 0) {
7073 ret = replace_node_with_clone(right_path, root_level);
7074 if (ret < 0)
7075 return ret;
7076 }
7077
7078 return 0;
7079 }
7080
7081 /*
7082 * This function compares two trees and calls the provided callback for
7083 * every changed/new/deleted item it finds.
7084 * If shared tree blocks are encountered, whole subtrees are skipped, making
7085 * the compare pretty fast on snapshotted subvolumes.
7086 *
7087 * This currently works on commit roots only. As commit roots are read only,
7088 * we don't do any locking. The commit roots are protected with transactions.
7089 * Transactions are ended and rejoined when a commit is tried in between.
7090 *
7091 * This function checks for modifications done to the trees while comparing.
7092 * If it detects that a block group relocation happened in the meantime, it
7093 * restarts the comparison from the current commit roots.
7093 */
7094 static int btrfs_compare_trees(struct btrfs_root *left_root,
7095 struct btrfs_root *right_root, struct send_ctx *sctx)
7096 {
7097 struct btrfs_fs_info *fs_info = left_root->fs_info;
7098 int ret;
7099 int cmp;
7100 struct btrfs_path *left_path = NULL;
7101 struct btrfs_path *right_path = NULL;
7102 struct btrfs_key left_key;
7103 struct btrfs_key right_key;
7104 char *tmp_buf = NULL;
7105 int left_root_level;
7106 int right_root_level;
7107 int left_level;
7108 int right_level;
7109 int left_end_reached = 0;
7110 int right_end_reached = 0;
7111 int advance_left = 0;
7112 int advance_right = 0;
7113 u64 left_blockptr;
7114 u64 right_blockptr;
7115 u64 left_gen;
7116 u64 right_gen;
7117 u64 reada_min_gen;
7118
7119 left_path = btrfs_alloc_path();
7120 if (!left_path) {
7121 ret = -ENOMEM;
7122 goto out;
7123 }
7124 right_path = btrfs_alloc_path();
7125 if (!right_path) {
7126 ret = -ENOMEM;
7127 goto out;
7128 }
7129
7130 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
7131 if (!tmp_buf) {
7132 ret = -ENOMEM;
7133 goto out;
7134 }
7135
7136 left_path->search_commit_root = 1;
7137 left_path->skip_locking = 1;
7138 right_path->search_commit_root = 1;
7139 right_path->skip_locking = 1;
7140
7141 /*
7142 * Strategy: Go to the first items of both trees. Then do
7143 *
7144 * If both trees are at level 0
7145 * Compare keys of current items
7146 * If left < right treat left item as new, advance left tree
7147 * and repeat
7148 * If left > right treat right item as deleted, advance right tree
7149 * and repeat
7150 * If left == right do deep compare of items, treat as changed if
7151 * needed, advance both trees and repeat
7152 * If both trees are at the same level but not at level 0
7153 * Compare keys of current nodes/leaves
7154 * If left < right advance left tree and repeat
7155 * If left > right advance right tree and repeat
7156 * If left == right compare blockptrs of the next nodes/leaves
7157 * If they match advance both trees but stay at the same level
7158 * and repeat
7159 * If they don't match advance both trees while allowing to go
7160 * deeper and repeat
7161 * If tree levels are different
7162 * Advance the tree that needs it and repeat
7163 *
7164 * Advancing a tree means:
7165 * If we are at level 0, try to go to the next slot. If that's not
7166 * possible, go one level up and repeat. Stop when we find a level
7167 * where we could go to the next slot. We may at this point be on a
7168 * node or a leaf.
7169 *
7170 * If we are not at level 0 and not on shared tree blocks, go one
7171 * level deeper.
7172 *
7173 * If we are not at level 0 and on shared tree blocks, go one slot to
7174 * the right if possible or go up and right.
7175 */
7176
7177 down_read(&fs_info->commit_root_sem);
7178 left_level = btrfs_header_level(left_root->commit_root);
7179 left_root_level = left_level;
7180 /*
7181 * We clone the root node of the send and parent roots to prevent races
7182 * with snapshot creation of these roots. Snapshot creation COWs the
7183 * root node of a tree, so after the transaction is committed the old
7184 * extent can be reallocated while this send operation is still ongoing.
7185 * So we clone them, under the commit root semaphore, to be race free.
7186 */
7187 left_path->nodes[left_level] =
7188 btrfs_clone_extent_buffer(left_root->commit_root);
7189 if (!left_path->nodes[left_level]) {
7190 ret = -ENOMEM;
7191 goto out_unlock;
7192 }
7193
7194 right_level = btrfs_header_level(right_root->commit_root);
7195 right_root_level = right_level;
7196 right_path->nodes[right_level] =
7197 btrfs_clone_extent_buffer(right_root->commit_root);
7198 if (!right_path->nodes[right_level]) {
7199 ret = -ENOMEM;
7200 goto out_unlock;
7201 }
7202 /*
7203 * Our right root is the parent root, while the left root is the "send"
7204 * root. We know that all new nodes/leaves in the left root must have
7205 * a generation greater than the right root's generation, so we trigger
7206 * readahead for those nodes and leaves of the left root, as we know we
7207 * will need to read them at some point.
7208 */
7209 reada_min_gen = btrfs_header_generation(right_root->commit_root);
7210
7211 if (left_level == 0)
7212 btrfs_item_key_to_cpu(left_path->nodes[left_level],
7213 &left_key, left_path->slots[left_level]);
7214 else
7215 btrfs_node_key_to_cpu(left_path->nodes[left_level],
7216 &left_key, left_path->slots[left_level]);
7217 if (right_level == 0)
7218 btrfs_item_key_to_cpu(right_path->nodes[right_level],
7219 &right_key, right_path->slots[right_level]);
7220 else
7221 btrfs_node_key_to_cpu(right_path->nodes[right_level],
7222 &right_key, right_path->slots[right_level]);
7223
7224 sctx->last_reloc_trans = fs_info->last_reloc_trans;
7225
7226 while (1) {
7227 if (need_resched() ||
7228 rwsem_is_contended(&fs_info->commit_root_sem)) {
7229 up_read(&fs_info->commit_root_sem);
7230 cond_resched();
7231 down_read(&fs_info->commit_root_sem);
7232 }
7233
7234 if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
7235 ret = restart_after_relocation(left_path, right_path,
7236 &left_key, &right_key,
7237 left_level, right_level,
7238 sctx);
7239 if (ret < 0)
7240 goto out_unlock;
7241 sctx->last_reloc_trans = fs_info->last_reloc_trans;
7242 }
7243
7244 if (advance_left && !left_end_reached) {
7245 ret = tree_advance(left_path, &left_level,
7246 left_root_level,
7247 advance_left != ADVANCE_ONLY_NEXT,
7248 &left_key, reada_min_gen);
7249 if (ret == -1)
7250 left_end_reached = ADVANCE;
7251 else if (ret < 0)
7252 goto out_unlock;
7253 advance_left = 0;
7254 }
7255 if (advance_right && !right_end_reached) {
7256 ret = tree_advance(right_path, &right_level,
7257 right_root_level,
7258 advance_right != ADVANCE_ONLY_NEXT,
7259 &right_key, reada_min_gen);
7260 if (ret == -1)
7261 right_end_reached = ADVANCE;
7262 else if (ret < 0)
7263 goto out_unlock;
7264 advance_right = 0;
7265 }
7266
7267 if (left_end_reached && right_end_reached) {
7268 ret = 0;
7269 goto out_unlock;
7270 } else if (left_end_reached) {
7271 if (right_level == 0) {
7272 up_read(&fs_info->commit_root_sem);
7273 ret = changed_cb(left_path, right_path,
7274 &right_key,
7275 BTRFS_COMPARE_TREE_DELETED,
7276 sctx);
7277 if (ret < 0)
7278 goto out;
7279 down_read(&fs_info->commit_root_sem);
7280 }
7281 advance_right = ADVANCE;
7282 continue;
7283 } else if (right_end_reached) {
7284 if (left_level == 0) {
7285 up_read(&fs_info->commit_root_sem);
7286 ret = changed_cb(left_path, right_path,
7287 &left_key,
7288 BTRFS_COMPARE_TREE_NEW,
7289 sctx);
7290 if (ret < 0)
7291 goto out;
7292 down_read(&fs_info->commit_root_sem);
7293 }
7294 advance_left = ADVANCE;
7295 continue;
7296 }
7297
7298 if (left_level == 0 && right_level == 0) {
7299 up_read(&fs_info->commit_root_sem);
7300 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7301 if (cmp < 0) {
7302 ret = changed_cb(left_path, right_path,
7303 &left_key,
7304 BTRFS_COMPARE_TREE_NEW,
7305 sctx);
7306 advance_left = ADVANCE;
7307 } else if (cmp > 0) {
7308 ret = changed_cb(left_path, right_path,
7309 &right_key,
7310 BTRFS_COMPARE_TREE_DELETED,
7311 sctx);
7312 advance_right = ADVANCE;
7313 } else {
7314 enum btrfs_compare_tree_result result;
7315
7316 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
7317 ret = tree_compare_item(left_path, right_path,
7318 tmp_buf);
7319 if (ret)
7320 result = BTRFS_COMPARE_TREE_CHANGED;
7321 else
7322 result = BTRFS_COMPARE_TREE_SAME;
7323 ret = changed_cb(left_path, right_path,
7324 &left_key, result, sctx);
7325 advance_left = ADVANCE;
7326 advance_right = ADVANCE;
7327 }
7328
7329 if (ret < 0)
7330 goto out;
7331 down_read(&fs_info->commit_root_sem);
7332 } else if (left_level == right_level) {
7333 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7334 if (cmp < 0) {
7335 advance_left = ADVANCE;
7336 } else if (cmp > 0) {
7337 advance_right = ADVANCE;
7338 } else {
7339 left_blockptr = btrfs_node_blockptr(
7340 left_path->nodes[left_level],
7341 left_path->slots[left_level]);
7342 right_blockptr = btrfs_node_blockptr(
7343 right_path->nodes[right_level],
7344 right_path->slots[right_level]);
7345 left_gen = btrfs_node_ptr_generation(
7346 left_path->nodes[left_level],
7347 left_path->slots[left_level]);
7348 right_gen = btrfs_node_ptr_generation(
7349 right_path->nodes[right_level],
7350 right_path->slots[right_level]);
7351 if (left_blockptr == right_blockptr &&
7352 left_gen == right_gen) {
7353 /*
7354 * As we're on a shared block, don't
7355 * allow going deeper.
7356 */
7357 advance_left = ADVANCE_ONLY_NEXT;
7358 advance_right = ADVANCE_ONLY_NEXT;
7359 } else {
7360 advance_left = ADVANCE;
7361 advance_right = ADVANCE;
7362 }
7363 }
7364 } else if (left_level < right_level) {
7365 advance_right = ADVANCE;
7366 } else {
7367 advance_left = ADVANCE;
7368 }
7369 }
7370
7371 out_unlock:
7372 up_read(&fs_info->commit_root_sem);
7373 out:
7374 btrfs_free_path(left_path);
7375 btrfs_free_path(right_path);
7376 kvfree(tmp_buf);
7377 return ret;
7378 }
7379
7380 static int send_subvol(struct send_ctx *sctx)
7381 {
7382 int ret;
7383
7384 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
7385 ret = send_header(sctx);
7386 if (ret < 0)
7387 goto out;
7388 }
7389
7390 ret = send_subvol_begin(sctx);
7391 if (ret < 0)
7392 goto out;
7393
7394 if (sctx->parent_root) {
7395 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx);
7396 if (ret < 0)
7397 goto out;
7398 ret = finish_inode_if_needed(sctx, 1);
7399 if (ret < 0)
7400 goto out;
7401 } else {
7402 ret = full_send_tree(sctx);
7403 if (ret < 0)
7404 goto out;
7405 }
7406
7407 out:
7408 free_recorded_refs(sctx);
7409 return ret;
7410 }
7411
7412 /*
7413 * If orphan cleanup removed any orphans from a root, it means the tree
7414 * was modified and therefore the commit root is not the same as the current
7415 * root anymore. This is a problem, because send uses the commit root and
7416 * therefore can see inode items that don't exist in the current root anymore,
7417 * and for example make calls to btrfs_iget, which will do tree lookups based
7418 * on the current root and not on the commit root. Those lookups will fail,
7419 * returning a -ESTALE error, and making send fail with that error. So make
7420 * sure a send does not see any orphans we have just removed, and that it will
7421 * see the same inodes regardless of whether a transaction commit happened
7422 * before it started (meaning that the commit root will be the same as the
7423 * current root) or not.
7424 */
7425 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
7426 {
7427 int i;
7428 struct btrfs_trans_handle *trans = NULL;
7429
7430 again:
7431 if (sctx->parent_root &&
7432 sctx->parent_root->node != sctx->parent_root->commit_root)
7433 goto commit_trans;
7434
7435 for (i = 0; i < sctx->clone_roots_cnt; i++)
7436 if (sctx->clone_roots[i].root->node !=
7437 sctx->clone_roots[i].root->commit_root)
7438 goto commit_trans;
7439
7440 if (trans)
7441 return btrfs_end_transaction(trans);
7442
7443 return 0;
7444
7445 commit_trans:
7446 /* Use any root, all fs roots will get their commit roots updated. */
7447 if (!trans) {
7448 trans = btrfs_join_transaction(sctx->send_root);
7449 if (IS_ERR(trans))
7450 return PTR_ERR(trans);
7451 goto again;
7452 }
7453
7454 return btrfs_commit_transaction(trans);
7455 }
7456
7457 /*
7458 * Make sure any existing delalloc is flushed for any root used by a send
7459 * operation so that we do not miss any data and we do not race with writeback
7460 * finishing and changing a tree while send is using the tree. This could
7461 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
7462 * a send operation then uses the subvolume.
7463 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
7464 */
7465 static int flush_delalloc_roots(struct send_ctx *sctx)
7466 {
7467 struct btrfs_root *root = sctx->parent_root;
7468 int ret;
7469 int i;
7470
7471 if (root) {
7472 ret = btrfs_start_delalloc_snapshot(root, false);
7473 if (ret)
7474 return ret;
7475 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7476 }
7477
7478 for (i = 0; i < sctx->clone_roots_cnt; i++) {
7479 root = sctx->clone_roots[i].root;
7480 ret = btrfs_start_delalloc_snapshot(root, false);
7481 if (ret)
7482 return ret;
7483 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7484 }
7485
7486 return 0;
7487 }
7488
7489 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
7490 {
7491 spin_lock(&root->root_item_lock);
7492 root->send_in_progress--;
7493 /*
7494 * Not much left to do, we don't know why it's unbalanced and
7495 * can't blindly reset it to 0.
7496 */
7497 if (root->send_in_progress < 0)
7498 btrfs_err(root->fs_info,
7499 "send_in_progress unbalanced %d root %llu",
7500 root->send_in_progress, root->root_key.objectid);
7501 spin_unlock(&root->root_item_lock);
7502 }
7503
7504 static void dedupe_in_progress_warn(const struct btrfs_root *root)
7505 {
7506 btrfs_warn_rl(root->fs_info,
7507 "cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
7508 root->root_key.objectid, root->dedupe_in_progress);
7509 }
7510
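/*
 * Entry point for the send ioctl: validates the arguments, pins the send,
 * parent and clone roots (they must stay read-only for the whole
 * operation), sets up the send context and streams the resulting commands
 * to the file descriptor provided by user space.
 */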
7511 long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
7512 {
7513 int ret = 0;
7514 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
7515 struct btrfs_fs_info *fs_info = send_root->fs_info;
7516 struct btrfs_root *clone_root;
7517 struct send_ctx *sctx = NULL;
7518 u32 i;
7519 u64 *clone_sources_tmp = NULL;
7520 int clone_sources_to_rollback = 0;
7521 size_t alloc_size;
7522 int sort_clone_roots = 0;
7523
7524 if (!capable(CAP_SYS_ADMIN))
7525 return -EPERM;
7526
7527 /*
7528 * The subvolume must remain read-only during send, protect against
7529 * making it RW. This also protects against deletion.
7530 */
7531 spin_lock(&send_root->root_item_lock);
7532 if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
7533 dedupe_in_progress_warn(send_root);
7534 spin_unlock(&send_root->root_item_lock);
7535 return -EAGAIN;
7536 }
7537 send_root->send_in_progress++;
7538 spin_unlock(&send_root->root_item_lock);
7539
7540 /*
7541 * Userspace tools do the checks and warn the user if it's
7542 * not RO.
7543 */
7544 if (!btrfs_root_readonly(send_root)) {
7545 ret = -EPERM;
7546 goto out;
7547 }
7548
7549 /*
7550 * Check that we don't overflow at later allocations: we request
7551 * clone_sources_count + 1 items, and compare to unsigned long inside
7552 * access_ok. Also set an upper limit for allocation size so this can't
7553 * easily exhaust memory. Max number of clone sources is about 200K.
7554 */
7555 if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
7556 ret = -EINVAL;
7557 goto out;
7558 }
7559
7560 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
7561 ret = -EOPNOTSUPP;
7562 goto out;
7563 }
7564
7565 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
7566 if (!sctx) {
7567 ret = -ENOMEM;
7568 goto out;
7569 }
7570
7571 INIT_LIST_HEAD(&sctx->new_refs);
7572 INIT_LIST_HEAD(&sctx->deleted_refs);
7573 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
7574 INIT_LIST_HEAD(&sctx->name_cache_list);
7575
7576 sctx->flags = arg->flags;
7577
7578 sctx->send_filp = fget(arg->send_fd);
7579 if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
7580 ret = -EBADF;
7581 goto out;
7582 }
7583
7584 sctx->send_root = send_root;
7585 /*
7586 * Unlikely but possible: if the subvolume is marked for deletion but
7587 * is slow to remove the directory entry, send can still be started.
7588 */
7589 if (btrfs_root_dead(sctx->send_root)) {
7590 ret = -EPERM;
7591 goto out;
7592 }
7593
7594 sctx->clone_roots_cnt = arg->clone_sources_count;
7595
7596 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
7597 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
7598 if (!sctx->send_buf) {
7599 ret = -ENOMEM;
7600 goto out;
7601 }
7602
7603 sctx->pending_dir_moves = RB_ROOT;
7604 sctx->waiting_dir_moves = RB_ROOT;
7605 sctx->orphan_dirs = RB_ROOT;
7606
7607 sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
7608 arg->clone_sources_count + 1,
7609 GFP_KERNEL);
7610 if (!sctx->clone_roots) {
7611 ret = -ENOMEM;
7612 goto out;
7613 }
7614
7615 alloc_size = array_size(sizeof(*arg->clone_sources),
7616 arg->clone_sources_count);
7617
7618 if (arg->clone_sources_count) {
7619 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
7620 if (!clone_sources_tmp) {
7621 ret = -ENOMEM;
7622 goto out;
7623 }
7624
7625 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
7626 alloc_size);
7627 if (ret) {
7628 ret = -EFAULT;
7629 goto out;
7630 }
7631
7632 for (i = 0; i < arg->clone_sources_count; i++) {
7633 clone_root = btrfs_get_fs_root(fs_info,
7634 clone_sources_tmp[i], true);
7635 if (IS_ERR(clone_root)) {
7636 ret = PTR_ERR(clone_root);
7637 goto out;
7638 }
7639 spin_lock(&clone_root->root_item_lock);
7640 if (!btrfs_root_readonly(clone_root) ||
7641 btrfs_root_dead(clone_root)) {
7642 spin_unlock(&clone_root->root_item_lock);
7643 btrfs_put_root(clone_root);
7644 ret = -EPERM;
7645 goto out;
7646 }
7647 if (clone_root->dedupe_in_progress) {
7648 dedupe_in_progress_warn(clone_root);
7649 spin_unlock(&clone_root->root_item_lock);
7650 btrfs_put_root(clone_root);
7651 ret = -EAGAIN;
7652 goto out;
7653 }
7654 clone_root->send_in_progress++;
7655 spin_unlock(&clone_root->root_item_lock);
7656
7657 sctx->clone_roots[i].root = clone_root;
7658 clone_sources_to_rollback = i + 1;
7659 }
7660 kvfree(clone_sources_tmp);
7661 clone_sources_tmp = NULL;
7662 }
7663
7664 if (arg->parent_root) {
7665 sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
7666 true);
7667 if (IS_ERR(sctx->parent_root)) {
7668 ret = PTR_ERR(sctx->parent_root);
7669 goto out;
7670 }
7671
7672 spin_lock(&sctx->parent_root->root_item_lock);
7673 sctx->parent_root->send_in_progress++;
7674 if (!btrfs_root_readonly(sctx->parent_root) ||
7675 btrfs_root_dead(sctx->parent_root)) {
7676 spin_unlock(&sctx->parent_root->root_item_lock);
7677 ret = -EPERM;
7678 goto out;
7679 }
7680 if (sctx->parent_root->dedupe_in_progress) {
7681 dedupe_in_progress_warn(sctx->parent_root);
7682 spin_unlock(&sctx->parent_root->root_item_lock);
7683 ret = -EAGAIN;
7684 goto out;
7685 }
7686 spin_unlock(&sctx->parent_root->root_item_lock);
7687 }
7688
7689 /*
7690 * Clones from send_root are allowed, but only if the clone source
7691 * is behind the current send position. This is checked while searching
7692 * for possible clone sources.
7693 */
7694 sctx->clone_roots[sctx->clone_roots_cnt++].root =
7695 btrfs_grab_root(sctx->send_root);
7696
7697 /* We do a bsearch later */
7698 sort(sctx->clone_roots, sctx->clone_roots_cnt,
7699 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
7700 NULL);
7701 sort_clone_roots = 1;
7702
7703 ret = flush_delalloc_roots(sctx);
7704 if (ret)
7705 goto out;
7706
7707 ret = ensure_commit_roots_uptodate(sctx);
7708 if (ret)
7709 goto out;
7710
7711 ret = send_subvol(sctx);
7712 if (ret < 0)
7713 goto out;
7714
7715 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
7716 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
7717 if (ret < 0)
7718 goto out;
7719 ret = send_cmd(sctx);
7720 if (ret < 0)
7721 goto out;
7722 }
7723
7724 out:
7725 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
7726 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
7727 struct rb_node *n;
7728 struct pending_dir_move *pm;
7729
7730 n = rb_first(&sctx->pending_dir_moves);
7731 pm = rb_entry(n, struct pending_dir_move, node);
7732 while (!list_empty(&pm->list)) {
7733 struct pending_dir_move *pm2;
7734
7735 pm2 = list_first_entry(&pm->list,
7736 struct pending_dir_move, list);
7737 free_pending_move(sctx, pm2);
7738 }
7739 free_pending_move(sctx, pm);
7740 }
7741
7742 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
7743 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
7744 struct rb_node *n;
7745 struct waiting_dir_move *dm;
7746
7747 n = rb_first(&sctx->waiting_dir_moves);
7748 dm = rb_entry(n, struct waiting_dir_move, node);
7749 rb_erase(&dm->node, &sctx->waiting_dir_moves);
7750 kfree(dm);
7751 }
7752
7753 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
7754 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
7755 struct rb_node *n;
7756 struct orphan_dir_info *odi;
7757
7758 n = rb_first(&sctx->orphan_dirs);
7759 odi = rb_entry(n, struct orphan_dir_info, node);
7760 free_orphan_dir_info(sctx, odi);
7761 }
7762
7763 if (sort_clone_roots) {
7764 for (i = 0; i < sctx->clone_roots_cnt; i++) {
7765 btrfs_root_dec_send_in_progress(
7766 sctx->clone_roots[i].root);
7767 btrfs_put_root(sctx->clone_roots[i].root);
7768 }
7769 } else {
7770 for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
7771 btrfs_root_dec_send_in_progress(
7772 sctx->clone_roots[i].root);
7773 btrfs_put_root(sctx->clone_roots[i].root);
7774 }
7775
7776 btrfs_root_dec_send_in_progress(send_root);
7777 }
7778 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
7779 btrfs_root_dec_send_in_progress(sctx->parent_root);
7780 btrfs_put_root(sctx->parent_root);
7781 }
7782
7783 kvfree(clone_sources_tmp);
7784
7785 if (sctx) {
7786 if (sctx->send_filp)
7787 fput(sctx->send_filp);
7788
7789 kvfree(sctx->clone_roots);
7790 kvfree(sctx->send_buf);
7791
7792 name_cache_free(sctx);
7793
7794 kfree(sctx);
7795 }
7796
7797 return ret;
7798 }
7799