1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 */
5
6 #include <linux/bsearch.h>
7 #include <linux/fs.h>
8 #include <linux/file.h>
9 #include <linux/sort.h>
10 #include <linux/mount.h>
11 #include <linux/xattr.h>
12 #include <linux/posix_acl_xattr.h>
13 #include <linux/radix-tree.h>
14 #include <linux/vmalloc.h>
15 #include <linux/string.h>
16 #include <linux/compat.h>
17 #include <linux/crc32c.h>
18
19 #include "send.h"
20 #include "backref.h"
21 #include "locking.h"
22 #include "disk-io.h"
23 #include "btrfs_inode.h"
24 #include "transaction.h"
25 #include "compression.h"
26 #include "xattr.h"
27
28 /*
29 * Maximum number of references an extent can have in order for us to attempt to
30 * issue clone operations instead of write operations. This currently exists to
31 * avoid hitting limitations of the backreference walking code (taking a lot of
32 * time and using too much memory for extents with a large number of references).
33 */
34 #define SEND_MAX_EXTENT_REFS 64
35
36 /*
37 * A fs_path is a helper to dynamically build path names with unknown size.
38 * It reallocates the internal buffer on demand.
39 * It allows fast adding of path elements on the right side (normal path) and
40 * fast adding to the left side (reversed path). A reversed path can also be
41 * unreversed if needed.
42 */
43 struct fs_path {
44 union {
45 struct {
46 char *start;
47 char *end;
48
49 char *buf;
50 unsigned short buf_len:15;
51 unsigned short reversed:1;
52 char inline_buf[];
53 };
54 /*
55 * Average path length does not exceed 200 bytes, we'll have
56 * better packing in the slab and higher chance to satisfy
57 * an allocation later during send.
58 */
59 char pad[256];
60 };
61 };
62 #define FS_PATH_INLINE_SIZE \
63 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
64
65
66 /* reused for each extent */
67 struct clone_root {
68 struct btrfs_root *root;
69 u64 ino;
70 u64 offset;
71
72 u64 found_refs;
73 };
74
75 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
76 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
77
78 struct send_ctx {
79 struct file *send_filp;
80 loff_t send_off;
81 char *send_buf;
82 u32 send_size;
83 u32 send_max_size;
84 u64 total_send_size;
85 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
86 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
87
88 struct btrfs_root *send_root;
89 struct btrfs_root *parent_root;
90 struct clone_root *clone_roots;
91 int clone_roots_cnt;
92
93 /* current state of the compare_tree call */
94 struct btrfs_path *left_path;
95 struct btrfs_path *right_path;
96 struct btrfs_key *cmp_key;
97
98 /*
99 * Info about the currently processed inode. In case of deleted inodes,
100 * these are the values from the deleted inode.
101 */
102 u64 cur_ino;
103 u64 cur_inode_gen;
104 int cur_inode_new;
105 int cur_inode_new_gen;
106 int cur_inode_deleted;
107 u64 cur_inode_size;
108 u64 cur_inode_mode;
109 u64 cur_inode_rdev;
110 u64 cur_inode_last_extent;
111 u64 cur_inode_next_write_offset;
112 bool ignore_cur_inode;
113
114 u64 send_progress;
115
116 struct list_head new_refs;
117 struct list_head deleted_refs;
118
119 struct radix_tree_root name_cache;
120 struct list_head name_cache_list;
121 int name_cache_size;
122
123 struct file_ra_state ra;
124
125 char *read_buf;
126
127 /*
128 * We process inodes in increasing order of inode number, so if before an
129 * incremental send we reverse the parent/child relationship of
130 * directories such that a directory with a lower inode number was
131 * the parent of a directory with a higher inode number, and the one
132 * becoming the new parent got renamed too, we can't rename/move the
133 * directory with lower inode number when we finish processing it - we
134 * must process the directory with higher inode number first, then
135 * rename/move it and then rename/move the directory with lower inode
136 * number. Example follows.
137 *
138 * Tree state when the first send was performed:
139 *
140 * .
141 * |-- a (ino 257)
142 * |-- b (ino 258)
143 * |
144 * |
145 * |-- c (ino 259)
146 * | |-- d (ino 260)
147 * |
148 * |-- c2 (ino 261)
149 *
150 * Tree state when the second (incremental) send is performed:
151 *
152 * .
153 * |-- a (ino 257)
154 * |-- b (ino 258)
155 * |-- c2 (ino 261)
156 * |-- d2 (ino 260)
157 * |-- cc (ino 259)
158 *
159 * The sequence of steps that led to the second state was:
160 *
161 * mv /a/b/c/d /a/b/c2/d2
162 * mv /a/b/c /a/b/c2/d2/cc
163 *
164 * "c" has lower inode number, but we can't move it (2nd mv operation)
165 * before we move "d", which has higher inode number.
166 *
167 * So we just memorize which move/rename operations must be performed
168 * later when their respective parent is processed and moved/renamed.
169 */
170
171 /* Indexed by parent directory inode number. */
172 struct rb_root pending_dir_moves;
173
174 /*
175 * Reverse index, indexed by the inode number of a directory that
176 * is waiting for the move/rename of its immediate parent before its
177 * own move/rename can be performed.
178 */
179 struct rb_root waiting_dir_moves;
180
181 /*
182 * A directory that is going to be rm'ed might have a child directory
183 * which is in the pending directory moves index above. In this case,
184 * the directory can only be removed after the move/rename of its child
185 * is performed. Example:
186 *
187 * Parent snapshot:
188 *
189 * . (ino 256)
190 * |-- a/ (ino 257)
191 * |-- b/ (ino 258)
192 * |-- c/ (ino 259)
193 * | |-- x/ (ino 260)
194 * |
195 * |-- y/ (ino 261)
196 *
197 * Send snapshot:
198 *
199 * . (ino 256)
200 * |-- a/ (ino 257)
201 * |-- b/ (ino 258)
202 * |-- YY/ (ino 261)
203 * |-- x/ (ino 260)
204 *
205 * Sequence of steps that led to the send snapshot:
206 * rm -f /a/b/c/foo.txt
207 * mv /a/b/y /a/b/YY
208 * mv /a/b/c/x /a/b/YY
209 * rmdir /a/b/c
210 *
211 * When the child is processed, its move/rename is delayed until its
212 * parent is processed (as explained above), but all other operations
213 * like update utimes, chown, chgrp, etc, are performed and the paths
214 * that it uses for those operations must use the orphanized name of
215 * its parent (the directory we're going to rm later), so we need to
216 * memorize that name.
217 *
218 * Indexed by the inode number of the directory to be deleted.
219 */
220 struct rb_root orphan_dirs;
221 };
222
223 struct pending_dir_move {
224 struct rb_node node;
225 struct list_head list;
226 u64 parent_ino;
227 u64 ino;
228 u64 gen;
229 struct list_head update_refs;
230 };
231
232 struct waiting_dir_move {
233 struct rb_node node;
234 u64 ino;
235 /*
236 * There might be some directory that could not be removed because it
237 * was waiting for this directory inode to be moved first. Therefore
238 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
239 */
240 u64 rmdir_ino;
241 u64 rmdir_gen;
242 bool orphanized;
243 };
244
245 struct orphan_dir_info {
246 struct rb_node node;
247 u64 ino;
248 u64 gen;
249 u64 last_dir_index_offset;
250 };
251
252 struct name_cache_entry {
253 struct list_head list;
254 /*
255 * radix_tree has only 32bit entries but we need to handle 64bit inums.
256 * We use the lower 32bit of the 64bit inum to store it in the tree. If
257 * more than one inum would fall into the same entry, we use radix_list
258 * to store the additional entries. radix_list is also used to store
259 * entries where two entries have the same inum but different
260 * generations.
261 */
262 struct list_head radix_list;
263 u64 ino;
264 u64 gen;
265 u64 parent_ino;
266 u64 parent_gen;
267 int ret;
268 int need_later_update;
269 int name_len;
270 char name[];
271 };
272
273 #define ADVANCE 1
274 #define ADVANCE_ONLY_NEXT -1
275
276 enum btrfs_compare_tree_result {
277 BTRFS_COMPARE_TREE_NEW,
278 BTRFS_COMPARE_TREE_DELETED,
279 BTRFS_COMPARE_TREE_CHANGED,
280 BTRFS_COMPARE_TREE_SAME,
281 };
282 typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path,
283 struct btrfs_path *right_path,
284 struct btrfs_key *key,
285 enum btrfs_compare_tree_result result,
286 void *ctx);
287
288 __cold
289 static void inconsistent_snapshot_error(struct send_ctx *sctx,
290 enum btrfs_compare_tree_result result,
291 const char *what)
292 {
293 const char *result_string;
294
295 switch (result) {
296 case BTRFS_COMPARE_TREE_NEW:
297 result_string = "new";
298 break;
299 case BTRFS_COMPARE_TREE_DELETED:
300 result_string = "deleted";
301 break;
302 case BTRFS_COMPARE_TREE_CHANGED:
303 result_string = "updated";
304 break;
305 case BTRFS_COMPARE_TREE_SAME:
306 ASSERT(0);
307 result_string = "unchanged";
308 break;
309 default:
310 ASSERT(0);
311 result_string = "unexpected";
312 }
313
314 btrfs_err(sctx->send_root->fs_info,
315 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
316 result_string, what, sctx->cmp_key->objectid,
317 sctx->send_root->root_key.objectid,
318 (sctx->parent_root ?
319 sctx->parent_root->root_key.objectid : 0));
320 }
321
322 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
323
324 static struct waiting_dir_move *
325 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
326
327 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
328
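/*
 * File holes only need to be sent for incremental sends, and only for
 * regular files that already existed in the parent snapshot with the same
 * generation; new (or recreated) inodes start out empty on the receiving
 * side, so their holes need no explicit handling.
 */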
329 static int need_send_hole(struct send_ctx *sctx)
330 {
331 return (sctx->parent_root && !sctx->cur_inode_new &&
332 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
333 S_ISREG(sctx->cur_inode_mode));
334 }
335
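/*
 * Reset the path to an empty string without freeing the buffer. Reversed
 * paths start at the last byte of the buffer and grow to the left as
 * components are prepended.
 */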
336 static void fs_path_reset(struct fs_path *p)
337 {
338 if (p->reversed) {
339 p->start = p->buf + p->buf_len - 1;
340 p->end = p->start;
341 *p->start = 0;
342 } else {
343 p->start = p->buf;
344 p->end = p->start;
345 *p->start = 0;
346 }
347 }
348
349 static struct fs_path *fs_path_alloc(void)
350 {
351 struct fs_path *p;
352
353 p = kmalloc(sizeof(*p), GFP_KERNEL);
354 if (!p)
355 return NULL;
356 p->reversed = 0;
357 p->buf = p->inline_buf;
358 p->buf_len = FS_PATH_INLINE_SIZE;
359 fs_path_reset(p);
360 return p;
361 }
362
363 static struct fs_path *fs_path_alloc_reversed(void)
364 {
365 struct fs_path *p;
366
367 p = fs_path_alloc();
368 if (!p)
369 return NULL;
370 p->reversed = 1;
371 fs_path_reset(p);
372 return p;
373 }
374
375 static void fs_path_free(struct fs_path *p)
376 {
377 if (!p)
378 return;
379 if (p->buf != p->inline_buf)
380 kfree(p->buf);
381 kfree(p);
382 }
383
384 static int fs_path_len(struct fs_path *p)
385 {
386 return p->end - p->start;
387 }
388
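/*
 * Make sure the path buffer can hold at least @len characters plus the
 * terminating NUL. The first time the inline buffer is outgrown we switch
 * to a heap allocation, preserving the current contents (for reversed paths
 * they are moved to the end of the new buffer). Paths longer than PATH_MAX
 * are rejected.
 */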
389 static int fs_path_ensure_buf(struct fs_path *p, int len)
390 {
391 char *tmp_buf;
392 int path_len;
393 int old_buf_len;
394
395 len++;
396
397 if (p->buf_len >= len)
398 return 0;
399
400 if (len > PATH_MAX) {
401 WARN_ON(1);
402 return -ENOMEM;
403 }
404
405 path_len = p->end - p->start;
406 old_buf_len = p->buf_len;
407
408 /*
409 * First time the inline_buf does not suffice
410 */
411 if (p->buf == p->inline_buf) {
412 tmp_buf = kmalloc(len, GFP_KERNEL);
413 if (tmp_buf)
414 memcpy(tmp_buf, p->buf, old_buf_len);
415 } else {
416 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
417 }
418 if (!tmp_buf)
419 return -ENOMEM;
420 p->buf = tmp_buf;
421 /*
422 * The real size of the buffer is bigger; this will let the fast path
423 * happen most of the time
424 */
425 p->buf_len = ksize(p->buf);
426
427 if (p->reversed) {
428 tmp_buf = p->buf + old_buf_len - path_len - 1;
429 p->end = p->buf + p->buf_len - 1;
430 p->start = p->end - path_len;
431 memmove(p->start, tmp_buf, path_len + 1);
432 } else {
433 p->start = p->buf;
434 p->end = p->start + path_len;
435 }
436 return 0;
437 }
438
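/*
 * Reserve room for a new path component of @name_len bytes, adding a '/'
 * separator if the path is not empty. On return *@prepared points to where
 * the caller must copy the component: the new front for reversed paths,
 * the old end otherwise.
 */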
439 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
440 char **prepared)
441 {
442 int ret;
443 int new_len;
444
445 new_len = p->end - p->start + name_len;
446 if (p->start != p->end)
447 new_len++;
448 ret = fs_path_ensure_buf(p, new_len);
449 if (ret < 0)
450 goto out;
451
452 if (p->reversed) {
453 if (p->start != p->end)
454 *--p->start = '/';
455 p->start -= name_len;
456 *prepared = p->start;
457 } else {
458 if (p->start != p->end)
459 *p->end++ = '/';
460 *prepared = p->end;
461 p->end += name_len;
462 *p->end = 0;
463 }
464
465 out:
466 return ret;
467 }
468
469 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
470 {
471 int ret;
472 char *prepared;
473
474 ret = fs_path_prepare_for_add(p, name_len, &prepared);
475 if (ret < 0)
476 goto out;
477 memcpy(prepared, name, name_len);
478
479 out:
480 return ret;
481 }
482
483 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
484 {
485 int ret;
486 char *prepared;
487
488 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
489 if (ret < 0)
490 goto out;
491 memcpy(prepared, p2->start, p2->end - p2->start);
492
493 out:
494 return ret;
495 }
496
497 static int fs_path_add_from_extent_buffer(struct fs_path *p,
498 struct extent_buffer *eb,
499 unsigned long off, int len)
500 {
501 int ret;
502 char *prepared;
503
504 ret = fs_path_prepare_for_add(p, len, &prepared);
505 if (ret < 0)
506 goto out;
507
508 read_extent_buffer(eb, prepared, off, len);
509
510 out:
511 return ret;
512 }
513
514 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
515 {
516 int ret;
517
518 p->reversed = from->reversed;
519 fs_path_reset(p);
520
521 ret = fs_path_add_path(p, from);
522
523 return ret;
524 }
525
526
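/*
 * Turn a reversed path into a normal one by moving the string to the start
 * of the buffer.
 */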
527 static void fs_path_unreverse(struct fs_path *p)
528 {
529 char *tmp;
530 int len;
531
532 if (!p->reversed)
533 return;
534
535 tmp = p->start;
536 len = p->end - p->start;
537 p->start = p->buf;
538 p->end = p->start + len;
539 memmove(p->start, tmp, len + 1);
540 p->reversed = 0;
541 }
542
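/*
 * Allocate a btrfs_path configured the way send needs it: searches are done
 * on the commit roots, without tree locking and with need_commit_sem set.
 */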
543 static struct btrfs_path *alloc_path_for_send(void)
544 {
545 struct btrfs_path *path;
546
547 path = btrfs_alloc_path();
548 if (!path)
549 return NULL;
550 path->search_commit_root = 1;
551 path->skip_locking = 1;
552 path->need_commit_sem = 1;
553 return path;
554 }
555
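/*
 * Write @len bytes from @buf to @filp at *@off, looping over short writes.
 * Returns 0 on success or a negative errno (-EIO if no progress is made).
 */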
556 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
557 {
558 int ret;
559 u32 pos = 0;
560
561 while (pos < len) {
562 ret = kernel_write(filp, buf + pos, len - pos, off);
563 /* TODO handle that correctly */
564 /*if (ret == -ERESTARTSYS) {
565 continue;
566 }*/
567 if (ret < 0)
568 return ret;
569 if (ret == 0) {
570 return -EIO;
571 }
572 pos += ret;
573 }
574
575 return 0;
576 }
577
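/*
 * Append a TLV (type/length/value) attribute to the command currently being
 * built in the send buffer. Returns -EOVERFLOW if it does not fit.
 */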
578 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
579 {
580 struct btrfs_tlv_header *hdr;
581 int total_len = sizeof(*hdr) + len;
582 int left = sctx->send_max_size - sctx->send_size;
583
584 if (unlikely(left < total_len))
585 return -EOVERFLOW;
586
587 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
588 hdr->tlv_type = cpu_to_le16(attr);
589 hdr->tlv_len = cpu_to_le16(len);
590 memcpy(hdr + 1, data, len);
591 sctx->send_size += total_len;
592
593 return 0;
594 }
595
596 #define TLV_PUT_DEFINE_INT(bits) \
597 static int tlv_put_u##bits(struct send_ctx *sctx, \
598 u##bits attr, u##bits value) \
599 { \
600 __le##bits __tmp = cpu_to_le##bits(value); \
601 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
602 }
603
604 TLV_PUT_DEFINE_INT(64)
605
606 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
607 const char *str, int len)
608 {
609 if (len == -1)
610 len = strlen(str);
611 return tlv_put(sctx, attr, str, len);
612 }
613
614 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
615 const u8 *uuid)
616 {
617 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
618 }
619
620 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
621 struct extent_buffer *eb,
622 struct btrfs_timespec *ts)
623 {
624 struct btrfs_timespec bts;
625 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
626 return tlv_put(sctx, attr, &bts, sizeof(bts));
627 }
628
629
630 #define TLV_PUT(sctx, attrtype, data, attrlen) \
631 do { \
632 ret = tlv_put(sctx, attrtype, data, attrlen); \
633 if (ret < 0) \
634 goto tlv_put_failure; \
635 } while (0)
636
637 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
638 do { \
639 ret = tlv_put_u##bits(sctx, attrtype, value); \
640 if (ret < 0) \
641 goto tlv_put_failure; \
642 } while (0)
643
644 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
645 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
646 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
647 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
648 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
649 do { \
650 ret = tlv_put_string(sctx, attrtype, str, len); \
651 if (ret < 0) \
652 goto tlv_put_failure; \
653 } while (0)
654 #define TLV_PUT_PATH(sctx, attrtype, p) \
655 do { \
656 ret = tlv_put_string(sctx, attrtype, p->start, \
657 p->end - p->start); \
658 if (ret < 0) \
659 goto tlv_put_failure; \
660 } while(0)
661 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
662 do { \
663 ret = tlv_put_uuid(sctx, attrtype, uuid); \
664 if (ret < 0) \
665 goto tlv_put_failure; \
666 } while (0)
667 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
668 do { \
669 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
670 if (ret < 0) \
671 goto tlv_put_failure; \
672 } while (0)
673
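/*
 * Write the stream header (magic string and stream version) to the send
 * file.
 */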
674 static int send_header(struct send_ctx *sctx)
675 {
676 struct btrfs_stream_header hdr;
677
678 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
679 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
680
681 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
682 &sctx->send_off);
683 }
684
685 /*
686 * For each command/item we want to send to userspace, we call this function.
687 */
688 static int begin_cmd(struct send_ctx *sctx, int cmd)
689 {
690 struct btrfs_cmd_header *hdr;
691
692 if (WARN_ON(!sctx->send_buf))
693 return -EINVAL;
694
695 BUG_ON(sctx->send_size);
696
697 sctx->send_size += sizeof(*hdr);
698 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
699 hdr->cmd = cpu_to_le16(cmd);
700
701 return 0;
702 }
703
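/*
 * Finish the current command: fill in the payload length and the crc32c of
 * the whole command in its header, write it to the send file and update the
 * per-command size statistics.
 */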
704 static int send_cmd(struct send_ctx *sctx)
705 {
706 int ret;
707 struct btrfs_cmd_header *hdr;
708 u32 crc;
709
710 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
711 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
712 hdr->crc = 0;
713
714 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
715 hdr->crc = cpu_to_le32(crc);
716
717 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
718 &sctx->send_off);
719
720 sctx->total_send_size += sctx->send_size;
721 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
722 sctx->send_size = 0;
723
724 return ret;
725 }
726
727 /*
728 * Sends a move instruction to user space
729 */
730 static int send_rename(struct send_ctx *sctx,
731 struct fs_path *from, struct fs_path *to)
732 {
733 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
734 int ret;
735
736 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
737
738 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
739 if (ret < 0)
740 goto out;
741
742 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
743 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
744
745 ret = send_cmd(sctx);
746
747 tlv_put_failure:
748 out:
749 return ret;
750 }
751
752 /*
753 * Sends a link instruction to user space
754 */
755 static int send_link(struct send_ctx *sctx,
756 struct fs_path *path, struct fs_path *lnk)
757 {
758 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
759 int ret;
760
761 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
762
763 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
764 if (ret < 0)
765 goto out;
766
767 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
768 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
769
770 ret = send_cmd(sctx);
771
772 tlv_put_failure:
773 out:
774 return ret;
775 }
776
777 /*
778 * Sends an unlink instruction to user space
779 */
780 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
781 {
782 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
783 int ret;
784
785 btrfs_debug(fs_info, "send_unlink %s", path->start);
786
787 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
788 if (ret < 0)
789 goto out;
790
791 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
792
793 ret = send_cmd(sctx);
794
795 tlv_put_failure:
796 out:
797 return ret;
798 }
799
800 /*
801 * Sends a rmdir instruction to user space
802 */
803 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
804 {
805 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
806 int ret;
807
808 btrfs_debug(fs_info, "send_rmdir %s", path->start);
809
810 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
811 if (ret < 0)
812 goto out;
813
814 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
815
816 ret = send_cmd(sctx);
817
818 tlv_put_failure:
819 out:
820 return ret;
821 }
822
823 /*
824 * Helper function to retrieve some fields from an inode item.
825 */
826 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
827 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
828 u64 *gid, u64 *rdev)
829 {
830 int ret;
831 struct btrfs_inode_item *ii;
832 struct btrfs_key key;
833
834 key.objectid = ino;
835 key.type = BTRFS_INODE_ITEM_KEY;
836 key.offset = 0;
837 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
838 if (ret) {
839 if (ret > 0)
840 ret = -ENOENT;
841 return ret;
842 }
843
844 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
845 struct btrfs_inode_item);
846 if (size)
847 *size = btrfs_inode_size(path->nodes[0], ii);
848 if (gen)
849 *gen = btrfs_inode_generation(path->nodes[0], ii);
850 if (mode)
851 *mode = btrfs_inode_mode(path->nodes[0], ii);
852 if (uid)
853 *uid = btrfs_inode_uid(path->nodes[0], ii);
854 if (gid)
855 *gid = btrfs_inode_gid(path->nodes[0], ii);
856 if (rdev)
857 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
858
859 return ret;
860 }
861
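/*
 * Same as __get_inode_info() but allocates (and frees) a temporary search
 * path for the caller.
 */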
862 static int get_inode_info(struct btrfs_root *root,
863 u64 ino, u64 *size, u64 *gen,
864 u64 *mode, u64 *uid, u64 *gid,
865 u64 *rdev)
866 {
867 struct btrfs_path *path;
868 int ret;
869
870 path = alloc_path_for_send();
871 if (!path)
872 return -ENOMEM;
873 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
874 rdev);
875 btrfs_free_path(path);
876 return ret;
877 }
878
879 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
880 struct fs_path *p,
881 void *ctx);
882
883 /*
884 * Helper function to iterate the entries in ONE btrfs_inode_ref or
885 * btrfs_inode_extref.
886 * The iterate callback may return a non-zero value to stop iteration. This can
887 * be a negative value for error codes or 1 to simply stop it.
888 *
889 * path must point to the INODE_REF or INODE_EXTREF when called.
890 */
891 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
892 struct btrfs_key *found_key, int resolve,
893 iterate_inode_ref_t iterate, void *ctx)
894 {
895 struct extent_buffer *eb = path->nodes[0];
896 struct btrfs_item *item;
897 struct btrfs_inode_ref *iref;
898 struct btrfs_inode_extref *extref;
899 struct btrfs_path *tmp_path;
900 struct fs_path *p;
901 u32 cur = 0;
902 u32 total;
903 int slot = path->slots[0];
904 u32 name_len;
905 char *start;
906 int ret = 0;
907 int num = 0;
908 int index;
909 u64 dir;
910 unsigned long name_off;
911 unsigned long elem_size;
912 unsigned long ptr;
913
914 p = fs_path_alloc_reversed();
915 if (!p)
916 return -ENOMEM;
917
918 tmp_path = alloc_path_for_send();
919 if (!tmp_path) {
920 fs_path_free(p);
921 return -ENOMEM;
922 }
923
924
925 if (found_key->type == BTRFS_INODE_REF_KEY) {
926 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
927 struct btrfs_inode_ref);
928 item = btrfs_item_nr(slot);
929 total = btrfs_item_size(eb, item);
930 elem_size = sizeof(*iref);
931 } else {
932 ptr = btrfs_item_ptr_offset(eb, slot);
933 total = btrfs_item_size_nr(eb, slot);
934 elem_size = sizeof(*extref);
935 }
936
937 while (cur < total) {
938 fs_path_reset(p);
939
940 if (found_key->type == BTRFS_INODE_REF_KEY) {
941 iref = (struct btrfs_inode_ref *)(ptr + cur);
942 name_len = btrfs_inode_ref_name_len(eb, iref);
943 name_off = (unsigned long)(iref + 1);
944 index = btrfs_inode_ref_index(eb, iref);
945 dir = found_key->offset;
946 } else {
947 extref = (struct btrfs_inode_extref *)(ptr + cur);
948 name_len = btrfs_inode_extref_name_len(eb, extref);
949 name_off = (unsigned long)&extref->name;
950 index = btrfs_inode_extref_index(eb, extref);
951 dir = btrfs_inode_extref_parent(eb, extref);
952 }
953
954 if (resolve) {
955 start = btrfs_ref_to_path(root, tmp_path, name_len,
956 name_off, eb, dir,
957 p->buf, p->buf_len);
958 if (IS_ERR(start)) {
959 ret = PTR_ERR(start);
960 goto out;
961 }
962 if (start < p->buf) {
963 /* overflow, try again with larger buffer */
964 ret = fs_path_ensure_buf(p,
965 p->buf_len + p->buf - start);
966 if (ret < 0)
967 goto out;
968 start = btrfs_ref_to_path(root, tmp_path,
969 name_len, name_off,
970 eb, dir,
971 p->buf, p->buf_len);
972 if (IS_ERR(start)) {
973 ret = PTR_ERR(start);
974 goto out;
975 }
976 if (unlikely(start < p->buf)) {
977 btrfs_err(root->fs_info,
978 "send: path ref buffer underflow for key (%llu %u %llu)",
979 found_key->objectid,
980 found_key->type,
981 found_key->offset);
982 ret = -EINVAL;
983 goto out;
984 }
985 }
986 p->start = start;
987 } else {
988 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
989 name_len);
990 if (ret < 0)
991 goto out;
992 }
993
994 cur += elem_size + name_len;
995 ret = iterate(num, dir, index, p, ctx);
996 if (ret)
997 goto out;
998 num++;
999 }
1000
1001 out:
1002 btrfs_free_path(tmp_path);
1003 fs_path_free(p);
1004 return ret;
1005 }
1006
1007 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
1008 const char *name, int name_len,
1009 const char *data, int data_len,
1010 u8 type, void *ctx);
1011
1012 /*
1013 * Helper function to iterate the entries in ONE btrfs_dir_item.
1014 * The iterate callback may return a non-zero value to stop iteration. This can
1015 * be a negative value for error codes or 1 to simply stop it.
1016 *
1017 * path must point to the dir item when called.
1018 */
1019 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1020 iterate_dir_item_t iterate, void *ctx)
1021 {
1022 int ret = 0;
1023 struct extent_buffer *eb;
1024 struct btrfs_item *item;
1025 struct btrfs_dir_item *di;
1026 struct btrfs_key di_key;
1027 char *buf = NULL;
1028 int buf_len;
1029 u32 name_len;
1030 u32 data_len;
1031 u32 cur;
1032 u32 len;
1033 u32 total;
1034 int slot;
1035 int num;
1036 u8 type;
1037
1038 /*
1039 * Start with a small buffer (1 page). If later we end up needing more
1040 * space, which can happen for xattrs on a fs with a leaf size greater
1041 * than the page size, attempt to increase the buffer. Typically xattr
1042 * values are small.
1043 */
1044 buf_len = PATH_MAX;
1045 buf = kmalloc(buf_len, GFP_KERNEL);
1046 if (!buf) {
1047 ret = -ENOMEM;
1048 goto out;
1049 }
1050
1051 eb = path->nodes[0];
1052 slot = path->slots[0];
1053 item = btrfs_item_nr(slot);
1054 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1055 cur = 0;
1056 len = 0;
1057 total = btrfs_item_size(eb, item);
1058
1059 num = 0;
1060 while (cur < total) {
1061 name_len = btrfs_dir_name_len(eb, di);
1062 data_len = btrfs_dir_data_len(eb, di);
1063 type = btrfs_dir_type(eb, di);
1064 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1065
1066 if (type == BTRFS_FT_XATTR) {
1067 if (name_len > XATTR_NAME_MAX) {
1068 ret = -ENAMETOOLONG;
1069 goto out;
1070 }
1071 if (name_len + data_len >
1072 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1073 ret = -E2BIG;
1074 goto out;
1075 }
1076 } else {
1077 /*
1078 * Path too long
1079 */
1080 if (name_len + data_len > PATH_MAX) {
1081 ret = -ENAMETOOLONG;
1082 goto out;
1083 }
1084 }
1085
1086 if (name_len + data_len > buf_len) {
1087 buf_len = name_len + data_len;
1088 if (is_vmalloc_addr(buf)) {
1089 vfree(buf);
1090 buf = NULL;
1091 } else {
1092 char *tmp = krealloc(buf, buf_len,
1093 GFP_KERNEL | __GFP_NOWARN);
1094
1095 if (!tmp)
1096 kfree(buf);
1097 buf = tmp;
1098 }
1099 if (!buf) {
1100 buf = kvmalloc(buf_len, GFP_KERNEL);
1101 if (!buf) {
1102 ret = -ENOMEM;
1103 goto out;
1104 }
1105 }
1106 }
1107
1108 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1109 name_len + data_len);
1110
1111 len = sizeof(*di) + name_len + data_len;
1112 di = (struct btrfs_dir_item *)((char *)di + len);
1113 cur += len;
1114
1115 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1116 data_len, type, ctx);
1117 if (ret < 0)
1118 goto out;
1119 if (ret) {
1120 ret = 0;
1121 goto out;
1122 }
1123
1124 num++;
1125 }
1126
1127 out:
1128 kvfree(buf);
1129 return ret;
1130 }
1131
1132 static int __copy_first_ref(int num, u64 dir, int index,
1133 struct fs_path *p, void *ctx)
1134 {
1135 int ret;
1136 struct fs_path *pt = ctx;
1137
1138 ret = fs_path_copy(pt, p);
1139 if (ret < 0)
1140 return ret;
1141
1142 /* we want the first only */
1143 return 1;
1144 }
1145
1146 /*
1147 * Retrieve the first path of an inode. If an inode has more than one
1148 * ref/hardlink, this is ignored.
1149 */
1150 static int get_inode_path(struct btrfs_root *root,
1151 u64 ino, struct fs_path *path)
1152 {
1153 int ret;
1154 struct btrfs_key key, found_key;
1155 struct btrfs_path *p;
1156
1157 p = alloc_path_for_send();
1158 if (!p)
1159 return -ENOMEM;
1160
1161 fs_path_reset(path);
1162
1163 key.objectid = ino;
1164 key.type = BTRFS_INODE_REF_KEY;
1165 key.offset = 0;
1166
1167 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1168 if (ret < 0)
1169 goto out;
1170 if (ret) {
1171 ret = 1;
1172 goto out;
1173 }
1174 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1175 if (found_key.objectid != ino ||
1176 (found_key.type != BTRFS_INODE_REF_KEY &&
1177 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1178 ret = -ENOENT;
1179 goto out;
1180 }
1181
1182 ret = iterate_inode_ref(root, p, &found_key, 1,
1183 __copy_first_ref, path);
1184 if (ret < 0)
1185 goto out;
1186 ret = 0;
1187
1188 out:
1189 btrfs_free_path(p);
1190 return ret;
1191 }
1192
1193 struct backref_ctx {
1194 struct send_ctx *sctx;
1195
1196 /* number of total found references */
1197 u64 found;
1198
1199 /*
1200 * used for clones found in send_root. clones found behind cur_objectid
1201 * and cur_offset are not considered as allowed clones.
1202 */
1203 u64 cur_objectid;
1204 u64 cur_offset;
1205
1206 /* may be truncated in case it's the last extent in a file */
1207 u64 extent_len;
1208
1209 /* data offset in the file extent item */
1210 u64 data_offset;
1211
1212 /* Just to check for bugs in backref resolving */
1213 int found_itself;
1214 };
1215
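/*
 * Comparison callbacks for bsearch()/sort() on the clone_roots array,
 * ordered by root objectid.
 */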
1216 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1217 {
1218 u64 root = (u64)(uintptr_t)key;
1219 struct clone_root *cr = (struct clone_root *)elt;
1220
1221 if (root < cr->root->root_key.objectid)
1222 return -1;
1223 if (root > cr->root->root_key.objectid)
1224 return 1;
1225 return 0;
1226 }
1227
1228 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1229 {
1230 struct clone_root *cr1 = (struct clone_root *)e1;
1231 struct clone_root *cr2 = (struct clone_root *)e2;
1232
1233 if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
1234 return -1;
1235 if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
1236 return 1;
1237 return 0;
1238 }
1239
1240 /*
1241 * Called for every backref that is found for the current extent.
1242 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1243 */
1244 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1245 {
1246 struct backref_ctx *bctx = ctx_;
1247 struct clone_root *found;
1248
1249 /* First check if the root is in the list of accepted clone sources */
1250 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1251 bctx->sctx->clone_roots_cnt,
1252 sizeof(struct clone_root),
1253 __clone_root_cmp_bsearch);
1254 if (!found)
1255 return 0;
1256
1257 if (found->root == bctx->sctx->send_root &&
1258 ino == bctx->cur_objectid &&
1259 offset == bctx->cur_offset) {
1260 bctx->found_itself = 1;
1261 }
1262
1263 /*
1264 * Make sure we don't consider clones from send_root that are
1265 * behind the current inode/offset.
1266 */
1267 if (found->root == bctx->sctx->send_root) {
1268 /*
1269 * If the source inode was not yet processed we can't issue a
1270 * clone operation, as the source extent does not exist yet at
1271 * the destination of the stream.
1272 */
1273 if (ino > bctx->cur_objectid)
1274 return 0;
1275 /*
1276 * We clone from the inode currently being sent as long as the
1277 * source extent is already processed, otherwise we could try
1278 * to clone from an extent that does not exist yet at the
1279 * destination of the stream.
1280 */
1281 if (ino == bctx->cur_objectid &&
1282 offset + bctx->extent_len >
1283 bctx->sctx->cur_inode_next_write_offset)
1284 return 0;
1285 }
1286
1287 bctx->found++;
1288 found->found_refs++;
1289 if (ino < found->ino) {
1290 found->ino = ino;
1291 found->offset = offset;
1292 } else if (found->ino == ino) {
1293 /*
1294 * same extent found more than once in the same file.
1295 */
1296 if (found->offset > offset + bctx->extent_len)
1297 found->offset = offset;
1298 }
1299
1300 return 0;
1301 }
1302
1303 /*
1304 * Given an inode, offset and extent item, it finds a good clone for a clone
1305 * instruction. Returns -ENOENT when none could be found. The function makes
1306 * sure that the returned clone is usable at the point where sending is at the
1307 * moment. This means, that no clones are accepted which lie behind the current
1308 * inode+offset.
1309 *
1310 * path must point to the extent item when called.
1311 */
1312 static int find_extent_clone(struct send_ctx *sctx,
1313 struct btrfs_path *path,
1314 u64 ino, u64 data_offset,
1315 u64 ino_size,
1316 struct clone_root **found)
1317 {
1318 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1319 int ret;
1320 int extent_type;
1321 u64 logical;
1322 u64 disk_byte;
1323 u64 num_bytes;
1324 u64 extent_item_pos;
1325 u64 flags = 0;
1326 struct btrfs_file_extent_item *fi;
1327 struct extent_buffer *eb = path->nodes[0];
1328 struct backref_ctx *backref_ctx = NULL;
1329 struct clone_root *cur_clone_root;
1330 struct btrfs_key found_key;
1331 struct btrfs_path *tmp_path;
1332 struct btrfs_extent_item *ei;
1333 int compressed;
1334 u32 i;
1335
1336 tmp_path = alloc_path_for_send();
1337 if (!tmp_path)
1338 return -ENOMEM;
1339
1340 /* We only use this path under the commit sem */
1341 tmp_path->need_commit_sem = 0;
1342
1343 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1344 if (!backref_ctx) {
1345 ret = -ENOMEM;
1346 goto out;
1347 }
1348
1349 if (data_offset >= ino_size) {
1350 /*
1351 * There may be extents that lie beyond the file's size.
1352 * I at least had this in combination with snapshotting while
1353 * writing large files.
1354 */
1355 ret = 0;
1356 goto out;
1357 }
1358
1359 fi = btrfs_item_ptr(eb, path->slots[0],
1360 struct btrfs_file_extent_item);
1361 extent_type = btrfs_file_extent_type(eb, fi);
1362 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1363 ret = -ENOENT;
1364 goto out;
1365 }
1366 compressed = btrfs_file_extent_compression(eb, fi);
1367
1368 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1369 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1370 if (disk_byte == 0) {
1371 ret = -ENOENT;
1372 goto out;
1373 }
1374 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1375
1376 down_read(&fs_info->commit_root_sem);
1377 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1378 &found_key, &flags);
1379 up_read(&fs_info->commit_root_sem);
1380
1381 if (ret < 0)
1382 goto out;
1383 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1384 ret = -EIO;
1385 goto out;
1386 }
1387
1388 ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
1389 struct btrfs_extent_item);
1390 /*
1391 * Backreference walking (iterate_extent_inodes() below) is currently
1392 * too expensive when an extent has a large number of references, both
1393 * in time spent and used memory. So for now just fallback to write
1394 * operations instead of clone operations when an extent has more than
1395 * a certain amount of references.
1396 */
1397 if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
1398 ret = -ENOENT;
1399 goto out;
1400 }
1401 btrfs_release_path(tmp_path);
1402
1403 /*
1404 * Setup the clone roots.
1405 */
1406 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1407 cur_clone_root = sctx->clone_roots + i;
1408 cur_clone_root->ino = (u64)-1;
1409 cur_clone_root->offset = 0;
1410 cur_clone_root->found_refs = 0;
1411 }
1412
1413 backref_ctx->sctx = sctx;
1414 backref_ctx->found = 0;
1415 backref_ctx->cur_objectid = ino;
1416 backref_ctx->cur_offset = data_offset;
1417 backref_ctx->found_itself = 0;
1418 backref_ctx->extent_len = num_bytes;
1419 /*
1420 * For non-compressed extents iterate_extent_inodes() gives us extent
1421 * offsets that already take into account the data offset, but not for
1422 * compressed extents, since the offset is logical and not relative to
1423 * the physical extent locations. We must take this into account to
1424 * avoid sending clone offsets that go beyond the source file's size,
1425 * which would result in the clone ioctl failing with -EINVAL on the
1426 * receiving end.
1427 */
1428 if (compressed == BTRFS_COMPRESS_NONE)
1429 backref_ctx->data_offset = 0;
1430 else
1431 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
1432
1433 /*
1434 * The last extent of a file may be too large due to page alignment.
1435 * We need to adjust extent_len in this case so that the checks in
1436 * __iterate_backrefs work.
1437 */
1438 if (data_offset + num_bytes >= ino_size)
1439 backref_ctx->extent_len = ino_size - data_offset;
1440
1441 /*
1442 * Now collect all backrefs.
1443 */
1444 if (compressed == BTRFS_COMPRESS_NONE)
1445 extent_item_pos = logical - found_key.objectid;
1446 else
1447 extent_item_pos = 0;
1448 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1449 extent_item_pos, 1, __iterate_backrefs,
1450 backref_ctx, false);
1451
1452 if (ret < 0)
1453 goto out;
1454
1455 if (!backref_ctx->found_itself) {
1456 /* found a bug in backref code? */
1457 ret = -EIO;
1458 btrfs_err(fs_info,
1459 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1460 ino, data_offset, disk_byte, found_key.objectid);
1461 goto out;
1462 }
1463
1464 btrfs_debug(fs_info,
1465 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1466 data_offset, ino, num_bytes, logical);
1467
1468 if (!backref_ctx->found)
1469 btrfs_debug(fs_info, "no clones found");
1470
1471 cur_clone_root = NULL;
1472 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1473 if (sctx->clone_roots[i].found_refs) {
1474 if (!cur_clone_root)
1475 cur_clone_root = sctx->clone_roots + i;
1476 else if (sctx->clone_roots[i].root == sctx->send_root)
1477 /* prefer clones from send_root over others */
1478 cur_clone_root = sctx->clone_roots + i;
1479 }
1480
1481 }
1482
1483 if (cur_clone_root) {
1484 *found = cur_clone_root;
1485 ret = 0;
1486 } else {
1487 ret = -ENOENT;
1488 }
1489
1490 out:
1491 btrfs_free_path(tmp_path);
1492 kfree(backref_ctx);
1493 return ret;
1494 }
1495
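/*
 * Read the target of symlink @ino into @dest. The target is stored as an
 * inline, uncompressed file extent at file offset 0.
 */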
1496 static int read_symlink(struct btrfs_root *root,
1497 u64 ino,
1498 struct fs_path *dest)
1499 {
1500 int ret;
1501 struct btrfs_path *path;
1502 struct btrfs_key key;
1503 struct btrfs_file_extent_item *ei;
1504 u8 type;
1505 u8 compression;
1506 unsigned long off;
1507 int len;
1508
1509 path = alloc_path_for_send();
1510 if (!path)
1511 return -ENOMEM;
1512
1513 key.objectid = ino;
1514 key.type = BTRFS_EXTENT_DATA_KEY;
1515 key.offset = 0;
1516 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1517 if (ret < 0)
1518 goto out;
1519 if (ret) {
1520 /*
1521 * An empty symlink inode. Can happen in rare error paths when
1522 * creating a symlink (transaction committed before the inode
1523 * eviction handler removed the symlink inode items and a crash
1524 * happened in between or the subvol was snapshotted in between).
1525 * Print an informative message to dmesg/syslog so that the user
1526 * can delete the symlink.
1527 */
1528 btrfs_err(root->fs_info,
1529 "Found empty symlink inode %llu at root %llu",
1530 ino, root->root_key.objectid);
1531 ret = -EIO;
1532 goto out;
1533 }
1534
1535 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1536 struct btrfs_file_extent_item);
1537 type = btrfs_file_extent_type(path->nodes[0], ei);
1538 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1539 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1540 BUG_ON(compression);
1541
1542 off = btrfs_file_extent_inline_start(ei);
1543 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1544
1545 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1546
1547 out:
1548 btrfs_free_path(path);
1549 return ret;
1550 }
1551
1552 /*
1553 * Helper function to generate a file name that is unique in the root of
1554 * send_root and parent_root. This is used to generate names for orphan inodes.
1555 */
1556 static int gen_unique_name(struct send_ctx *sctx,
1557 u64 ino, u64 gen,
1558 struct fs_path *dest)
1559 {
1560 int ret = 0;
1561 struct btrfs_path *path;
1562 struct btrfs_dir_item *di;
1563 char tmp[64];
1564 int len;
1565 u64 idx = 0;
1566
1567 path = alloc_path_for_send();
1568 if (!path)
1569 return -ENOMEM;
1570
1571 while (1) {
1572 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1573 ino, gen, idx);
1574 ASSERT(len < sizeof(tmp));
1575
1576 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1577 path, BTRFS_FIRST_FREE_OBJECTID,
1578 tmp, strlen(tmp), 0);
1579 btrfs_release_path(path);
1580 if (IS_ERR(di)) {
1581 ret = PTR_ERR(di);
1582 goto out;
1583 }
1584 if (di) {
1585 /* not unique, try again */
1586 idx++;
1587 continue;
1588 }
1589
1590 if (!sctx->parent_root) {
1591 /* unique */
1592 ret = 0;
1593 break;
1594 }
1595
1596 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1597 path, BTRFS_FIRST_FREE_OBJECTID,
1598 tmp, strlen(tmp), 0);
1599 btrfs_release_path(path);
1600 if (IS_ERR(di)) {
1601 ret = PTR_ERR(di);
1602 goto out;
1603 }
1604 if (di) {
1605 /* not unique, try again */
1606 idx++;
1607 continue;
1608 }
1609 /* unique */
1610 break;
1611 }
1612
1613 ret = fs_path_add(dest, tmp, strlen(tmp));
1614
1615 out:
1616 btrfs_free_path(path);
1617 return ret;
1618 }
1619
1620 enum inode_state {
1621 inode_state_no_change,
1622 inode_state_will_create,
1623 inode_state_did_create,
1624 inode_state_will_delete,
1625 inode_state_did_delete,
1626 };
1627
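/*
 * Classify inode (@ino, @gen) relative to the current send progress by
 * looking it up in both the send root and the parent root. Returns one of
 * the inode_state values above, or -ENOENT if no matching inode/generation
 * exists in either root.
 */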
1628 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1629 {
1630 int ret;
1631 int left_ret;
1632 int right_ret;
1633 u64 left_gen;
1634 u64 right_gen;
1635
1636 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1637 NULL, NULL);
1638 if (ret < 0 && ret != -ENOENT)
1639 goto out;
1640 left_ret = ret;
1641
1642 if (!sctx->parent_root) {
1643 right_ret = -ENOENT;
1644 } else {
1645 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1646 NULL, NULL, NULL, NULL);
1647 if (ret < 0 && ret != -ENOENT)
1648 goto out;
1649 right_ret = ret;
1650 }
1651
1652 if (!left_ret && !right_ret) {
1653 if (left_gen == gen && right_gen == gen) {
1654 ret = inode_state_no_change;
1655 } else if (left_gen == gen) {
1656 if (ino < sctx->send_progress)
1657 ret = inode_state_did_create;
1658 else
1659 ret = inode_state_will_create;
1660 } else if (right_gen == gen) {
1661 if (ino < sctx->send_progress)
1662 ret = inode_state_did_delete;
1663 else
1664 ret = inode_state_will_delete;
1665 } else {
1666 ret = -ENOENT;
1667 }
1668 } else if (!left_ret) {
1669 if (left_gen == gen) {
1670 if (ino < sctx->send_progress)
1671 ret = inode_state_did_create;
1672 else
1673 ret = inode_state_will_create;
1674 } else {
1675 ret = -ENOENT;
1676 }
1677 } else if (!right_ret) {
1678 if (right_gen == gen) {
1679 if (ino < sctx->send_progress)
1680 ret = inode_state_did_delete;
1681 else
1682 ret = inode_state_will_delete;
1683 } else {
1684 ret = -ENOENT;
1685 }
1686 } else {
1687 ret = -ENOENT;
1688 }
1689
1690 out:
1691 return ret;
1692 }
1693
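/*
 * Return 1 if the inode exists at the current point of processing from the
 * receiver's point of view: it was either already created, or it existed in
 * the parent snapshot and was not deleted yet.
 */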
1694 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1695 {
1696 int ret;
1697
1698 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1699 return 1;
1700
1701 ret = get_cur_inode_state(sctx, ino, gen);
1702 if (ret < 0)
1703 goto out;
1704
1705 if (ret == inode_state_no_change ||
1706 ret == inode_state_did_create ||
1707 ret == inode_state_will_delete)
1708 ret = 1;
1709 else
1710 ret = 0;
1711
1712 out:
1713 return ret;
1714 }
1715
1716 /*
1717 * Helper function to lookup a dir item in a dir.
1718 */
1719 static int lookup_dir_item_inode(struct btrfs_root *root,
1720 u64 dir, const char *name, int name_len,
1721 u64 *found_inode,
1722 u8 *found_type)
1723 {
1724 int ret = 0;
1725 struct btrfs_dir_item *di;
1726 struct btrfs_key key;
1727 struct btrfs_path *path;
1728
1729 path = alloc_path_for_send();
1730 if (!path)
1731 return -ENOMEM;
1732
1733 di = btrfs_lookup_dir_item(NULL, root, path,
1734 dir, name, name_len, 0);
1735 if (IS_ERR_OR_NULL(di)) {
1736 ret = di ? PTR_ERR(di) : -ENOENT;
1737 goto out;
1738 }
1739 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1740 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1741 ret = -ENOENT;
1742 goto out;
1743 }
1744 *found_inode = key.objectid;
1745 *found_type = btrfs_dir_type(path->nodes[0], di);
1746
1747 out:
1748 btrfs_free_path(path);
1749 return ret;
1750 }
1751
1752 /*
1753 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1754 * generation of the parent dir and the name of the dir entry.
1755 */
1756 static int get_first_ref(struct btrfs_root *root, u64 ino,
1757 u64 *dir, u64 *dir_gen, struct fs_path *name)
1758 {
1759 int ret;
1760 struct btrfs_key key;
1761 struct btrfs_key found_key;
1762 struct btrfs_path *path;
1763 int len;
1764 u64 parent_dir;
1765
1766 path = alloc_path_for_send();
1767 if (!path)
1768 return -ENOMEM;
1769
1770 key.objectid = ino;
1771 key.type = BTRFS_INODE_REF_KEY;
1772 key.offset = 0;
1773
1774 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1775 if (ret < 0)
1776 goto out;
1777 if (!ret)
1778 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1779 path->slots[0]);
1780 if (ret || found_key.objectid != ino ||
1781 (found_key.type != BTRFS_INODE_REF_KEY &&
1782 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1783 ret = -ENOENT;
1784 goto out;
1785 }
1786
1787 if (found_key.type == BTRFS_INODE_REF_KEY) {
1788 struct btrfs_inode_ref *iref;
1789 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1790 struct btrfs_inode_ref);
1791 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1792 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1793 (unsigned long)(iref + 1),
1794 len);
1795 parent_dir = found_key.offset;
1796 } else {
1797 struct btrfs_inode_extref *extref;
1798 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1799 struct btrfs_inode_extref);
1800 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1801 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1802 (unsigned long)&extref->name, len);
1803 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1804 }
1805 if (ret < 0)
1806 goto out;
1807 btrfs_release_path(path);
1808
1809 if (dir_gen) {
1810 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1811 NULL, NULL, NULL);
1812 if (ret < 0)
1813 goto out;
1814 }
1815
1816 *dir = parent_dir;
1817
1818 out:
1819 btrfs_free_path(path);
1820 return ret;
1821 }
1822
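/*
 * Check whether (@dir, @name) is the first reference of inode @ino, i.e.
 * the one returned by get_first_ref().
 */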
1823 static int is_first_ref(struct btrfs_root *root,
1824 u64 ino, u64 dir,
1825 const char *name, int name_len)
1826 {
1827 int ret;
1828 struct fs_path *tmp_name;
1829 u64 tmp_dir;
1830
1831 tmp_name = fs_path_alloc();
1832 if (!tmp_name)
1833 return -ENOMEM;
1834
1835 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1836 if (ret < 0)
1837 goto out;
1838
1839 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1840 ret = 0;
1841 goto out;
1842 }
1843
1844 ret = !memcmp(tmp_name->start, name, name_len);
1845
1846 out:
1847 fs_path_free(tmp_name);
1848 return ret;
1849 }
1850
1851 /*
1852 * Used by process_recorded_refs to determine if a new ref would overwrite an
1853 * already existing ref. In case it detects an overwrite, it returns the
1854 * inode/gen in who_ino/who_gen.
1855 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1856 * to make sure later references to the overwritten inode are possible.
1857 * Orphanizing is however only required for the first ref of an inode.
1858 * process_recorded_refs does an additional is_first_ref check to see if
1859 * orphanizing is really required.
1860 */
1861 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1862 const char *name, int name_len,
1863 u64 *who_ino, u64 *who_gen, u64 *who_mode)
1864 {
1865 int ret = 0;
1866 u64 gen;
1867 u64 other_inode = 0;
1868 u8 other_type = 0;
1869
1870 if (!sctx->parent_root)
1871 goto out;
1872
1873 ret = is_inode_existent(sctx, dir, dir_gen);
1874 if (ret <= 0)
1875 goto out;
1876
1877 /*
1878 * If we have a parent root we need to verify that the parent dir was
1879 * not deleted and then re-created, if it was then we have no overwrite
1880 * and we can just unlink this entry.
1881 */
1882 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1883 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1884 NULL, NULL, NULL);
1885 if (ret < 0 && ret != -ENOENT)
1886 goto out;
1887 if (ret) {
1888 ret = 0;
1889 goto out;
1890 }
1891 if (gen != dir_gen)
1892 goto out;
1893 }
1894
1895 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1896 &other_inode, &other_type);
1897 if (ret < 0 && ret != -ENOENT)
1898 goto out;
1899 if (ret) {
1900 ret = 0;
1901 goto out;
1902 }
1903
1904 /*
1905 * Check if the overwritten ref was already processed. If yes, the ref
1906 * was already unlinked/moved, so we can safely assume that we will not
1907 * overwrite anything at this point in time.
1908 */
1909 if (other_inode > sctx->send_progress ||
1910 is_waiting_for_move(sctx, other_inode)) {
1911 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1912 who_gen, who_mode, NULL, NULL, NULL);
1913 if (ret < 0)
1914 goto out;
1915
1916 ret = 1;
1917 *who_ino = other_inode;
1918 } else {
1919 ret = 0;
1920 }
1921
1922 out:
1923 return ret;
1924 }
1925
1926 /*
1927 * Checks if the ref was overwritten by an already processed inode. This is
1928 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1929 * thus the orphan name needs to be used.
1930 * process_recorded_refs also uses it to avoid unlinking of refs that were
1931 * overwritten.
1932 */
1933 static int did_overwrite_ref(struct send_ctx *sctx,
1934 u64 dir, u64 dir_gen,
1935 u64 ino, u64 ino_gen,
1936 const char *name, int name_len)
1937 {
1938 int ret = 0;
1939 u64 gen;
1940 u64 ow_inode;
1941 u8 other_type;
1942
1943 if (!sctx->parent_root)
1944 goto out;
1945
1946 ret = is_inode_existent(sctx, dir, dir_gen);
1947 if (ret <= 0)
1948 goto out;
1949
1950 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1951 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1952 NULL, NULL, NULL);
1953 if (ret < 0 && ret != -ENOENT)
1954 goto out;
1955 if (ret) {
1956 ret = 0;
1957 goto out;
1958 }
1959 if (gen != dir_gen)
1960 goto out;
1961 }
1962
1963 /* check if the ref was overwritten by another ref */
1964 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1965 &ow_inode, &other_type);
1966 if (ret < 0 && ret != -ENOENT)
1967 goto out;
1968 if (ret) {
1969 /* was never and will never be overwritten */
1970 ret = 0;
1971 goto out;
1972 }
1973
1974 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1975 NULL, NULL);
1976 if (ret < 0)
1977 goto out;
1978
1979 if (ow_inode == ino && gen == ino_gen) {
1980 ret = 0;
1981 goto out;
1982 }
1983
1984 /*
1985 * We know that it is or will be overwritten. Check this now.
1986 * The current inode being processed might have been the one that caused
1987 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1988 * the current inode being processed.
1989 */
1990 if ((ow_inode < sctx->send_progress) ||
1991 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1992 gen == sctx->cur_inode_gen))
1993 ret = 1;
1994 else
1995 ret = 0;
1996
1997 out:
1998 return ret;
1999 }
2000
2001 /*
2002 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
2003 * that got overwritten. This is used by process_recorded_refs to determine
2004 * if it has to use the path as returned by get_cur_path or the orphan name.
2005 */
2006 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
2007 {
2008 int ret = 0;
2009 struct fs_path *name = NULL;
2010 u64 dir;
2011 u64 dir_gen;
2012
2013 if (!sctx->parent_root)
2014 goto out;
2015
2016 name = fs_path_alloc();
2017 if (!name)
2018 return -ENOMEM;
2019
2020 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
2021 if (ret < 0)
2022 goto out;
2023
2024 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
2025 name->start, fs_path_len(name));
2026
2027 out:
2028 fs_path_free(name);
2029 return ret;
2030 }
2031
2032 /*
2033 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
2034 * so we need to do some special handling in case we have clashes. This function
2035 * takes care of this with the help of name_cache_entry::radix_list.
2036 * In case of error, nce is kfreed.
2037 */
2038 static int name_cache_insert(struct send_ctx *sctx,
2039 struct name_cache_entry *nce)
2040 {
2041 int ret = 0;
2042 struct list_head *nce_head;
2043
2044 nce_head = radix_tree_lookup(&sctx->name_cache,
2045 (unsigned long)nce->ino);
2046 if (!nce_head) {
2047 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2048 if (!nce_head) {
2049 kfree(nce);
2050 return -ENOMEM;
2051 }
2052 INIT_LIST_HEAD(nce_head);
2053
2054 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2055 if (ret < 0) {
2056 kfree(nce_head);
2057 kfree(nce);
2058 return ret;
2059 }
2060 }
2061 list_add_tail(&nce->radix_list, nce_head);
2062 list_add_tail(&nce->list, &sctx->name_cache_list);
2063 sctx->name_cache_size++;
2064
2065 return ret;
2066 }
2067
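/*
 * Remove a name cache entry from the radix tree and the use list. The entry
 * itself is not freed here; if its per-inode list head became empty, the list
 * head is removed from the radix tree and freed.
 */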
2068 static void name_cache_delete(struct send_ctx *sctx,
2069 struct name_cache_entry *nce)
2070 {
2071 struct list_head *nce_head;
2072
2073 nce_head = radix_tree_lookup(&sctx->name_cache,
2074 (unsigned long)nce->ino);
2075 if (!nce_head) {
2076 btrfs_err(sctx->send_root->fs_info,
2077 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2078 nce->ino, sctx->name_cache_size);
2079 }
2080
2081 list_del(&nce->radix_list);
2082 list_del(&nce->list);
2083 sctx->name_cache_size--;
2084
2085 /*
2086 * We may not get to the final release of nce_head if the lookup fails
2087 */
2088 if (nce_head && list_empty(nce_head)) {
2089 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2090 kfree(nce_head);
2091 }
2092 }
2093
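/*
 * Look up a name cache entry for the given inode and generation. Returns the
 * entry if it exists, NULL otherwise.
 */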
2094 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2095 u64 ino, u64 gen)
2096 {
2097 struct list_head *nce_head;
2098 struct name_cache_entry *cur;
2099
2100 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2101 if (!nce_head)
2102 return NULL;
2103
2104 list_for_each_entry(cur, nce_head, radix_list) {
2105 if (cur->ino == ino && cur->gen == gen)
2106 return cur;
2107 }
2108 return NULL;
2109 }
2110
2111 /*
2112 * Removes the entry from the list and adds it back to the end. This marks the
2113 * entry as recently used so that name_cache_clean_unused does not remove it.
2114 */
2115 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
2116 {
2117 list_del(&nce->list);
2118 list_add_tail(&nce->list, &sctx->name_cache_list);
2119 }
2120
2121 /*
2122 * Remove some entries from the beginning of name_cache_list.
2123 */
2124 static void name_cache_clean_unused(struct send_ctx *sctx)
2125 {
2126 struct name_cache_entry *nce;
2127
2128 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2129 return;
2130
2131 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2132 nce = list_entry(sctx->name_cache_list.next,
2133 struct name_cache_entry, list);
2134 name_cache_delete(sctx, nce);
2135 kfree(nce);
2136 }
2137 }
2138
2139 static void name_cache_free(struct send_ctx *sctx)
2140 {
2141 struct name_cache_entry *nce;
2142
2143 while (!list_empty(&sctx->name_cache_list)) {
2144 nce = list_entry(sctx->name_cache_list.next,
2145 struct name_cache_entry, list);
2146 name_cache_delete(sctx, nce);
2147 kfree(nce);
2148 }
2149 }
2150
2151 /*
2152 * Used by get_cur_path for each ref up to the root.
2153 * Returns 0 if it succeeded.
2154 * Returns 1 if the inode is not existent or got overwritten. In that case, the
2155 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2156 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2157 * Returns <0 in case of error.
2158 */
2159 static int __get_cur_name_and_parent(struct send_ctx *sctx,
2160 u64 ino, u64 gen,
2161 u64 *parent_ino,
2162 u64 *parent_gen,
2163 struct fs_path *dest)
2164 {
2165 int ret;
2166 int nce_ret;
2167 struct name_cache_entry *nce = NULL;
2168
2169 /*
2170 * First check if we already did a call to this function with the same
2171 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2172 * return the cached result.
2173 */
2174 nce = name_cache_search(sctx, ino, gen);
2175 if (nce) {
2176 if (ino < sctx->send_progress && nce->need_later_update) {
2177 name_cache_delete(sctx, nce);
2178 kfree(nce);
2179 nce = NULL;
2180 } else {
2181 name_cache_used(sctx, nce);
2182 *parent_ino = nce->parent_ino;
2183 *parent_gen = nce->parent_gen;
2184 ret = fs_path_add(dest, nce->name, nce->name_len);
2185 if (ret < 0)
2186 goto out;
2187 ret = nce->ret;
2188 goto out;
2189 }
2190 }
2191
2192 /*
2193 * If the inode is not existent yet, add the orphan name and return 1.
2194 * This should only happen for the parent dir that we determine in
2195 * __record_new_ref
2196 */
2197 ret = is_inode_existent(sctx, ino, gen);
2198 if (ret < 0)
2199 goto out;
2200
2201 if (!ret) {
2202 ret = gen_unique_name(sctx, ino, gen, dest);
2203 if (ret < 0)
2204 goto out;
2205 ret = 1;
2206 goto out_cache;
2207 }
2208
2209 /*
2210 * Depending on whether the inode was already processed or not, use
2211 * send_root or parent_root for ref lookup.
2212 */
2213 if (ino < sctx->send_progress)
2214 ret = get_first_ref(sctx->send_root, ino,
2215 parent_ino, parent_gen, dest);
2216 else
2217 ret = get_first_ref(sctx->parent_root, ino,
2218 parent_ino, parent_gen, dest);
2219 if (ret < 0)
2220 goto out;
2221
2222 /*
2223 * Check if the ref was overwritten by an inode's ref that was processed
2224 * earlier. If yes, treat as orphan and return 1.
2225 */
2226 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2227 dest->start, dest->end - dest->start);
2228 if (ret < 0)
2229 goto out;
2230 if (ret) {
2231 fs_path_reset(dest);
2232 ret = gen_unique_name(sctx, ino, gen, dest);
2233 if (ret < 0)
2234 goto out;
2235 ret = 1;
2236 }
2237
2238 out_cache:
2239 /*
2240 * Store the result of the lookup in the name cache.
2241 */
2242 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2243 if (!nce) {
2244 ret = -ENOMEM;
2245 goto out;
2246 }
2247
2248 nce->ino = ino;
2249 nce->gen = gen;
2250 nce->parent_ino = *parent_ino;
2251 nce->parent_gen = *parent_gen;
2252 nce->name_len = fs_path_len(dest);
2253 nce->ret = ret;
2254 strcpy(nce->name, dest->start);
2255
2256 if (ino < sctx->send_progress)
2257 nce->need_later_update = 0;
2258 else
2259 nce->need_later_update = 1;
2260
2261 nce_ret = name_cache_insert(sctx, nce);
2262 if (nce_ret < 0)
2263 ret = nce_ret;
2264 name_cache_clean_unused(sctx);
2265
2266 out:
2267 return ret;
2268 }
2269
2270 /*
2271 * Magic happens here. This function returns the first ref to an inode as it
2272 * would look like while receiving the stream at this point in time.
2273 * We walk the path up to the root. For every inode in between, we check if it
2274 * was already processed/sent. If yes, we continue with the parent as found
2275 * in send_root. If not, we continue with the parent as found in parent_root.
2276 * If we encounter an inode that was deleted at this point in time, we use the
2277 * inode's "orphan" name instead of the real name and stop. Same with new inodes
2278 * that were not created yet and overwritten inodes/refs.
2279 *
2280 * When do we have orphan inodes:
2281 * 1. When an inode is freshly created and thus no valid refs are available yet
2282 * 2. When a directory lost all its refs (deleted) but still has dir items
2283 * inside which were not processed yet (pending for move/delete). If anyone
2284 * tried to get the path to the dir items, it would get a path inside that
2285 * orphan directory.
2286 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2287 * of an unprocessed inode. If in that case the first ref would be
2288 * overwritten, the overwritten inode gets "orphanized". Later when we
2289 * process this overwritten inode, it is restored at a new place by moving
2290 * the orphan inode.
2291 *
2292 * sctx->send_progress tells this function at which point in time receiving
2293 * would be.
2294 */
2295 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2296 struct fs_path *dest)
2297 {
2298 int ret = 0;
2299 struct fs_path *name = NULL;
2300 u64 parent_inode = 0;
2301 u64 parent_gen = 0;
2302 int stop = 0;
2303
2304 name = fs_path_alloc();
2305 if (!name) {
2306 ret = -ENOMEM;
2307 goto out;
2308 }
2309
2310 dest->reversed = 1;
2311 fs_path_reset(dest);
2312
2313 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2314 struct waiting_dir_move *wdm;
2315
2316 fs_path_reset(name);
2317
2318 if (is_waiting_for_rm(sctx, ino, gen)) {
2319 ret = gen_unique_name(sctx, ino, gen, name);
2320 if (ret < 0)
2321 goto out;
2322 ret = fs_path_add_path(dest, name);
2323 break;
2324 }
2325
2326 wdm = get_waiting_dir_move(sctx, ino);
2327 if (wdm && wdm->orphanized) {
2328 ret = gen_unique_name(sctx, ino, gen, name);
2329 stop = 1;
2330 } else if (wdm) {
2331 ret = get_first_ref(sctx->parent_root, ino,
2332 &parent_inode, &parent_gen, name);
2333 } else {
2334 ret = __get_cur_name_and_parent(sctx, ino, gen,
2335 &parent_inode,
2336 &parent_gen, name);
2337 if (ret)
2338 stop = 1;
2339 }
2340
2341 if (ret < 0)
2342 goto out;
2343
2344 ret = fs_path_add_path(dest, name);
2345 if (ret < 0)
2346 goto out;
2347
2348 ino = parent_inode;
2349 gen = parent_gen;
2350 }
2351
2352 out:
2353 fs_path_free(name);
2354 if (!ret)
2355 fs_path_unreverse(dest);
2356 return ret;
2357 }
2358
2359 /*
2360 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2361 */
2362 static int send_subvol_begin(struct send_ctx *sctx)
2363 {
2364 int ret;
2365 struct btrfs_root *send_root = sctx->send_root;
2366 struct btrfs_root *parent_root = sctx->parent_root;
2367 struct btrfs_path *path;
2368 struct btrfs_key key;
2369 struct btrfs_root_ref *ref;
2370 struct extent_buffer *leaf;
2371 char *name = NULL;
2372 int namelen;
2373
2374 path = btrfs_alloc_path();
2375 if (!path)
2376 return -ENOMEM;
2377
2378 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2379 if (!name) {
2380 btrfs_free_path(path);
2381 return -ENOMEM;
2382 }
2383
2384 key.objectid = send_root->root_key.objectid;
2385 key.type = BTRFS_ROOT_BACKREF_KEY;
2386 key.offset = 0;
2387
2388 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2389 &key, path, 1, 0);
2390 if (ret < 0)
2391 goto out;
2392 if (ret) {
2393 ret = -ENOENT;
2394 goto out;
2395 }
2396
2397 leaf = path->nodes[0];
2398 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2399 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2400 key.objectid != send_root->root_key.objectid) {
2401 ret = -ENOENT;
2402 goto out;
2403 }
2404 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2405 namelen = btrfs_root_ref_name_len(leaf, ref);
2406 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2407 btrfs_release_path(path);
2408
2409 if (parent_root) {
2410 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2411 if (ret < 0)
2412 goto out;
2413 } else {
2414 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2415 if (ret < 0)
2416 goto out;
2417 }
2418
2419 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2420
2421 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2422 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2423 sctx->send_root->root_item.received_uuid);
2424 else
2425 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2426 sctx->send_root->root_item.uuid);
2427
2428 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2429 le64_to_cpu(sctx->send_root->root_item.ctransid));
2430 if (parent_root) {
2431 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2432 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2433 parent_root->root_item.received_uuid);
2434 else
2435 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2436 parent_root->root_item.uuid);
2437 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2438 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2439 }
2440
2441 ret = send_cmd(sctx);
2442
2443 tlv_put_failure:
2444 out:
2445 btrfs_free_path(path);
2446 kfree(name);
2447 return ret;
2448 }
2449
2450 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2451 {
2452 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2453 int ret = 0;
2454 struct fs_path *p;
2455
2456 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2457
2458 p = fs_path_alloc();
2459 if (!p)
2460 return -ENOMEM;
2461
2462 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2463 if (ret < 0)
2464 goto out;
2465
2466 ret = get_cur_path(sctx, ino, gen, p);
2467 if (ret < 0)
2468 goto out;
2469 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2470 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2471
2472 ret = send_cmd(sctx);
2473
2474 tlv_put_failure:
2475 out:
2476 fs_path_free(p);
2477 return ret;
2478 }
2479
2480 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2481 {
2482 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2483 int ret = 0;
2484 struct fs_path *p;
2485
2486 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2487
2488 p = fs_path_alloc();
2489 if (!p)
2490 return -ENOMEM;
2491
2492 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2493 if (ret < 0)
2494 goto out;
2495
2496 ret = get_cur_path(sctx, ino, gen, p);
2497 if (ret < 0)
2498 goto out;
2499 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2500 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2501
2502 ret = send_cmd(sctx);
2503
2504 tlv_put_failure:
2505 out:
2506 fs_path_free(p);
2507 return ret;
2508 }
2509
2510 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2511 {
2512 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2513 int ret = 0;
2514 struct fs_path *p;
2515
2516 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2517 ino, uid, gid);
2518
2519 p = fs_path_alloc();
2520 if (!p)
2521 return -ENOMEM;
2522
2523 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2524 if (ret < 0)
2525 goto out;
2526
2527 ret = get_cur_path(sctx, ino, gen, p);
2528 if (ret < 0)
2529 goto out;
2530 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2531 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2532 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2533
2534 ret = send_cmd(sctx);
2535
2536 tlv_put_failure:
2537 out:
2538 fs_path_free(p);
2539 return ret;
2540 }
2541
2542 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2543 {
2544 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2545 int ret = 0;
2546 struct fs_path *p = NULL;
2547 struct btrfs_inode_item *ii;
2548 struct btrfs_path *path = NULL;
2549 struct extent_buffer *eb;
2550 struct btrfs_key key;
2551 int slot;
2552
2553 btrfs_debug(fs_info, "send_utimes %llu", ino);
2554
2555 p = fs_path_alloc();
2556 if (!p)
2557 return -ENOMEM;
2558
2559 path = alloc_path_for_send();
2560 if (!path) {
2561 ret = -ENOMEM;
2562 goto out;
2563 }
2564
2565 key.objectid = ino;
2566 key.type = BTRFS_INODE_ITEM_KEY;
2567 key.offset = 0;
2568 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2569 if (ret > 0)
2570 ret = -ENOENT;
2571 if (ret < 0)
2572 goto out;
2573
2574 eb = path->nodes[0];
2575 slot = path->slots[0];
2576 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2577
2578 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2579 if (ret < 0)
2580 goto out;
2581
2582 ret = get_cur_path(sctx, ino, gen, p);
2583 if (ret < 0)
2584 goto out;
2585 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2586 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2587 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2588 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2589 /* TODO Add otime support when the otime patches get into upstream */
2590
2591 ret = send_cmd(sctx);
2592
2593 tlv_put_failure:
2594 out:
2595 fs_path_free(p);
2596 btrfs_free_path(path);
2597 return ret;
2598 }
2599
2600 /*
2601 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2602 * a valid path yet because we did not process the refs yet. So, the inode
2603 * is created as orphan.
2604 */
2605 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2606 {
2607 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2608 int ret = 0;
2609 struct fs_path *p;
2610 int cmd;
2611 u64 gen;
2612 u64 mode;
2613 u64 rdev;
2614
2615 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2616
2617 p = fs_path_alloc();
2618 if (!p)
2619 return -ENOMEM;
2620
2621 if (ino != sctx->cur_ino) {
2622 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2623 NULL, NULL, &rdev);
2624 if (ret < 0)
2625 goto out;
2626 } else {
2627 gen = sctx->cur_inode_gen;
2628 mode = sctx->cur_inode_mode;
2629 rdev = sctx->cur_inode_rdev;
2630 }
2631
2632 if (S_ISREG(mode)) {
2633 cmd = BTRFS_SEND_C_MKFILE;
2634 } else if (S_ISDIR(mode)) {
2635 cmd = BTRFS_SEND_C_MKDIR;
2636 } else if (S_ISLNK(mode)) {
2637 cmd = BTRFS_SEND_C_SYMLINK;
2638 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2639 cmd = BTRFS_SEND_C_MKNOD;
2640 } else if (S_ISFIFO(mode)) {
2641 cmd = BTRFS_SEND_C_MKFIFO;
2642 } else if (S_ISSOCK(mode)) {
2643 cmd = BTRFS_SEND_C_MKSOCK;
2644 } else {
2645 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2646 (int)(mode & S_IFMT));
2647 ret = -EOPNOTSUPP;
2648 goto out;
2649 }
2650
2651 ret = begin_cmd(sctx, cmd);
2652 if (ret < 0)
2653 goto out;
2654
2655 ret = gen_unique_name(sctx, ino, gen, p);
2656 if (ret < 0)
2657 goto out;
2658
2659 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2660 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2661
2662 if (S_ISLNK(mode)) {
2663 fs_path_reset(p);
2664 ret = read_symlink(sctx->send_root, ino, p);
2665 if (ret < 0)
2666 goto out;
2667 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2668 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2669 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2670 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2671 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2672 }
2673
2674 ret = send_cmd(sctx);
2675 if (ret < 0)
2676 goto out;
2677
2678
2679 tlv_put_failure:
2680 out:
2681 fs_path_free(p);
2682 return ret;
2683 }
2684
2685 /*
2686 * We need some special handling for inodes that get processed before the parent
2687 * directory got created. See process_recorded_refs for details.
2688 * This function checks whether we already created the dir out of order.
2689 */
2690 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2691 {
2692 int ret = 0;
2693 struct btrfs_path *path = NULL;
2694 struct btrfs_key key;
2695 struct btrfs_key found_key;
2696 struct btrfs_key di_key;
2697 struct extent_buffer *eb;
2698 struct btrfs_dir_item *di;
2699 int slot;
2700
2701 path = alloc_path_for_send();
2702 if (!path) {
2703 ret = -ENOMEM;
2704 goto out;
2705 }
2706
2707 key.objectid = dir;
2708 key.type = BTRFS_DIR_INDEX_KEY;
2709 key.offset = 0;
2710 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2711 if (ret < 0)
2712 goto out;
2713
2714 while (1) {
2715 eb = path->nodes[0];
2716 slot = path->slots[0];
2717 if (slot >= btrfs_header_nritems(eb)) {
2718 ret = btrfs_next_leaf(sctx->send_root, path);
2719 if (ret < 0) {
2720 goto out;
2721 } else if (ret > 0) {
2722 ret = 0;
2723 break;
2724 }
2725 continue;
2726 }
2727
2728 btrfs_item_key_to_cpu(eb, &found_key, slot);
2729 if (found_key.objectid != key.objectid ||
2730 found_key.type != key.type) {
2731 ret = 0;
2732 goto out;
2733 }
2734
2735 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2736 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2737
2738 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2739 di_key.objectid < sctx->send_progress) {
2740 ret = 1;
2741 goto out;
2742 }
2743
2744 path->slots[0]++;
2745 }
2746
2747 out:
2748 btrfs_free_path(path);
2749 return ret;
2750 }
2751
2752 /*
2753 * Only creates the inode if it is:
2754 * 1. Not a directory
2755 * 2. Or a directory which was not created already due to out of order
2756 * directories. See did_create_dir and process_recorded_refs for details.
2757 */
2758 static int send_create_inode_if_needed(struct send_ctx *sctx)
2759 {
2760 int ret;
2761
2762 if (S_ISDIR(sctx->cur_inode_mode)) {
2763 ret = did_create_dir(sctx, sctx->cur_ino);
2764 if (ret < 0)
2765 goto out;
2766 if (ret) {
2767 ret = 0;
2768 goto out;
2769 }
2770 }
2771
2772 ret = send_create_inode(sctx, sctx->cur_ino);
2773 if (ret < 0)
2774 goto out;
2775
2776 out:
2777 return ret;
2778 }
2779
2780 struct recorded_ref {
2781 struct list_head list;
2782 char *name;
2783 struct fs_path *full_path;
2784 u64 dir;
2785 u64 dir_gen;
2786 int name_len;
2787 };
2788
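/* Set the full path of a ref and derive its base name and name length. */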
2789 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2790 {
2791 ref->full_path = path;
2792 ref->name = (char *)kbasename(ref->full_path->start);
2793 ref->name_len = ref->full_path->end - ref->name;
2794 }
2795
2796 /*
2797 * We need to process new refs before deleted refs, but compare_tree gives us
2798 * everything mixed. So we first record all refs and later process them.
2799 * This function is a helper to record one ref.
2800 */
2801 static int __record_ref(struct list_head *head, u64 dir,
2802 u64 dir_gen, struct fs_path *path)
2803 {
2804 struct recorded_ref *ref;
2805
2806 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2807 if (!ref)
2808 return -ENOMEM;
2809
2810 ref->dir = dir;
2811 ref->dir_gen = dir_gen;
2812 set_ref_path(ref, path);
2813 list_add_tail(&ref->list, head);
2814 return 0;
2815 }
2816
2817 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2818 {
2819 struct recorded_ref *new;
2820
2821 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2822 if (!new)
2823 return -ENOMEM;
2824
2825 new->dir = ref->dir;
2826 new->dir_gen = ref->dir_gen;
2827 new->full_path = NULL;
2828 INIT_LIST_HEAD(&new->list);
2829 list_add_tail(&new->list, list);
2830 return 0;
2831 }
2832
2833 static void __free_recorded_refs(struct list_head *head)
2834 {
2835 struct recorded_ref *cur;
2836
2837 while (!list_empty(head)) {
2838 cur = list_entry(head->next, struct recorded_ref, list);
2839 fs_path_free(cur->full_path);
2840 list_del(&cur->list);
2841 kfree(cur);
2842 }
2843 }
2844
2845 static void free_recorded_refs(struct send_ctx *sctx)
2846 {
2847 __free_recorded_refs(&sctx->new_refs);
2848 __free_recorded_refs(&sctx->deleted_refs);
2849 }
2850
2851 /*
2852 * Renames/moves a file/dir to its orphan name. Used when the first
2853 * ref of an unprocessed inode gets overwritten and for all non-empty
2854 * directories.
2855 */
2856 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2857 struct fs_path *path)
2858 {
2859 int ret;
2860 struct fs_path *orphan;
2861
2862 orphan = fs_path_alloc();
2863 if (!orphan)
2864 return -ENOMEM;
2865
2866 ret = gen_unique_name(sctx, ino, gen, orphan);
2867 if (ret < 0)
2868 goto out;
2869
2870 ret = send_rename(sctx, path, orphan);
2871
2872 out:
2873 fs_path_free(orphan);
2874 return ret;
2875 }
2876
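/*
 * Add an orphan_dir_info for a directory that can not be removed yet, keyed by
 * (inode, generation). If an entry for the directory already exists, it is
 * returned instead of allocating a new one.
 */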
2877 static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
2878 u64 dir_ino, u64 dir_gen)
2879 {
2880 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2881 struct rb_node *parent = NULL;
2882 struct orphan_dir_info *entry, *odi;
2883
2884 while (*p) {
2885 parent = *p;
2886 entry = rb_entry(parent, struct orphan_dir_info, node);
2887 if (dir_ino < entry->ino)
2888 p = &(*p)->rb_left;
2889 else if (dir_ino > entry->ino)
2890 p = &(*p)->rb_right;
2891 else if (dir_gen < entry->gen)
2892 p = &(*p)->rb_left;
2893 else if (dir_gen > entry->gen)
2894 p = &(*p)->rb_right;
2895 else
2896 return entry;
2897 }
2898
2899 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2900 if (!odi)
2901 return ERR_PTR(-ENOMEM);
2902 odi->ino = dir_ino;
2903 odi->gen = dir_gen;
2904 odi->last_dir_index_offset = 0;
2905
2906 rb_link_node(&odi->node, parent, p);
2907 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2908 return odi;
2909 }
2910
2911 static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
2912 u64 dir_ino, u64 gen)
2913 {
2914 struct rb_node *n = sctx->orphan_dirs.rb_node;
2915 struct orphan_dir_info *entry;
2916
2917 while (n) {
2918 entry = rb_entry(n, struct orphan_dir_info, node);
2919 if (dir_ino < entry->ino)
2920 n = n->rb_left;
2921 else if (dir_ino > entry->ino)
2922 n = n->rb_right;
2923 else if (gen < entry->gen)
2924 n = n->rb_left;
2925 else if (gen > entry->gen)
2926 n = n->rb_right;
2927 else
2928 return entry;
2929 }
2930 return NULL;
2931 }
2932
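/*
 * Check if a directory is waiting to be removed, i.e. it has an
 * orphan_dir_info entry because it could not be rmdir'ed when it was processed.
 */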
2933 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
2934 {
2935 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
2936
2937 return odi != NULL;
2938 }
2939
2940 static void free_orphan_dir_info(struct send_ctx *sctx,
2941 struct orphan_dir_info *odi)
2942 {
2943 if (!odi)
2944 return;
2945 rb_erase(&odi->node, &sctx->orphan_dirs);
2946 kfree(odi);
2947 }
2948
2949 /*
2950 * Returns 1 if a directory can be removed at this point in time.
2951 * We check this by iterating all dir items and checking if the inode behind
2952 * the dir item was already processed.
2953 */
2954 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2955 u64 send_progress)
2956 {
2957 int ret = 0;
2958 struct btrfs_root *root = sctx->parent_root;
2959 struct btrfs_path *path;
2960 struct btrfs_key key;
2961 struct btrfs_key found_key;
2962 struct btrfs_key loc;
2963 struct btrfs_dir_item *di;
2964 struct orphan_dir_info *odi = NULL;
2965
2966 /*
2967 * Don't try to rmdir the top/root subvolume dir.
2968 */
2969 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2970 return 0;
2971
2972 path = alloc_path_for_send();
2973 if (!path)
2974 return -ENOMEM;
2975
2976 key.objectid = dir;
2977 key.type = BTRFS_DIR_INDEX_KEY;
2978 key.offset = 0;
2979
2980 odi = get_orphan_dir_info(sctx, dir, dir_gen);
2981 if (odi)
2982 key.offset = odi->last_dir_index_offset;
2983
2984 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2985 if (ret < 0)
2986 goto out;
2987
2988 while (1) {
2989 struct waiting_dir_move *dm;
2990
2991 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2992 ret = btrfs_next_leaf(root, path);
2993 if (ret < 0)
2994 goto out;
2995 else if (ret > 0)
2996 break;
2997 continue;
2998 }
2999 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3000 path->slots[0]);
3001 if (found_key.objectid != key.objectid ||
3002 found_key.type != key.type)
3003 break;
3004
3005 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
3006 struct btrfs_dir_item);
3007 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
3008
3009 dm = get_waiting_dir_move(sctx, loc.objectid);
3010 if (dm) {
3011 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3012 if (IS_ERR(odi)) {
3013 ret = PTR_ERR(odi);
3014 goto out;
3015 }
3016 odi->gen = dir_gen;
3017 odi->last_dir_index_offset = found_key.offset;
3018 dm->rmdir_ino = dir;
3019 dm->rmdir_gen = dir_gen;
3020 ret = 0;
3021 goto out;
3022 }
3023
3024 if (loc.objectid > send_progress) {
3025 odi = add_orphan_dir_info(sctx, dir, dir_gen);
3026 if (IS_ERR(odi)) {
3027 ret = PTR_ERR(odi);
3028 goto out;
3029 }
3030 odi->gen = dir_gen;
3031 odi->last_dir_index_offset = found_key.offset;
3032 ret = 0;
3033 goto out;
3034 }
3035
3036 path->slots[0]++;
3037 }
3038 free_orphan_dir_info(sctx, odi);
3039
3040 ret = 1;
3041
3042 out:
3043 btrfs_free_path(path);
3044 return ret;
3045 }
3046
3047 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3048 {
3049 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3050
3051 return entry != NULL;
3052 }
3053
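/*
 * Record that a directory inode is waiting for its rename/move to be applied.
 * Returns -EEXIST if an entry for the inode already exists.
 */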
3054 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3055 {
3056 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3057 struct rb_node *parent = NULL;
3058 struct waiting_dir_move *entry, *dm;
3059
3060 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3061 if (!dm)
3062 return -ENOMEM;
3063 dm->ino = ino;
3064 dm->rmdir_ino = 0;
3065 dm->rmdir_gen = 0;
3066 dm->orphanized = orphanized;
3067
3068 while (*p) {
3069 parent = *p;
3070 entry = rb_entry(parent, struct waiting_dir_move, node);
3071 if (ino < entry->ino) {
3072 p = &(*p)->rb_left;
3073 } else if (ino > entry->ino) {
3074 p = &(*p)->rb_right;
3075 } else {
3076 kfree(dm);
3077 return -EEXIST;
3078 }
3079 }
3080
3081 rb_link_node(&dm->node, parent, p);
3082 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3083 return 0;
3084 }
3085
3086 static struct waiting_dir_move *
3087 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3088 {
3089 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3090 struct waiting_dir_move *entry;
3091
3092 while (n) {
3093 entry = rb_entry(n, struct waiting_dir_move, node);
3094 if (ino < entry->ino)
3095 n = n->rb_left;
3096 else if (ino > entry->ino)
3097 n = n->rb_right;
3098 else
3099 return entry;
3100 }
3101 return NULL;
3102 }
3103
3104 static void free_waiting_dir_move(struct send_ctx *sctx,
3105 struct waiting_dir_move *dm)
3106 {
3107 if (!dm)
3108 return;
3109 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3110 kfree(dm);
3111 }
3112
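/*
 * Queue a rename/move of inode 'ino' that must be delayed until the directory
 * 'parent_ino' is processed. The given new and deleted refs are duplicated into
 * the pending move so that the utimes of the affected parent directories can be
 * updated once the move is applied.
 */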
3113 static int add_pending_dir_move(struct send_ctx *sctx,
3114 u64 ino,
3115 u64 ino_gen,
3116 u64 parent_ino,
3117 struct list_head *new_refs,
3118 struct list_head *deleted_refs,
3119 const bool is_orphan)
3120 {
3121 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3122 struct rb_node *parent = NULL;
3123 struct pending_dir_move *entry = NULL, *pm;
3124 struct recorded_ref *cur;
3125 int exists = 0;
3126 int ret;
3127
3128 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3129 if (!pm)
3130 return -ENOMEM;
3131 pm->parent_ino = parent_ino;
3132 pm->ino = ino;
3133 pm->gen = ino_gen;
3134 INIT_LIST_HEAD(&pm->list);
3135 INIT_LIST_HEAD(&pm->update_refs);
3136 RB_CLEAR_NODE(&pm->node);
3137
3138 while (*p) {
3139 parent = *p;
3140 entry = rb_entry(parent, struct pending_dir_move, node);
3141 if (parent_ino < entry->parent_ino) {
3142 p = &(*p)->rb_left;
3143 } else if (parent_ino > entry->parent_ino) {
3144 p = &(*p)->rb_right;
3145 } else {
3146 exists = 1;
3147 break;
3148 }
3149 }
3150
3151 list_for_each_entry(cur, deleted_refs, list) {
3152 ret = dup_ref(cur, &pm->update_refs);
3153 if (ret < 0)
3154 goto out;
3155 }
3156 list_for_each_entry(cur, new_refs, list) {
3157 ret = dup_ref(cur, &pm->update_refs);
3158 if (ret < 0)
3159 goto out;
3160 }
3161
3162 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3163 if (ret)
3164 goto out;
3165
3166 if (exists) {
3167 list_add_tail(&pm->list, &entry->list);
3168 } else {
3169 rb_link_node(&pm->node, parent, p);
3170 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3171 }
3172 ret = 0;
3173 out:
3174 if (ret) {
3175 __free_recorded_refs(&pm->update_refs);
3176 kfree(pm);
3177 }
3178 return ret;
3179 }
3180
3181 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3182 u64 parent_ino)
3183 {
3184 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3185 struct pending_dir_move *entry;
3186
3187 while (n) {
3188 entry = rb_entry(n, struct pending_dir_move, node);
3189 if (parent_ino < entry->parent_ino)
3190 n = n->rb_left;
3191 else if (parent_ino > entry->parent_ino)
3192 n = n->rb_right;
3193 else
3194 return entry;
3195 }
3196 return NULL;
3197 }
3198
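/*
 * Check if building the current path for 'ino' would loop, i.e. if some
 * ancestor of 'ino' resolves back to 'ino' itself. Returns 1 if a loop was
 * found (with *ancestor_ino set), 0 if not and < 0 on error.
 */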
3199 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3200 u64 ino, u64 gen, u64 *ancestor_ino)
3201 {
3202 int ret = 0;
3203 u64 parent_inode = 0;
3204 u64 parent_gen = 0;
3205 u64 start_ino = ino;
3206
3207 *ancestor_ino = 0;
3208 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3209 fs_path_reset(name);
3210
3211 if (is_waiting_for_rm(sctx, ino, gen))
3212 break;
3213 if (is_waiting_for_move(sctx, ino)) {
3214 if (*ancestor_ino == 0)
3215 *ancestor_ino = ino;
3216 ret = get_first_ref(sctx->parent_root, ino,
3217 &parent_inode, &parent_gen, name);
3218 } else {
3219 ret = __get_cur_name_and_parent(sctx, ino, gen,
3220 &parent_inode,
3221 &parent_gen, name);
3222 if (ret > 0) {
3223 ret = 0;
3224 break;
3225 }
3226 }
3227 if (ret < 0)
3228 break;
3229 if (parent_inode == start_ino) {
3230 ret = 1;
3231 if (*ancestor_ino == 0)
3232 *ancestor_ino = ino;
3233 break;
3234 }
3235 ino = parent_inode;
3236 gen = parent_gen;
3237 }
3238 return ret;
3239 }
3240
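/*
 * Perform a delayed rename/move of a directory. If the move would still result
 * in a path loop, it is re-queued under the conflicting ancestor. Otherwise the
 * rename is sent, any directory waiting on this move is rmdir'ed if possible,
 * and the utimes of the affected parent directories are updated.
 */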
3241 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3242 {
3243 struct fs_path *from_path = NULL;
3244 struct fs_path *to_path = NULL;
3245 struct fs_path *name = NULL;
3246 u64 orig_progress = sctx->send_progress;
3247 struct recorded_ref *cur;
3248 u64 parent_ino, parent_gen;
3249 struct waiting_dir_move *dm = NULL;
3250 u64 rmdir_ino = 0;
3251 u64 rmdir_gen;
3252 u64 ancestor;
3253 bool is_orphan;
3254 int ret;
3255
3256 name = fs_path_alloc();
3257 from_path = fs_path_alloc();
3258 if (!name || !from_path) {
3259 ret = -ENOMEM;
3260 goto out;
3261 }
3262
3263 dm = get_waiting_dir_move(sctx, pm->ino);
3264 ASSERT(dm);
3265 rmdir_ino = dm->rmdir_ino;
3266 rmdir_gen = dm->rmdir_gen;
3267 is_orphan = dm->orphanized;
3268 free_waiting_dir_move(sctx, dm);
3269
3270 if (is_orphan) {
3271 ret = gen_unique_name(sctx, pm->ino,
3272 pm->gen, from_path);
3273 } else {
3274 ret = get_first_ref(sctx->parent_root, pm->ino,
3275 &parent_ino, &parent_gen, name);
3276 if (ret < 0)
3277 goto out;
3278 ret = get_cur_path(sctx, parent_ino, parent_gen,
3279 from_path);
3280 if (ret < 0)
3281 goto out;
3282 ret = fs_path_add_path(from_path, name);
3283 }
3284 if (ret < 0)
3285 goto out;
3286
3287 sctx->send_progress = sctx->cur_ino + 1;
3288 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3289 if (ret < 0)
3290 goto out;
3291 if (ret) {
3292 LIST_HEAD(deleted_refs);
3293 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3294 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3295 &pm->update_refs, &deleted_refs,
3296 is_orphan);
3297 if (ret < 0)
3298 goto out;
3299 if (rmdir_ino) {
3300 dm = get_waiting_dir_move(sctx, pm->ino);
3301 ASSERT(dm);
3302 dm->rmdir_ino = rmdir_ino;
3303 dm->rmdir_gen = rmdir_gen;
3304 }
3305 goto out;
3306 }
3307 fs_path_reset(name);
3308 to_path = name;
3309 name = NULL;
3310 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3311 if (ret < 0)
3312 goto out;
3313
3314 ret = send_rename(sctx, from_path, to_path);
3315 if (ret < 0)
3316 goto out;
3317
3318 if (rmdir_ino) {
3319 struct orphan_dir_info *odi;
3320 u64 gen;
3321
3322 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3323 if (!odi) {
3324 /* already deleted */
3325 goto finish;
3326 }
3327 gen = odi->gen;
3328
3329 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3330 if (ret < 0)
3331 goto out;
3332 if (!ret)
3333 goto finish;
3334
3335 name = fs_path_alloc();
3336 if (!name) {
3337 ret = -ENOMEM;
3338 goto out;
3339 }
3340 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3341 if (ret < 0)
3342 goto out;
3343 ret = send_rmdir(sctx, name);
3344 if (ret < 0)
3345 goto out;
3346 }
3347
3348 finish:
3349 ret = send_utimes(sctx, pm->ino, pm->gen);
3350 if (ret < 0)
3351 goto out;
3352
3353 /*
3354 * After rename/move, need to update the utimes of both new parent(s)
3355 * and old parent(s).
3356 */
3357 list_for_each_entry(cur, &pm->update_refs, list) {
3358 /*
3359 * The parent inode might have been deleted in the send snapshot
3360 */
3361 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3362 NULL, NULL, NULL, NULL, NULL);
3363 if (ret == -ENOENT) {
3364 ret = 0;
3365 continue;
3366 }
3367 if (ret < 0)
3368 goto out;
3369
3370 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3371 if (ret < 0)
3372 goto out;
3373 }
3374
3375 out:
3376 fs_path_free(name);
3377 fs_path_free(from_path);
3378 fs_path_free(to_path);
3379 sctx->send_progress = orig_progress;
3380
3381 return ret;
3382 }
3383
3384 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3385 {
3386 if (!list_empty(&m->list))
3387 list_del(&m->list);
3388 if (!RB_EMPTY_NODE(&m->node))
3389 rb_erase(&m->node, &sctx->pending_dir_moves);
3390 __free_recorded_refs(&m->update_refs);
3391 kfree(m);
3392 }
3393
3394 static void tail_append_pending_moves(struct send_ctx *sctx,
3395 struct pending_dir_move *moves,
3396 struct list_head *stack)
3397 {
3398 if (list_empty(&moves->list)) {
3399 list_add_tail(&moves->list, stack);
3400 } else {
3401 LIST_HEAD(list);
3402 list_splice_init(&moves->list, &list);
3403 list_add_tail(&moves->list, stack);
3404 list_splice_tail(&list, stack);
3405 }
3406 if (!RB_EMPTY_NODE(&moves->node)) {
3407 rb_erase(&moves->node, &sctx->pending_dir_moves);
3408 RB_CLEAR_NODE(&moves->node);
3409 }
3410 }
3411
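/*
 * Apply all pending directory moves that were waiting for the current inode to
 * be processed, as well as the moves that become applicable as a result.
 */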
3412 static int apply_children_dir_moves(struct send_ctx *sctx)
3413 {
3414 struct pending_dir_move *pm;
3415 struct list_head stack;
3416 u64 parent_ino = sctx->cur_ino;
3417 int ret = 0;
3418
3419 pm = get_pending_dir_moves(sctx, parent_ino);
3420 if (!pm)
3421 return 0;
3422
3423 INIT_LIST_HEAD(&stack);
3424 tail_append_pending_moves(sctx, pm, &stack);
3425
3426 while (!list_empty(&stack)) {
3427 pm = list_first_entry(&stack, struct pending_dir_move, list);
3428 parent_ino = pm->ino;
3429 ret = apply_dir_move(sctx, pm);
3430 free_pending_move(sctx, pm);
3431 if (ret)
3432 goto out;
3433 pm = get_pending_dir_moves(sctx, parent_ino);
3434 if (pm)
3435 tail_append_pending_moves(sctx, pm, &stack);
3436 }
3437 return 0;
3438
3439 out:
3440 while (!list_empty(&stack)) {
3441 pm = list_first_entry(&stack, struct pending_dir_move, list);
3442 free_pending_move(sctx, pm);
3443 }
3444 return ret;
3445 }
3446
3447 /*
3448 * We might need to delay a directory rename even when no ancestor directory
3449 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3450 * renamed. This happens when we rename a directory to the old name (the name
3451 * in the parent root) of some other unrelated directory that got its rename
3452 * delayed due to some ancestor with higher number that got renamed.
3453 *
3454 * Example:
3455 *
3456 * Parent snapshot:
3457 * . (ino 256)
3458 * |---- a/ (ino 257)
3459 * | |---- file (ino 260)
3460 * |
3461 * |---- b/ (ino 258)
3462 * |---- c/ (ino 259)
3463 *
3464 * Send snapshot:
3465 * . (ino 256)
3466 * |---- a/ (ino 258)
3467 * |---- x/ (ino 259)
3468 * |---- y/ (ino 257)
3469 * |----- file (ino 260)
3470 *
3471 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3472 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3473 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3474 * must issue is:
3475 *
3476 * 1 - rename 259 from 'c' to 'x'
3477 * 2 - rename 257 from 'a' to 'x/y'
3478 * 3 - rename 258 from 'b' to 'a'
3479 *
3480 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3481 * be done right away and < 0 on error.
3482 */
3483 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3484 struct recorded_ref *parent_ref,
3485 const bool is_orphan)
3486 {
3487 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3488 struct btrfs_path *path;
3489 struct btrfs_key key;
3490 struct btrfs_key di_key;
3491 struct btrfs_dir_item *di;
3492 u64 left_gen;
3493 u64 right_gen;
3494 int ret = 0;
3495 struct waiting_dir_move *wdm;
3496
3497 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3498 return 0;
3499
3500 path = alloc_path_for_send();
3501 if (!path)
3502 return -ENOMEM;
3503
3504 key.objectid = parent_ref->dir;
3505 key.type = BTRFS_DIR_ITEM_KEY;
3506 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3507
3508 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3509 if (ret < 0) {
3510 goto out;
3511 } else if (ret > 0) {
3512 ret = 0;
3513 goto out;
3514 }
3515
3516 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3517 parent_ref->name_len);
3518 if (!di) {
3519 ret = 0;
3520 goto out;
3521 }
3522 /*
3523 * di_key.objectid has the number of the inode that has a dentry in the
3524 * parent directory with the same name that sctx->cur_ino is being
3525 * renamed to. We need to check if that inode is in the send root as
3526 * well and if it is currently marked as an inode with a pending rename,
3527 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3528 * that it happens after that other inode is renamed.
3529 */
3530 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3531 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3532 ret = 0;
3533 goto out;
3534 }
3535
3536 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3537 &left_gen, NULL, NULL, NULL, NULL);
3538 if (ret < 0)
3539 goto out;
3540 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3541 &right_gen, NULL, NULL, NULL, NULL);
3542 if (ret < 0) {
3543 if (ret == -ENOENT)
3544 ret = 0;
3545 goto out;
3546 }
3547
3548 /* Different inode, no need to delay the rename of sctx->cur_ino */
3549 if (right_gen != left_gen) {
3550 ret = 0;
3551 goto out;
3552 }
3553
3554 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3555 if (wdm && !wdm->orphanized) {
3556 ret = add_pending_dir_move(sctx,
3557 sctx->cur_ino,
3558 sctx->cur_inode_gen,
3559 di_key.objectid,
3560 &sctx->new_refs,
3561 &sctx->deleted_refs,
3562 is_orphan);
3563 if (!ret)
3564 ret = 1;
3565 }
3566 out:
3567 btrfs_free_path(path);
3568 return ret;
3569 }
3570
3571 /*
3572 * Check if inode ino2, or any of its ancestors, is inode ino1.
3573 * Return 1 if true, 0 if false and < 0 on error.
3574 */
3575 static int check_ino_in_path(struct btrfs_root *root,
3576 const u64 ino1,
3577 const u64 ino1_gen,
3578 const u64 ino2,
3579 const u64 ino2_gen,
3580 struct fs_path *fs_path)
3581 {
3582 u64 ino = ino2;
3583
3584 if (ino1 == ino2)
3585 return ino1_gen == ino2_gen;
3586
3587 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3588 u64 parent;
3589 u64 parent_gen;
3590 int ret;
3591
3592 fs_path_reset(fs_path);
3593 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3594 if (ret < 0)
3595 return ret;
3596 if (parent == ino1)
3597 return parent_gen == ino1_gen;
3598 ino = parent;
3599 }
3600 return 0;
3601 }
3602
3603 /*
3604 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
3605 * possible path (in case ino2 is not a directory and has multiple hard links).
3606 * Return 1 if true, 0 if false and < 0 on error.
3607 */
3608 static int is_ancestor(struct btrfs_root *root,
3609 const u64 ino1,
3610 const u64 ino1_gen,
3611 const u64 ino2,
3612 struct fs_path *fs_path)
3613 {
3614 bool free_fs_path = false;
3615 int ret = 0;
3616 struct btrfs_path *path = NULL;
3617 struct btrfs_key key;
3618
3619 if (!fs_path) {
3620 fs_path = fs_path_alloc();
3621 if (!fs_path)
3622 return -ENOMEM;
3623 free_fs_path = true;
3624 }
3625
3626 path = alloc_path_for_send();
3627 if (!path) {
3628 ret = -ENOMEM;
3629 goto out;
3630 }
3631
3632 key.objectid = ino2;
3633 key.type = BTRFS_INODE_REF_KEY;
3634 key.offset = 0;
3635
3636 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3637 if (ret < 0)
3638 goto out;
3639
3640 while (true) {
3641 struct extent_buffer *leaf = path->nodes[0];
3642 int slot = path->slots[0];
3643 u32 cur_offset = 0;
3644 u32 item_size;
3645
3646 if (slot >= btrfs_header_nritems(leaf)) {
3647 ret = btrfs_next_leaf(root, path);
3648 if (ret < 0)
3649 goto out;
3650 if (ret > 0)
3651 break;
3652 continue;
3653 }
3654
3655 btrfs_item_key_to_cpu(leaf, &key, slot);
3656 if (key.objectid != ino2)
3657 break;
3658 if (key.type != BTRFS_INODE_REF_KEY &&
3659 key.type != BTRFS_INODE_EXTREF_KEY)
3660 break;
3661
3662 item_size = btrfs_item_size_nr(leaf, slot);
3663 while (cur_offset < item_size) {
3664 u64 parent;
3665 u64 parent_gen;
3666
3667 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3668 unsigned long ptr;
3669 struct btrfs_inode_extref *extref;
3670
3671 ptr = btrfs_item_ptr_offset(leaf, slot);
3672 extref = (struct btrfs_inode_extref *)
3673 (ptr + cur_offset);
3674 parent = btrfs_inode_extref_parent(leaf,
3675 extref);
3676 cur_offset += sizeof(*extref);
3677 cur_offset += btrfs_inode_extref_name_len(leaf,
3678 extref);
3679 } else {
3680 parent = key.offset;
3681 cur_offset = item_size;
3682 }
3683
3684 ret = get_inode_info(root, parent, NULL, &parent_gen,
3685 NULL, NULL, NULL, NULL);
3686 if (ret < 0)
3687 goto out;
3688 ret = check_ino_in_path(root, ino1, ino1_gen,
3689 parent, parent_gen, fs_path);
3690 if (ret)
3691 goto out;
3692 }
3693 path->slots[0]++;
3694 }
3695 ret = 0;
3696 out:
3697 btrfs_free_path(path);
3698 if (free_fs_path)
3699 fs_path_free(fs_path);
3700 return ret;
3701 }
3702
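/*
 * Check if the rename/move of sctx->cur_ino into the directory of 'parent_ref'
 * must be delayed because some ancestor directory still has to be renamed/moved
 * first. Returns 1 if the move was delayed (queued as a pending directory
 * move), 0 if it can be done right away and < 0 on error.
 */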
3703 static int wait_for_parent_move(struct send_ctx *sctx,
3704 struct recorded_ref *parent_ref,
3705 const bool is_orphan)
3706 {
3707 int ret = 0;
3708 u64 ino = parent_ref->dir;
3709 u64 ino_gen = parent_ref->dir_gen;
3710 u64 parent_ino_before, parent_ino_after;
3711 struct fs_path *path_before = NULL;
3712 struct fs_path *path_after = NULL;
3713 int len1, len2;
3714
3715 path_after = fs_path_alloc();
3716 path_before = fs_path_alloc();
3717 if (!path_after || !path_before) {
3718 ret = -ENOMEM;
3719 goto out;
3720 }
3721
3722 /*
3723 * Our current directory inode may not yet be renamed/moved because some
3724 * ancestor (immediate or not) has to be renamed/moved first. So find if
3725 * such an ancestor exists and make sure our own rename/move happens after
3726 * that ancestor is processed to avoid path build infinite loops (done
3727 * at get_cur_path()).
3728 */
3729 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3730 u64 parent_ino_after_gen;
3731
3732 if (is_waiting_for_move(sctx, ino)) {
3733 /*
3734 * If the current inode is an ancestor of ino in the
3735 * parent root, we need to delay the rename of the
3736 * current inode, otherwise don't delay the rename
3737 * because we can end up with a circular dependency
3738 * of renames, resulting in some directories never
3739 * getting the respective rename operations issued in
3740 * the send stream or getting into infinite path build
3741 * loops.
3742 */
3743 ret = is_ancestor(sctx->parent_root,
3744 sctx->cur_ino, sctx->cur_inode_gen,
3745 ino, path_before);
3746 if (ret)
3747 break;
3748 }
3749
3750 fs_path_reset(path_before);
3751 fs_path_reset(path_after);
3752
3753 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3754 &parent_ino_after_gen, path_after);
3755 if (ret < 0)
3756 goto out;
3757 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3758 NULL, path_before);
3759 if (ret < 0 && ret != -ENOENT) {
3760 goto out;
3761 } else if (ret == -ENOENT) {
3762 ret = 0;
3763 break;
3764 }
3765
3766 len1 = fs_path_len(path_before);
3767 len2 = fs_path_len(path_after);
3768 if (ino > sctx->cur_ino &&
3769 (parent_ino_before != parent_ino_after || len1 != len2 ||
3770 memcmp(path_before->start, path_after->start, len1))) {
3771 u64 parent_ino_gen;
3772
3773 ret = get_inode_info(sctx->parent_root, ino, NULL,
3774 &parent_ino_gen, NULL, NULL, NULL,
3775 NULL);
3776 if (ret < 0)
3777 goto out;
3778 if (ino_gen == parent_ino_gen) {
3779 ret = 1;
3780 break;
3781 }
3782 }
3783 ino = parent_ino_after;
3784 ino_gen = parent_ino_after_gen;
3785 }
3786
3787 out:
3788 fs_path_free(path_before);
3789 fs_path_free(path_after);
3790
3791 if (ret == 1) {
3792 ret = add_pending_dir_move(sctx,
3793 sctx->cur_ino,
3794 sctx->cur_inode_gen,
3795 ino,
3796 &sctx->new_refs,
3797 &sctx->deleted_refs,
3798 is_orphan);
3799 if (!ret)
3800 ret = 1;
3801 }
3802
3803 return ret;
3804 }
3805
3806 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3807 {
3808 int ret;
3809 struct fs_path *new_path;
3810
3811 /*
3812 * Our reference's name member points to its full_path member string, so
3813 * we use a new path here.
3814 */
3815 new_path = fs_path_alloc();
3816 if (!new_path)
3817 return -ENOMEM;
3818
3819 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3820 if (ret < 0) {
3821 fs_path_free(new_path);
3822 return ret;
3823 }
3824 ret = fs_path_add(new_path, ref->name, ref->name_len);
3825 if (ret < 0) {
3826 fs_path_free(new_path);
3827 return ret;
3828 }
3829
3830 fs_path_free(ref->full_path);
3831 set_ref_path(ref, new_path);
3832
3833 return 0;
3834 }
3835
3836 /*
3837 * When processing the new references for an inode we may orphanize an existing
3838 * directory inode because its old name conflicts with one of the new references
3839 * of the current inode. Later, when processing another new reference of our
3840 * inode, we might need to orphanize another inode, but the path we have in the
3841 * reference reflects the pre-orphanization name of the directory we previously
3842 * orphanized. For example:
3843 *
3844 * parent snapshot looks like:
3845 *
3846 * . (ino 256)
3847 * |----- f1 (ino 257)
3848 * |----- f2 (ino 258)
3849 * |----- d1/ (ino 259)
3850 * |----- d2/ (ino 260)
3851 *
3852 * send snapshot looks like:
3853 *
3854 * . (ino 256)
3855 * |----- d1 (ino 258)
3856 * |----- f2/ (ino 259)
3857 * |----- f2_link/ (ino 260)
3858 * | |----- f1 (ino 257)
3859 * |
3860 * |----- d2 (ino 258)
3861 *
3862 * When processing inode 257 we compute the name for inode 259 as "d1", and we
3863 * cache it in the name cache. Later when we start processing inode 258, when
3864 * collecting all its new references we set a full path of "d1/d2" for its new
3865 * reference with name "d2". When we start processing the new references we
3866 * start by processing the new reference with name "d1", and this results in
3867 * orphanizing inode 259, since its old reference causes a conflict. Then we
3868 * move on the next new reference, with name "d2", and we find out we must
3869 * orphanize inode 260, as its old reference conflicts with ours - but for the
3870 * orphanization we use a source path corresponding to the path we stored in the
3871 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
3872 * receiver fail since the path component "d1/" no longer exists, it was renamed
3873 * to "o259-6-0/" when processing the previous new reference. So in this case we
3874 * must recompute the path in the new reference and use it for the new
3875 * orphanization operation.
3876 */
3877 static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3878 {
3879 char *name;
3880 int ret;
3881
3882 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
3883 if (!name)
3884 return -ENOMEM;
3885
3886 fs_path_reset(ref->full_path);
3887 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
3888 if (ret < 0)
3889 goto out;
3890
3891 ret = fs_path_add(ref->full_path, name, ref->name_len);
3892 if (ret < 0)
3893 goto out;
3894
3895 /* Update the reference's base name pointer. */
3896 set_ref_path(ref, ref->full_path);
3897 out:
3898 kfree(name);
3899 return ret;
3900 }
3901
3902 /*
3903 * This does all the move/link/unlink/rmdir magic.
3904 */
3905 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3906 {
3907 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3908 int ret = 0;
3909 struct recorded_ref *cur;
3910 struct recorded_ref *cur2;
3911 struct list_head check_dirs;
3912 struct fs_path *valid_path = NULL;
3913 u64 ow_inode = 0;
3914 u64 ow_gen;
3915 u64 ow_mode;
3916 int did_overwrite = 0;
3917 int is_orphan = 0;
3918 u64 last_dir_ino_rm = 0;
3919 bool can_rename = true;
3920 bool orphanized_dir = false;
3921 bool orphanized_ancestor = false;
3922
3923 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3924
3925 /*
3926 * This should never happen as the root dir always has the same ref
3927 * which is always '..'
3928 */
3929 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3930 INIT_LIST_HEAD(&check_dirs);
3931
3932 valid_path = fs_path_alloc();
3933 if (!valid_path) {
3934 ret = -ENOMEM;
3935 goto out;
3936 }
3937
3938 /*
3939 * First, check if the first ref of the current inode was overwritten
3940 * before. If yes, we know that the current inode was already orphanized
3941 * and thus use the orphan name. If not, we can use get_cur_path to
3942 * get the path of the first ref as it would look like while receiving at
3943 * this point in time.
3944 * New inodes are always orphan at the beginning, so force to use the
3945 * orphan name in this case.
3946 * The first ref is stored in valid_path and will be updated if it
3947 * gets moved around.
3948 */
3949 if (!sctx->cur_inode_new) {
3950 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3951 sctx->cur_inode_gen);
3952 if (ret < 0)
3953 goto out;
3954 if (ret)
3955 did_overwrite = 1;
3956 }
3957 if (sctx->cur_inode_new || did_overwrite) {
3958 ret = gen_unique_name(sctx, sctx->cur_ino,
3959 sctx->cur_inode_gen, valid_path);
3960 if (ret < 0)
3961 goto out;
3962 is_orphan = 1;
3963 } else {
3964 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3965 valid_path);
3966 if (ret < 0)
3967 goto out;
3968 }
3969
3970 /*
3971 * Before doing any rename and link operations, do a first pass on the
3972 * new references to orphanize any unprocessed inodes that may have a
3973 * reference that conflicts with one of the new references of the current
3974 * inode. This needs to happen first because a new reference may conflict
3975 * with the old reference of a parent directory, so we must make sure
3976 * that the paths used for link and rename commands don't use an
3977 * orphanized name when an ancestor was not yet orphanized.
3978 *
3979 * Example:
3980 *
3981 * Parent snapshot:
3982 *
3983 * . (ino 256)
3984 * |----- testdir/ (ino 259)
3985 * | |----- a (ino 257)
3986 * |
3987 * |----- b (ino 258)
3988 *
3989 * Send snapshot:
3990 *
3991 * . (ino 256)
3992 * |----- testdir_2/ (ino 259)
3993 * | |----- a (ino 260)
3994 * |
3995 * |----- testdir (ino 257)
3996 * |----- b (ino 257)
3997 * |----- b2 (ino 258)
3998 *
3999 * Processing the new reference for inode 257 with name "b" may happen
4000 * before processing the new reference with name "testdir". If so, we
4001 * must make sure that by the time we send a link command to create the
4002 * hard link "b", inode 259 was already orphanized, since the generated
4003 * path in "valid_path" already contains the orphanized name for 259.
4004 * We are processing inode 257, so only later when processing 259 we do
4005 * the rename operation to change its temporary (orphanized) name to
4006 * "testdir_2".
4007 */
4008 list_for_each_entry(cur, &sctx->new_refs, list) {
4009 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4010 if (ret < 0)
4011 goto out;
4012 if (ret == inode_state_will_create)
4013 continue;
4014
4015 /*
4016 * Check if this new ref would overwrite the first ref of another
4017 * unprocessed inode. If yes, orphanize the overwritten inode.
4018 * If we find an overwritten ref that is not the first ref,
4019 * simply unlink it.
4020 */
4021 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4022 cur->name, cur->name_len,
4023 &ow_inode, &ow_gen, &ow_mode);
4024 if (ret < 0)
4025 goto out;
4026 if (ret) {
4027 ret = is_first_ref(sctx->parent_root,
4028 ow_inode, cur->dir, cur->name,
4029 cur->name_len);
4030 if (ret < 0)
4031 goto out;
4032 if (ret) {
4033 struct name_cache_entry *nce;
4034 struct waiting_dir_move *wdm;
4035
4036 if (orphanized_dir) {
4037 ret = refresh_ref_path(sctx, cur);
4038 if (ret < 0)
4039 goto out;
4040 }
4041
4042 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4043 cur->full_path);
4044 if (ret < 0)
4045 goto out;
4046 if (S_ISDIR(ow_mode))
4047 orphanized_dir = true;
4048
4049 /*
4050 * If ow_inode has its rename operation delayed
4051 * make sure that its orphanized name is used in
4052 * the source path when performing its rename
4053 * operation.
4054 */
4055 if (is_waiting_for_move(sctx, ow_inode)) {
4056 wdm = get_waiting_dir_move(sctx,
4057 ow_inode);
4058 ASSERT(wdm);
4059 wdm->orphanized = true;
4060 }
4061
4062 /*
4063 * Make sure we clear our orphanized inode's
4064 * name from the name cache. This is because the
4065 * inode ow_inode might be an ancestor of some
4066 * other inode that will be orphanized as well
4067 * later and has an inode number greater than
4068 * sctx->send_progress. We need to prevent
4069 * future name lookups from using the old name
4070 * and get instead the orphan name.
4071 */
4072 nce = name_cache_search(sctx, ow_inode, ow_gen);
4073 if (nce) {
4074 name_cache_delete(sctx, nce);
4075 kfree(nce);
4076 }
4077
4078 /*
4079 * ow_inode might currently be an ancestor of
4080 * cur_ino, therefore compute valid_path (the
4081 * current path of cur_ino) again because it
4082 * might contain the pre-orphanization name of
4083 * ow_inode, which is no longer valid.
4084 */
4085 ret = is_ancestor(sctx->parent_root,
4086 ow_inode, ow_gen,
4087 sctx->cur_ino, NULL);
4088 if (ret > 0) {
4089 orphanized_ancestor = true;
4090 fs_path_reset(valid_path);
4091 ret = get_cur_path(sctx, sctx->cur_ino,
4092 sctx->cur_inode_gen,
4093 valid_path);
4094 }
4095 if (ret < 0)
4096 goto out;
4097 } else {
4098 /*
4099 * If we previously orphanized a directory that
4100 * collided with a new reference that we already
4101 * processed, recompute the current path because
4102 * that directory may be part of the path.
4103 */
4104 if (orphanized_dir) {
4105 ret = refresh_ref_path(sctx, cur);
4106 if (ret < 0)
4107 goto out;
4108 }
4109 ret = send_unlink(sctx, cur->full_path);
4110 if (ret < 0)
4111 goto out;
4112 }
4113 }
4114
4115 }
4116
4117 list_for_each_entry(cur, &sctx->new_refs, list) {
4118 /*
4119 * We may have refs where the parent directory does not exist
4120 * yet. This happens if the parent directory's inum is higher
4121 * than the current inum. To handle this case, we create the
4122 * parent directory out of order. But we need to check if this
4123 * did already happen before due to other refs in the same dir.
4124 */
4125 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4126 if (ret < 0)
4127 goto out;
4128 if (ret == inode_state_will_create) {
4129 ret = 0;
4130 /*
4131 * First check if any of the current inodes refs did
4132 * already create the dir.
4133 */
4134 list_for_each_entry(cur2, &sctx->new_refs, list) {
4135 if (cur == cur2)
4136 break;
4137 if (cur2->dir == cur->dir) {
4138 ret = 1;
4139 break;
4140 }
4141 }
4142
4143 /*
4144 * If that did not happen, check if a previous inode
4145 * did already create the dir.
4146 */
4147 if (!ret)
4148 ret = did_create_dir(sctx, cur->dir);
4149 if (ret < 0)
4150 goto out;
4151 if (!ret) {
4152 ret = send_create_inode(sctx, cur->dir);
4153 if (ret < 0)
4154 goto out;
4155 }
4156 }
4157
4158 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4159 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4160 if (ret < 0)
4161 goto out;
4162 if (ret == 1) {
4163 can_rename = false;
4164 *pending_move = 1;
4165 }
4166 }
4167
4168 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4169 can_rename) {
4170 ret = wait_for_parent_move(sctx, cur, is_orphan);
4171 if (ret < 0)
4172 goto out;
4173 if (ret == 1) {
4174 can_rename = false;
4175 *pending_move = 1;
4176 }
4177 }
4178
4179 /*
4180 * link/move the ref to the new place. If we have an orphan
4181 * inode, move it and update valid_path. If not, link or move
4182 * it depending on the inode mode.
4183 */
4184 if (is_orphan && can_rename) {
4185 ret = send_rename(sctx, valid_path, cur->full_path);
4186 if (ret < 0)
4187 goto out;
4188 is_orphan = 0;
4189 ret = fs_path_copy(valid_path, cur->full_path);
4190 if (ret < 0)
4191 goto out;
4192 } else if (can_rename) {
4193 if (S_ISDIR(sctx->cur_inode_mode)) {
4194 /*
4195 * Dirs can't be linked, so move it. For moved
4196 * dirs, we always have one new and one deleted
4197 * ref. The deleted ref is ignored later.
4198 */
4199 ret = send_rename(sctx, valid_path,
4200 cur->full_path);
4201 if (!ret)
4202 ret = fs_path_copy(valid_path,
4203 cur->full_path);
4204 if (ret < 0)
4205 goto out;
4206 } else {
4207 /*
4208 * We might have previously orphanized an inode
4209 * which is an ancestor of our current inode,
4210 * so our reference's full path, which was
4211 * computed before any such orphanizations, must
4212 * be updated.
4213 */
4214 if (orphanized_dir) {
4215 ret = update_ref_path(sctx, cur);
4216 if (ret < 0)
4217 goto out;
4218 }
4219 ret = send_link(sctx, cur->full_path,
4220 valid_path);
4221 if (ret < 0)
4222 goto out;
4223 }
4224 }
4225 ret = dup_ref(cur, &check_dirs);
4226 if (ret < 0)
4227 goto out;
4228 }
4229
4230 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4231 /*
4232 * Check if we can already rmdir the directory. If not,
4233 * orphanize it. For every dir item inside that gets deleted
4234 * later, we do this check again and rmdir it then if possible.
4235 * See the use of check_dirs for more details.
4236 */
4237 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4238 sctx->cur_ino);
4239 if (ret < 0)
4240 goto out;
4241 if (ret) {
4242 ret = send_rmdir(sctx, valid_path);
4243 if (ret < 0)
4244 goto out;
4245 } else if (!is_orphan) {
4246 ret = orphanize_inode(sctx, sctx->cur_ino,
4247 sctx->cur_inode_gen, valid_path);
4248 if (ret < 0)
4249 goto out;
4250 is_orphan = 1;
4251 }
4252
4253 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4254 ret = dup_ref(cur, &check_dirs);
4255 if (ret < 0)
4256 goto out;
4257 }
4258 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4259 !list_empty(&sctx->deleted_refs)) {
4260 /*
4261 * We have a moved dir. Add the old parent to check_dirs
4262 */
4263 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4264 list);
4265 ret = dup_ref(cur, &check_dirs);
4266 if (ret < 0)
4267 goto out;
4268 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4269 /*
4270 * We have a non dir inode. Go through all deleted refs and
4271 * unlink them if they were not already overwritten by other
4272 * inodes.
4273 */
4274 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4275 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4276 sctx->cur_ino, sctx->cur_inode_gen,
4277 cur->name, cur->name_len);
4278 if (ret < 0)
4279 goto out;
4280 if (!ret) {
4281 /*
4282 * If we orphanized any ancestor before, we need
4283 * to recompute the full path for deleted names,
4284 * since any such path was computed before we
4285 * processed any references and orphanized any
4286 * ancestor inode.
4287 */
4288 if (orphanized_ancestor) {
4289 ret = update_ref_path(sctx, cur);
4290 if (ret < 0)
4291 goto out;
4292 }
4293 ret = send_unlink(sctx, cur->full_path);
4294 if (ret < 0)
4295 goto out;
4296 }
4297 ret = dup_ref(cur, &check_dirs);
4298 if (ret < 0)
4299 goto out;
4300 }
4301 /*
4302 * If the inode is still orphan, unlink the orphan. This may
4303 * happen when a previous inode did overwrite the first ref
4304 * of this inode and no new refs were added for the current
4305 * inode. Unlinking does not mean that the inode is deleted in
4306 * all cases. There may still be links to this inode in other
4307 * places.
4308 */
4309 if (is_orphan) {
4310 ret = send_unlink(sctx, valid_path);
4311 if (ret < 0)
4312 goto out;
4313 }
4314 }
4315
4316 /*
4317 * We did collect all parent dirs where cur_inode was once located. We
4318 * now go through all these dirs and check if they are pending for
4319 * deletion and if it's finally possible to perform the rmdir now.
4320 * We also update the inode stats of the parent dirs here.
4321 */
4322 list_for_each_entry(cur, &check_dirs, list) {
4323 /*
4324 * In case we had refs into dirs that were not processed yet,
4325 * we don't need to do the utime and rmdir logic for these dirs.
4326 * The dir will be processed later.
4327 */
4328 if (cur->dir > sctx->cur_ino)
4329 continue;
4330
4331 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4332 if (ret < 0)
4333 goto out;
4334
4335 if (ret == inode_state_did_create ||
4336 ret == inode_state_no_change) {
4337 /* TODO delayed utimes */
4338 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4339 if (ret < 0)
4340 goto out;
4341 } else if (ret == inode_state_did_delete &&
4342 cur->dir != last_dir_ino_rm) {
4343 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4344 sctx->cur_ino);
4345 if (ret < 0)
4346 goto out;
4347 if (ret) {
4348 ret = get_cur_path(sctx, cur->dir,
4349 cur->dir_gen, valid_path);
4350 if (ret < 0)
4351 goto out;
4352 ret = send_rmdir(sctx, valid_path);
4353 if (ret < 0)
4354 goto out;
4355 last_dir_ino_rm = cur->dir;
4356 }
4357 }
4358 }
4359
4360 ret = 0;
4361
4362 out:
4363 __free_recorded_refs(&check_dirs);
4364 free_recorded_refs(sctx);
4365 fs_path_free(valid_path);
4366 return ret;
4367 }
4368
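/*
 * Record one reference of the inode currently being processed: resolve the
 * path of the parent directory @dir as it looks at this point of the stream,
 * append @name to it and add the result to @refs (either sctx->new_refs or
 * sctx->deleted_refs). On success the fs_path is owned by the list entry.
 */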
4369 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4370 void *ctx, struct list_head *refs)
4371 {
4372 int ret = 0;
4373 struct send_ctx *sctx = ctx;
4374 struct fs_path *p;
4375 u64 gen;
4376
4377 p = fs_path_alloc();
4378 if (!p)
4379 return -ENOMEM;
4380
4381 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4382 NULL, NULL);
4383 if (ret < 0)
4384 goto out;
4385
4386 ret = get_cur_path(sctx, dir, gen, p);
4387 if (ret < 0)
4388 goto out;
4389 ret = fs_path_add_path(p, name);
4390 if (ret < 0)
4391 goto out;
4392
4393 ret = __record_ref(refs, dir, gen, p);
4394
4395 out:
4396 if (ret)
4397 fs_path_free(p);
4398 return ret;
4399 }
4400
4401 static int __record_new_ref(int num, u64 dir, int index,
4402 struct fs_path *name,
4403 void *ctx)
4404 {
4405 struct send_ctx *sctx = ctx;
4406 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4407 }
4408
4409
4410 static int __record_deleted_ref(int num, u64 dir, int index,
4411 struct fs_path *name,
4412 void *ctx)
4413 {
4414 struct send_ctx *sctx = ctx;
4415 return record_ref(sctx->parent_root, dir, name, ctx,
4416 &sctx->deleted_refs);
4417 }
4418
4419 static int record_new_ref(struct send_ctx *sctx)
4420 {
4421 int ret;
4422
4423 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4424 sctx->cmp_key, 0, __record_new_ref, sctx);
4425 if (ret < 0)
4426 goto out;
4427 ret = 0;
4428
4429 out:
4430 return ret;
4431 }
4432
4433 static int record_deleted_ref(struct send_ctx *sctx)
4434 {
4435 int ret;
4436
4437 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4438 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4439 if (ret < 0)
4440 goto out;
4441 ret = 0;
4442
4443 out:
4444 return ret;
4445 }
4446
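/*
 * Context for __find_iref()/find_iref(): look for a reference with a given
 * parent directory (inode number and generation) and name inside a single
 * INODE_REF/INODE_EXTREF item, remembering the index at which it was found.
 */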
4447 struct find_ref_ctx {
4448 u64 dir;
4449 u64 dir_gen;
4450 struct btrfs_root *root;
4451 struct fs_path *name;
4452 int found_idx;
4453 };
4454
4455 static int __find_iref(int num, u64 dir, int index,
4456 struct fs_path *name,
4457 void *ctx_)
4458 {
4459 struct find_ref_ctx *ctx = ctx_;
4460 u64 dir_gen;
4461 int ret;
4462
4463 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4464 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4465 /*
4466 * To avoid doing extra lookups we'll only do this if everything
4467 * else matches.
4468 */
4469 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4470 NULL, NULL, NULL);
4471 if (ret)
4472 return ret;
4473 if (dir_gen != ctx->dir_gen)
4474 return 0;
4475 ctx->found_idx = num;
4476 return 1;
4477 }
4478 return 0;
4479 }
4480
4481 static int find_iref(struct btrfs_root *root,
4482 struct btrfs_path *path,
4483 struct btrfs_key *key,
4484 u64 dir, u64 dir_gen, struct fs_path *name)
4485 {
4486 int ret;
4487 struct find_ref_ctx ctx;
4488
4489 ctx.dir = dir;
4490 ctx.name = name;
4491 ctx.dir_gen = dir_gen;
4492 ctx.found_idx = -1;
4493 ctx.root = root;
4494
4495 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4496 if (ret < 0)
4497 return ret;
4498
4499 if (ctx.found_idx == -1)
4500 return -ENOENT;
4501
4502 return ctx.found_idx;
4503 }
4504
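/*
 * For a changed (not new or deleted) inode ref item, a name that exists in
 * the send root but has no matching (dir, gen, name) entry in the parent
 * root is recorded as a new reference; __record_changed_deleted_ref() does
 * the symmetric check for deleted references.
 */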
4505 static int __record_changed_new_ref(int num, u64 dir, int index,
4506 struct fs_path *name,
4507 void *ctx)
4508 {
4509 u64 dir_gen;
4510 int ret;
4511 struct send_ctx *sctx = ctx;
4512
4513 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4514 NULL, NULL, NULL);
4515 if (ret)
4516 return ret;
4517
4518 ret = find_iref(sctx->parent_root, sctx->right_path,
4519 sctx->cmp_key, dir, dir_gen, name);
4520 if (ret == -ENOENT)
4521 ret = __record_new_ref(num, dir, index, name, sctx);
4522 else if (ret > 0)
4523 ret = 0;
4524
4525 return ret;
4526 }
4527
4528 static int __record_changed_deleted_ref(int num, u64 dir, int index,
4529 struct fs_path *name,
4530 void *ctx)
4531 {
4532 u64 dir_gen;
4533 int ret;
4534 struct send_ctx *sctx = ctx;
4535
4536 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4537 NULL, NULL, NULL);
4538 if (ret)
4539 return ret;
4540
4541 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4542 dir, dir_gen, name);
4543 if (ret == -ENOENT)
4544 ret = __record_deleted_ref(num, dir, index, name, sctx);
4545 else if (ret > 0)
4546 ret = 0;
4547
4548 return ret;
4549 }
4550
4551 static int record_changed_ref(struct send_ctx *sctx)
4552 {
4553 int ret = 0;
4554
4555 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4556 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4557 if (ret < 0)
4558 goto out;
4559 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4560 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4561 if (ret < 0)
4562 goto out;
4563 ret = 0;
4564
4565 out:
4566 return ret;
4567 }
4568
4569 /*
4570 * Record and process all refs at once. Needed when an inode changes the
4571 * generation number, which means that it was deleted and recreated.
4572 */
4573 static int process_all_refs(struct send_ctx *sctx,
4574 enum btrfs_compare_tree_result cmd)
4575 {
4576 int ret;
4577 struct btrfs_root *root;
4578 struct btrfs_path *path;
4579 struct btrfs_key key;
4580 struct btrfs_key found_key;
4581 struct extent_buffer *eb;
4582 int slot;
4583 iterate_inode_ref_t cb;
4584 int pending_move = 0;
4585
4586 path = alloc_path_for_send();
4587 if (!path)
4588 return -ENOMEM;
4589
4590 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4591 root = sctx->send_root;
4592 cb = __record_new_ref;
4593 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4594 root = sctx->parent_root;
4595 cb = __record_deleted_ref;
4596 } else {
4597 btrfs_err(sctx->send_root->fs_info,
4598 "Wrong command %d in process_all_refs", cmd);
4599 ret = -EINVAL;
4600 goto out;
4601 }
4602
4603 key.objectid = sctx->cmp_key->objectid;
4604 key.type = BTRFS_INODE_REF_KEY;
4605 key.offset = 0;
4606 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4607 if (ret < 0)
4608 goto out;
4609
4610 while (1) {
4611 eb = path->nodes[0];
4612 slot = path->slots[0];
4613 if (slot >= btrfs_header_nritems(eb)) {
4614 ret = btrfs_next_leaf(root, path);
4615 if (ret < 0)
4616 goto out;
4617 else if (ret > 0)
4618 break;
4619 continue;
4620 }
4621
4622 btrfs_item_key_to_cpu(eb, &found_key, slot);
4623
4624 if (found_key.objectid != key.objectid ||
4625 (found_key.type != BTRFS_INODE_REF_KEY &&
4626 found_key.type != BTRFS_INODE_EXTREF_KEY))
4627 break;
4628
4629 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4630 if (ret < 0)
4631 goto out;
4632
4633 path->slots[0]++;
4634 }
4635 btrfs_release_path(path);
4636
4637 /*
4638 * We don't actually care about pending_move as we are simply
4639 * re-creating this inode and will be rename'ing it into place once we
4640 * rename the parent directory.
4641 */
4642 ret = process_recorded_refs(sctx, &pending_move);
4643 out:
4644 btrfs_free_path(path);
4645 return ret;
4646 }
4647
4648 static int send_set_xattr(struct send_ctx *sctx,
4649 struct fs_path *path,
4650 const char *name, int name_len,
4651 const char *data, int data_len)
4652 {
4653 int ret = 0;
4654
4655 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4656 if (ret < 0)
4657 goto out;
4658
4659 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4660 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4661 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4662
4663 ret = send_cmd(sctx);
4664
4665 tlv_put_failure:
4666 out:
4667 return ret;
4668 }
4669
4670 static int send_remove_xattr(struct send_ctx *sctx,
4671 struct fs_path *path,
4672 const char *name, int name_len)
4673 {
4674 int ret = 0;
4675
4676 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4677 if (ret < 0)
4678 goto out;
4679
4680 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4681 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4682
4683 ret = send_cmd(sctx);
4684
4685 tlv_put_failure:
4686 out:
4687 return ret;
4688 }
4689
4690 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4691 const char *name, int name_len,
4692 const char *data, int data_len,
4693 u8 type, void *ctx)
4694 {
4695 int ret;
4696 struct send_ctx *sctx = ctx;
4697 struct fs_path *p;
4698 struct posix_acl_xattr_header dummy_acl;
4699
4700 /* Capabilities are emitted by finish_inode_if_needed */
4701 if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4702 return 0;
4703
4704 p = fs_path_alloc();
4705 if (!p)
4706 return -ENOMEM;
4707
4708 /*
4709 * This hack is needed because empty acls are stored as zero byte
4710 * data in xattrs. Problem with that is, that receiving these zero byte
4711 * acls will fail later. To fix this, we send a dummy acl list that
4712 * only contains the version number and no entries.
4713 */
4714 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4715 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4716 if (data_len == 0) {
4717 dummy_acl.a_version =
4718 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4719 data = (char *)&dummy_acl;
4720 data_len = sizeof(dummy_acl);
4721 }
4722 }
4723
4724 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4725 if (ret < 0)
4726 goto out;
4727
4728 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4729
4730 out:
4731 fs_path_free(p);
4732 return ret;
4733 }
4734
4735 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4736 const char *name, int name_len,
4737 const char *data, int data_len,
4738 u8 type, void *ctx)
4739 {
4740 int ret;
4741 struct send_ctx *sctx = ctx;
4742 struct fs_path *p;
4743
4744 p = fs_path_alloc();
4745 if (!p)
4746 return -ENOMEM;
4747
4748 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4749 if (ret < 0)
4750 goto out;
4751
4752 ret = send_remove_xattr(sctx, p, name, name_len);
4753
4754 out:
4755 fs_path_free(p);
4756 return ret;
4757 }
4758
4759 static int process_new_xattr(struct send_ctx *sctx)
4760 {
4761 int ret = 0;
4762
4763 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4764 __process_new_xattr, sctx);
4765
4766 return ret;
4767 }
4768
4769 static int process_deleted_xattr(struct send_ctx *sctx)
4770 {
4771 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4772 __process_deleted_xattr, sctx);
4773 }
4774
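/*
 * Context for __find_xattr()/find_xattr(): search a dir item for an xattr
 * with the given name and, if found, remember its index and optionally a
 * copy of its data (which the caller must kfree()).
 */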
4775 struct find_xattr_ctx {
4776 const char *name;
4777 int name_len;
4778 int found_idx;
4779 char *found_data;
4780 int found_data_len;
4781 };
4782
4783 static int __find_xattr(int num, struct btrfs_key *di_key,
4784 const char *name, int name_len,
4785 const char *data, int data_len,
4786 u8 type, void *vctx)
4787 {
4788 struct find_xattr_ctx *ctx = vctx;
4789
4790 if (name_len == ctx->name_len &&
4791 strncmp(name, ctx->name, name_len) == 0) {
4792 ctx->found_idx = num;
4793 ctx->found_data_len = data_len;
4794 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4795 if (!ctx->found_data)
4796 return -ENOMEM;
4797 return 1;
4798 }
4799 return 0;
4800 }
4801
4802 static int find_xattr(struct btrfs_root *root,
4803 struct btrfs_path *path,
4804 struct btrfs_key *key,
4805 const char *name, int name_len,
4806 char **data, int *data_len)
4807 {
4808 int ret;
4809 struct find_xattr_ctx ctx;
4810
4811 ctx.name = name;
4812 ctx.name_len = name_len;
4813 ctx.found_idx = -1;
4814 ctx.found_data = NULL;
4815 ctx.found_data_len = 0;
4816
4817 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4818 if (ret < 0)
4819 return ret;
4820
4821 if (ctx.found_idx == -1)
4822 return -ENOENT;
4823 if (data) {
4824 *data = ctx.found_data;
4825 *data_len = ctx.found_data_len;
4826 } else {
4827 kfree(ctx.found_data);
4828 }
4829 return ctx.found_idx;
4830 }
4831
4832
4833 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4834 const char *name, int name_len,
4835 const char *data, int data_len,
4836 u8 type, void *ctx)
4837 {
4838 int ret;
4839 struct send_ctx *sctx = ctx;
4840 char *found_data = NULL;
4841 int found_data_len = 0;
4842
4843 ret = find_xattr(sctx->parent_root, sctx->right_path,
4844 sctx->cmp_key, name, name_len, &found_data,
4845 &found_data_len);
4846 if (ret == -ENOENT) {
4847 ret = __process_new_xattr(num, di_key, name, name_len, data,
4848 data_len, type, ctx);
4849 } else if (ret >= 0) {
4850 if (data_len != found_data_len ||
4851 memcmp(data, found_data, data_len)) {
4852 ret = __process_new_xattr(num, di_key, name, name_len,
4853 data, data_len, type, ctx);
4854 } else {
4855 ret = 0;
4856 }
4857 }
4858
4859 kfree(found_data);
4860 return ret;
4861 }
4862
4863 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4864 const char *name, int name_len,
4865 const char *data, int data_len,
4866 u8 type, void *ctx)
4867 {
4868 int ret;
4869 struct send_ctx *sctx = ctx;
4870
4871 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4872 name, name_len, NULL, NULL);
4873 if (ret == -ENOENT)
4874 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4875 data_len, type, ctx);
4876 else if (ret >= 0)
4877 ret = 0;
4878
4879 return ret;
4880 }
4881
4882 static int process_changed_xattr(struct send_ctx *sctx)
4883 {
4884 int ret = 0;
4885
4886 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4887 __process_changed_new_xattr, sctx);
4888 if (ret < 0)
4889 goto out;
4890 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4891 __process_changed_deleted_xattr, sctx);
4892
4893 out:
4894 return ret;
4895 }
4896
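/*
 * Walk all xattr items of the current inode in the send root and emit a
 * set_xattr command for each of them, without diffing against a parent root.
 */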
4897 static int process_all_new_xattrs(struct send_ctx *sctx)
4898 {
4899 int ret;
4900 struct btrfs_root *root;
4901 struct btrfs_path *path;
4902 struct btrfs_key key;
4903 struct btrfs_key found_key;
4904 struct extent_buffer *eb;
4905 int slot;
4906
4907 path = alloc_path_for_send();
4908 if (!path)
4909 return -ENOMEM;
4910
4911 root = sctx->send_root;
4912
4913 key.objectid = sctx->cmp_key->objectid;
4914 key.type = BTRFS_XATTR_ITEM_KEY;
4915 key.offset = 0;
4916 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4917 if (ret < 0)
4918 goto out;
4919
4920 while (1) {
4921 eb = path->nodes[0];
4922 slot = path->slots[0];
4923 if (slot >= btrfs_header_nritems(eb)) {
4924 ret = btrfs_next_leaf(root, path);
4925 if (ret < 0) {
4926 goto out;
4927 } else if (ret > 0) {
4928 ret = 0;
4929 break;
4930 }
4931 continue;
4932 }
4933
4934 btrfs_item_key_to_cpu(eb, &found_key, slot);
4935 if (found_key.objectid != key.objectid ||
4936 found_key.type != key.type) {
4937 ret = 0;
4938 goto out;
4939 }
4940
4941 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4942 if (ret < 0)
4943 goto out;
4944
4945 path->slots[0]++;
4946 }
4947
4948 out:
4949 btrfs_free_path(path);
4950 return ret;
4951 }
4952
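/*
 * Copy @len bytes of the current inode's data, starting at @offset, from the
 * page cache into sctx->read_buf, issuing readahead as needed. The length is
 * clamped to the inode's i_size. Returns the number of bytes copied or a
 * negative errno.
 */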
4953 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4954 {
4955 struct btrfs_root *root = sctx->send_root;
4956 struct btrfs_fs_info *fs_info = root->fs_info;
4957 struct inode *inode;
4958 struct page *page;
4959 char *addr;
4960 struct btrfs_key key;
4961 pgoff_t index = offset >> PAGE_SHIFT;
4962 pgoff_t last_index;
4963 unsigned pg_offset = offset_in_page(offset);
4964 ssize_t ret = 0;
4965
4966 key.objectid = sctx->cur_ino;
4967 key.type = BTRFS_INODE_ITEM_KEY;
4968 key.offset = 0;
4969
4970 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4971 if (IS_ERR(inode))
4972 return PTR_ERR(inode);
4973
4974 if (offset + len > i_size_read(inode)) {
4975 if (offset > i_size_read(inode))
4976 len = 0;
4977 else
4978 len = i_size_read(inode) - offset;
4979 }
4980 if (len == 0)
4981 goto out;
4982
4983 last_index = (offset + len - 1) >> PAGE_SHIFT;
4984
4985 /* initial readahead */
4986 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4987 file_ra_state_init(&sctx->ra, inode->i_mapping);
4988
4989 while (index <= last_index) {
4990 unsigned cur_len = min_t(unsigned, len,
4991 PAGE_SIZE - pg_offset);
4992
4993 page = find_lock_page(inode->i_mapping, index);
4994 if (!page) {
4995 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4996 NULL, index, last_index + 1 - index);
4997
4998 page = find_or_create_page(inode->i_mapping, index,
4999 GFP_KERNEL);
5000 if (!page) {
5001 ret = -ENOMEM;
5002 break;
5003 }
5004 }
5005
5006 if (PageReadahead(page)) {
5007 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
5008 NULL, page, index, last_index + 1 - index);
5009 }
5010
5011 if (!PageUptodate(page)) {
5012 btrfs_readpage(NULL, page);
5013 lock_page(page);
5014 if (!PageUptodate(page)) {
5015 unlock_page(page);
5016 btrfs_err(fs_info,
5017 "send: IO error at offset %llu for inode %llu root %llu",
5018 page_offset(page), sctx->cur_ino,
5019 sctx->send_root->root_key.objectid);
5020 put_page(page);
5021 ret = -EIO;
5022 break;
5023 }
5024 }
5025
5026 addr = kmap(page);
5027 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
5028 kunmap(page);
5029 unlock_page(page);
5030 put_page(page);
5031 index++;
5032 pg_offset = 0;
5033 len -= cur_len;
5034 ret += cur_len;
5035 }
5036 out:
5037 iput(inode);
5038 return ret;
5039 }
5040
5041 /*
5042 * Read some bytes from the current inode/file and send a write command to
5043 * user space.
5044 */
5045 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
5046 {
5047 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5048 int ret = 0;
5049 struct fs_path *p;
5050 ssize_t num_read = 0;
5051
5052 p = fs_path_alloc();
5053 if (!p)
5054 return -ENOMEM;
5055
5056 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5057
5058 num_read = fill_read_buf(sctx, offset, len);
5059 if (num_read <= 0) {
5060 if (num_read < 0)
5061 ret = num_read;
5062 goto out;
5063 }
5064
5065 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5066 if (ret < 0)
5067 goto out;
5068
5069 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5070 if (ret < 0)
5071 goto out;
5072
5073 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5074 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5075 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
5076
5077 ret = send_cmd(sctx);
5078
5079 tlv_put_failure:
5080 out:
5081 fs_path_free(p);
5082 if (ret < 0)
5083 return ret;
5084 return num_read;
5085 }
5086
5087 /*
5088 * Send a clone command to user space.
5089 */
5090 static int send_clone(struct send_ctx *sctx,
5091 u64 offset, u32 len,
5092 struct clone_root *clone_root)
5093 {
5094 int ret = 0;
5095 struct fs_path *p;
5096 u64 gen;
5097
5098 btrfs_debug(sctx->send_root->fs_info,
5099 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5100 offset, len, clone_root->root->root_key.objectid,
5101 clone_root->ino, clone_root->offset);
5102
5103 p = fs_path_alloc();
5104 if (!p)
5105 return -ENOMEM;
5106
5107 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5108 if (ret < 0)
5109 goto out;
5110
5111 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5112 if (ret < 0)
5113 goto out;
5114
5115 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5116 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5117 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5118
5119 if (clone_root->root == sctx->send_root) {
5120 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
5121 &gen, NULL, NULL, NULL, NULL);
5122 if (ret < 0)
5123 goto out;
5124 ret = get_cur_path(sctx, clone_root->ino, gen, p);
5125 } else {
5126 ret = get_inode_path(clone_root->root, clone_root->ino, p);
5127 }
5128 if (ret < 0)
5129 goto out;
5130
5131 /*
5132 * If the parent we're using has a received_uuid set then use that as
5133 * our clone source as that is what we will look for when doing a
5134 * receive.
5135 *
5136 * This covers the case that we create a snapshot off of a received
5137 * subvolume and then use that as the parent and try to receive on a
5138 * different host.
5139 */
5140 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5141 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5142 clone_root->root->root_item.received_uuid);
5143 else
5144 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5145 clone_root->root->root_item.uuid);
5146 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5147 le64_to_cpu(clone_root->root->root_item.ctransid));
5148 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5149 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5150 clone_root->offset);
5151
5152 ret = send_cmd(sctx);
5153
5154 tlv_put_failure:
5155 out:
5156 fs_path_free(p);
5157 return ret;
5158 }
5159
5160 /*
5161 * Send an update extent command to user space.
5162 */
5163 static int send_update_extent(struct send_ctx *sctx,
5164 u64 offset, u32 len)
5165 {
5166 int ret = 0;
5167 struct fs_path *p;
5168
5169 p = fs_path_alloc();
5170 if (!p)
5171 return -ENOMEM;
5172
5173 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5174 if (ret < 0)
5175 goto out;
5176
5177 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5178 if (ret < 0)
5179 goto out;
5180
5181 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5182 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5183 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5184
5185 ret = send_cmd(sctx);
5186
5187 tlv_put_failure:
5188 out:
5189 fs_path_free(p);
5190 return ret;
5191 }
5192
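/*
 * Send a hole that ends at @end: the send stream has no hole punching
 * command, so unless BTRFS_SEND_FLAG_NO_FILE_DATA is set we emit write
 * commands filled with zeroes, from the end of the last sent extent up to
 * @end (clamped to the inode's size).
 */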
5193 static int send_hole(struct send_ctx *sctx, u64 end)
5194 {
5195 struct fs_path *p = NULL;
5196 u64 offset = sctx->cur_inode_last_extent;
5197 u64 len;
5198 int ret = 0;
5199
5200 /*
5201 * A hole that starts at EOF or beyond it. Since we do not yet support
5202 * fallocate (for extent preallocation and hole punching), sending a
5203 * write of zeroes starting at EOF or beyond would later require issuing
5204 * a truncate operation which would undo the write and achieve nothing.
5205 */
5206 if (offset >= sctx->cur_inode_size)
5207 return 0;
5208
5209 /*
5210 * Don't go beyond the inode's i_size due to prealloc extents that start
5211 * after the i_size.
5212 */
5213 end = min_t(u64, end, sctx->cur_inode_size);
5214
5215 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5216 return send_update_extent(sctx, offset, end - offset);
5217
5218 p = fs_path_alloc();
5219 if (!p)
5220 return -ENOMEM;
5221 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5222 if (ret < 0)
5223 goto tlv_put_failure;
5224 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
5225 while (offset < end) {
5226 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
5227
5228 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5229 if (ret < 0)
5230 break;
5231 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5232 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5233 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
5234 ret = send_cmd(sctx);
5235 if (ret < 0)
5236 break;
5237 offset += len;
5238 }
5239 sctx->cur_inode_next_write_offset = offset;
5240 tlv_put_failure:
5241 fs_path_free(p);
5242 return ret;
5243 }
5244
5245 static int send_extent_data(struct send_ctx *sctx,
5246 const u64 offset,
5247 const u64 len)
5248 {
5249 u64 sent = 0;
5250
5251 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5252 return send_update_extent(sctx, offset, len);
5253
5254 while (sent < len) {
5255 u64 size = len - sent;
5256 int ret;
5257
5258 if (size > BTRFS_SEND_READ_SIZE)
5259 size = BTRFS_SEND_READ_SIZE;
5260 ret = send_write(sctx, offset + sent, size);
5261 if (ret < 0)
5262 return ret;
5263 if (!ret)
5264 break;
5265 sent += ret;
5266 }
5267 return 0;
5268 }
5269
5270 /*
5271 * Search for a capability xattr related to sctx->cur_ino. If the capability is
5272 * found, call send_set_xattr function to emit it.
5273 *
5274 * Return 0 if there isn't a capability, or when the capability was emitted
5275 * successfully, or < 0 if an error occurred.
5276 */
5277 static int send_capabilities(struct send_ctx *sctx)
5278 {
5279 struct fs_path *fspath = NULL;
5280 struct btrfs_path *path;
5281 struct btrfs_dir_item *di;
5282 struct extent_buffer *leaf;
5283 unsigned long data_ptr;
5284 char *buf = NULL;
5285 int buf_len;
5286 int ret = 0;
5287
5288 path = alloc_path_for_send();
5289 if (!path)
5290 return -ENOMEM;
5291
5292 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5293 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
5294 if (!di) {
5295 /* There is no xattr for this inode */
5296 goto out;
5297 } else if (IS_ERR(di)) {
5298 ret = PTR_ERR(di);
5299 goto out;
5300 }
5301
5302 leaf = path->nodes[0];
5303 buf_len = btrfs_dir_data_len(leaf, di);
5304
5305 fspath = fs_path_alloc();
5306 buf = kmalloc(buf_len, GFP_KERNEL);
5307 if (!fspath || !buf) {
5308 ret = -ENOMEM;
5309 goto out;
5310 }
5311
5312 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5313 if (ret < 0)
5314 goto out;
5315
5316 data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5317 read_extent_buffer(leaf, buf, data_ptr, buf_len);
5318
5319 ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5320 strlen(XATTR_NAME_CAPS), buf, buf_len);
5321 out:
5322 kfree(buf);
5323 fs_path_free(fspath);
5324 btrfs_free_path(path);
5325 return ret;
5326 }
5327
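/*
 * Try to clone the range [@offset, @offset + @len) of the current inode from
 * the extent at @disk_byte in the clone source. Ranges that turn out to be
 * holes, lie beyond the source's i_size or map to different extents are sent
 * as regular writes via send_extent_data().
 */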
5328 static int clone_range(struct send_ctx *sctx,
5329 struct clone_root *clone_root,
5330 const u64 disk_byte,
5331 u64 data_offset,
5332 u64 offset,
5333 u64 len)
5334 {
5335 struct btrfs_path *path;
5336 struct btrfs_key key;
5337 int ret;
5338 u64 clone_src_i_size = 0;
5339
5340 /*
5341 * Prevent cloning from a zero offset with a length matching the sector
5342 * size because in some scenarios this will make the receiver fail.
5343 *
5344 * For example, if in the source filesystem the extent at offset 0
5345 * has a length of sectorsize and it was written using direct IO, then
5346 * it can never be an inline extent (even if compression is enabled).
5347 * Then this extent can be cloned in the original filesystem to a non
5348 * zero file offset, but it may not be possible to clone in the
5349 * destination filesystem because it can be inlined due to compression
5350 * on the destination filesystem (as the receiver's write operations are
5351 * always done using buffered IO). The same happens when the original
5352 * filesystem does not have compression enabled but the destination
5353 * filesystem has.
5354 */
5355 if (clone_root->offset == 0 &&
5356 len == sctx->send_root->fs_info->sectorsize)
5357 return send_extent_data(sctx, offset, len);
5358
5359 path = alloc_path_for_send();
5360 if (!path)
5361 return -ENOMEM;
5362
5363 /*
5364 * There are inodes that have extents that lie behind their i_size. Don't
5365 * accept clones from these extents.
5366 */
5367 ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5368 &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
5369 btrfs_release_path(path);
5370 if (ret < 0)
5371 goto out;
5372
5373 /*
5374 * We can't send a clone operation for the entire range if we find
5375 * extent items in the respective range in the source file that
5376 * refer to different extents or if we find holes.
5377 * So check for that and do a mix of clone and regular write/copy
5378 * operations if needed.
5379 *
5380 * Example:
5381 *
5382 * mkfs.btrfs -f /dev/sda
5383 * mount /dev/sda /mnt
5384 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5385 * cp --reflink=always /mnt/foo /mnt/bar
5386 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5387 * btrfs subvolume snapshot -r /mnt /mnt/snap
5388 *
5389 * If when we send the snapshot and we are processing file bar (which
5390 * has a higher inode number than foo) we blindly send a clone operation
5391 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5392 * a file bar that matches the content of file foo - iow, doesn't match
5393 * the content from bar in the original filesystem.
5394 */
5395 key.objectid = clone_root->ino;
5396 key.type = BTRFS_EXTENT_DATA_KEY;
5397 key.offset = clone_root->offset;
5398 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5399 if (ret < 0)
5400 goto out;
5401 if (ret > 0 && path->slots[0] > 0) {
5402 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5403 if (key.objectid == clone_root->ino &&
5404 key.type == BTRFS_EXTENT_DATA_KEY)
5405 path->slots[0]--;
5406 }
5407
5408 while (true) {
5409 struct extent_buffer *leaf = path->nodes[0];
5410 int slot = path->slots[0];
5411 struct btrfs_file_extent_item *ei;
5412 u8 type;
5413 u64 ext_len;
5414 u64 clone_len;
5415 u64 clone_data_offset;
5416 bool crossed_src_i_size = false;
5417
5418 if (slot >= btrfs_header_nritems(leaf)) {
5419 ret = btrfs_next_leaf(clone_root->root, path);
5420 if (ret < 0)
5421 goto out;
5422 else if (ret > 0)
5423 break;
5424 continue;
5425 }
5426
5427 btrfs_item_key_to_cpu(leaf, &key, slot);
5428
5429 /*
5430 * We might have an implicit trailing hole (NO_HOLES feature
5431 * enabled). We deal with it after leaving this loop.
5432 */
5433 if (key.objectid != clone_root->ino ||
5434 key.type != BTRFS_EXTENT_DATA_KEY)
5435 break;
5436
5437 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5438 type = btrfs_file_extent_type(leaf, ei);
5439 if (type == BTRFS_FILE_EXTENT_INLINE) {
5440 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5441 ext_len = PAGE_ALIGN(ext_len);
5442 } else {
5443 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5444 }
5445
5446 if (key.offset + ext_len <= clone_root->offset)
5447 goto next;
5448
5449 if (key.offset > clone_root->offset) {
5450 /* Implicit hole, NO_HOLES feature enabled. */
5451 u64 hole_len = key.offset - clone_root->offset;
5452
5453 if (hole_len > len)
5454 hole_len = len;
5455 ret = send_extent_data(sctx, offset, hole_len);
5456 if (ret < 0)
5457 goto out;
5458
5459 len -= hole_len;
5460 if (len == 0)
5461 break;
5462 offset += hole_len;
5463 clone_root->offset += hole_len;
5464 data_offset += hole_len;
5465 }
5466
5467 if (key.offset >= clone_root->offset + len)
5468 break;
5469
5470 if (key.offset >= clone_src_i_size)
5471 break;
5472
5473 if (key.offset + ext_len > clone_src_i_size) {
5474 ext_len = clone_src_i_size - key.offset;
5475 crossed_src_i_size = true;
5476 }
5477
5478 clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5479 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5480 clone_root->offset = key.offset;
5481 if (clone_data_offset < data_offset &&
5482 clone_data_offset + ext_len > data_offset) {
5483 u64 extent_offset;
5484
5485 extent_offset = data_offset - clone_data_offset;
5486 ext_len -= extent_offset;
5487 clone_data_offset += extent_offset;
5488 clone_root->offset += extent_offset;
5489 }
5490 }
5491
5492 clone_len = min_t(u64, ext_len, len);
5493
5494 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5495 clone_data_offset == data_offset) {
5496 const u64 src_end = clone_root->offset + clone_len;
5497 const u64 sectorsize = SZ_64K;
5498
5499 /*
5500 * We can't clone the last block, when its size is not
5501 * sector size aligned, into the middle of a file. If we
5502 * do so, the receiver will get a failure (-EINVAL) when
5503 * trying to clone or will silently corrupt the data in
5504 * the destination file if it's on a kernel without the
5505 * fix introduced by commit ac765f83f1397646
5506 * ("Btrfs: fix data corruption due to cloning of eof
5507 * block").
5508 *
5509 * So issue a clone of the aligned down range plus a
5510 * regular write for the eof block, if we hit that case.
5511 *
5512 * Also, we use the maximum possible sector size, 64K,
5513 * because we don't know what's the sector size of the
5514 * filesystem that receives the stream, so we have to
5515 * assume the largest possible sector size.
5516 */
5517 if (src_end == clone_src_i_size &&
5518 !IS_ALIGNED(src_end, sectorsize) &&
5519 offset + clone_len < sctx->cur_inode_size) {
5520 u64 slen;
5521
5522 slen = ALIGN_DOWN(src_end - clone_root->offset,
5523 sectorsize);
5524 if (slen > 0) {
5525 ret = send_clone(sctx, offset, slen,
5526 clone_root);
5527 if (ret < 0)
5528 goto out;
5529 }
5530 ret = send_extent_data(sctx, offset + slen,
5531 clone_len - slen);
5532 } else {
5533 ret = send_clone(sctx, offset, clone_len,
5534 clone_root);
5535 }
5536 } else if (crossed_src_i_size && clone_len < len) {
5537 /*
5538 * If we are at i_size of the clone source inode and we
5539 * can not clone from it, terminate the loop. This is
5540 * to avoid sending two write operations, one with a
5541 * length matching clone_len and the final one after
5542 * this loop with a length of len - clone_len.
5543 *
5544 * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
5545 * was passed to the send ioctl), this helps avoid
5546 * sending an encoded write for an offset that is not
5547 * sector size aligned, in case the i_size of the source
5548 * inode is not sector size aligned. That will make the
5549 * receiver fallback to decompression of the data and
5550 * writing it using regular buffered IO, therefore while
5551 * not incorrect, it's not optimal due to decompression and
5552 * possible re-compression at the receiver.
5553 */
5554 break;
5555 } else {
5556 ret = send_extent_data(sctx, offset, clone_len);
5557 }
5558
5559 if (ret < 0)
5560 goto out;
5561
5562 len -= clone_len;
5563 if (len == 0)
5564 break;
5565 offset += clone_len;
5566 clone_root->offset += clone_len;
5567
5568 /*
5569 * If we are cloning from the file we are currently processing,
5570 * and using the send root as the clone root, we must stop once
5571 * the current clone offset reaches the current eof of the file
5572 * at the receiver, otherwise we would issue an invalid clone
5573 * operation (source range going beyond eof) and cause the
5574 * receiver to fail. So if we reach the current eof, bail out
5575 * and fallback to a regular write.
5576 */
5577 if (clone_root->root == sctx->send_root &&
5578 clone_root->ino == sctx->cur_ino &&
5579 clone_root->offset >= sctx->cur_inode_next_write_offset)
5580 break;
5581
5582 data_offset += clone_len;
5583 next:
5584 path->slots[0]++;
5585 }
5586
5587 if (len > 0)
5588 ret = send_extent_data(sctx, offset, len);
5589 else
5590 ret = 0;
5591 out:
5592 btrfs_free_path(path);
5593 return ret;
5594 }
5595
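/*
 * Send the data covered by one file extent item, either through a clone
 * command (if a clone source was found and the range is block aligned) or
 * through regular write commands.
 */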
5596 static int send_write_or_clone(struct send_ctx *sctx,
5597 struct btrfs_path *path,
5598 struct btrfs_key *key,
5599 struct clone_root *clone_root)
5600 {
5601 int ret = 0;
5602 struct btrfs_file_extent_item *ei;
5603 u64 offset = key->offset;
5604 u64 len;
5605 u8 type;
5606 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5607
5608 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5609 struct btrfs_file_extent_item);
5610 type = btrfs_file_extent_type(path->nodes[0], ei);
5611 if (type == BTRFS_FILE_EXTENT_INLINE) {
5612 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
5613 /*
5614 * it is possible the inline item won't cover the whole page,
5615 * but there may be items after this page. Make
5616 * sure to send the whole thing
5617 */
5618 len = PAGE_ALIGN(len);
5619 } else {
5620 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5621 }
5622
5623 if (offset >= sctx->cur_inode_size) {
5624 ret = 0;
5625 goto out;
5626 }
5627 if (offset + len > sctx->cur_inode_size)
5628 len = sctx->cur_inode_size - offset;
5629 if (len == 0) {
5630 ret = 0;
5631 goto out;
5632 }
5633
5634 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5635 u64 disk_byte;
5636 u64 data_offset;
5637
5638 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5639 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5640 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5641 offset, len);
5642 } else {
5643 ret = send_extent_data(sctx, offset, len);
5644 }
5645 sctx->cur_inode_next_write_offset = offset + len;
5646 out:
5647 return ret;
5648 }
5649
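/*
 * Check if the file extent item at @ekey in the send root is fully backed by
 * the same data in the parent root. Returns 1 if the extent is unchanged and
 * does not need to be sent, 0 if it changed, and < 0 on error.
 */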
5650 static int is_extent_unchanged(struct send_ctx *sctx,
5651 struct btrfs_path *left_path,
5652 struct btrfs_key *ekey)
5653 {
5654 int ret = 0;
5655 struct btrfs_key key;
5656 struct btrfs_path *path = NULL;
5657 struct extent_buffer *eb;
5658 int slot;
5659 struct btrfs_key found_key;
5660 struct btrfs_file_extent_item *ei;
5661 u64 left_disknr;
5662 u64 right_disknr;
5663 u64 left_offset;
5664 u64 right_offset;
5665 u64 left_offset_fixed;
5666 u64 left_len;
5667 u64 right_len;
5668 u64 left_gen;
5669 u64 right_gen;
5670 u8 left_type;
5671 u8 right_type;
5672
5673 path = alloc_path_for_send();
5674 if (!path)
5675 return -ENOMEM;
5676
5677 eb = left_path->nodes[0];
5678 slot = left_path->slots[0];
5679 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5680 left_type = btrfs_file_extent_type(eb, ei);
5681
5682 if (left_type != BTRFS_FILE_EXTENT_REG) {
5683 ret = 0;
5684 goto out;
5685 }
5686 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5687 left_len = btrfs_file_extent_num_bytes(eb, ei);
5688 left_offset = btrfs_file_extent_offset(eb, ei);
5689 left_gen = btrfs_file_extent_generation(eb, ei);
5690
5691 /*
5692 * Following comments will refer to these graphics. L is the left
5693 * extents which we are checking at the moment. 1-8 are the right
5694 * extents that we iterate.
5695 *
5696 * |-----L-----|
5697 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5698 *
5699 * |-----L-----|
5700 * |--1--|-2b-|...(same as above)
5701 *
5702 * Alternative situation. Happens on files where extents got split.
5703 * |-----L-----|
5704 * |-----------7-----------|-6-|
5705 *
5706 * Alternative situation. Happens on files which got larger.
5707 * |-----L-----|
5708 * |-8-|
5709 * Nothing follows after 8.
5710 */
5711
5712 key.objectid = ekey->objectid;
5713 key.type = BTRFS_EXTENT_DATA_KEY;
5714 key.offset = ekey->offset;
5715 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5716 if (ret < 0)
5717 goto out;
5718 if (ret) {
5719 ret = 0;
5720 goto out;
5721 }
5722
5723 /*
5724 * Handle special case where the right side has no extents at all.
5725 */
5726 eb = path->nodes[0];
5727 slot = path->slots[0];
5728 btrfs_item_key_to_cpu(eb, &found_key, slot);
5729 if (found_key.objectid != key.objectid ||
5730 found_key.type != key.type) {
5731 /* If we're a hole then just pretend nothing changed */
5732 ret = (left_disknr) ? 0 : 1;
5733 goto out;
5734 }
5735
5736 /*
5737 * We're now on 2a, 2b or 7.
5738 */
5739 key = found_key;
5740 while (key.offset < ekey->offset + left_len) {
5741 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5742 right_type = btrfs_file_extent_type(eb, ei);
5743 if (right_type != BTRFS_FILE_EXTENT_REG &&
5744 right_type != BTRFS_FILE_EXTENT_INLINE) {
5745 ret = 0;
5746 goto out;
5747 }
5748
5749 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5750 right_len = btrfs_file_extent_ram_bytes(eb, ei);
5751 right_len = PAGE_ALIGN(right_len);
5752 } else {
5753 right_len = btrfs_file_extent_num_bytes(eb, ei);
5754 }
5755
5756 /*
5757 * Are we at extent 8? If yes, we know the extent is changed.
5758 * This may only happen on the first iteration.
5759 */
5760 if (found_key.offset + right_len <= ekey->offset) {
5761 /* If we're a hole just pretend nothing changed */
5762 ret = (left_disknr) ? 0 : 1;
5763 goto out;
5764 }
5765
5766 /*
5767 * We just wanted to see if when we have an inline extent, what
5768 * follows it is a regular extent (wanted to check the above
5769 * condition for inline extents too). This should normally not
5770 * happen but it's possible for example when we have an inline
5771 * compressed extent representing data with a size matching
5772 * the page size (currently the same as sector size).
5773 */
5774 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5775 ret = 0;
5776 goto out;
5777 }
5778
5779 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5780 right_offset = btrfs_file_extent_offset(eb, ei);
5781 right_gen = btrfs_file_extent_generation(eb, ei);
5782
5783 left_offset_fixed = left_offset;
5784 if (key.offset < ekey->offset) {
5785 /* Fix the right offset for 2a and 7. */
5786 right_offset += ekey->offset - key.offset;
5787 } else {
5788 /* Fix the left offset for all behind 2a and 2b */
5789 left_offset_fixed += key.offset - ekey->offset;
5790 }
5791
5792 /*
5793 * Check if we have the same extent.
5794 */
5795 if (left_disknr != right_disknr ||
5796 left_offset_fixed != right_offset ||
5797 left_gen != right_gen) {
5798 ret = 0;
5799 goto out;
5800 }
5801
5802 /*
5803 * Go to the next extent.
5804 */
5805 ret = btrfs_next_item(sctx->parent_root, path);
5806 if (ret < 0)
5807 goto out;
5808 if (!ret) {
5809 eb = path->nodes[0];
5810 slot = path->slots[0];
5811 btrfs_item_key_to_cpu(eb, &found_key, slot);
5812 }
5813 if (ret || found_key.objectid != key.objectid ||
5814 found_key.type != key.type) {
5815 key.offset += right_len;
5816 break;
5817 }
5818 if (found_key.offset != key.offset + right_len) {
5819 ret = 0;
5820 goto out;
5821 }
5822 key = found_key;
5823 }
5824
5825 /*
5826 * We're now behind the left extent (treat as unchanged) or at the end
5827 * of the right side (treat as changed).
5828 */
5829 if (key.offset >= ekey->offset + left_len)
5830 ret = 1;
5831 else
5832 ret = 0;
5833
5834
5835 out:
5836 btrfs_free_path(path);
5837 return ret;
5838 }
5839
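/*
 * Find the file extent item of the current inode at or before @offset in the
 * send root and record where it ends in sctx->cur_inode_last_extent, so that
 * hole detection knows where the previously processed extent ended.
 */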
5840 static int get_last_extent(struct send_ctx *sctx, u64 offset)
5841 {
5842 struct btrfs_path *path;
5843 struct btrfs_root *root = sctx->send_root;
5844 struct btrfs_file_extent_item *fi;
5845 struct btrfs_key key;
5846 u64 extent_end;
5847 u8 type;
5848 int ret;
5849
5850 path = alloc_path_for_send();
5851 if (!path)
5852 return -ENOMEM;
5853
5854 sctx->cur_inode_last_extent = 0;
5855
5856 key.objectid = sctx->cur_ino;
5857 key.type = BTRFS_EXTENT_DATA_KEY;
5858 key.offset = offset;
5859 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5860 if (ret < 0)
5861 goto out;
5862 ret = 0;
5863 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5864 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5865 goto out;
5866
5867 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5868 struct btrfs_file_extent_item);
5869 type = btrfs_file_extent_type(path->nodes[0], fi);
5870 if (type == BTRFS_FILE_EXTENT_INLINE) {
5871 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
5872 extent_end = ALIGN(key.offset + size,
5873 sctx->send_root->fs_info->sectorsize);
5874 } else {
5875 extent_end = key.offset +
5876 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5877 }
5878 sctx->cur_inode_last_extent = extent_end;
5879 out:
5880 btrfs_free_path(path);
5881 return ret;
5882 }
5883
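/*
 * Return 1 if the whole range [@start, @end) of the current inode is a hole
 * in the parent snapshot (no extent with a real disk bytenr intersects it),
 * 0 if some written extent overlaps it, and < 0 on error.
 */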
5884 static int range_is_hole_in_parent(struct send_ctx *sctx,
5885 const u64 start,
5886 const u64 end)
5887 {
5888 struct btrfs_path *path;
5889 struct btrfs_key key;
5890 struct btrfs_root *root = sctx->parent_root;
5891 u64 search_start = start;
5892 int ret;
5893
5894 path = alloc_path_for_send();
5895 if (!path)
5896 return -ENOMEM;
5897
5898 key.objectid = sctx->cur_ino;
5899 key.type = BTRFS_EXTENT_DATA_KEY;
5900 key.offset = search_start;
5901 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5902 if (ret < 0)
5903 goto out;
5904 if (ret > 0 && path->slots[0] > 0)
5905 path->slots[0]--;
5906
5907 while (search_start < end) {
5908 struct extent_buffer *leaf = path->nodes[0];
5909 int slot = path->slots[0];
5910 struct btrfs_file_extent_item *fi;
5911 u64 extent_end;
5912
5913 if (slot >= btrfs_header_nritems(leaf)) {
5914 ret = btrfs_next_leaf(root, path);
5915 if (ret < 0)
5916 goto out;
5917 else if (ret > 0)
5918 break;
5919 continue;
5920 }
5921
5922 btrfs_item_key_to_cpu(leaf, &key, slot);
5923 if (key.objectid < sctx->cur_ino ||
5924 key.type < BTRFS_EXTENT_DATA_KEY)
5925 goto next;
5926 if (key.objectid > sctx->cur_ino ||
5927 key.type > BTRFS_EXTENT_DATA_KEY ||
5928 key.offset >= end)
5929 break;
5930
5931 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5932 if (btrfs_file_extent_type(leaf, fi) ==
5933 BTRFS_FILE_EXTENT_INLINE) {
5934 u64 size = btrfs_file_extent_ram_bytes(leaf, fi);
5935
5936 extent_end = ALIGN(key.offset + size,
5937 root->fs_info->sectorsize);
5938 } else {
5939 extent_end = key.offset +
5940 btrfs_file_extent_num_bytes(leaf, fi);
5941 }
5942 if (extent_end <= start)
5943 goto next;
5944 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5945 search_start = extent_end;
5946 goto next;
5947 }
5948 ret = 0;
5949 goto out;
5950 next:
5951 path->slots[0]++;
5952 }
5953 ret = 1;
5954 out:
5955 btrfs_free_path(path);
5956 return ret;
5957 }
5958
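/*
 * For incremental sends of regular files, check if there is a gap between
 * the end of the last processed extent and the start of the extent item at
 * @key. If that gap is not already a hole in the parent snapshot, emit it
 * through send_hole() so the receiver reproduces it.
 */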
5959 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5960 struct btrfs_key *key)
5961 {
5962 struct btrfs_file_extent_item *fi;
5963 u64 extent_end;
5964 u8 type;
5965 int ret = 0;
5966
5967 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5968 return 0;
5969
5970 if (sctx->cur_inode_last_extent == (u64)-1) {
5971 ret = get_last_extent(sctx, key->offset - 1);
5972 if (ret)
5973 return ret;
5974 }
5975
5976 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5977 struct btrfs_file_extent_item);
5978 type = btrfs_file_extent_type(path->nodes[0], fi);
5979 if (type == BTRFS_FILE_EXTENT_INLINE) {
5980 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
5981 extent_end = ALIGN(key->offset + size,
5982 sctx->send_root->fs_info->sectorsize);
5983 } else {
5984 extent_end = key->offset +
5985 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5986 }
5987
5988 if (path->slots[0] == 0 &&
5989 sctx->cur_inode_last_extent < key->offset) {
5990 /*
5991 * We might have skipped entire leafs that contained only
5992 * file extent items for our current inode. These leafs have
5993 * a generation number smaller (older) than the one in the
5994 * current leaf and the leaf our last extent came from, and
5995 * are located between these 2 leafs.
5996 */
5997 ret = get_last_extent(sctx, key->offset - 1);
5998 if (ret)
5999 return ret;
6000 }
6001
6002 if (sctx->cur_inode_last_extent < key->offset) {
6003 ret = range_is_hole_in_parent(sctx,
6004 sctx->cur_inode_last_extent,
6005 key->offset);
6006 if (ret < 0)
6007 return ret;
6008 else if (ret == 0)
6009 ret = send_hole(sctx, key->offset);
6010 else
6011 ret = 0;
6012 }
6013 sctx->cur_inode_last_extent = extent_end;
6014 return ret;
6015 }
6016
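/*
 * Process a single file extent item: skip it if it is unchanged relative to
 * the parent snapshot (or is a hole/prealloc extent of a new inode),
 * otherwise look for a clone source and emit clone or write commands. Any
 * hole preceding the extent is sent via maybe_send_hole().
 */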
6017 static int process_extent(struct send_ctx *sctx,
6018 struct btrfs_path *path,
6019 struct btrfs_key *key)
6020 {
6021 struct clone_root *found_clone = NULL;
6022 int ret = 0;
6023
6024 if (S_ISLNK(sctx->cur_inode_mode))
6025 return 0;
6026
6027 if (sctx->parent_root && !sctx->cur_inode_new) {
6028 ret = is_extent_unchanged(sctx, path, key);
6029 if (ret < 0)
6030 goto out;
6031 if (ret) {
6032 ret = 0;
6033 goto out_hole;
6034 }
6035 } else {
6036 struct btrfs_file_extent_item *ei;
6037 u8 type;
6038
6039 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
6040 struct btrfs_file_extent_item);
6041 type = btrfs_file_extent_type(path->nodes[0], ei);
6042 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
6043 type == BTRFS_FILE_EXTENT_REG) {
6044 /*
6045 * The send spec does not have a prealloc command yet,
6046 * so just leave a hole for prealloc'ed extents until
6047 * we have enough commands queued up to justify rev'ing
6048 * the send spec.
6049 */
6050 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
6051 ret = 0;
6052 goto out;
6053 }
6054
6055 /* Have a hole, just skip it. */
6056 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
6057 ret = 0;
6058 goto out;
6059 }
6060 }
6061 }
6062
6063 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
6064 sctx->cur_inode_size, &found_clone);
6065 if (ret != -ENOENT && ret < 0)
6066 goto out;
6067
6068 ret = send_write_or_clone(sctx, path, key, found_clone);
6069 if (ret)
6070 goto out;
6071 out_hole:
6072 ret = maybe_send_hole(sctx, path, key);
6073 out:
6074 return ret;
6075 }
6076
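/*
 * Iterate over all file extent items of the current inode in the send root
 * and process each of them. Used when the inode is (or must be treated as)
 * entirely new.
 */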
6077 static int process_all_extents(struct send_ctx *sctx)
6078 {
6079 int ret;
6080 struct btrfs_root *root;
6081 struct btrfs_path *path;
6082 struct btrfs_key key;
6083 struct btrfs_key found_key;
6084 struct extent_buffer *eb;
6085 int slot;
6086
6087 root = sctx->send_root;
6088 path = alloc_path_for_send();
6089 if (!path)
6090 return -ENOMEM;
6091
6092 key.objectid = sctx->cmp_key->objectid;
6093 key.type = BTRFS_EXTENT_DATA_KEY;
6094 key.offset = 0;
6095 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6096 if (ret < 0)
6097 goto out;
6098
6099 while (1) {
6100 eb = path->nodes[0];
6101 slot = path->slots[0];
6102
6103 if (slot >= btrfs_header_nritems(eb)) {
6104 ret = btrfs_next_leaf(root, path);
6105 if (ret < 0) {
6106 goto out;
6107 } else if (ret > 0) {
6108 ret = 0;
6109 break;
6110 }
6111 continue;
6112 }
6113
6114 btrfs_item_key_to_cpu(eb, &found_key, slot);
6115
6116 if (found_key.objectid != key.objectid ||
6117 found_key.type != key.type) {
6118 ret = 0;
6119 goto out;
6120 }
6121
6122 ret = process_extent(sctx, path, &found_key);
6123 if (ret < 0)
6124 goto out;
6125
6126 path->slots[0]++;
6127 }
6128
6129 out:
6130 btrfs_free_path(path);
6131 return ret;
6132 }
6133
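/*
 * Once we are past the ref items of the current inode (or done with it),
 * process the recorded new/deleted refs. *refs_processed is set when the
 * refs were actually processed.
 */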
6134 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
6135 int *pending_move,
6136 int *refs_processed)
6137 {
6138 int ret = 0;
6139
6140 if (sctx->cur_ino == 0)
6141 goto out;
6142 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
6143 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
6144 goto out;
6145 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
6146 goto out;
6147
6148 ret = process_recorded_refs(sctx, pending_move);
6149 if (ret < 0)
6150 goto out;
6151
6152 *refs_processed = 1;
6153 out:
6154 return ret;
6155 }
6156
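/*
 * Finalize the current inode: process any recorded refs and then, based on
 * the differences to the parent snapshot, emit the trailing truncate, chown,
 * chmod, capabilities and utimes commands, plus a final hole up to i_size if
 * explicit holes are needed.
 */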
6157 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
6158 {
6159 int ret = 0;
6160 u64 left_mode;
6161 u64 left_uid;
6162 u64 left_gid;
6163 u64 right_mode;
6164 u64 right_uid;
6165 u64 right_gid;
6166 int need_chmod = 0;
6167 int need_chown = 0;
6168 int need_truncate = 1;
6169 int pending_move = 0;
6170 int refs_processed = 0;
6171
6172 if (sctx->ignore_cur_inode)
6173 return 0;
6174
6175 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
6176 &refs_processed);
6177 if (ret < 0)
6178 goto out;
6179
6180 /*
6181 * We have processed the refs and thus need to advance send_progress.
6182 * Now, calls to get_cur_xxx will take the updated refs of the current
6183 * inode into account.
6184 *
6185 * On the other hand, if our current inode is a directory and couldn't
6186 * be moved/renamed because its parent was renamed/moved too and it has
6187 * a higher inode number, we can only move/rename our current inode
6188 * after we moved/renamed its parent. Therefore in this case operate on
6189 * the old path (pre move/rename) of our current inode, and the
6190 * move/rename will be performed later.
6191 */
6192 if (refs_processed && !pending_move)
6193 sctx->send_progress = sctx->cur_ino + 1;
6194
6195 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
6196 goto out;
6197 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
6198 goto out;
6199
6200 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
6201 &left_mode, &left_uid, &left_gid, NULL);
6202 if (ret < 0)
6203 goto out;
6204
6205 if (!sctx->parent_root || sctx->cur_inode_new) {
6206 need_chown = 1;
6207 if (!S_ISLNK(sctx->cur_inode_mode))
6208 need_chmod = 1;
6209 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
6210 need_truncate = 0;
6211 } else {
6212 u64 old_size;
6213
6214 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
6215 &old_size, NULL, &right_mode, &right_uid,
6216 &right_gid, NULL);
6217 if (ret < 0)
6218 goto out;
6219
6220 if (left_uid != right_uid || left_gid != right_gid)
6221 need_chown = 1;
6222 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
6223 need_chmod = 1;
6224 if ((old_size == sctx->cur_inode_size) ||
6225 (sctx->cur_inode_size > old_size &&
6226 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
6227 need_truncate = 0;
6228 }
6229
6230 if (S_ISREG(sctx->cur_inode_mode)) {
6231 if (need_send_hole(sctx)) {
6232 if (sctx->cur_inode_last_extent == (u64)-1 ||
6233 sctx->cur_inode_last_extent <
6234 sctx->cur_inode_size) {
6235 ret = get_last_extent(sctx, (u64)-1);
6236 if (ret)
6237 goto out;
6238 }
6239 if (sctx->cur_inode_last_extent <
6240 sctx->cur_inode_size) {
6241 ret = send_hole(sctx, sctx->cur_inode_size);
6242 if (ret)
6243 goto out;
6244 }
6245 }
6246 if (need_truncate) {
6247 ret = send_truncate(sctx, sctx->cur_ino,
6248 sctx->cur_inode_gen,
6249 sctx->cur_inode_size);
6250 if (ret < 0)
6251 goto out;
6252 }
6253 }
6254
6255 if (need_chown) {
6256 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6257 left_uid, left_gid);
6258 if (ret < 0)
6259 goto out;
6260 }
6261 if (need_chmod) {
6262 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6263 left_mode);
6264 if (ret < 0)
6265 goto out;
6266 }
6267
6268 ret = send_capabilities(sctx);
6269 if (ret < 0)
6270 goto out;
6271
6272 /*
6273 * If other directory inodes depended on our current directory
6274 * inode's move/rename, now do their move/rename operations.
6275 */
6276 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6277 ret = apply_children_dir_moves(sctx);
6278 if (ret)
6279 goto out;
6280 /*
6281 * We need to send the utimes every time, no matter whether it
6282 * actually changed between the two trees, as we have made changes
6283 * to the inode before. If our inode is a directory and it's
6284 * waiting to be moved/renamed, we will send its utimes when
6285 * it's moved/renamed, therefore we don't need to do it here.
6286 */
6287 sctx->send_progress = sctx->cur_ino + 1;
6288 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6289 if (ret < 0)
6290 goto out;
6291 }
6292
6293 out:
6294 return ret;
6295 }
6296
6297 struct parent_paths_ctx {
6298 struct list_head *refs;
6299 struct send_ctx *sctx;
6300 };
6301
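/*
 * Callback for iterate_inode_ref(): record one path of the current inode as
 * it exists in the parent snapshot, so it can be unlinked later.
 */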
6302 static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6303 void *ctx)
6304 {
6305 struct parent_paths_ctx *ppctx = ctx;
6306
6307 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6308 ppctx->refs);
6309 }
6310
6311 /*
6312 * Issue unlink operations for all paths of the current inode found in the
6313 * parent snapshot.
6314 */
6315 static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6316 {
6317 LIST_HEAD(deleted_refs);
6318 struct btrfs_path *path;
6319 struct btrfs_key key;
6320 struct parent_paths_ctx ctx;
6321 int ret;
6322
6323 path = alloc_path_for_send();
6324 if (!path)
6325 return -ENOMEM;
6326
6327 key.objectid = sctx->cur_ino;
6328 key.type = BTRFS_INODE_REF_KEY;
6329 key.offset = 0;
6330 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6331 if (ret < 0)
6332 goto out;
6333
6334 ctx.refs = &deleted_refs;
6335 ctx.sctx = sctx;
6336
6337 while (true) {
6338 struct extent_buffer *eb = path->nodes[0];
6339 int slot = path->slots[0];
6340
6341 if (slot >= btrfs_header_nritems(eb)) {
6342 ret = btrfs_next_leaf(sctx->parent_root, path);
6343 if (ret < 0)
6344 goto out;
6345 else if (ret > 0)
6346 break;
6347 continue;
6348 }
6349
6350 btrfs_item_key_to_cpu(eb, &key, slot);
6351 if (key.objectid != sctx->cur_ino)
6352 break;
6353 if (key.type != BTRFS_INODE_REF_KEY &&
6354 key.type != BTRFS_INODE_EXTREF_KEY)
6355 break;
6356
6357 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6358 record_parent_ref, &ctx);
6359 if (ret < 0)
6360 goto out;
6361
6362 path->slots[0]++;
6363 }
6364
6365 while (!list_empty(&deleted_refs)) {
6366 struct recorded_ref *ref;
6367
6368 ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6369 ret = send_unlink(sctx, ref->full_path);
6370 if (ret < 0)
6371 goto out;
6372 fs_path_free(ref->full_path);
6373 list_del(&ref->list);
6374 kfree(ref);
6375 }
6376 ret = 0;
6377 out:
6378 btrfs_free_path(path);
6379 if (ret)
6380 __free_recorded_refs(&deleted_refs);
6381 return ret;
6382 }
6383
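/*
 * Handle a new/deleted/changed inode item reported by the tree comparison.
 * Sets up the per-inode state in sctx, ignores orphan inodes (nlink == 0),
 * emits the create commands for new inodes and, when the generation changed
 * (inode deleted and recreated with the same number), processes the old
 * inode as deleted and the new one as new.
 */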
6384 static int changed_inode(struct send_ctx *sctx,
6385 enum btrfs_compare_tree_result result)
6386 {
6387 int ret = 0;
6388 struct btrfs_key *key = sctx->cmp_key;
6389 struct btrfs_inode_item *left_ii = NULL;
6390 struct btrfs_inode_item *right_ii = NULL;
6391 u64 left_gen = 0;
6392 u64 right_gen = 0;
6393
6394 sctx->cur_ino = key->objectid;
6395 sctx->cur_inode_new_gen = 0;
6396 sctx->cur_inode_last_extent = (u64)-1;
6397 sctx->cur_inode_next_write_offset = 0;
6398 sctx->ignore_cur_inode = false;
6399
6400 /*
6401 * Set send_progress to current inode. This will tell all get_cur_xxx
6402 * functions that the current inode's refs are not updated yet. Later,
6403 * when process_recorded_refs is finished, it is set to cur_ino + 1.
6404 */
6405 sctx->send_progress = sctx->cur_ino;
6406
6407 if (result == BTRFS_COMPARE_TREE_NEW ||
6408 result == BTRFS_COMPARE_TREE_CHANGED) {
6409 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6410 sctx->left_path->slots[0],
6411 struct btrfs_inode_item);
6412 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6413 left_ii);
6414 } else {
6415 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6416 sctx->right_path->slots[0],
6417 struct btrfs_inode_item);
6418 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6419 right_ii);
6420 }
6421 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6422 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6423 sctx->right_path->slots[0],
6424 struct btrfs_inode_item);
6425
6426 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6427 right_ii);
6428
6429 /*
6430 * The cur_ino = root dir case is special here. We can't treat
6431 * the inode as deleted+reused because it would generate a
6432 * stream that tries to delete/mkdir the root dir.
6433 */
6434 if (left_gen != right_gen &&
6435 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6436 sctx->cur_inode_new_gen = 1;
6437 }
6438
6439 /*
6440 * Normally we do not find inodes with a link count of zero (orphans)
6441 * because the most common case is to create a snapshot and use it
6442 * for a send operation. However, other less common use cases involve
6443 * using a subvolume and sending it after turning it to RO mode just
6444 * after deleting all hard links of a file while holding an open
6445 * file descriptor against it, or turning a RO snapshot into RW mode,
6446 * keeping an open file descriptor against a file, deleting it and then
6447 * turning the snapshot back to RO mode before using it for a send
6448 * operation. So if we find such cases, ignore the inode and all its
6449 * items completely if it's a new inode, or, if it's a changed inode,
6450 * make sure all its previous paths (from the parent snapshot) are
6451 * unlinked and all its other items are ignored.
6452 */
6453 if (result == BTRFS_COMPARE_TREE_NEW ||
6454 result == BTRFS_COMPARE_TREE_CHANGED) {
6455 u32 nlinks;
6456
6457 nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6458 if (nlinks == 0) {
6459 sctx->ignore_cur_inode = true;
6460 if (result == BTRFS_COMPARE_TREE_CHANGED)
6461 ret = btrfs_unlink_all_paths(sctx);
6462 goto out;
6463 }
6464 }
6465
6466 if (result == BTRFS_COMPARE_TREE_NEW) {
6467 sctx->cur_inode_gen = left_gen;
6468 sctx->cur_inode_new = 1;
6469 sctx->cur_inode_deleted = 0;
6470 sctx->cur_inode_size = btrfs_inode_size(
6471 sctx->left_path->nodes[0], left_ii);
6472 sctx->cur_inode_mode = btrfs_inode_mode(
6473 sctx->left_path->nodes[0], left_ii);
6474 sctx->cur_inode_rdev = btrfs_inode_rdev(
6475 sctx->left_path->nodes[0], left_ii);
6476 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6477 ret = send_create_inode_if_needed(sctx);
6478 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6479 sctx->cur_inode_gen = right_gen;
6480 sctx->cur_inode_new = 0;
6481 sctx->cur_inode_deleted = 1;
6482 sctx->cur_inode_size = btrfs_inode_size(
6483 sctx->right_path->nodes[0], right_ii);
6484 sctx->cur_inode_mode = btrfs_inode_mode(
6485 sctx->right_path->nodes[0], right_ii);
6486 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6487 /*
6488 * We need to do some special handling in case the inode was
6489 * reported as changed with a changed generation number. This
6490 * means that the original inode was deleted and new inode
6491 * reused the same inum. So we have to treat the old inode as
6492 * deleted and the new one as new.
6493 */
6494 if (sctx->cur_inode_new_gen) {
6495 /*
6496 * First, process the inode as if it was deleted.
6497 */
6498 sctx->cur_inode_gen = right_gen;
6499 sctx->cur_inode_new = 0;
6500 sctx->cur_inode_deleted = 1;
6501 sctx->cur_inode_size = btrfs_inode_size(
6502 sctx->right_path->nodes[0], right_ii);
6503 sctx->cur_inode_mode = btrfs_inode_mode(
6504 sctx->right_path->nodes[0], right_ii);
6505 ret = process_all_refs(sctx,
6506 BTRFS_COMPARE_TREE_DELETED);
6507 if (ret < 0)
6508 goto out;
6509
6510 /*
6511 * Now process the inode as if it was new.
6512 */
6513 sctx->cur_inode_gen = left_gen;
6514 sctx->cur_inode_new = 1;
6515 sctx->cur_inode_deleted = 0;
6516 sctx->cur_inode_size = btrfs_inode_size(
6517 sctx->left_path->nodes[0], left_ii);
6518 sctx->cur_inode_mode = btrfs_inode_mode(
6519 sctx->left_path->nodes[0], left_ii);
6520 sctx->cur_inode_rdev = btrfs_inode_rdev(
6521 sctx->left_path->nodes[0], left_ii);
6522 ret = send_create_inode_if_needed(sctx);
6523 if (ret < 0)
6524 goto out;
6525
6526 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6527 if (ret < 0)
6528 goto out;
6529 /*
6530 * Advance send_progress now as we did not get into
6531 * process_recorded_refs_if_needed in the new_gen case.
6532 */
6533 sctx->send_progress = sctx->cur_ino + 1;
6534
6535 /*
6536 * Now process all extents and xattrs of the inode as if
6537 * they were all new.
6538 */
6539 ret = process_all_extents(sctx);
6540 if (ret < 0)
6541 goto out;
6542 ret = process_all_new_xattrs(sctx);
6543 if (ret < 0)
6544 goto out;
6545 } else {
6546 sctx->cur_inode_gen = left_gen;
6547 sctx->cur_inode_new = 0;
6548 sctx->cur_inode_new_gen = 0;
6549 sctx->cur_inode_deleted = 0;
6550 sctx->cur_inode_size = btrfs_inode_size(
6551 sctx->left_path->nodes[0], left_ii);
6552 sctx->cur_inode_mode = btrfs_inode_mode(
6553 sctx->left_path->nodes[0], left_ii);
6554 }
6555 }
6556
6557 out:
6558 return ret;
6559 }
6560
6561 /*
6562 * We have to process new refs before deleted refs, but compare_trees gives us
6563 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6564 * first and later process them in process_recorded_refs.
6565 * For the cur_inode_new_gen case, we skip recording completely because
6566 * changed_inode already initiated processing of refs. The reason for this is
6567 * that in this case, compare_tree actually compares the refs of 2 different
6568 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
6569 * refs of the right tree as deleted and all refs of the left tree as new.
6570 */
6571 static int changed_ref(struct send_ctx *sctx,
6572 enum btrfs_compare_tree_result result)
6573 {
6574 int ret = 0;
6575
6576 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6577 inconsistent_snapshot_error(sctx, result, "reference");
6578 return -EIO;
6579 }
6580
6581 if (!sctx->cur_inode_new_gen &&
6582 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6583 if (result == BTRFS_COMPARE_TREE_NEW)
6584 ret = record_new_ref(sctx);
6585 else if (result == BTRFS_COMPARE_TREE_DELETED)
6586 ret = record_deleted_ref(sctx);
6587 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6588 ret = record_changed_ref(sctx);
6589 }
6590
6591 return ret;
6592 }
6593
6594 /*
6595 * Process new/deleted/changed xattrs. We skip processing in the
6596 * cur_inode_new_gen case because changed_inode already initiated processing
6597 * of xattrs. The reason is the same as in changed_ref.
6598 */
6599 static int changed_xattr(struct send_ctx *sctx,
6600 enum btrfs_compare_tree_result result)
6601 {
6602 int ret = 0;
6603
6604 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6605 inconsistent_snapshot_error(sctx, result, "xattr");
6606 return -EIO;
6607 }
6608
6609 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6610 if (result == BTRFS_COMPARE_TREE_NEW)
6611 ret = process_new_xattr(sctx);
6612 else if (result == BTRFS_COMPARE_TREE_DELETED)
6613 ret = process_deleted_xattr(sctx);
6614 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6615 ret = process_changed_xattr(sctx);
6616 }
6617
6618 return ret;
6619 }
6620
6621 /*
6622 * Process new/deleted/changed extents. We skip processing in the
6623 * cur_inode_new_gen case because changed_inode already initiated processing
6624 * of extents. The reason is the same as in changed_ref.
6625 */
6626 static int changed_extent(struct send_ctx *sctx,
6627 enum btrfs_compare_tree_result result)
6628 {
6629 int ret = 0;
6630
6631 /*
6632 * We have found an extent item that changed without the inode item
6633 * having changed. This can happen either after relocation (where the
6634 * disk_bytenr of an extent item is replaced at
6635 * relocation.c:replace_file_extents()) or after deduplication into a
6636 * file in both the parent and send snapshots (where an extent item can
6637 * get modified or replaced with a new one). Note that deduplication
6638 * updates the inode item, but it only changes the iversion (sequence
6639 * field in the inode item) of the inode, so if a file is deduplicated
6640 * the same number of times in both the parent and send snapshots, its
6641 * iversion becomes the same in both snapshots, and hence the inode item
6642 * is the same in both snapshots.
6643 */
6644 if (sctx->cur_ino != sctx->cmp_key->objectid)
6645 return 0;
6646
6647 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6648 if (result != BTRFS_COMPARE_TREE_DELETED)
6649 ret = process_extent(sctx, sctx->left_path,
6650 sctx->cmp_key);
6651 }
6652
6653 return ret;
6654 }
6655
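/*
 * Return 1 if the generation of directory @dir differs between the send and
 * parent snapshots, 0 if it is the same, or a negative errno on failure.
 */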
6656 static int dir_changed(struct send_ctx *sctx, u64 dir)
6657 {
6658 u64 orig_gen, new_gen;
6659 int ret;
6660
6661 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6662 NULL, NULL);
6663 if (ret)
6664 return ret;
6665
6666 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6667 NULL, NULL, NULL);
6668 if (ret)
6669 return ret;
6670
6671 return (orig_gen != new_gen) ? 1 : 0;
6672 }
6673
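/*
 * For an inode ref/extref item that is identical in both snapshots, check
 * whether any of the parent directories it refers to changed generation, in
 * which case the ref still has to be processed as changed.
 */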
6674 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6675 struct btrfs_key *key)
6676 {
6677 struct btrfs_inode_extref *extref;
6678 struct extent_buffer *leaf;
6679 u64 dirid = 0, last_dirid = 0;
6680 unsigned long ptr;
6681 u32 item_size;
6682 u32 cur_offset = 0;
6683 int ref_name_len;
6684 int ret = 0;
6685
6686 /* Easy case, just check this one dirid */
6687 if (key->type == BTRFS_INODE_REF_KEY) {
6688 dirid = key->offset;
6689
6690 ret = dir_changed(sctx, dirid);
6691 goto out;
6692 }
6693
6694 leaf = path->nodes[0];
6695 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6696 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6697 while (cur_offset < item_size) {
6698 extref = (struct btrfs_inode_extref *)(ptr +
6699 cur_offset);
6700 dirid = btrfs_inode_extref_parent(leaf, extref);
6701 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6702 cur_offset += ref_name_len + sizeof(*extref);
6703 if (dirid == last_dirid)
6704 continue;
6705 ret = dir_changed(sctx, dirid);
6706 if (ret)
6707 break;
6708 last_dirid = dirid;
6709 }
6710 out:
6711 return ret;
6712 }
6713
6714 /*
6715 * Updates compare related fields in sctx and simply forwards to the actual
6716 * changed_xxx functions.
6717 */
6718 static int changed_cb(struct btrfs_path *left_path,
6719 struct btrfs_path *right_path,
6720 struct btrfs_key *key,
6721 enum btrfs_compare_tree_result result,
6722 void *ctx)
6723 {
6724 int ret = 0;
6725 struct send_ctx *sctx = ctx;
6726
6727 if (result == BTRFS_COMPARE_TREE_SAME) {
6728 if (key->type == BTRFS_INODE_REF_KEY ||
6729 key->type == BTRFS_INODE_EXTREF_KEY) {
6730 ret = compare_refs(sctx, left_path, key);
6731 if (!ret)
6732 return 0;
6733 if (ret < 0)
6734 return ret;
6735 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6736 return maybe_send_hole(sctx, left_path, key);
6737 } else {
6738 return 0;
6739 }
6740 result = BTRFS_COMPARE_TREE_CHANGED;
6741 ret = 0;
6742 }
6743
6744 sctx->left_path = left_path;
6745 sctx->right_path = right_path;
6746 sctx->cmp_key = key;
6747
6748 ret = finish_inode_if_needed(sctx, 0);
6749 if (ret < 0)
6750 goto out;
6751
6752 /* Ignore non-FS objects */
6753 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6754 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6755 goto out;
6756
6757 if (key->type == BTRFS_INODE_ITEM_KEY) {
6758 ret = changed_inode(sctx, result);
6759 } else if (!sctx->ignore_cur_inode) {
6760 if (key->type == BTRFS_INODE_REF_KEY ||
6761 key->type == BTRFS_INODE_EXTREF_KEY)
6762 ret = changed_ref(sctx, result);
6763 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6764 ret = changed_xattr(sctx, result);
6765 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6766 ret = changed_extent(sctx, result);
6767 }
6768
6769 out:
6770 return ret;
6771 }
6772
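/*
 * Full send: walk every item of the send root in key order, feed each one to
 * changed_cb() as a new item and finish the last processed inode at the end.
 */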
6773 static int full_send_tree(struct send_ctx *sctx)
6774 {
6775 int ret;
6776 struct btrfs_root *send_root = sctx->send_root;
6777 struct btrfs_key key;
6778 struct btrfs_path *path;
6779 struct extent_buffer *eb;
6780 int slot;
6781
6782 path = alloc_path_for_send();
6783 if (!path)
6784 return -ENOMEM;
6785
6786 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6787 key.type = BTRFS_INODE_ITEM_KEY;
6788 key.offset = 0;
6789
6790 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6791 if (ret < 0)
6792 goto out;
6793 if (ret)
6794 goto out_finish;
6795
6796 while (1) {
6797 eb = path->nodes[0];
6798 slot = path->slots[0];
6799 btrfs_item_key_to_cpu(eb, &key, slot);
6800
6801 ret = changed_cb(path, NULL, &key,
6802 BTRFS_COMPARE_TREE_NEW, sctx);
6803 if (ret < 0)
6804 goto out;
6805
6806 ret = btrfs_next_item(send_root, path);
6807 if (ret < 0)
6808 goto out;
6809 if (ret) {
6810 ret = 0;
6811 break;
6812 }
6813 }
6814
6815 out_finish:
6816 ret = finish_inode_if_needed(sctx, 1);
6817
6818 out:
6819 btrfs_free_path(path);
6820 return ret;
6821 }
6822
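/* Step one level down the tree, into the child block at the current slot. */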
6823 static int tree_move_down(struct btrfs_path *path, int *level)
6824 {
6825 struct extent_buffer *eb;
6826
6827 BUG_ON(*level == 0);
6828 eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]);
6829 if (IS_ERR(eb))
6830 return PTR_ERR(eb);
6831
6832 path->nodes[*level - 1] = eb;
6833 path->slots[*level - 1] = 0;
6834 (*level)--;
6835 return 0;
6836 }
6837
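/*
 * Advance to the next slot at the current level, moving up as many levels as
 * needed when the end of a block is reached. Returns 1 if it had to move up,
 * 0 if it only moved to the next slot, or -1 when the root level is
 * exhausted.
 */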
6838 static int tree_move_next_or_upnext(struct btrfs_path *path,
6839 int *level, int root_level)
6840 {
6841 int ret = 0;
6842 int nritems;
6843 nritems = btrfs_header_nritems(path->nodes[*level]);
6844
6845 path->slots[*level]++;
6846
6847 while (path->slots[*level] >= nritems) {
6848 if (*level == root_level)
6849 return -1;
6850
6851 /* move upnext */
6852 path->slots[*level] = 0;
6853 free_extent_buffer(path->nodes[*level]);
6854 path->nodes[*level] = NULL;
6855 (*level)++;
6856 path->slots[*level]++;
6857
6858 nritems = btrfs_header_nritems(path->nodes[*level]);
6859 ret = 1;
6860 }
6861 return ret;
6862 }
6863
6864 /*
6865 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
6866 * or down.
6867 */
6868 static int tree_advance(struct btrfs_path *path,
6869 int *level, int root_level,
6870 int allow_down,
6871 struct btrfs_key *key)
6872 {
6873 int ret;
6874
6875 if (*level == 0 || !allow_down) {
6876 ret = tree_move_next_or_upnext(path, level, root_level);
6877 } else {
6878 ret = tree_move_down(path, level);
6879 }
6880 if (ret >= 0) {
6881 if (*level == 0)
6882 btrfs_item_key_to_cpu(path->nodes[*level], key,
6883 path->slots[*level]);
6884 else
6885 btrfs_node_key_to_cpu(path->nodes[*level], key,
6886 path->slots[*level]);
6887 }
6888 return ret;
6889 }
6890
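/*
 * Compare the items at the current slots of both paths. Returns 0 if their
 * sizes and contents are identical, 1 otherwise. @tmp_buf must be able to
 * hold a full item (it is allocated with the tree's nodesize).
 */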
6891 static int tree_compare_item(struct btrfs_path *left_path,
6892 struct btrfs_path *right_path,
6893 char *tmp_buf)
6894 {
6895 int cmp;
6896 int len1, len2;
6897 unsigned long off1, off2;
6898
6899 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
6900 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
6901 if (len1 != len2)
6902 return 1;
6903
6904 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
6905 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
6906 right_path->slots[0]);
6907
6908 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
6909
6910 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
6911 if (cmp)
6912 return 1;
6913 return 0;
6914 }
6915
6916 /*
6917 * This function compares two trees and calls the provided callback for
6918 * every changed/new/deleted item it finds.
6919 * If shared tree blocks are encountered, whole subtrees are skipped, making
6920 * the compare pretty fast on snapshotted subvolumes.
6921 *
6922 * This currently works on commit roots only. As commit roots are read only,
6923 * we don't do any locking. The commit roots are protected with transactions.
6924 * Transactions are ended and rejoined when a commit is tried in between.
6925 *
6926 * This function checks for modifications done to the trees while comparing.
6927 * If it detects a change, it aborts immediately.
6928 */
6929 static int btrfs_compare_trees(struct btrfs_root *left_root,
6930 struct btrfs_root *right_root,
6931 btrfs_changed_cb_t changed_cb, void *ctx)
6932 {
6933 struct btrfs_fs_info *fs_info = left_root->fs_info;
6934 int ret;
6935 int cmp;
6936 struct btrfs_path *left_path = NULL;
6937 struct btrfs_path *right_path = NULL;
6938 struct btrfs_key left_key;
6939 struct btrfs_key right_key;
6940 char *tmp_buf = NULL;
6941 int left_root_level;
6942 int right_root_level;
6943 int left_level;
6944 int right_level;
6945 int left_end_reached;
6946 int right_end_reached;
6947 int advance_left;
6948 int advance_right;
6949 u64 left_blockptr;
6950 u64 right_blockptr;
6951 u64 left_gen;
6952 u64 right_gen;
6953
6954 left_path = btrfs_alloc_path();
6955 if (!left_path) {
6956 ret = -ENOMEM;
6957 goto out;
6958 }
6959 right_path = btrfs_alloc_path();
6960 if (!right_path) {
6961 ret = -ENOMEM;
6962 goto out;
6963 }
6964
6965 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
6966 if (!tmp_buf) {
6967 ret = -ENOMEM;
6968 goto out;
6969 }
6970
6971 left_path->search_commit_root = 1;
6972 left_path->skip_locking = 1;
6973 right_path->search_commit_root = 1;
6974 right_path->skip_locking = 1;
6975
6976 /*
6977 * Strategy: Go to the first items of both trees. Then do
6978 *
6979 * If both trees are at level 0
6980 * Compare keys of current items
6981 * If left < right treat left item as new, advance left tree
6982 * and repeat
6983 * If left > right treat right item as deleted, advance right tree
6984 * and repeat
6985 * If left == right do deep compare of items, treat as changed if
6986 * needed, advance both trees and repeat
6987 * If both trees are at the same level but not at level 0
6988 * Compare keys of current nodes/leafs
6989 * If left < right advance left tree and repeat
6990 * If left > right advance right tree and repeat
6991 * If left == right compare blockptrs of the next nodes/leafs
6992 * If they match advance both trees but stay at the same level
6993 * and repeat
6994 * If they don't match advance both trees while allowing to go
6995 * deeper and repeat
6996 * If tree levels are different
6997 * Advance the tree that needs it and repeat
6998 *
6999 * Advancing a tree means:
7000 * If we are at level 0, try to go to the next slot. If that's not
7001 * possible, go one level up and repeat. Stop when we found a level
7002 * where we could go to the next slot. We may at this point be on a
7003 * node or a leaf.
7004 *
7005 * If we are not at level 0 and not on shared tree blocks, go one
7006 * level deeper.
7007 *
7008 * If we are not at level 0 and on shared tree blocks, go one slot to
7009 * the right if possible or go up and right.
7010 */
7011
7012 down_read(&fs_info->commit_root_sem);
7013 left_level = btrfs_header_level(left_root->commit_root);
7014 left_root_level = left_level;
7015 left_path->nodes[left_level] =
7016 btrfs_clone_extent_buffer(left_root->commit_root);
7017 if (!left_path->nodes[left_level]) {
7018 up_read(&fs_info->commit_root_sem);
7019 ret = -ENOMEM;
7020 goto out;
7021 }
7022
7023 right_level = btrfs_header_level(right_root->commit_root);
7024 right_root_level = right_level;
7025 right_path->nodes[right_level] =
7026 btrfs_clone_extent_buffer(right_root->commit_root);
7027 if (!right_path->nodes[right_level]) {
7028 up_read(&fs_info->commit_root_sem);
7029 ret = -ENOMEM;
7030 goto out;
7031 }
7032 up_read(&fs_info->commit_root_sem);
7033
7034 if (left_level == 0)
7035 btrfs_item_key_to_cpu(left_path->nodes[left_level],
7036 &left_key, left_path->slots[left_level]);
7037 else
7038 btrfs_node_key_to_cpu(left_path->nodes[left_level],
7039 &left_key, left_path->slots[left_level]);
7040 if (right_level == 0)
7041 btrfs_item_key_to_cpu(right_path->nodes[right_level],
7042 &right_key, right_path->slots[right_level]);
7043 else
7044 btrfs_node_key_to_cpu(right_path->nodes[right_level],
7045 &right_key, right_path->slots[right_level]);
7046
7047 left_end_reached = right_end_reached = 0;
7048 advance_left = advance_right = 0;
7049
7050 while (1) {
7051 cond_resched();
7052 if (advance_left && !left_end_reached) {
7053 ret = tree_advance(left_path, &left_level,
7054 left_root_level,
7055 advance_left != ADVANCE_ONLY_NEXT,
7056 &left_key);
7057 if (ret == -1)
7058 left_end_reached = ADVANCE;
7059 else if (ret < 0)
7060 goto out;
7061 advance_left = 0;
7062 }
7063 if (advance_right && !right_end_reached) {
7064 ret = tree_advance(right_path, &right_level,
7065 right_root_level,
7066 advance_right != ADVANCE_ONLY_NEXT,
7067 &right_key);
7068 if (ret == -1)
7069 right_end_reached = ADVANCE;
7070 else if (ret < 0)
7071 goto out;
7072 advance_right = 0;
7073 }
7074
7075 if (left_end_reached && right_end_reached) {
7076 ret = 0;
7077 goto out;
7078 } else if (left_end_reached) {
7079 if (right_level == 0) {
7080 ret = changed_cb(left_path, right_path,
7081 &right_key,
7082 BTRFS_COMPARE_TREE_DELETED,
7083 ctx);
7084 if (ret < 0)
7085 goto out;
7086 }
7087 advance_right = ADVANCE;
7088 continue;
7089 } else if (right_end_reached) {
7090 if (left_level == 0) {
7091 ret = changed_cb(left_path, right_path,
7092 &left_key,
7093 BTRFS_COMPARE_TREE_NEW,
7094 ctx);
7095 if (ret < 0)
7096 goto out;
7097 }
7098 advance_left = ADVANCE;
7099 continue;
7100 }
7101
7102 if (left_level == 0 && right_level == 0) {
7103 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7104 if (cmp < 0) {
7105 ret = changed_cb(left_path, right_path,
7106 &left_key,
7107 BTRFS_COMPARE_TREE_NEW,
7108 ctx);
7109 if (ret < 0)
7110 goto out;
7111 advance_left = ADVANCE;
7112 } else if (cmp > 0) {
7113 ret = changed_cb(left_path, right_path,
7114 &right_key,
7115 BTRFS_COMPARE_TREE_DELETED,
7116 ctx);
7117 if (ret < 0)
7118 goto out;
7119 advance_right = ADVANCE;
7120 } else {
7121 enum btrfs_compare_tree_result result;
7122
7123 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
7124 ret = tree_compare_item(left_path, right_path,
7125 tmp_buf);
7126 if (ret)
7127 result = BTRFS_COMPARE_TREE_CHANGED;
7128 else
7129 result = BTRFS_COMPARE_TREE_SAME;
7130 ret = changed_cb(left_path, right_path,
7131 &left_key, result, ctx);
7132 if (ret < 0)
7133 goto out;
7134 advance_left = ADVANCE;
7135 advance_right = ADVANCE;
7136 }
7137 } else if (left_level == right_level) {
7138 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7139 if (cmp < 0) {
7140 advance_left = ADVANCE;
7141 } else if (cmp > 0) {
7142 advance_right = ADVANCE;
7143 } else {
7144 left_blockptr = btrfs_node_blockptr(
7145 left_path->nodes[left_level],
7146 left_path->slots[left_level]);
7147 right_blockptr = btrfs_node_blockptr(
7148 right_path->nodes[right_level],
7149 right_path->slots[right_level]);
7150 left_gen = btrfs_node_ptr_generation(
7151 left_path->nodes[left_level],
7152 left_path->slots[left_level]);
7153 right_gen = btrfs_node_ptr_generation(
7154 right_path->nodes[right_level],
7155 right_path->slots[right_level]);
7156 if (left_blockptr == right_blockptr &&
7157 left_gen == right_gen) {
7158 /*
7159 * As we're on a shared block, don't
7160 * allow to go deeper.
7161 */
7162 advance_left = ADVANCE_ONLY_NEXT;
7163 advance_right = ADVANCE_ONLY_NEXT;
7164 } else {
7165 advance_left = ADVANCE;
7166 advance_right = ADVANCE;
7167 }
7168 }
7169 } else if (left_level < right_level) {
7170 advance_right = ADVANCE;
7171 } else {
7172 advance_left = ADVANCE;
7173 }
7174 }
7175
7176 out:
7177 btrfs_free_path(left_path);
7178 btrfs_free_path(right_path);
7179 kvfree(tmp_buf);
7180 return ret;
7181 }
7182
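/*
 * Emit the whole send stream: the stream header, the subvolume/snapshot
 * begin command, and then either an incremental diff against the parent
 * root or a full send of the tree.
 */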
7183 static int send_subvol(struct send_ctx *sctx)
7184 {
7185 int ret;
7186
7187 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
7188 ret = send_header(sctx);
7189 if (ret < 0)
7190 goto out;
7191 }
7192
7193 ret = send_subvol_begin(sctx);
7194 if (ret < 0)
7195 goto out;
7196
7197 if (sctx->parent_root) {
7198 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
7199 changed_cb, sctx);
7200 if (ret < 0)
7201 goto out;
7202 ret = finish_inode_if_needed(sctx, 1);
7203 if (ret < 0)
7204 goto out;
7205 } else {
7206 ret = full_send_tree(sctx);
7207 if (ret < 0)
7208 goto out;
7209 }
7210
7211 out:
7212 free_recorded_refs(sctx);
7213 return ret;
7214 }
7215
7216 /*
7217 * If orphan cleanup did remove any orphans from a root, it means the tree
7218 * was modified and therefore the commit root is not the same as the current
7219 * root anymore. This is a problem, because send uses the commit root and
7220 * therefore can see inode items that don't exist in the current root anymore,
7221 * and for example make calls to btrfs_iget, which will do tree lookups based
7222 * on the current root and not on the commit root. Those lookups will fail,
7223 * returning a -ESTALE error, and making send fail with that error. So make
7224 * sure a send does not see any orphans we have just removed, and that it will
7225 * see the same inodes regardless of whether a transaction commit happened
7226 * before it started (meaning that the commit root will be the same as the
7227 * current root) or not.
7228 */
7229 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
7230 {
7231 int i;
7232 struct btrfs_trans_handle *trans = NULL;
7233
7234 again:
7235 if (sctx->parent_root &&
7236 sctx->parent_root->node != sctx->parent_root->commit_root)
7237 goto commit_trans;
7238
7239 for (i = 0; i < sctx->clone_roots_cnt; i++)
7240 if (sctx->clone_roots[i].root->node !=
7241 sctx->clone_roots[i].root->commit_root)
7242 goto commit_trans;
7243
7244 if (trans)
7245 return btrfs_end_transaction(trans);
7246
7247 return 0;
7248
7249 commit_trans:
7250 /* Use any root, all fs roots will get their commit roots updated. */
7251 if (!trans) {
7252 trans = btrfs_join_transaction(sctx->send_root);
7253 if (IS_ERR(trans))
7254 return PTR_ERR(trans);
7255 goto again;
7256 }
7257
7258 return btrfs_commit_transaction(trans);
7259 }
7260
7261 /*
7262 * Make sure any existing delalloc is flushed for any root used by a send
7263 * operation so that we do not miss any data and we do not race with writeback
7264 * finishing and changing a tree while send is using the tree. This could
7265 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
7266 * a send operation then uses the subvolume.
7267 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
7268 */
7269 static int flush_delalloc_roots(struct send_ctx *sctx)
7270 {
7271 struct btrfs_root *root = sctx->parent_root;
7272 int ret;
7273 int i;
7274
7275 if (root) {
7276 ret = btrfs_start_delalloc_snapshot(root);
7277 if (ret)
7278 return ret;
7279 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7280 }
7281
7282 for (i = 0; i < sctx->clone_roots_cnt; i++) {
7283 root = sctx->clone_roots[i].root;
7284 ret = btrfs_start_delalloc_snapshot(root);
7285 if (ret)
7286 return ret;
7287 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7288 }
7289
7290 return 0;
7291 }
7292
7293 static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
7294 {
7295 spin_lock(&root->root_item_lock);
7296 root->send_in_progress--;
7297 /*
7298 * Not much left to do, we don't know why it's unbalanced and
7299 * can't blindly reset it to 0.
7300 */
7301 if (root->send_in_progress < 0)
7302 btrfs_err(root->fs_info,
7303 "send_in_progress unbalanced %d root %llu",
7304 root->send_in_progress, root->root_key.objectid);
7305 spin_unlock(&root->root_item_lock);
7306 }
7307
7308 static void dedupe_in_progress_warn(const struct btrfs_root *root)
7309 {
7310 btrfs_warn_rl(root->fs_info,
7311 "cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
7312 root->root_key.objectid, root->dedupe_in_progress);
7313 }
7314
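/*
 * Entry point of the send ioctl: validate the arguments, look up and pin the
 * involved roots (send root, parent root and clone sources), set up the send
 * context and stream the subvolume to the file descriptor provided by user
 * space.
 */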
7315 long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
7316 {
7317 int ret = 0;
7318 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
7319 struct btrfs_fs_info *fs_info = send_root->fs_info;
7320 struct btrfs_root *clone_root;
7321 struct btrfs_key key;
7322 struct send_ctx *sctx = NULL;
7323 u32 i;
7324 u64 *clone_sources_tmp = NULL;
7325 int clone_sources_to_rollback = 0;
7326 unsigned alloc_size;
7327 int sort_clone_roots = 0;
7328 int index;
7329
7330 if (!capable(CAP_SYS_ADMIN))
7331 return -EPERM;
7332
7333 /*
7334 * The subvolume must remain read-only during send, so protect against
7335 * making it RW. This also protects against deletion.
7336 */
7337 spin_lock(&send_root->root_item_lock);
7338 if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
7339 dedupe_in_progress_warn(send_root);
7340 spin_unlock(&send_root->root_item_lock);
7341 return -EAGAIN;
7342 }
7343 send_root->send_in_progress++;
7344 spin_unlock(&send_root->root_item_lock);
7345
7346 /*
7347 * Userspace tools do the checks and warn the user if it's
7348 * not RO.
7349 */
7350 if (!btrfs_root_readonly(send_root)) {
7351 ret = -EPERM;
7352 goto out;
7353 }
7354
7355 /*
7356 * Check that we don't overflow at later allocations: we request
7357 * clone_sources_count + 1 items, and compare to unsigned long inside
7358 * access_ok. Also set an upper limit for allocation size so this can't
7359 * easily exhaust memory. Max number of clone sources is about 200K.
7360 */
7361 if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
7362 ret = -EINVAL;
7363 goto out;
7364 }
7365
7366 if (!access_ok(arg->clone_sources,
7367 sizeof(*arg->clone_sources) *
7368 arg->clone_sources_count)) {
7369 ret = -EFAULT;
7370 goto out;
7371 }
7372
7373 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
7374 ret = -EOPNOTSUPP;
7375 goto out;
7376 }
7377
7378 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
7379 if (!sctx) {
7380 ret = -ENOMEM;
7381 goto out;
7382 }
7383
7384 INIT_LIST_HEAD(&sctx->new_refs);
7385 INIT_LIST_HEAD(&sctx->deleted_refs);
7386 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
7387 INIT_LIST_HEAD(&sctx->name_cache_list);
7388
7389 sctx->flags = arg->flags;
7390
7391 sctx->send_filp = fget(arg->send_fd);
7392 if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
7393 ret = -EBADF;
7394 goto out;
7395 }
7396
7397 sctx->send_root = send_root;
7398 /*
7399 * Unlikely but possible, if the subvolume is marked for deletion but
7400 * is slow to remove the directory entry, send can still be started
7401 */
7402 if (btrfs_root_dead(sctx->send_root)) {
7403 ret = -EPERM;
7404 goto out;
7405 }
7406
7407 sctx->clone_roots_cnt = arg->clone_sources_count;
7408
7409 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
7410 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
7411 if (!sctx->send_buf) {
7412 ret = -ENOMEM;
7413 goto out;
7414 }
7415
7416 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
7417 if (!sctx->read_buf) {
7418 ret = -ENOMEM;
7419 goto out;
7420 }
7421
7422 sctx->pending_dir_moves = RB_ROOT;
7423 sctx->waiting_dir_moves = RB_ROOT;
7424 sctx->orphan_dirs = RB_ROOT;
7425
7426 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
7427
7428 sctx->clone_roots = kvzalloc(alloc_size, GFP_KERNEL);
7429 if (!sctx->clone_roots) {
7430 ret = -ENOMEM;
7431 goto out;
7432 }
7433
7434 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
7435
7436 if (arg->clone_sources_count) {
7437 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
7438 if (!clone_sources_tmp) {
7439 ret = -ENOMEM;
7440 goto out;
7441 }
7442
7443 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
7444 alloc_size);
7445 if (ret) {
7446 ret = -EFAULT;
7447 goto out;
7448 }
7449
7450 for (i = 0; i < arg->clone_sources_count; i++) {
7451 key.objectid = clone_sources_tmp[i];
7452 key.type = BTRFS_ROOT_ITEM_KEY;
7453 key.offset = (u64)-1;
7454
7455 index = srcu_read_lock(&fs_info->subvol_srcu);
7456
7457 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
7458 if (IS_ERR(clone_root)) {
7459 srcu_read_unlock(&fs_info->subvol_srcu, index);
7460 ret = PTR_ERR(clone_root);
7461 goto out;
7462 }
7463 spin_lock(&clone_root->root_item_lock);
7464 if (!btrfs_root_readonly(clone_root) ||
7465 btrfs_root_dead(clone_root)) {
7466 spin_unlock(&clone_root->root_item_lock);
7467 srcu_read_unlock(&fs_info->subvol_srcu, index);
7468 ret = -EPERM;
7469 goto out;
7470 }
7471 if (clone_root->dedupe_in_progress) {
7472 dedupe_in_progress_warn(clone_root);
7473 spin_unlock(&clone_root->root_item_lock);
7474 srcu_read_unlock(&fs_info->subvol_srcu, index);
7475 ret = -EAGAIN;
7476 goto out;
7477 }
7478 clone_root->send_in_progress++;
7479 spin_unlock(&clone_root->root_item_lock);
7480 srcu_read_unlock(&fs_info->subvol_srcu, index);
7481
7482 sctx->clone_roots[i].root = clone_root;
7483 clone_sources_to_rollback = i + 1;
7484 }
7485 kvfree(clone_sources_tmp);
7486 clone_sources_tmp = NULL;
7487 }
7488
7489 if (arg->parent_root) {
7490 key.objectid = arg->parent_root;
7491 key.type = BTRFS_ROOT_ITEM_KEY;
7492 key.offset = (u64)-1;
7493
7494 index = srcu_read_lock(&fs_info->subvol_srcu);
7495
7496 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
7497 if (IS_ERR(sctx->parent_root)) {
7498 srcu_read_unlock(&fs_info->subvol_srcu, index);
7499 ret = PTR_ERR(sctx->parent_root);
7500 goto out;
7501 }
7502
7503 spin_lock(&sctx->parent_root->root_item_lock);
7504 sctx->parent_root->send_in_progress++;
7505 if (!btrfs_root_readonly(sctx->parent_root) ||
7506 btrfs_root_dead(sctx->parent_root)) {
7507 spin_unlock(&sctx->parent_root->root_item_lock);
7508 srcu_read_unlock(&fs_info->subvol_srcu, index);
7509 ret = -EPERM;
7510 goto out;
7511 }
7512 if (sctx->parent_root->dedupe_in_progress) {
7513 dedupe_in_progress_warn(sctx->parent_root);
7514 spin_unlock(&sctx->parent_root->root_item_lock);
7515 srcu_read_unlock(&fs_info->subvol_srcu, index);
7516 ret = -EAGAIN;
7517 goto out;
7518 }
7519 spin_unlock(&sctx->parent_root->root_item_lock);
7520
7521 srcu_read_unlock(&fs_info->subvol_srcu, index);
7522 }
7523
7524 /*
7525 * Clones from send_root are allowed, but only if the clone source
7526 * is behind the current send position. This is checked while searching
7527 * for possible clone sources.
7528 */
7529 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
7530
7531 /* We do a bsearch later */
7532 sort(sctx->clone_roots, sctx->clone_roots_cnt,
7533 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
7534 NULL);
7535 sort_clone_roots = 1;
7536
7537 ret = flush_delalloc_roots(sctx);
7538 if (ret)
7539 goto out;
7540
7541 ret = ensure_commit_roots_uptodate(sctx);
7542 if (ret)
7543 goto out;
7544
7545 mutex_lock(&fs_info->balance_mutex);
7546 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
7547 mutex_unlock(&fs_info->balance_mutex);
7548 btrfs_warn_rl(fs_info,
7549 "cannot run send because a balance operation is in progress");
7550 ret = -EAGAIN;
7551 goto out;
7552 }
7553 fs_info->send_in_progress++;
7554 mutex_unlock(&fs_info->balance_mutex);
7555
7556 current->journal_info = BTRFS_SEND_TRANS_STUB;
7557 ret = send_subvol(sctx);
7558 current->journal_info = NULL;
7559 mutex_lock(&fs_info->balance_mutex);
7560 fs_info->send_in_progress--;
7561 mutex_unlock(&fs_info->balance_mutex);
7562 if (ret < 0)
7563 goto out;
7564
7565 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
7566 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
7567 if (ret < 0)
7568 goto out;
7569 ret = send_cmd(sctx);
7570 if (ret < 0)
7571 goto out;
7572 }
7573
7574 out:
7575 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
7576 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
7577 struct rb_node *n;
7578 struct pending_dir_move *pm;
7579
7580 n = rb_first(&sctx->pending_dir_moves);
7581 pm = rb_entry(n, struct pending_dir_move, node);
7582 while (!list_empty(&pm->list)) {
7583 struct pending_dir_move *pm2;
7584
7585 pm2 = list_first_entry(&pm->list,
7586 struct pending_dir_move, list);
7587 free_pending_move(sctx, pm2);
7588 }
7589 free_pending_move(sctx, pm);
7590 }
7591
7592 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
7593 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
7594 struct rb_node *n;
7595 struct waiting_dir_move *dm;
7596
7597 n = rb_first(&sctx->waiting_dir_moves);
7598 dm = rb_entry(n, struct waiting_dir_move, node);
7599 rb_erase(&dm->node, &sctx->waiting_dir_moves);
7600 kfree(dm);
7601 }
7602
7603 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
7604 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
7605 struct rb_node *n;
7606 struct orphan_dir_info *odi;
7607
7608 n = rb_first(&sctx->orphan_dirs);
7609 odi = rb_entry(n, struct orphan_dir_info, node);
7610 free_orphan_dir_info(sctx, odi);
7611 }
7612
7613 if (sort_clone_roots) {
7614 for (i = 0; i < sctx->clone_roots_cnt; i++)
7615 btrfs_root_dec_send_in_progress(
7616 sctx->clone_roots[i].root);
7617 } else {
7618 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
7619 btrfs_root_dec_send_in_progress(
7620 sctx->clone_roots[i].root);
7621
7622 btrfs_root_dec_send_in_progress(send_root);
7623 }
7624 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
7625 btrfs_root_dec_send_in_progress(sctx->parent_root);
7626
7627 kvfree(clone_sources_tmp);
7628
7629 if (sctx) {
7630 if (sctx->send_filp)
7631 fput(sctx->send_filp);
7632
7633 kvfree(sctx->clone_roots);
7634 kvfree(sctx->send_buf);
7635 kvfree(sctx->read_buf);
7636
7637 name_cache_free(sctx);
7638
7639 kfree(sctx);
7640 }
7641
7642 return ret;
7643 }
7644