6#include <linux/bsearch.h>
7#include <linux/fs.h>
8#include <linux/file.h>
9#include <linux/sort.h>
10#include <linux/mount.h>
11#include <linux/xattr.h>
12#include <linux/posix_acl_xattr.h>
13#include <linux/radix-tree.h>
14#include <linux/vmalloc.h>
15#include <linux/string.h>
16#include <linux/compat.h>
17#include <linux/crc32c.h>
18
19#include "send.h"
20#include "backref.h"
21#include "locking.h"
22#include "disk-io.h"
23#include "btrfs_inode.h"
24#include "transaction.h"
25#include "compression.h"
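/*
 * Helper for building path names of initially unknown length.  Paths can be
 * built left to right (normal) or right to left (reversed, used when walking
 * up from an inode towards the subvolume root) and later unreversed.  Short
 * paths live in the inline buffer; fs_path_ensure_buf() switches to a heap
 * allocation when more room is needed.
 */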
34struct fs_path {
35 union {
36 struct {
37 char *start;
38 char *end;
39
40 char *buf;
41 unsigned short buf_len:15;
42 unsigned short reversed:1;
43 char inline_buf[];
44 };
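		/*
		 * The pad keeps sizeof(struct fs_path) at 256 bytes, which is
		 * what FS_PATH_INLINE_SIZE and the inline buffer are based on.
		 */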
50 char pad[256];
51 };
52};
53#define FS_PATH_INLINE_SIZE \
54 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
55
56
57
58struct clone_root {
59 struct btrfs_root *root;
60 u64 ino;
61 u64 offset;
62
63 u64 found_refs;
64};
65
66#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
67#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
68
69struct send_ctx {
70 struct file *send_filp;
71 loff_t send_off;
72 char *send_buf;
73 u32 send_size;
74 u32 send_max_size;
75 u64 total_send_size;
76 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
77 u64 flags;
78
79 struct btrfs_root *send_root;
80 struct btrfs_root *parent_root;
81 struct clone_root *clone_roots;
82 int clone_roots_cnt;
83
84
85 struct btrfs_path *left_path;
86 struct btrfs_path *right_path;
87 struct btrfs_key *cmp_key;
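	/* State of the inode that is currently being processed. */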
93 u64 cur_ino;
94 u64 cur_inode_gen;
95 int cur_inode_new;
96 int cur_inode_new_gen;
97 int cur_inode_deleted;
98 u64 cur_inode_size;
99 u64 cur_inode_mode;
100 u64 cur_inode_rdev;
101 u64 cur_inode_last_extent;
102 u64 cur_inode_next_write_offset;
103 bool ignore_cur_inode;
104
105 u64 send_progress;
106
107 struct list_head new_refs;
108 struct list_head deleted_refs;
109
110 struct radix_tree_root name_cache;
111 struct list_head name_cache_list;
112 int name_cache_size;
113
114 struct file_ra_state ra;
115
116 char *read_buf;
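	/*
	 * Directories whose rename/move has to be delayed until an ancestor
	 * directory reaches its final location (tracked as struct
	 * pending_dir_move entries).
	 */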
163 struct rb_root pending_dir_moves;
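	/*
	 * Inodes that are waiting for a pending move/rename to complete
	 * (tracked as struct waiting_dir_move entries).
	 */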
170 struct rb_root waiting_dir_moves;
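	/*
	 * Directories that could not be removed (rmdir) when they were
	 * processed, tracked as struct orphan_dir_info entries so the rmdir
	 * can be retried later.
	 */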
211 struct rb_root orphan_dirs;
212};
213
214struct pending_dir_move {
215 struct rb_node node;
216 struct list_head list;
217 u64 parent_ino;
218 u64 ino;
219 u64 gen;
220 struct list_head update_refs;
221};
222
223struct waiting_dir_move {
224 struct rb_node node;
225 u64 ino;
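	/*
	 * Directory inode, if any, whose rmdir was postponed until this move
	 * is done.
	 */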
231 u64 rmdir_ino;
232 bool orphanized;
233};
234
235struct orphan_dir_info {
236 struct rb_node node;
237 u64 ino;
238 u64 gen;
239 u64 last_dir_index_offset;
240};
241
242struct name_cache_entry {
243 struct list_head list;
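	/*
	 * A radix tree indexed by inode number backs the name cache.  Since
	 * several entries can share the same index (same inode number with
	 * different generations, or 64 bit inums truncated to unsigned long),
	 * each tree slot holds a list head and entries are linked through
	 * radix_list.
	 */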
252 struct list_head radix_list;
253 u64 ino;
254 u64 gen;
255 u64 parent_ino;
256 u64 parent_gen;
257 int ret;
258 int need_later_update;
259 int name_len;
260 char name[];
261};
262
263#define ADVANCE 1
264#define ADVANCE_ONLY_NEXT -1
265
266enum btrfs_compare_tree_result {
267 BTRFS_COMPARE_TREE_NEW,
268 BTRFS_COMPARE_TREE_DELETED,
269 BTRFS_COMPARE_TREE_CHANGED,
270 BTRFS_COMPARE_TREE_SAME,
271};
272typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path,
273 struct btrfs_path *right_path,
274 struct btrfs_key *key,
275 enum btrfs_compare_tree_result result,
276 void *ctx);
277
278__cold
279static void inconsistent_snapshot_error(struct send_ctx *sctx,
280 enum btrfs_compare_tree_result result,
281 const char *what)
282{
283 const char *result_string;
284
285 switch (result) {
286 case BTRFS_COMPARE_TREE_NEW:
287 result_string = "new";
288 break;
289 case BTRFS_COMPARE_TREE_DELETED:
290 result_string = "deleted";
291 break;
292 case BTRFS_COMPARE_TREE_CHANGED:
293 result_string = "updated";
294 break;
295 case BTRFS_COMPARE_TREE_SAME:
296 ASSERT(0);
297 result_string = "unchanged";
298 break;
299 default:
300 ASSERT(0);
301 result_string = "unexpected";
302 }
303
304 btrfs_err(sctx->send_root->fs_info,
305 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
306 result_string, what, sctx->cmp_key->objectid,
307 sctx->send_root->root_key.objectid,
308 (sctx->parent_root ?
309 sctx->parent_root->root_key.objectid : 0));
310}
311
312static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
313
314static struct waiting_dir_move *
315get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
316
317static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
318
319static int need_send_hole(struct send_ctx *sctx)
320{
321 return (sctx->parent_root && !sctx->cur_inode_new &&
322 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
323 S_ISREG(sctx->cur_inode_mode));
324}
325
326static void fs_path_reset(struct fs_path *p)
327{
328 if (p->reversed) {
329 p->start = p->buf + p->buf_len - 1;
330 p->end = p->start;
331 *p->start = 0;
332 } else {
333 p->start = p->buf;
334 p->end = p->start;
335 *p->start = 0;
336 }
337}
338
339static struct fs_path *fs_path_alloc(void)
340{
341 struct fs_path *p;
342
343 p = kmalloc(sizeof(*p), GFP_KERNEL);
344 if (!p)
345 return NULL;
346 p->reversed = 0;
347 p->buf = p->inline_buf;
348 p->buf_len = FS_PATH_INLINE_SIZE;
349 fs_path_reset(p);
350 return p;
351}
352
353static struct fs_path *fs_path_alloc_reversed(void)
354{
355 struct fs_path *p;
356
357 p = fs_path_alloc();
358 if (!p)
359 return NULL;
360 p->reversed = 1;
361 fs_path_reset(p);
362 return p;
363}
364
365static void fs_path_free(struct fs_path *p)
366{
367 if (!p)
368 return;
369 if (p->buf != p->inline_buf)
370 kfree(p->buf);
371 kfree(p);
372}
373
374static int fs_path_len(struct fs_path *p)
375{
376 return p->end - p->start;
377}
378
379static int fs_path_ensure_buf(struct fs_path *p, int len)
380{
381 char *tmp_buf;
382 int path_len;
383 int old_buf_len;
384
385 len++;
386
387 if (p->buf_len >= len)
388 return 0;
389
390 if (len > PATH_MAX) {
391 WARN_ON(1);
392 return -ENOMEM;
393 }
394
395 path_len = p->end - p->start;
396 old_buf_len = p->buf_len;
397
398
399
400
401 if (p->buf == p->inline_buf) {
402 tmp_buf = kmalloc(len, GFP_KERNEL);
403 if (tmp_buf)
404 memcpy(tmp_buf, p->buf, old_buf_len);
405 } else {
406 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
407 }
408 if (!tmp_buf)
409 return -ENOMEM;
410 p->buf = tmp_buf;
411
412
413
414
415 p->buf_len = ksize(p->buf);
416
417 if (p->reversed) {
418 tmp_buf = p->buf + old_buf_len - path_len - 1;
419 p->end = p->buf + p->buf_len - 1;
420 p->start = p->end - path_len;
421 memmove(p->start, tmp_buf, path_len + 1);
422 } else {
423 p->start = p->buf;
424 p->end = p->start + path_len;
425 }
426 return 0;
427}
428
429static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
430 char **prepared)
431{
432 int ret;
433 int new_len;
434
435 new_len = p->end - p->start + name_len;
436 if (p->start != p->end)
437 new_len++;
438 ret = fs_path_ensure_buf(p, new_len);
439 if (ret < 0)
440 goto out;
441
442 if (p->reversed) {
443 if (p->start != p->end)
444 *--p->start = '/';
445 p->start -= name_len;
446 *prepared = p->start;
447 } else {
448 if (p->start != p->end)
449 *p->end++ = '/';
450 *prepared = p->end;
451 p->end += name_len;
452 *p->end = 0;
453 }
454
455out:
456 return ret;
457}
458
459static int fs_path_add(struct fs_path *p, const char *name, int name_len)
460{
461 int ret;
462 char *prepared;
463
464 ret = fs_path_prepare_for_add(p, name_len, &prepared);
465 if (ret < 0)
466 goto out;
467 memcpy(prepared, name, name_len);
468
469out:
470 return ret;
471}
472
473static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
474{
475 int ret;
476 char *prepared;
477
478 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
479 if (ret < 0)
480 goto out;
481 memcpy(prepared, p2->start, p2->end - p2->start);
482
483out:
484 return ret;
485}
486
487static int fs_path_add_from_extent_buffer(struct fs_path *p,
488 struct extent_buffer *eb,
489 unsigned long off, int len)
490{
491 int ret;
492 char *prepared;
493
494 ret = fs_path_prepare_for_add(p, len, &prepared);
495 if (ret < 0)
496 goto out;
497
498 read_extent_buffer(eb, prepared, off, len);
499
500out:
501 return ret;
502}
503
504static int fs_path_copy(struct fs_path *p, struct fs_path *from)
505{
506 int ret;
507
508 p->reversed = from->reversed;
509 fs_path_reset(p);
510
511 ret = fs_path_add_path(p, from);
512
513 return ret;
514}
515
516
517static void fs_path_unreverse(struct fs_path *p)
518{
519 char *tmp;
520 int len;
521
522 if (!p->reversed)
523 return;
524
525 tmp = p->start;
526 len = p->end - p->start;
527 p->start = p->buf;
528 p->end = p->start + len;
529 memmove(p->start, tmp, len + 1);
530 p->reversed = 0;
531}
532
533static struct btrfs_path *alloc_path_for_send(void)
534{
535 struct btrfs_path *path;
536
537 path = btrfs_alloc_path();
538 if (!path)
539 return NULL;
540 path->search_commit_root = 1;
541 path->skip_locking = 1;
542 path->need_commit_sem = 1;
543 return path;
544}
545
546static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
547{
548 int ret;
549 u32 pos = 0;
550
551 while (pos < len) {
552 ret = kernel_write(filp, buf + pos, len - pos, off);
553
554
555
556
557 if (ret < 0)
558 return ret;
559 if (ret == 0) {
560 return -EIO;
561 }
562 pos += ret;
563 }
564
565 return 0;
566}
567
568static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
569{
570 struct btrfs_tlv_header *hdr;
571 int total_len = sizeof(*hdr) + len;
572 int left = sctx->send_max_size - sctx->send_size;
573
574 if (unlikely(left < total_len))
575 return -EOVERFLOW;
576
577 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
578 hdr->tlv_type = cpu_to_le16(attr);
579 hdr->tlv_len = cpu_to_le16(len);
580 memcpy(hdr + 1, data, len);
581 sctx->send_size += total_len;
582
583 return 0;
584}
585
586#define TLV_PUT_DEFINE_INT(bits) \
587 static int tlv_put_u##bits(struct send_ctx *sctx, \
588 u##bits attr, u##bits value) \
589 { \
590 __le##bits __tmp = cpu_to_le##bits(value); \
591 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
592 }
593
594TLV_PUT_DEFINE_INT(64)
595
596static int tlv_put_string(struct send_ctx *sctx, u16 attr,
597 const char *str, int len)
598{
599 if (len == -1)
600 len = strlen(str);
601 return tlv_put(sctx, attr, str, len);
602}
603
604static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
605 const u8 *uuid)
606{
607 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
608}
609
610static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
611 struct extent_buffer *eb,
612 struct btrfs_timespec *ts)
613{
614 struct btrfs_timespec bts;
615 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
616 return tlv_put(sctx, attr, &bts, sizeof(bts));
617}
618
619
620#define TLV_PUT(sctx, attrtype, data, attrlen) \
621 do { \
622 ret = tlv_put(sctx, attrtype, data, attrlen); \
623 if (ret < 0) \
624 goto tlv_put_failure; \
625 } while (0)
626
627#define TLV_PUT_INT(sctx, attrtype, bits, value) \
628 do { \
629 ret = tlv_put_u##bits(sctx, attrtype, value); \
630 if (ret < 0) \
631 goto tlv_put_failure; \
632 } while (0)
633
634#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
635#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
636#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
637#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
638#define TLV_PUT_STRING(sctx, attrtype, str, len) \
639 do { \
640 ret = tlv_put_string(sctx, attrtype, str, len); \
641 if (ret < 0) \
642 goto tlv_put_failure; \
643 } while (0)
644#define TLV_PUT_PATH(sctx, attrtype, p) \
645 do { \
646 ret = tlv_put_string(sctx, attrtype, p->start, \
647 p->end - p->start); \
648 if (ret < 0) \
649 goto tlv_put_failure; \
650 } while(0)
651#define TLV_PUT_UUID(sctx, attrtype, uuid) \
652 do { \
653 ret = tlv_put_uuid(sctx, attrtype, uuid); \
654 if (ret < 0) \
655 goto tlv_put_failure; \
656 } while (0)
657#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
658 do { \
659 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
660 if (ret < 0) \
661 goto tlv_put_failure; \
662 } while (0)
663
664static int send_header(struct send_ctx *sctx)
665{
666 struct btrfs_stream_header hdr;
667
668 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
669 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
670
671 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
672 &sctx->send_off);
673}
674
675
676
677
678static int begin_cmd(struct send_ctx *sctx, int cmd)
679{
680 struct btrfs_cmd_header *hdr;
681
682 if (WARN_ON(!sctx->send_buf))
683 return -EINVAL;
684
685 BUG_ON(sctx->send_size);
686
687 sctx->send_size += sizeof(*hdr);
688 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
689 hdr->cmd = cpu_to_le16(cmd);
690
691 return 0;
692}
693
694static int send_cmd(struct send_ctx *sctx)
695{
696 int ret;
697 struct btrfs_cmd_header *hdr;
698 u32 crc;
699
700 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
701 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
702 hdr->crc = 0;
703
704 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
705 hdr->crc = cpu_to_le32(crc);
706
707 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
708 &sctx->send_off);
709
710 sctx->total_send_size += sctx->send_size;
711 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
712 sctx->send_size = 0;
713
714 return ret;
715}
716
717
718
719
720static int send_rename(struct send_ctx *sctx,
721 struct fs_path *from, struct fs_path *to)
722{
723 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
724 int ret;
725
726 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
727
728 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
729 if (ret < 0)
730 goto out;
731
732 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
733 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
734
735 ret = send_cmd(sctx);
736
737tlv_put_failure:
738out:
739 return ret;
740}
741
742
743
744
745static int send_link(struct send_ctx *sctx,
746 struct fs_path *path, struct fs_path *lnk)
747{
748 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
749 int ret;
750
751 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
752
753 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
754 if (ret < 0)
755 goto out;
756
757 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
758 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
759
760 ret = send_cmd(sctx);
761
762tlv_put_failure:
763out:
764 return ret;
765}
766
767
768
769
770static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
771{
772 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
773 int ret;
774
775 btrfs_debug(fs_info, "send_unlink %s", path->start);
776
777 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
778 if (ret < 0)
779 goto out;
780
781 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
782
783 ret = send_cmd(sctx);
784
785tlv_put_failure:
786out:
787 return ret;
788}
789
790
791
792
793static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
794{
795 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
796 int ret;
797
798 btrfs_debug(fs_info, "send_rmdir %s", path->start);
799
800 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
801 if (ret < 0)
802 goto out;
803
804 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
805
806 ret = send_cmd(sctx);
807
808tlv_put_failure:
809out:
810 return ret;
811}
812
813
814
815
816static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
817 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
818 u64 *gid, u64 *rdev)
819{
820 int ret;
821 struct btrfs_inode_item *ii;
822 struct btrfs_key key;
823
824 key.objectid = ino;
825 key.type = BTRFS_INODE_ITEM_KEY;
826 key.offset = 0;
827 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
828 if (ret) {
829 if (ret > 0)
830 ret = -ENOENT;
831 return ret;
832 }
833
834 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
835 struct btrfs_inode_item);
836 if (size)
837 *size = btrfs_inode_size(path->nodes[0], ii);
838 if (gen)
839 *gen = btrfs_inode_generation(path->nodes[0], ii);
840 if (mode)
841 *mode = btrfs_inode_mode(path->nodes[0], ii);
842 if (uid)
843 *uid = btrfs_inode_uid(path->nodes[0], ii);
844 if (gid)
845 *gid = btrfs_inode_gid(path->nodes[0], ii);
846 if (rdev)
847 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
848
849 return ret;
850}
851
852static int get_inode_info(struct btrfs_root *root,
853 u64 ino, u64 *size, u64 *gen,
854 u64 *mode, u64 *uid, u64 *gid,
855 u64 *rdev)
856{
857 struct btrfs_path *path;
858 int ret;
859
860 path = alloc_path_for_send();
861 if (!path)
862 return -ENOMEM;
863 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
864 rdev);
865 btrfs_free_path(path);
866 return ret;
867}
868
869typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
870 struct fs_path *p,
871 void *ctx);
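/*
 * Iterate over all entries of a single INODE_REF or INODE_EXTREF item.  For
 * each name the iterate callback is invoked with the parent directory, the
 * dir index and either the plain name or, if resolve is set, the full path
 * obtained via btrfs_ref_to_path().  A non-zero return value from the
 * callback stops the iteration.
 */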
881static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
882 struct btrfs_key *found_key, int resolve,
883 iterate_inode_ref_t iterate, void *ctx)
884{
885 struct extent_buffer *eb = path->nodes[0];
886 struct btrfs_item *item;
887 struct btrfs_inode_ref *iref;
888 struct btrfs_inode_extref *extref;
889 struct btrfs_path *tmp_path;
890 struct fs_path *p;
891 u32 cur = 0;
892 u32 total;
893 int slot = path->slots[0];
894 u32 name_len;
895 char *start;
896 int ret = 0;
897 int num = 0;
898 int index;
899 u64 dir;
900 unsigned long name_off;
901 unsigned long elem_size;
902 unsigned long ptr;
903
904 p = fs_path_alloc_reversed();
905 if (!p)
906 return -ENOMEM;
907
908 tmp_path = alloc_path_for_send();
909 if (!tmp_path) {
910 fs_path_free(p);
911 return -ENOMEM;
912 }
913
914
915 if (found_key->type == BTRFS_INODE_REF_KEY) {
916 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
917 struct btrfs_inode_ref);
918 item = btrfs_item_nr(slot);
919 total = btrfs_item_size(eb, item);
920 elem_size = sizeof(*iref);
921 } else {
922 ptr = btrfs_item_ptr_offset(eb, slot);
923 total = btrfs_item_size_nr(eb, slot);
924 elem_size = sizeof(*extref);
925 }
926
927 while (cur < total) {
928 fs_path_reset(p);
929
930 if (found_key->type == BTRFS_INODE_REF_KEY) {
931 iref = (struct btrfs_inode_ref *)(ptr + cur);
932 name_len = btrfs_inode_ref_name_len(eb, iref);
933 name_off = (unsigned long)(iref + 1);
934 index = btrfs_inode_ref_index(eb, iref);
935 dir = found_key->offset;
936 } else {
937 extref = (struct btrfs_inode_extref *)(ptr + cur);
938 name_len = btrfs_inode_extref_name_len(eb, extref);
939 name_off = (unsigned long)&extref->name;
940 index = btrfs_inode_extref_index(eb, extref);
941 dir = btrfs_inode_extref_parent(eb, extref);
942 }
943
944 if (resolve) {
945 start = btrfs_ref_to_path(root, tmp_path, name_len,
946 name_off, eb, dir,
947 p->buf, p->buf_len);
948 if (IS_ERR(start)) {
949 ret = PTR_ERR(start);
950 goto out;
951 }
952 if (start < p->buf) {
953
954 ret = fs_path_ensure_buf(p,
955 p->buf_len + p->buf - start);
956 if (ret < 0)
957 goto out;
958 start = btrfs_ref_to_path(root, tmp_path,
959 name_len, name_off,
960 eb, dir,
961 p->buf, p->buf_len);
962 if (IS_ERR(start)) {
963 ret = PTR_ERR(start);
964 goto out;
965 }
966 BUG_ON(start < p->buf);
967 }
968 p->start = start;
969 } else {
970 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
971 name_len);
972 if (ret < 0)
973 goto out;
974 }
975
976 cur += elem_size + name_len;
977 ret = iterate(num, dir, index, p, ctx);
978 if (ret)
979 goto out;
980 num++;
981 }
982
983out:
984 btrfs_free_path(tmp_path);
985 fs_path_free(p);
986 return ret;
987}
988
989typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
990 const char *name, int name_len,
991 const char *data, int data_len,
992 u8 type, void *ctx);
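/*
 * Iterate over all entries packed into a single dir item (including xattr
 * items).  Name and data of every entry are copied into a temporary buffer
 * before the iterate callback is invoked; a non-zero return value from the
 * callback stops the iteration.
 */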
1001static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1002 iterate_dir_item_t iterate, void *ctx)
1003{
1004 int ret = 0;
1005 struct extent_buffer *eb;
1006 struct btrfs_item *item;
1007 struct btrfs_dir_item *di;
1008 struct btrfs_key di_key;
1009 char *buf = NULL;
1010 int buf_len;
1011 u32 name_len;
1012 u32 data_len;
1013 u32 cur;
1014 u32 len;
1015 u32 total;
1016 int slot;
1017 int num;
1018 u8 type;
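	/*
	 * Start with a PATH_MAX sized buffer; it is grown below if a single
	 * entry (for example a large xattr value) needs more room.
	 */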
1026 buf_len = PATH_MAX;
1027 buf = kmalloc(buf_len, GFP_KERNEL);
1028 if (!buf) {
1029 ret = -ENOMEM;
1030 goto out;
1031 }
1032
1033 eb = path->nodes[0];
1034 slot = path->slots[0];
1035 item = btrfs_item_nr(slot);
1036 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1037 cur = 0;
1038 len = 0;
1039 total = btrfs_item_size(eb, item);
1040
1041 num = 0;
1042 while (cur < total) {
1043 name_len = btrfs_dir_name_len(eb, di);
1044 data_len = btrfs_dir_data_len(eb, di);
1045 type = btrfs_dir_type(eb, di);
1046 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1047
1048 if (type == BTRFS_FT_XATTR) {
1049 if (name_len > XATTR_NAME_MAX) {
1050 ret = -ENAMETOOLONG;
1051 goto out;
1052 }
1053 if (name_len + data_len >
1054 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1055 ret = -E2BIG;
1056 goto out;
1057 }
1058 } else {
1059
1060
1061
1062 if (name_len + data_len > PATH_MAX) {
1063 ret = -ENAMETOOLONG;
1064 goto out;
1065 }
1066 }
1067
1068 if (name_len + data_len > buf_len) {
1069 buf_len = name_len + data_len;
1070 if (is_vmalloc_addr(buf)) {
1071 vfree(buf);
1072 buf = NULL;
1073 } else {
1074 char *tmp = krealloc(buf, buf_len,
1075 GFP_KERNEL | __GFP_NOWARN);
1076
1077 if (!tmp)
1078 kfree(buf);
1079 buf = tmp;
1080 }
1081 if (!buf) {
1082 buf = kvmalloc(buf_len, GFP_KERNEL);
1083 if (!buf) {
1084 ret = -ENOMEM;
1085 goto out;
1086 }
1087 }
1088 }
1089
1090 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1091 name_len + data_len);
1092
1093 len = sizeof(*di) + name_len + data_len;
1094 di = (struct btrfs_dir_item *)((char *)di + len);
1095 cur += len;
1096
1097 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1098 data_len, type, ctx);
1099 if (ret < 0)
1100 goto out;
1101 if (ret) {
1102 ret = 0;
1103 goto out;
1104 }
1105
1106 num++;
1107 }
1108
1109out:
1110 kvfree(buf);
1111 return ret;
1112}
1113
1114static int __copy_first_ref(int num, u64 dir, int index,
1115 struct fs_path *p, void *ctx)
1116{
1117 int ret;
1118 struct fs_path *pt = ctx;
1119
1120 ret = fs_path_copy(pt, p);
1121 if (ret < 0)
1122 return ret;
1123
1124
1125 return 1;
1126}
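/*
 * Retrieve the first path of an inode.  If the inode has more than one
 * link/ref, only the first one is used.
 */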
1132static int get_inode_path(struct btrfs_root *root,
1133 u64 ino, struct fs_path *path)
1134{
1135 int ret;
1136 struct btrfs_key key, found_key;
1137 struct btrfs_path *p;
1138
1139 p = alloc_path_for_send();
1140 if (!p)
1141 return -ENOMEM;
1142
1143 fs_path_reset(path);
1144
1145 key.objectid = ino;
1146 key.type = BTRFS_INODE_REF_KEY;
1147 key.offset = 0;
1148
1149 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1150 if (ret < 0)
1151 goto out;
1152 if (ret) {
1153 ret = 1;
1154 goto out;
1155 }
1156 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1157 if (found_key.objectid != ino ||
1158 (found_key.type != BTRFS_INODE_REF_KEY &&
1159 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1160 ret = -ENOENT;
1161 goto out;
1162 }
1163
1164 ret = iterate_inode_ref(root, p, &found_key, 1,
1165 __copy_first_ref, path);
1166 if (ret < 0)
1167 goto out;
1168 ret = 0;
1169
1170out:
1171 btrfs_free_path(p);
1172 return ret;
1173}
1174
1175struct backref_ctx {
1176 struct send_ctx *sctx;
	/* number of total found references */
	u64 found;

	/*
	 * The (inode, offset) in the send root that the extent belongs to.
	 * Clones found in the send root itself are only accepted when they
	 * come from an inode with a lower number.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated if this is the last extent in the file */
	u64 extent_len;

	/* data offset of the file extent item (compressed extents only) */
	u64 data_offset;

	/* sanity check: a backref for the extent itself must be found */
	int found_itself;
1196};
1197
1198static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1199{
1200 u64 root = (u64)(uintptr_t)key;
1201 struct clone_root *cr = (struct clone_root *)elt;
1202
1203 if (root < cr->root->root_key.objectid)
1204 return -1;
1205 if (root > cr->root->root_key.objectid)
1206 return 1;
1207 return 0;
1208}
1209
1210static int __clone_root_cmp_sort(const void *e1, const void *e2)
1211{
1212 struct clone_root *cr1 = (struct clone_root *)e1;
1213 struct clone_root *cr2 = (struct clone_root *)e2;
1214
1215 if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
1216 return -1;
1217 if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
1218 return 1;
1219 return 0;
1220}
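/*
 * Called for every backref that is found for the extent currently being
 * processed.  Backrefs belonging to one of the clone roots are counted and
 * the lowest (ino, offset) pair seen per clone root is recorded as a
 * possible clone source.
 */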
1226static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1227{
1228 struct backref_ctx *bctx = ctx_;
1229 struct clone_root *found;
1230
1231
1232 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1233 bctx->sctx->clone_roots_cnt,
1234 sizeof(struct clone_root),
1235 __clone_root_cmp_bsearch);
1236 if (!found)
1237 return 0;
1238
1239 if (found->root == bctx->sctx->send_root &&
1240 ino == bctx->cur_objectid &&
1241 offset == bctx->cur_offset) {
1242 bctx->found_itself = 1;
1243 }
1244
1245
1246
1247
1248
1249 if (found->root == bctx->sctx->send_root) {
1250
1251
1252
1253
1254
1255
1256 if (ino >= bctx->cur_objectid)
1257 return 0;
1258 }
1259
1260 bctx->found++;
1261 found->found_refs++;
1262 if (ino < found->ino) {
1263 found->ino = ino;
1264 found->offset = offset;
1265 } else if (found->ino == ino) {
1266
1267
1268
1269 if (found->offset > offset + bctx->extent_len)
1270 found->offset = offset;
1271 }
1272
1273 return 0;
1274}
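/*
 * Given an inode, offset and the corresponding file extent item, look at all
 * backrefs of the extent and pick a clone root that can be used as the source
 * of a clone operation.  Returns -ENOENT if no suitable source was found.
 */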
1285static int find_extent_clone(struct send_ctx *sctx,
1286 struct btrfs_path *path,
1287 u64 ino, u64 data_offset,
1288 u64 ino_size,
1289 struct clone_root **found)
1290{
1291 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1292 int ret;
1293 int extent_type;
1294 u64 logical;
1295 u64 disk_byte;
1296 u64 num_bytes;
1297 u64 extent_item_pos;
1298 u64 flags = 0;
1299 struct btrfs_file_extent_item *fi;
1300 struct extent_buffer *eb = path->nodes[0];
1301 struct backref_ctx *backref_ctx = NULL;
1302 struct clone_root *cur_clone_root;
1303 struct btrfs_key found_key;
1304 struct btrfs_path *tmp_path;
1305 int compressed;
1306 u32 i;
1307
1308 tmp_path = alloc_path_for_send();
1309 if (!tmp_path)
1310 return -ENOMEM;
1311
1312
1313 tmp_path->need_commit_sem = 0;
1314
1315 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1316 if (!backref_ctx) {
1317 ret = -ENOMEM;
1318 goto out;
1319 }
1320
1321 if (data_offset >= ino_size) {
1322
1323
1324
1325
1326
1327 ret = 0;
1328 goto out;
1329 }
1330
1331 fi = btrfs_item_ptr(eb, path->slots[0],
1332 struct btrfs_file_extent_item);
1333 extent_type = btrfs_file_extent_type(eb, fi);
1334 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1335 ret = -ENOENT;
1336 goto out;
1337 }
1338 compressed = btrfs_file_extent_compression(eb, fi);
1339
1340 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1341 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1342 if (disk_byte == 0) {
1343 ret = -ENOENT;
1344 goto out;
1345 }
1346 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1347
1348 down_read(&fs_info->commit_root_sem);
1349 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1350 &found_key, &flags);
1351 up_read(&fs_info->commit_root_sem);
1352 btrfs_release_path(tmp_path);
1353
1354 if (ret < 0)
1355 goto out;
1356 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1357 ret = -EIO;
1358 goto out;
1359 }
1360
1361
1362
1363
1364 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1365 cur_clone_root = sctx->clone_roots + i;
1366 cur_clone_root->ino = (u64)-1;
1367 cur_clone_root->offset = 0;
1368 cur_clone_root->found_refs = 0;
1369 }
1370
1371 backref_ctx->sctx = sctx;
1372 backref_ctx->found = 0;
1373 backref_ctx->cur_objectid = ino;
1374 backref_ctx->cur_offset = data_offset;
1375 backref_ctx->found_itself = 0;
1376 backref_ctx->extent_len = num_bytes;
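	/*
	 * For uncompressed extents the backref offsets already account for the
	 * file extent's data offset; for compressed extents they do not, so
	 * remember the offset separately to avoid missing clone sources.
	 */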
1386 if (compressed == BTRFS_COMPRESS_NONE)
1387 backref_ctx->data_offset = 0;
1388 else
1389 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
1390
1391
1392
1393
1394
1395
1396 if (data_offset + num_bytes >= ino_size)
1397 backref_ctx->extent_len = ino_size - data_offset;
1398
1399
1400
1401
1402 if (compressed == BTRFS_COMPRESS_NONE)
1403 extent_item_pos = logical - found_key.objectid;
1404 else
1405 extent_item_pos = 0;
1406 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1407 extent_item_pos, 1, __iterate_backrefs,
1408 backref_ctx, false);
1409
1410 if (ret < 0)
1411 goto out;
1412
1413 if (!backref_ctx->found_itself) {
1414
1415 ret = -EIO;
1416 btrfs_err(fs_info,
1417 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1418 ino, data_offset, disk_byte, found_key.objectid);
1419 goto out;
1420 }
1421
1422 btrfs_debug(fs_info,
1423 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1424 data_offset, ino, num_bytes, logical);
1425
1426 if (!backref_ctx->found)
1427 btrfs_debug(fs_info, "no clones found");
1428
1429 cur_clone_root = NULL;
1430 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1431 if (sctx->clone_roots[i].found_refs) {
1432 if (!cur_clone_root)
1433 cur_clone_root = sctx->clone_roots + i;
1434 else if (sctx->clone_roots[i].root == sctx->send_root)
1435
1436 cur_clone_root = sctx->clone_roots + i;
1437 }
1438
1439 }
1440
1441 if (cur_clone_root) {
1442 *found = cur_clone_root;
1443 ret = 0;
1444 } else {
1445 ret = -ENOENT;
1446 }
1447
1448out:
1449 btrfs_free_path(tmp_path);
1450 kfree(backref_ctx);
1451 return ret;
1452}
1453
1454static int read_symlink(struct btrfs_root *root,
1455 u64 ino,
1456 struct fs_path *dest)
1457{
1458 int ret;
1459 struct btrfs_path *path;
1460 struct btrfs_key key;
1461 struct btrfs_file_extent_item *ei;
1462 u8 type;
1463 u8 compression;
1464 unsigned long off;
1465 int len;
1466
1467 path = alloc_path_for_send();
1468 if (!path)
1469 return -ENOMEM;
1470
1471 key.objectid = ino;
1472 key.type = BTRFS_EXTENT_DATA_KEY;
1473 key.offset = 0;
1474 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1475 if (ret < 0)
1476 goto out;
1477 if (ret) {
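		/*
		 * An inline extent for a symlink is expected to always exist.
		 * If it is missing (which can be left behind by a crash in a
		 * rare error path during symlink creation), treat the
		 * filesystem as inconsistent instead of crashing.
		 */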
1486 btrfs_err(root->fs_info,
1487 "Found empty symlink inode %llu at root %llu",
1488 ino, root->root_key.objectid);
1489 ret = -EIO;
1490 goto out;
1491 }
1492
1493 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1494 struct btrfs_file_extent_item);
1495 type = btrfs_file_extent_type(path->nodes[0], ei);
1496 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1497 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1498 BUG_ON(compression);
1499
1500 off = btrfs_file_extent_inline_start(ei);
1501 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1502
1503 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1504
1505out:
1506 btrfs_free_path(path);
1507 return ret;
1508}
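/*
 * Generate a name of the form o<ino>-<gen>-<idx> that is unique in both the
 * send and the parent snapshot.  Used for orphanized inodes and for inodes
 * that do not have their final path yet.
 */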
1514static int gen_unique_name(struct send_ctx *sctx,
1515 u64 ino, u64 gen,
1516 struct fs_path *dest)
1517{
1518 int ret = 0;
1519 struct btrfs_path *path;
1520 struct btrfs_dir_item *di;
1521 char tmp[64];
1522 int len;
1523 u64 idx = 0;
1524
1525 path = alloc_path_for_send();
1526 if (!path)
1527 return -ENOMEM;
1528
1529 while (1) {
1530 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1531 ino, gen, idx);
1532 ASSERT(len < sizeof(tmp));
1533
1534 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1535 path, BTRFS_FIRST_FREE_OBJECTID,
1536 tmp, strlen(tmp), 0);
1537 btrfs_release_path(path);
1538 if (IS_ERR(di)) {
1539 ret = PTR_ERR(di);
1540 goto out;
1541 }
1542 if (di) {
1543
1544 idx++;
1545 continue;
1546 }
1547
1548 if (!sctx->parent_root) {
1549
1550 ret = 0;
1551 break;
1552 }
1553
1554 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1555 path, BTRFS_FIRST_FREE_OBJECTID,
1556 tmp, strlen(tmp), 0);
1557 btrfs_release_path(path);
1558 if (IS_ERR(di)) {
1559 ret = PTR_ERR(di);
1560 goto out;
1561 }
1562 if (di) {
1563
1564 idx++;
1565 continue;
1566 }
1567
1568 break;
1569 }
1570
1571 ret = fs_path_add(dest, tmp, strlen(tmp));
1572
1573out:
1574 btrfs_free_path(path);
1575 return ret;
1576}
1577
1578enum inode_state {
1579 inode_state_no_change,
1580 inode_state_will_create,
1581 inode_state_did_create,
1582 inode_state_will_delete,
1583 inode_state_did_delete,
1584};
1585
1586static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1587{
1588 int ret;
1589 int left_ret;
1590 int right_ret;
1591 u64 left_gen;
1592 u64 right_gen;
1593
1594 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1595 NULL, NULL);
1596 if (ret < 0 && ret != -ENOENT)
1597 goto out;
1598 left_ret = ret;
1599
1600 if (!sctx->parent_root) {
1601 right_ret = -ENOENT;
1602 } else {
1603 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1604 NULL, NULL, NULL, NULL);
1605 if (ret < 0 && ret != -ENOENT)
1606 goto out;
1607 right_ret = ret;
1608 }
1609
1610 if (!left_ret && !right_ret) {
1611 if (left_gen == gen && right_gen == gen) {
1612 ret = inode_state_no_change;
1613 } else if (left_gen == gen) {
1614 if (ino < sctx->send_progress)
1615 ret = inode_state_did_create;
1616 else
1617 ret = inode_state_will_create;
1618 } else if (right_gen == gen) {
1619 if (ino < sctx->send_progress)
1620 ret = inode_state_did_delete;
1621 else
1622 ret = inode_state_will_delete;
1623 } else {
1624 ret = -ENOENT;
1625 }
1626 } else if (!left_ret) {
1627 if (left_gen == gen) {
1628 if (ino < sctx->send_progress)
1629 ret = inode_state_did_create;
1630 else
1631 ret = inode_state_will_create;
1632 } else {
1633 ret = -ENOENT;
1634 }
1635 } else if (!right_ret) {
1636 if (right_gen == gen) {
1637 if (ino < sctx->send_progress)
1638 ret = inode_state_did_delete;
1639 else
1640 ret = inode_state_will_delete;
1641 } else {
1642 ret = -ENOENT;
1643 }
1644 } else {
1645 ret = -ENOENT;
1646 }
1647
1648out:
1649 return ret;
1650}
1651
1652static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1653{
1654 int ret;
1655
1656 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1657 return 1;
1658
1659 ret = get_cur_inode_state(sctx, ino, gen);
1660 if (ret < 0)
1661 goto out;
1662
1663 if (ret == inode_state_no_change ||
1664 ret == inode_state_did_create ||
1665 ret == inode_state_will_delete)
1666 ret = 1;
1667 else
1668 ret = 0;
1669
1670out:
1671 return ret;
1672}
1673
1674
1675
1676
1677static int lookup_dir_item_inode(struct btrfs_root *root,
1678 u64 dir, const char *name, int name_len,
1679 u64 *found_inode,
1680 u8 *found_type)
1681{
1682 int ret = 0;
1683 struct btrfs_dir_item *di;
1684 struct btrfs_key key;
1685 struct btrfs_path *path;
1686
1687 path = alloc_path_for_send();
1688 if (!path)
1689 return -ENOMEM;
1690
1691 di = btrfs_lookup_dir_item(NULL, root, path,
1692 dir, name, name_len, 0);
1693 if (IS_ERR_OR_NULL(di)) {
1694 ret = di ? PTR_ERR(di) : -ENOENT;
1695 goto out;
1696 }
1697 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1698 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1699 ret = -ENOENT;
1700 goto out;
1701 }
1702 *found_inode = key.objectid;
1703 *found_type = btrfs_dir_type(path->nodes[0], di);
1704
1705out:
1706 btrfs_free_path(path);
1707 return ret;
1708}
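/*
 * Look up the first INODE_REF/INODE_EXTREF of an inode and return the parent
 * directory, optionally its generation, and the name of the link.
 */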
1714static int get_first_ref(struct btrfs_root *root, u64 ino,
1715 u64 *dir, u64 *dir_gen, struct fs_path *name)
1716{
1717 int ret;
1718 struct btrfs_key key;
1719 struct btrfs_key found_key;
1720 struct btrfs_path *path;
1721 int len;
1722 u64 parent_dir;
1723
1724 path = alloc_path_for_send();
1725 if (!path)
1726 return -ENOMEM;
1727
1728 key.objectid = ino;
1729 key.type = BTRFS_INODE_REF_KEY;
1730 key.offset = 0;
1731
1732 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1733 if (ret < 0)
1734 goto out;
1735 if (!ret)
1736 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1737 path->slots[0]);
1738 if (ret || found_key.objectid != ino ||
1739 (found_key.type != BTRFS_INODE_REF_KEY &&
1740 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1741 ret = -ENOENT;
1742 goto out;
1743 }
1744
1745 if (found_key.type == BTRFS_INODE_REF_KEY) {
1746 struct btrfs_inode_ref *iref;
1747 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1748 struct btrfs_inode_ref);
1749 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1750 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1751 (unsigned long)(iref + 1),
1752 len);
1753 parent_dir = found_key.offset;
1754 } else {
1755 struct btrfs_inode_extref *extref;
1756 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1757 struct btrfs_inode_extref);
1758 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1759 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1760 (unsigned long)&extref->name, len);
1761 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1762 }
1763 if (ret < 0)
1764 goto out;
1765 btrfs_release_path(path);
1766
1767 if (dir_gen) {
1768 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1769 NULL, NULL, NULL);
1770 if (ret < 0)
1771 goto out;
1772 }
1773
1774 *dir = parent_dir;
1775
1776out:
1777 btrfs_free_path(path);
1778 return ret;
1779}
1780
1781static int is_first_ref(struct btrfs_root *root,
1782 u64 ino, u64 dir,
1783 const char *name, int name_len)
1784{
1785 int ret;
1786 struct fs_path *tmp_name;
1787 u64 tmp_dir;
1788
1789 tmp_name = fs_path_alloc();
1790 if (!tmp_name)
1791 return -ENOMEM;
1792
1793 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1794 if (ret < 0)
1795 goto out;
1796
1797 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1798 ret = 0;
1799 goto out;
1800 }
1801
1802 ret = !memcmp(tmp_name->start, name, name_len);
1803
1804out:
1805 fs_path_free(tmp_name);
1806 return ret;
1807}
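/*
 * Check if creating the ref (dir, name) would overwrite an entry that still
 * exists in the parent snapshot and refers to an inode which has not been
 * processed yet (or is waiting for a move).  If so, return 1 and report that
 * inode via who_ino/who_gen/who_mode so the caller can orphanize it first.
 */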
1819static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1820 const char *name, int name_len,
1821 u64 *who_ino, u64 *who_gen, u64 *who_mode)
1822{
1823 int ret = 0;
1824 u64 gen;
1825 u64 other_inode = 0;
1826 u8 other_type = 0;
1827
1828 if (!sctx->parent_root)
1829 goto out;
1830
1831 ret = is_inode_existent(sctx, dir, dir_gen);
1832 if (ret <= 0)
1833 goto out;
1834
1835
1836
1837
1838
1839
1840 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1841 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1842 NULL, NULL, NULL);
1843 if (ret < 0 && ret != -ENOENT)
1844 goto out;
1845 if (ret) {
1846 ret = 0;
1847 goto out;
1848 }
1849 if (gen != dir_gen)
1850 goto out;
1851 }
1852
1853 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1854 &other_inode, &other_type);
1855 if (ret < 0 && ret != -ENOENT)
1856 goto out;
1857 if (ret) {
1858 ret = 0;
1859 goto out;
1860 }
1861
1862
1863
1864
1865
1866
1867 if (other_inode > sctx->send_progress ||
1868 is_waiting_for_move(sctx, other_inode)) {
1869 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1870 who_gen, who_mode, NULL, NULL, NULL);
1871 if (ret < 0)
1872 goto out;
1873
1874 ret = 1;
1875 *who_ino = other_inode;
1876 } else {
1877 ret = 0;
1878 }
1879
1880out:
1881 return ret;
1882}
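/*
 * Check if the ref (dir, name) of the given inode was already overwritten in
 * the send snapshot, i.e. the name now belongs to a different inode that was
 * already processed.
 */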
1891static int did_overwrite_ref(struct send_ctx *sctx,
1892 u64 dir, u64 dir_gen,
1893 u64 ino, u64 ino_gen,
1894 const char *name, int name_len)
1895{
1896 int ret = 0;
1897 u64 gen;
1898 u64 ow_inode;
1899 u8 other_type;
1900
1901 if (!sctx->parent_root)
1902 goto out;
1903
1904 ret = is_inode_existent(sctx, dir, dir_gen);
1905 if (ret <= 0)
1906 goto out;
1907
1908 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1909 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1910 NULL, NULL, NULL);
1911 if (ret < 0 && ret != -ENOENT)
1912 goto out;
1913 if (ret) {
1914 ret = 0;
1915 goto out;
1916 }
1917 if (gen != dir_gen)
1918 goto out;
1919 }
1920
1921
1922 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1923 &ow_inode, &other_type);
1924 if (ret < 0 && ret != -ENOENT)
1925 goto out;
1926 if (ret) {
1927
1928 ret = 0;
1929 goto out;
1930 }
1931
1932 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1933 NULL, NULL);
1934 if (ret < 0)
1935 goto out;
1936
1937 if (ow_inode == ino && gen == ino_gen) {
1938 ret = 0;
1939 goto out;
1940 }
1948 if ((ow_inode < sctx->send_progress) ||
1949 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1950 gen == sctx->cur_inode_gen))
1951 ret = 1;
1952 else
1953 ret = 0;
1954
1955out:
1956 return ret;
1957}
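/*
 * Same as did_overwrite_ref(), but for the first ref of an inode in the
 * parent snapshot.
 */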
1964static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1965{
1966 int ret = 0;
1967 struct fs_path *name = NULL;
1968 u64 dir;
1969 u64 dir_gen;
1970
1971 if (!sctx->parent_root)
1972 goto out;
1973
1974 name = fs_path_alloc();
1975 if (!name)
1976 return -ENOMEM;
1977
1978 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
1979 if (ret < 0)
1980 goto out;
1981
1982 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
1983 name->start, fs_path_len(name));
1984
1985out:
1986 fs_path_free(name);
1987 return ret;
1988}
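/*
 * Insert a name cache entry.  Entries that share the same radix tree index
 * are chained on a common list head (see struct name_cache_entry).
 */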
1996static int name_cache_insert(struct send_ctx *sctx,
1997 struct name_cache_entry *nce)
1998{
1999 int ret = 0;
2000 struct list_head *nce_head;
2001
2002 nce_head = radix_tree_lookup(&sctx->name_cache,
2003 (unsigned long)nce->ino);
2004 if (!nce_head) {
2005 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2006 if (!nce_head) {
2007 kfree(nce);
2008 return -ENOMEM;
2009 }
2010 INIT_LIST_HEAD(nce_head);
2011
2012 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2013 if (ret < 0) {
2014 kfree(nce_head);
2015 kfree(nce);
2016 return ret;
2017 }
2018 }
2019 list_add_tail(&nce->radix_list, nce_head);
2020 list_add_tail(&nce->list, &sctx->name_cache_list);
2021 sctx->name_cache_size++;
2022
2023 return ret;
2024}
2025
2026static void name_cache_delete(struct send_ctx *sctx,
2027 struct name_cache_entry *nce)
2028{
2029 struct list_head *nce_head;
2030
2031 nce_head = radix_tree_lookup(&sctx->name_cache,
2032 (unsigned long)nce->ino);
2033 if (!nce_head) {
2034 btrfs_err(sctx->send_root->fs_info,
2035 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2036 nce->ino, sctx->name_cache_size);
2037 }
2038
2039 list_del(&nce->radix_list);
2040 list_del(&nce->list);
2041 sctx->name_cache_size--;
2046 if (nce_head && list_empty(nce_head)) {
2047 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2048 kfree(nce_head);
2049 }
2050}
2051
2052static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2053 u64 ino, u64 gen)
2054{
2055 struct list_head *nce_head;
2056 struct name_cache_entry *cur;
2057
2058 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2059 if (!nce_head)
2060 return NULL;
2061
2062 list_for_each_entry(cur, nce_head, radix_list) {
2063 if (cur->ino == ino && cur->gen == gen)
2064 return cur;
2065 }
2066 return NULL;
2067}
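/*
 * Move an entry to the tail of the list so that name_cache_clean_unused()
 * does not evict it soon.
 */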
2073static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
2074{
2075 list_del(&nce->list);
2076 list_add_tail(&nce->list, &sctx->name_cache_list);
2077}
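/* Evict the least recently used entries once the cache has grown too big. */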
2082static void name_cache_clean_unused(struct send_ctx *sctx)
2083{
2084 struct name_cache_entry *nce;
2085
2086 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2087 return;
2088
2089 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2090 nce = list_entry(sctx->name_cache_list.next,
2091 struct name_cache_entry, list);
2092 name_cache_delete(sctx, nce);
2093 kfree(nce);
2094 }
2095}
2096
2097static void name_cache_free(struct send_ctx *sctx)
2098{
2099 struct name_cache_entry *nce;
2100
2101 while (!list_empty(&sctx->name_cache_list)) {
2102 nce = list_entry(sctx->name_cache_list.next,
2103 struct name_cache_entry, list);
2104 name_cache_delete(sctx, nce);
2105 kfree(nce);
2106 }
2107}
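/*
 * Resolve the current name and parent of an inode, using the name cache when
 * possible.  Returns 0 on success, 1 if the inode does not exist or its ref
 * was overwritten (in which case dest holds a unique orphan name), and a
 * negative errno on error.
 */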
2117static int __get_cur_name_and_parent(struct send_ctx *sctx,
2118 u64 ino, u64 gen,
2119 u64 *parent_ino,
2120 u64 *parent_gen,
2121 struct fs_path *dest)
2122{
2123 int ret;
2124 int nce_ret;
2125 struct name_cache_entry *nce = NULL;
2126
2127
2128
2129
2130
2131
2132 nce = name_cache_search(sctx, ino, gen);
2133 if (nce) {
2134 if (ino < sctx->send_progress && nce->need_later_update) {
2135 name_cache_delete(sctx, nce);
2136 kfree(nce);
2137 nce = NULL;
2138 } else {
2139 name_cache_used(sctx, nce);
2140 *parent_ino = nce->parent_ino;
2141 *parent_gen = nce->parent_gen;
2142 ret = fs_path_add(dest, nce->name, nce->name_len);
2143 if (ret < 0)
2144 goto out;
2145 ret = nce->ret;
2146 goto out;
2147 }
2148 }
2149
2150
2151
2152
2153
2154
2155 ret = is_inode_existent(sctx, ino, gen);
2156 if (ret < 0)
2157 goto out;
2158
2159 if (!ret) {
2160 ret = gen_unique_name(sctx, ino, gen, dest);
2161 if (ret < 0)
2162 goto out;
2163 ret = 1;
2164 goto out_cache;
2165 }
2166
2167
2168
2169
2170
2171 if (ino < sctx->send_progress)
2172 ret = get_first_ref(sctx->send_root, ino,
2173 parent_ino, parent_gen, dest);
2174 else
2175 ret = get_first_ref(sctx->parent_root, ino,
2176 parent_ino, parent_gen, dest);
2177 if (ret < 0)
2178 goto out;
2179
2180
2181
2182
2183
2184 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2185 dest->start, dest->end - dest->start);
2186 if (ret < 0)
2187 goto out;
2188 if (ret) {
2189 fs_path_reset(dest);
2190 ret = gen_unique_name(sctx, ino, gen, dest);
2191 if (ret < 0)
2192 goto out;
2193 ret = 1;
2194 }
2195
2196out_cache:
2197
2198
2199
2200 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2201 if (!nce) {
2202 ret = -ENOMEM;
2203 goto out;
2204 }
2205
2206 nce->ino = ino;
2207 nce->gen = gen;
2208 nce->parent_ino = *parent_ino;
2209 nce->parent_gen = *parent_gen;
2210 nce->name_len = fs_path_len(dest);
2211 nce->ret = ret;
2212 strcpy(nce->name, dest->start);
2213
2214 if (ino < sctx->send_progress)
2215 nce->need_later_update = 0;
2216 else
2217 nce->need_later_update = 1;
2218
2219 nce_ret = name_cache_insert(sctx, nce);
2220 if (nce_ret < 0)
2221 ret = nce_ret;
2222 name_cache_clean_unused(sctx);
2223
2224out:
2225 return ret;
2226}
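/*
 * Build the full path of an inode as it currently looks on the receiving
 * side, by walking up the parent references.  Ancestors that are orphanized
 * or waiting to be removed are represented by their unique orphan names.
 */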
2253static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2254 struct fs_path *dest)
2255{
2256 int ret = 0;
2257 struct fs_path *name = NULL;
2258 u64 parent_inode = 0;
2259 u64 parent_gen = 0;
2260 int stop = 0;
2261
2262 name = fs_path_alloc();
2263 if (!name) {
2264 ret = -ENOMEM;
2265 goto out;
2266 }
2267
2268 dest->reversed = 1;
2269 fs_path_reset(dest);
2270
2271 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2272 struct waiting_dir_move *wdm;
2273
2274 fs_path_reset(name);
2275
2276 if (is_waiting_for_rm(sctx, ino)) {
2277 ret = gen_unique_name(sctx, ino, gen, name);
2278 if (ret < 0)
2279 goto out;
2280 ret = fs_path_add_path(dest, name);
2281 break;
2282 }
2283
2284 wdm = get_waiting_dir_move(sctx, ino);
2285 if (wdm && wdm->orphanized) {
2286 ret = gen_unique_name(sctx, ino, gen, name);
2287 stop = 1;
2288 } else if (wdm) {
2289 ret = get_first_ref(sctx->parent_root, ino,
2290 &parent_inode, &parent_gen, name);
2291 } else {
2292 ret = __get_cur_name_and_parent(sctx, ino, gen,
2293 &parent_inode,
2294 &parent_gen, name);
2295 if (ret)
2296 stop = 1;
2297 }
2298
2299 if (ret < 0)
2300 goto out;
2301
2302 ret = fs_path_add_path(dest, name);
2303 if (ret < 0)
2304 goto out;
2305
2306 ino = parent_inode;
2307 gen = parent_gen;
2308 }
2309
2310out:
2311 fs_path_free(name);
2312 if (!ret)
2313 fs_path_unreverse(dest);
2314 return ret;
2315}
2316
2317
2318
2319
2320static int send_subvol_begin(struct send_ctx *sctx)
2321{
2322 int ret;
2323 struct btrfs_root *send_root = sctx->send_root;
2324 struct btrfs_root *parent_root = sctx->parent_root;
2325 struct btrfs_path *path;
2326 struct btrfs_key key;
2327 struct btrfs_root_ref *ref;
2328 struct extent_buffer *leaf;
2329 char *name = NULL;
2330 int namelen;
2331
2332 path = btrfs_alloc_path();
2333 if (!path)
2334 return -ENOMEM;
2335
2336 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2337 if (!name) {
2338 btrfs_free_path(path);
2339 return -ENOMEM;
2340 }
2341
2342 key.objectid = send_root->root_key.objectid;
2343 key.type = BTRFS_ROOT_BACKREF_KEY;
2344 key.offset = 0;
2345
2346 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2347 &key, path, 1, 0);
2348 if (ret < 0)
2349 goto out;
2350 if (ret) {
2351 ret = -ENOENT;
2352 goto out;
2353 }
2354
2355 leaf = path->nodes[0];
2356 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2357 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2358 key.objectid != send_root->root_key.objectid) {
2359 ret = -ENOENT;
2360 goto out;
2361 }
2362 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2363 namelen = btrfs_root_ref_name_len(leaf, ref);
2364 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2365 btrfs_release_path(path);
2366
2367 if (parent_root) {
2368 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2369 if (ret < 0)
2370 goto out;
2371 } else {
2372 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2373 if (ret < 0)
2374 goto out;
2375 }
2376
2377 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2378
2379 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2380 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2381 sctx->send_root->root_item.received_uuid);
2382 else
2383 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2384 sctx->send_root->root_item.uuid);
2385
2386 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2387 le64_to_cpu(sctx->send_root->root_item.ctransid));
2388 if (parent_root) {
2389 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2390 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2391 parent_root->root_item.received_uuid);
2392 else
2393 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2394 parent_root->root_item.uuid);
2395 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2396 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2397 }
2398
2399 ret = send_cmd(sctx);
2400
2401tlv_put_failure:
2402out:
2403 btrfs_free_path(path);
2404 kfree(name);
2405 return ret;
2406}
2407
2408static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2409{
2410 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2411 int ret = 0;
2412 struct fs_path *p;
2413
2414 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2415
2416 p = fs_path_alloc();
2417 if (!p)
2418 return -ENOMEM;
2419
2420 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2421 if (ret < 0)
2422 goto out;
2423
2424 ret = get_cur_path(sctx, ino, gen, p);
2425 if (ret < 0)
2426 goto out;
2427 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2428 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2429
2430 ret = send_cmd(sctx);
2431
2432tlv_put_failure:
2433out:
2434 fs_path_free(p);
2435 return ret;
2436}
2437
2438static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2439{
2440 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2441 int ret = 0;
2442 struct fs_path *p;
2443
2444 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2445
2446 p = fs_path_alloc();
2447 if (!p)
2448 return -ENOMEM;
2449
2450 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2451 if (ret < 0)
2452 goto out;
2453
2454 ret = get_cur_path(sctx, ino, gen, p);
2455 if (ret < 0)
2456 goto out;
2457 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2458 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2459
2460 ret = send_cmd(sctx);
2461
2462tlv_put_failure:
2463out:
2464 fs_path_free(p);
2465 return ret;
2466}
2467
2468static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2469{
2470 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2471 int ret = 0;
2472 struct fs_path *p;
2473
2474 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2475 ino, uid, gid);
2476
2477 p = fs_path_alloc();
2478 if (!p)
2479 return -ENOMEM;
2480
2481 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2482 if (ret < 0)
2483 goto out;
2484
2485 ret = get_cur_path(sctx, ino, gen, p);
2486 if (ret < 0)
2487 goto out;
2488 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2489 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2490 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2491
2492 ret = send_cmd(sctx);
2493
2494tlv_put_failure:
2495out:
2496 fs_path_free(p);
2497 return ret;
2498}
2499
2500static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2501{
2502 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2503 int ret = 0;
2504 struct fs_path *p = NULL;
2505 struct btrfs_inode_item *ii;
2506 struct btrfs_path *path = NULL;
2507 struct extent_buffer *eb;
2508 struct btrfs_key key;
2509 int slot;
2510
2511 btrfs_debug(fs_info, "send_utimes %llu", ino);
2512
2513 p = fs_path_alloc();
2514 if (!p)
2515 return -ENOMEM;
2516
2517 path = alloc_path_for_send();
2518 if (!path) {
2519 ret = -ENOMEM;
2520 goto out;
2521 }
2522
2523 key.objectid = ino;
2524 key.type = BTRFS_INODE_ITEM_KEY;
2525 key.offset = 0;
2526 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2527 if (ret > 0)
2528 ret = -ENOENT;
2529 if (ret < 0)
2530 goto out;
2531
2532 eb = path->nodes[0];
2533 slot = path->slots[0];
2534 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2535
2536 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2537 if (ret < 0)
2538 goto out;
2539
2540 ret = get_cur_path(sctx, ino, gen, p);
2541 if (ret < 0)
2542 goto out;
2543 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2544 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2545 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2546 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2547
2548
2549 ret = send_cmd(sctx);
2550
2551tlv_put_failure:
2552out:
2553 fs_path_free(p);
2554 btrfs_free_path(path);
2555 return ret;
2556}
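/*
 * Send a create command (mkfile/mkdir/symlink/mknod/mkfifo/mksock) for the
 * given inode, using its unique orphan name as the path.
 */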
2563static int send_create_inode(struct send_ctx *sctx, u64 ino)
2564{
2565 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2566 int ret = 0;
2567 struct fs_path *p;
2568 int cmd;
2569 u64 gen;
2570 u64 mode;
2571 u64 rdev;
2572
2573 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2574
2575 p = fs_path_alloc();
2576 if (!p)
2577 return -ENOMEM;
2578
2579 if (ino != sctx->cur_ino) {
2580 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2581 NULL, NULL, &rdev);
2582 if (ret < 0)
2583 goto out;
2584 } else {
2585 gen = sctx->cur_inode_gen;
2586 mode = sctx->cur_inode_mode;
2587 rdev = sctx->cur_inode_rdev;
2588 }
2589
2590 if (S_ISREG(mode)) {
2591 cmd = BTRFS_SEND_C_MKFILE;
2592 } else if (S_ISDIR(mode)) {
2593 cmd = BTRFS_SEND_C_MKDIR;
2594 } else if (S_ISLNK(mode)) {
2595 cmd = BTRFS_SEND_C_SYMLINK;
2596 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2597 cmd = BTRFS_SEND_C_MKNOD;
2598 } else if (S_ISFIFO(mode)) {
2599 cmd = BTRFS_SEND_C_MKFIFO;
2600 } else if (S_ISSOCK(mode)) {
2601 cmd = BTRFS_SEND_C_MKSOCK;
2602 } else {
2603 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2604 (int)(mode & S_IFMT));
2605 ret = -EOPNOTSUPP;
2606 goto out;
2607 }
2608
2609 ret = begin_cmd(sctx, cmd);
2610 if (ret < 0)
2611 goto out;
2612
2613 ret = gen_unique_name(sctx, ino, gen, p);
2614 if (ret < 0)
2615 goto out;
2616
2617 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2618 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2619
2620 if (S_ISLNK(mode)) {
2621 fs_path_reset(p);
2622 ret = read_symlink(sctx->send_root, ino, p);
2623 if (ret < 0)
2624 goto out;
2625 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2626 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2627 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2628 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2629 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2630 }
2631
2632 ret = send_cmd(sctx);
2633 if (ret < 0)
2634 goto out;
2635
2636
2637tlv_put_failure:
2638out:
2639 fs_path_free(p);
2640 return ret;
2641}
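/*
 * Check if the directory was already created out of order, which happens when
 * an inode inside it had to be processed before the directory itself.
 */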
2648static int did_create_dir(struct send_ctx *sctx, u64 dir)
2649{
2650 int ret = 0;
2651 struct btrfs_path *path = NULL;
2652 struct btrfs_key key;
2653 struct btrfs_key found_key;
2654 struct btrfs_key di_key;
2655 struct extent_buffer *eb;
2656 struct btrfs_dir_item *di;
2657 int slot;
2658
2659 path = alloc_path_for_send();
2660 if (!path) {
2661 ret = -ENOMEM;
2662 goto out;
2663 }
2664
2665 key.objectid = dir;
2666 key.type = BTRFS_DIR_INDEX_KEY;
2667 key.offset = 0;
2668 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2669 if (ret < 0)
2670 goto out;
2671
2672 while (1) {
2673 eb = path->nodes[0];
2674 slot = path->slots[0];
2675 if (slot >= btrfs_header_nritems(eb)) {
2676 ret = btrfs_next_leaf(sctx->send_root, path);
2677 if (ret < 0) {
2678 goto out;
2679 } else if (ret > 0) {
2680 ret = 0;
2681 break;
2682 }
2683 continue;
2684 }
2685
2686 btrfs_item_key_to_cpu(eb, &found_key, slot);
2687 if (found_key.objectid != key.objectid ||
2688 found_key.type != key.type) {
2689 ret = 0;
2690 goto out;
2691 }
2692
2693 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2694 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2695
2696 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2697 di_key.objectid < sctx->send_progress) {
2698 ret = 1;
2699 goto out;
2700 }
2701
2702 path->slots[0]++;
2703 }
2704
2705out:
2706 btrfs_free_path(path);
2707 return ret;
2708}
2709
2710
2711
2712
2713
2714
2715
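/*
 * Send a create command for the current inode unless it is a directory that
 * was already created earlier in the stream (see did_create_dir).
 */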
2716static int send_create_inode_if_needed(struct send_ctx *sctx)
2717{
2718 int ret;
2719
2720 if (S_ISDIR(sctx->cur_inode_mode)) {
2721 ret = did_create_dir(sctx, sctx->cur_ino);
2722 if (ret < 0)
2723 goto out;
2724 if (ret) {
2725 ret = 0;
2726 goto out;
2727 }
2728 }
2729
2730 ret = send_create_inode(sctx, sctx->cur_ino);
2731 if (ret < 0)
2732 goto out;
2733
2734out:
2735 return ret;
2736}
2737
2738struct recorded_ref {
2739 struct list_head list;
2740 char *name;
2741 struct fs_path *full_path;
2742 u64 dir;
2743 u64 dir_gen;
2744 int name_len;
2745};
2746
2747static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2748{
2749 ref->full_path = path;
2750 ref->name = (char *)kbasename(ref->full_path->start);
2751 ref->name_len = ref->full_path->end - ref->name;
2752}
2753
2754
2755
2756
2757
2758
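/*
 * Record a single reference (parent directory, its generation and the full
 * path) in the given list (new_refs or deleted_refs) so it can be processed
 * later by process_recorded_refs(). The recorded_ref takes ownership of the
 * path.
 */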
2759static int __record_ref(struct list_head *head, u64 dir,
2760 u64 dir_gen, struct fs_path *path)
2761{
2762 struct recorded_ref *ref;
2763
2764 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2765 if (!ref)
2766 return -ENOMEM;
2767
2768 ref->dir = dir;
2769 ref->dir_gen = dir_gen;
2770 set_ref_path(ref, path);
2771 list_add_tail(&ref->list, head);
2772 return 0;
2773}
2774
2775static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2776{
2777 struct recorded_ref *new;
2778
2779 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2780 if (!new)
2781 return -ENOMEM;
2782
2783 new->dir = ref->dir;
2784 new->dir_gen = ref->dir_gen;
2785 new->full_path = NULL;
2786 INIT_LIST_HEAD(&new->list);
2787 list_add_tail(&new->list, list);
2788 return 0;
2789}
2790
2791static void __free_recorded_refs(struct list_head *head)
2792{
2793 struct recorded_ref *cur;
2794
2795 while (!list_empty(head)) {
2796 cur = list_entry(head->next, struct recorded_ref, list);
2797 fs_path_free(cur->full_path);
2798 list_del(&cur->list);
2799 kfree(cur);
2800 }
2801}
2802
2803static void free_recorded_refs(struct send_ctx *sctx)
2804{
2805 __free_recorded_refs(&sctx->new_refs);
2806 __free_recorded_refs(&sctx->deleted_refs);
2807}
2808
2809
2810
2811
2812
2813
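/*
 * Rename an inode from its current path to a unique orphan name. This is used
 * when the current path must be freed up for another inode or reference that
 * is about to take its place.
 */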
2814static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2815 struct fs_path *path)
2816{
2817 int ret;
2818 struct fs_path *orphan;
2819
2820 orphan = fs_path_alloc();
2821 if (!orphan)
2822 return -ENOMEM;
2823
2824 ret = gen_unique_name(sctx, ino, gen, orphan);
2825 if (ret < 0)
2826 goto out;
2827
2828 ret = send_rename(sctx, path, orphan);
2829
2830out:
2831 fs_path_free(orphan);
2832 return ret;
2833}
2834
2835static struct orphan_dir_info *
2836add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2837{
2838 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2839 struct rb_node *parent = NULL;
2840 struct orphan_dir_info *entry, *odi;
2841
2842 while (*p) {
2843 parent = *p;
2844 entry = rb_entry(parent, struct orphan_dir_info, node);
2845 if (dir_ino < entry->ino) {
2846 p = &(*p)->rb_left;
2847 } else if (dir_ino > entry->ino) {
2848 p = &(*p)->rb_right;
2849 } else {
2850 return entry;
2851 }
2852 }
2853
2854 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2855 if (!odi)
2856 return ERR_PTR(-ENOMEM);
2857 odi->ino = dir_ino;
2858 odi->gen = 0;
2859 odi->last_dir_index_offset = 0;
2860
2861 rb_link_node(&odi->node, parent, p);
2862 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2863 return odi;
2864}
2865
2866static struct orphan_dir_info *
2867get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2868{
2869 struct rb_node *n = sctx->orphan_dirs.rb_node;
2870 struct orphan_dir_info *entry;
2871
2872 while (n) {
2873 entry = rb_entry(n, struct orphan_dir_info, node);
2874 if (dir_ino < entry->ino)
2875 n = n->rb_left;
2876 else if (dir_ino > entry->ino)
2877 n = n->rb_right;
2878 else
2879 return entry;
2880 }
2881 return NULL;
2882}
2883
2884static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2885{
2886 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2887
2888 return odi != NULL;
2889}
2890
2891static void free_orphan_dir_info(struct send_ctx *sctx,
2892 struct orphan_dir_info *odi)
2893{
2894 if (!odi)
2895 return;
2896 rb_erase(&odi->node, &sctx->orphan_dirs);
2897 kfree(odi);
2898}
2899
2900
2901
2902
2903
2904
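/*
 * Check if a directory in the parent snapshot can be rmdir'ed at this point of
 * the send stream. It can not if it still contains entries for inodes that
 * were not processed yet (inode number above send_progress) or that are
 * waiting for a pending directory move. In that case an orphan_dir_info is
 * recorded so the rmdir can be retried later, resuming the scan at
 * last_dir_index_offset.
 * Returns 1 if the rmdir can be sent now, 0 if it must be delayed and < 0 on
 * error.
 */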
2905static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2906 u64 send_progress)
2907{
2908 int ret = 0;
2909 struct btrfs_root *root = sctx->parent_root;
2910 struct btrfs_path *path;
2911 struct btrfs_key key;
2912 struct btrfs_key found_key;
2913 struct btrfs_key loc;
2914 struct btrfs_dir_item *di;
2915 struct orphan_dir_info *odi = NULL;
2916
2917
2918
2919
2920 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2921 return 0;
2922
2923 path = alloc_path_for_send();
2924 if (!path)
2925 return -ENOMEM;
2926
2927 key.objectid = dir;
2928 key.type = BTRFS_DIR_INDEX_KEY;
2929 key.offset = 0;
2930
2931 odi = get_orphan_dir_info(sctx, dir);
2932 if (odi)
2933 key.offset = odi->last_dir_index_offset;
2934
2935 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2936 if (ret < 0)
2937 goto out;
2938
2939 while (1) {
2940 struct waiting_dir_move *dm;
2941
2942 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2943 ret = btrfs_next_leaf(root, path);
2944 if (ret < 0)
2945 goto out;
2946 else if (ret > 0)
2947 break;
2948 continue;
2949 }
2950 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2951 path->slots[0]);
2952 if (found_key.objectid != key.objectid ||
2953 found_key.type != key.type)
2954 break;
2955
2956 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2957 struct btrfs_dir_item);
2958 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2959
2960 dm = get_waiting_dir_move(sctx, loc.objectid);
2961 if (dm) {
2962 odi = add_orphan_dir_info(sctx, dir);
2963 if (IS_ERR(odi)) {
2964 ret = PTR_ERR(odi);
2965 goto out;
2966 }
2967 odi->gen = dir_gen;
2968 odi->last_dir_index_offset = found_key.offset;
2969 dm->rmdir_ino = dir;
2970 ret = 0;
2971 goto out;
2972 }
2973
2974 if (loc.objectid > send_progress) {
2975 odi = add_orphan_dir_info(sctx, dir);
2976 if (IS_ERR(odi)) {
2977 ret = PTR_ERR(odi);
2978 goto out;
2979 }
2980 odi->gen = dir_gen;
2981 odi->last_dir_index_offset = found_key.offset;
2982 ret = 0;
2983 goto out;
2984 }
2985
2986 path->slots[0]++;
2987 }
2988 free_orphan_dir_info(sctx, odi);
2989
2990 ret = 1;
2991
2992out:
2993 btrfs_free_path(path);
2994 return ret;
2995}
2996
2997static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
2998{
2999 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3000
3001 return entry != NULL;
3002}
3003
3004static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3005{
3006 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3007 struct rb_node *parent = NULL;
3008 struct waiting_dir_move *entry, *dm;
3009
3010 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3011 if (!dm)
3012 return -ENOMEM;
3013 dm->ino = ino;
3014 dm->rmdir_ino = 0;
3015 dm->orphanized = orphanized;
3016
3017 while (*p) {
3018 parent = *p;
3019 entry = rb_entry(parent, struct waiting_dir_move, node);
3020 if (ino < entry->ino) {
3021 p = &(*p)->rb_left;
3022 } else if (ino > entry->ino) {
3023 p = &(*p)->rb_right;
3024 } else {
3025 kfree(dm);
3026 return -EEXIST;
3027 }
3028 }
3029
3030 rb_link_node(&dm->node, parent, p);
3031 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3032 return 0;
3033}
3034
3035static struct waiting_dir_move *
3036get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3037{
3038 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3039 struct waiting_dir_move *entry;
3040
3041 while (n) {
3042 entry = rb_entry(n, struct waiting_dir_move, node);
3043 if (ino < entry->ino)
3044 n = n->rb_left;
3045 else if (ino > entry->ino)
3046 n = n->rb_right;
3047 else
3048 return entry;
3049 }
3050 return NULL;
3051}
3052
3053static void free_waiting_dir_move(struct send_ctx *sctx,
3054 struct waiting_dir_move *dm)
3055{
3056 if (!dm)
3057 return;
3058 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3059 kfree(dm);
3060}
3061
3062static int add_pending_dir_move(struct send_ctx *sctx,
3063 u64 ino,
3064 u64 ino_gen,
3065 u64 parent_ino,
3066 struct list_head *new_refs,
3067 struct list_head *deleted_refs,
3068 const bool is_orphan)
3069{
3070 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3071 struct rb_node *parent = NULL;
3072 struct pending_dir_move *entry = NULL, *pm;
3073 struct recorded_ref *cur;
3074 int exists = 0;
3075 int ret;
3076
3077 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3078 if (!pm)
3079 return -ENOMEM;
3080 pm->parent_ino = parent_ino;
3081 pm->ino = ino;
3082 pm->gen = ino_gen;
3083 INIT_LIST_HEAD(&pm->list);
3084 INIT_LIST_HEAD(&pm->update_refs);
3085 RB_CLEAR_NODE(&pm->node);
3086
3087 while (*p) {
3088 parent = *p;
3089 entry = rb_entry(parent, struct pending_dir_move, node);
3090 if (parent_ino < entry->parent_ino) {
3091 p = &(*p)->rb_left;
3092 } else if (parent_ino > entry->parent_ino) {
3093 p = &(*p)->rb_right;
3094 } else {
3095 exists = 1;
3096 break;
3097 }
3098 }
3099
3100 list_for_each_entry(cur, deleted_refs, list) {
3101 ret = dup_ref(cur, &pm->update_refs);
3102 if (ret < 0)
3103 goto out;
3104 }
3105 list_for_each_entry(cur, new_refs, list) {
3106 ret = dup_ref(cur, &pm->update_refs);
3107 if (ret < 0)
3108 goto out;
3109 }
3110
3111 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3112 if (ret)
3113 goto out;
3114
3115 if (exists) {
3116 list_add_tail(&pm->list, &entry->list);
3117 } else {
3118 rb_link_node(&pm->node, parent, p);
3119 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3120 }
3121 ret = 0;
3122out:
3123 if (ret) {
3124 __free_recorded_refs(&pm->update_refs);
3125 kfree(pm);
3126 }
3127 return ret;
3128}
3129
3130static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3131 u64 parent_ino)
3132{
3133 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3134 struct pending_dir_move *entry;
3135
3136 while (n) {
3137 entry = rb_entry(n, struct pending_dir_move, node);
3138 if (parent_ino < entry->parent_ino)
3139 n = n->rb_left;
3140 else if (parent_ino > entry->parent_ino)
3141 n = n->rb_right;
3142 else
3143 return entry;
3144 }
3145 return NULL;
3146}
3147
3148static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3149 u64 ino, u64 gen, u64 *ancestor_ino)
3150{
3151 int ret = 0;
3152 u64 parent_inode = 0;
3153 u64 parent_gen = 0;
3154 u64 start_ino = ino;
3155
3156 *ancestor_ino = 0;
3157 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3158 fs_path_reset(name);
3159
3160 if (is_waiting_for_rm(sctx, ino))
3161 break;
3162 if (is_waiting_for_move(sctx, ino)) {
3163 if (*ancestor_ino == 0)
3164 *ancestor_ino = ino;
3165 ret = get_first_ref(sctx->parent_root, ino,
3166 &parent_inode, &parent_gen, name);
3167 } else {
3168 ret = __get_cur_name_and_parent(sctx, ino, gen,
3169 &parent_inode,
3170 &parent_gen, name);
3171 if (ret > 0) {
3172 ret = 0;
3173 break;
3174 }
3175 }
3176 if (ret < 0)
3177 break;
3178 if (parent_inode == start_ino) {
3179 ret = 1;
3180 if (*ancestor_ino == 0)
3181 *ancestor_ino = ino;
3182 break;
3183 }
3184 ino = parent_inode;
3185 gen = parent_gen;
3186 }
3187 return ret;
3188}
3189
3190static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3191{
3192 struct fs_path *from_path = NULL;
3193 struct fs_path *to_path = NULL;
3194 struct fs_path *name = NULL;
3195 u64 orig_progress = sctx->send_progress;
3196 struct recorded_ref *cur;
3197 u64 parent_ino, parent_gen;
3198 struct waiting_dir_move *dm = NULL;
3199 u64 rmdir_ino = 0;
3200 u64 ancestor;
3201 bool is_orphan;
3202 int ret;
3203
3204 name = fs_path_alloc();
3205 from_path = fs_path_alloc();
3206 if (!name || !from_path) {
3207 ret = -ENOMEM;
3208 goto out;
3209 }
3210
3211 dm = get_waiting_dir_move(sctx, pm->ino);
3212 ASSERT(dm);
3213 rmdir_ino = dm->rmdir_ino;
3214 is_orphan = dm->orphanized;
3215 free_waiting_dir_move(sctx, dm);
3216
3217 if (is_orphan) {
3218 ret = gen_unique_name(sctx, pm->ino,
3219 pm->gen, from_path);
3220 } else {
3221 ret = get_first_ref(sctx->parent_root, pm->ino,
3222 &parent_ino, &parent_gen, name);
3223 if (ret < 0)
3224 goto out;
3225 ret = get_cur_path(sctx, parent_ino, parent_gen,
3226 from_path);
3227 if (ret < 0)
3228 goto out;
3229 ret = fs_path_add_path(from_path, name);
3230 }
3231 if (ret < 0)
3232 goto out;
3233
3234 sctx->send_progress = sctx->cur_ino + 1;
3235 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3236 if (ret < 0)
3237 goto out;
3238 if (ret) {
3239 LIST_HEAD(deleted_refs);
3240 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3241 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3242 &pm->update_refs, &deleted_refs,
3243 is_orphan);
3244 if (ret < 0)
3245 goto out;
3246 if (rmdir_ino) {
3247 dm = get_waiting_dir_move(sctx, pm->ino);
3248 ASSERT(dm);
3249 dm->rmdir_ino = rmdir_ino;
3250 }
3251 goto out;
3252 }
3253 fs_path_reset(name);
3254 to_path = name;
3255 name = NULL;
3256 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3257 if (ret < 0)
3258 goto out;
3259
3260 ret = send_rename(sctx, from_path, to_path);
3261 if (ret < 0)
3262 goto out;
3263
3264 if (rmdir_ino) {
3265 struct orphan_dir_info *odi;
3266 u64 gen;
3267
3268 odi = get_orphan_dir_info(sctx, rmdir_ino);
3269 if (!odi) {
3270
3271 goto finish;
3272 }
3273 gen = odi->gen;
3274
3275 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3276 if (ret < 0)
3277 goto out;
3278 if (!ret)
3279 goto finish;
3280
3281 name = fs_path_alloc();
3282 if (!name) {
3283 ret = -ENOMEM;
3284 goto out;
3285 }
3286 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3287 if (ret < 0)
3288 goto out;
3289 ret = send_rmdir(sctx, name);
3290 if (ret < 0)
3291 goto out;
3292 }
3293
3294finish:
3295 ret = send_utimes(sctx, pm->ino, pm->gen);
3296 if (ret < 0)
3297 goto out;
3298
3299
3300
3301
3302
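/*
 * After the rename, update the utimes of all directories that had references
 * to the moved inode added or removed. Directories that no longer exist in
 * the send snapshot are skipped.
 */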
3303 list_for_each_entry(cur, &pm->update_refs, list) {
3304
3305
3306
3307 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3308 NULL, NULL, NULL, NULL, NULL);
3309 if (ret == -ENOENT) {
3310 ret = 0;
3311 continue;
3312 }
3313 if (ret < 0)
3314 goto out;
3315
3316 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3317 if (ret < 0)
3318 goto out;
3319 }
3320
3321out:
3322 fs_path_free(name);
3323 fs_path_free(from_path);
3324 fs_path_free(to_path);
3325 sctx->send_progress = orig_progress;
3326
3327 return ret;
3328}
3329
3330static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3331{
3332 if (!list_empty(&m->list))
3333 list_del(&m->list);
3334 if (!RB_EMPTY_NODE(&m->node))
3335 rb_erase(&m->node, &sctx->pending_dir_moves);
3336 __free_recorded_refs(&m->update_refs);
3337 kfree(m);
3338}
3339
3340static void tail_append_pending_moves(struct send_ctx *sctx,
3341 struct pending_dir_move *moves,
3342 struct list_head *stack)
3343{
3344 if (list_empty(&moves->list)) {
3345 list_add_tail(&moves->list, stack);
3346 } else {
3347 LIST_HEAD(list);
3348 list_splice_init(&moves->list, &list);
3349 list_add_tail(&moves->list, stack);
3350 list_splice_tail(&list, stack);
3351 }
3352 if (!RB_EMPTY_NODE(&moves->node)) {
3353 rb_erase(&moves->node, &sctx->pending_dir_moves);
3354 RB_CLEAR_NODE(&moves->node);
3355 }
3356}
3357
3358static int apply_children_dir_moves(struct send_ctx *sctx)
3359{
3360 struct pending_dir_move *pm;
3361 struct list_head stack;
3362 u64 parent_ino = sctx->cur_ino;
3363 int ret = 0;
3364
3365 pm = get_pending_dir_moves(sctx, parent_ino);
3366 if (!pm)
3367 return 0;
3368
3369 INIT_LIST_HEAD(&stack);
3370 tail_append_pending_moves(sctx, pm, &stack);
3371
3372 while (!list_empty(&stack)) {
3373 pm = list_first_entry(&stack, struct pending_dir_move, list);
3374 parent_ino = pm->ino;
3375 ret = apply_dir_move(sctx, pm);
3376 free_pending_move(sctx, pm);
3377 if (ret)
3378 goto out;
3379 pm = get_pending_dir_moves(sctx, parent_ino);
3380 if (pm)
3381 tail_append_pending_moves(sctx, pm, &stack);
3382 }
3383 return 0;
3384
3385out:
3386 while (!list_empty(&stack)) {
3387 pm = list_first_entry(&stack, struct pending_dir_move, list);
3388 free_pending_move(sctx, pm);
3389 }
3390 return ret;
3391}
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
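/*
 * Check if the new reference's target directory entry, as it exists in the
 * parent snapshot, points to a directory inode that is itself waiting to be
 * moved or renamed (and was not orphanized). If so, turn the rename of the
 * current inode into a pending directory move so that it is only applied
 * after that directory has been moved.
 * Returns 1 if the rename was delayed, 0 if not and < 0 on error.
 */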
3429static int wait_for_dest_dir_move(struct send_ctx *sctx,
3430 struct recorded_ref *parent_ref,
3431 const bool is_orphan)
3432{
3433 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3434 struct btrfs_path *path;
3435 struct btrfs_key key;
3436 struct btrfs_key di_key;
3437 struct btrfs_dir_item *di;
3438 u64 left_gen;
3439 u64 right_gen;
3440 int ret = 0;
3441 struct waiting_dir_move *wdm;
3442
3443 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3444 return 0;
3445
3446 path = alloc_path_for_send();
3447 if (!path)
3448 return -ENOMEM;
3449
3450 key.objectid = parent_ref->dir;
3451 key.type = BTRFS_DIR_ITEM_KEY;
3452 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3453
3454 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3455 if (ret < 0) {
3456 goto out;
3457 } else if (ret > 0) {
3458 ret = 0;
3459 goto out;
3460 }
3461
3462 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3463 parent_ref->name_len);
3464 if (!di) {
3465 ret = 0;
3466 goto out;
3467 }
3468
3469
3470
3471
3472
3473
3474
3475
3476 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3477 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3478 ret = 0;
3479 goto out;
3480 }
3481
3482 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3483 &left_gen, NULL, NULL, NULL, NULL);
3484 if (ret < 0)
3485 goto out;
3486 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3487 &right_gen, NULL, NULL, NULL, NULL);
3488 if (ret < 0) {
3489 if (ret == -ENOENT)
3490 ret = 0;
3491 goto out;
3492 }
3493
3494
3495 if (right_gen != left_gen) {
3496 ret = 0;
3497 goto out;
3498 }
3499
3500 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3501 if (wdm && !wdm->orphanized) {
3502 ret = add_pending_dir_move(sctx,
3503 sctx->cur_ino,
3504 sctx->cur_inode_gen,
3505 di_key.objectid,
3506 &sctx->new_refs,
3507 &sctx->deleted_refs,
3508 is_orphan);
3509 if (!ret)
3510 ret = 1;
3511 }
3512out:
3513 btrfs_free_path(path);
3514 return ret;
3515}
3516
3517
3518
3519
3520
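/*
 * Walk up the path of ino2, following the first reference of each inode, and
 * check whether the inode ino1 (with generation ino1_gen) shows up as an
 * ancestor. Returns 1 if it does, 0 if not and < 0 on error.
 */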
3521static int check_ino_in_path(struct btrfs_root *root,
3522 const u64 ino1,
3523 const u64 ino1_gen,
3524 const u64 ino2,
3525 const u64 ino2_gen,
3526 struct fs_path *fs_path)
3527{
3528 u64 ino = ino2;
3529
3530 if (ino1 == ino2)
3531 return ino1_gen == ino2_gen;
3532
3533 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3534 u64 parent;
3535 u64 parent_gen;
3536 int ret;
3537
3538 fs_path_reset(fs_path);
3539 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3540 if (ret < 0)
3541 return ret;
3542 if (parent == ino1)
3543 return parent_gen == ino1_gen;
3544 ino = parent;
3545 }
3546 return 0;
3547}
3548
3549
3550
3551
3552
3553
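/*
 * Check if ino1 is an ancestor of ino2 in the given root. Unlike
 * check_ino_in_path(), this considers every reference of ino2 (hard links
 * included) by iterating all of its INODE_REF/INODE_EXTREF items.
 * Returns 1 if ino1 is an ancestor of ino2, 0 if not and < 0 on error.
 */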
3554static int is_ancestor(struct btrfs_root *root,
3555 const u64 ino1,
3556 const u64 ino1_gen,
3557 const u64 ino2,
3558 struct fs_path *fs_path)
3559{
3560 bool free_fs_path = false;
3561 int ret = 0;
3562 struct btrfs_path *path = NULL;
3563 struct btrfs_key key;
3564
3565 if (!fs_path) {
3566 fs_path = fs_path_alloc();
3567 if (!fs_path)
3568 return -ENOMEM;
3569 free_fs_path = true;
3570 }
3571
3572 path = alloc_path_for_send();
3573 if (!path) {
3574 ret = -ENOMEM;
3575 goto out;
3576 }
3577
3578 key.objectid = ino2;
3579 key.type = BTRFS_INODE_REF_KEY;
3580 key.offset = 0;
3581
3582 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3583 if (ret < 0)
3584 goto out;
3585
3586 while (true) {
3587 struct extent_buffer *leaf = path->nodes[0];
3588 int slot = path->slots[0];
3589 u32 cur_offset = 0;
3590 u32 item_size;
3591
3592 if (slot >= btrfs_header_nritems(leaf)) {
3593 ret = btrfs_next_leaf(root, path);
3594 if (ret < 0)
3595 goto out;
3596 if (ret > 0)
3597 break;
3598 continue;
3599 }
3600
3601 btrfs_item_key_to_cpu(leaf, &key, slot);
3602 if (key.objectid != ino2)
3603 break;
3604 if (key.type != BTRFS_INODE_REF_KEY &&
3605 key.type != BTRFS_INODE_EXTREF_KEY)
3606 break;
3607
3608 item_size = btrfs_item_size_nr(leaf, slot);
3609 while (cur_offset < item_size) {
3610 u64 parent;
3611 u64 parent_gen;
3612
3613 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3614 unsigned long ptr;
3615 struct btrfs_inode_extref *extref;
3616
3617 ptr = btrfs_item_ptr_offset(leaf, slot);
3618 extref = (struct btrfs_inode_extref *)
3619 (ptr + cur_offset);
3620 parent = btrfs_inode_extref_parent(leaf,
3621 extref);
3622 cur_offset += sizeof(*extref);
3623 cur_offset += btrfs_inode_extref_name_len(leaf,
3624 extref);
3625 } else {
3626 parent = key.offset;
3627 cur_offset = item_size;
3628 }
3629
3630 ret = get_inode_info(root, parent, NULL, &parent_gen,
3631 NULL, NULL, NULL, NULL);
3632 if (ret < 0)
3633 goto out;
3634 ret = check_ino_in_path(root, ino1, ino1_gen,
3635 parent, parent_gen, fs_path);
3636 if (ret)
3637 goto out;
3638 }
3639 path->slots[0]++;
3640 }
3641 ret = 0;
3642 out:
3643 btrfs_free_path(path);
3644 if (free_fs_path)
3645 fs_path_free(fs_path);
3646 return ret;
3647}
3648
3649static int wait_for_parent_move(struct send_ctx *sctx,
3650 struct recorded_ref *parent_ref,
3651 const bool is_orphan)
3652{
3653 int ret = 0;
3654 u64 ino = parent_ref->dir;
3655 u64 ino_gen = parent_ref->dir_gen;
3656 u64 parent_ino_before, parent_ino_after;
3657 struct fs_path *path_before = NULL;
3658 struct fs_path *path_after = NULL;
3659 int len1, len2;
3660
3661 path_after = fs_path_alloc();
3662 path_before = fs_path_alloc();
3663 if (!path_after || !path_before) {
3664 ret = -ENOMEM;
3665 goto out;
3666 }
3667
3668
3669
3670
3671
3672
3673
3674
3675 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3676 u64 parent_ino_after_gen;
3677
3678 if (is_waiting_for_move(sctx, ino)) {
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689 ret = is_ancestor(sctx->parent_root,
3690 sctx->cur_ino, sctx->cur_inode_gen,
3691 ino, path_before);
3692 if (ret)
3693 break;
3694 }
3695
3696 fs_path_reset(path_before);
3697 fs_path_reset(path_after);
3698
3699 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3700 &parent_ino_after_gen, path_after);
3701 if (ret < 0)
3702 goto out;
3703 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3704 NULL, path_before);
3705 if (ret < 0 && ret != -ENOENT) {
3706 goto out;
3707 } else if (ret == -ENOENT) {
3708 ret = 0;
3709 break;
3710 }
3711
3712 len1 = fs_path_len(path_before);
3713 len2 = fs_path_len(path_after);
3714 if (ino > sctx->cur_ino &&
3715 (parent_ino_before != parent_ino_after || len1 != len2 ||
3716 memcmp(path_before->start, path_after->start, len1))) {
3717 u64 parent_ino_gen;
3718
3719 ret = get_inode_info(sctx->parent_root, ino, NULL,
3720 &parent_ino_gen, NULL, NULL, NULL,
3721 NULL);
3722 if (ret < 0)
3723 goto out;
3724 if (ino_gen == parent_ino_gen) {
3725 ret = 1;
3726 break;
3727 }
3728 }
3729 ino = parent_ino_after;
3730 ino_gen = parent_ino_after_gen;
3731 }
3732
3733out:
3734 fs_path_free(path_before);
3735 fs_path_free(path_after);
3736
3737 if (ret == 1) {
3738 ret = add_pending_dir_move(sctx,
3739 sctx->cur_ino,
3740 sctx->cur_inode_gen,
3741 ino,
3742 &sctx->new_refs,
3743 &sctx->deleted_refs,
3744 is_orphan);
3745 if (!ret)
3746 ret = 1;
3747 }
3748
3749 return ret;
3750}
3751
3752static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3753{
3754 int ret;
3755 struct fs_path *new_path;
3756
3757
3758
3759
3760
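/*
 * The reference's name member points into its full_path string, so build the
 * updated path in a new fs_path and only replace the old one once it is
 * complete.
 */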
3761 new_path = fs_path_alloc();
3762 if (!new_path)
3763 return -ENOMEM;
3764
3765 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3766 if (ret < 0) {
3767 fs_path_free(new_path);
3768 return ret;
3769 }
3770 ret = fs_path_add(new_path, ref->name, ref->name_len);
3771 if (ret < 0) {
3772 fs_path_free(new_path);
3773 return ret;
3774 }
3775
3776 fs_path_free(ref->full_path);
3777 set_ref_path(ref, new_path);
3778
3779 return 0;
3780}
3781
3782
3783
3784
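/*
 * Process all recorded new and deleted references of the current inode. This
 * creates missing parent directories, orphanizes inodes whose paths are
 * overwritten, delays renames of directories that depend on moves of other
 * directories, and emits the resulting rename/link/unlink/rmdir commands. It
 * also collects the affected parent directories in check_dirs so their utimes
 * can be updated and pending rmdirs retried.
 */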
3785static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3786{
3787 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3788 int ret = 0;
3789 struct recorded_ref *cur;
3790 struct recorded_ref *cur2;
3791 struct list_head check_dirs;
3792 struct fs_path *valid_path = NULL;
3793 u64 ow_inode = 0;
3794 u64 ow_gen;
3795 u64 ow_mode;
3796 int did_overwrite = 0;
3797 int is_orphan = 0;
3798 u64 last_dir_ino_rm = 0;
3799 bool can_rename = true;
3800 bool orphanized_dir = false;
3801 bool orphanized_ancestor = false;
3802
3803 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3804
3805
3806
3807
3808
3809 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3810 INIT_LIST_HEAD(&check_dirs);
3811
3812 valid_path = fs_path_alloc();
3813 if (!valid_path) {
3814 ret = -ENOMEM;
3815 goto out;
3816 }
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
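/*
 * Determine the path to use while processing the refs. New inodes, and inodes
 * whose first ref in the parent snapshot was already overwritten by a
 * previously processed inode, are addressed through their unique orphan name
 * until their first new ref has been applied.
 */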
3829 if (!sctx->cur_inode_new) {
3830 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3831 sctx->cur_inode_gen);
3832 if (ret < 0)
3833 goto out;
3834 if (ret)
3835 did_overwrite = 1;
3836 }
3837 if (sctx->cur_inode_new || did_overwrite) {
3838 ret = gen_unique_name(sctx, sctx->cur_ino,
3839 sctx->cur_inode_gen, valid_path);
3840 if (ret < 0)
3841 goto out;
3842 is_orphan = 1;
3843 } else {
3844 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3845 valid_path);
3846 if (ret < 0)
3847 goto out;
3848 }
3849
3850 list_for_each_entry(cur, &sctx->new_refs, list) {
3851
3852
3853
3854
3855
3856
3857
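/*
 * The parent directory of this ref might not exist yet because it has a
 * higher inode number than the current inode. In that case create it here,
 * out of order, unless it was already created for an earlier ref of this
 * inode or earlier in the stream.
 */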
3858 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3859 if (ret < 0)
3860 goto out;
3861 if (ret == inode_state_will_create) {
3862 ret = 0;
3863
3864
3865
3866
3867 list_for_each_entry(cur2, &sctx->new_refs, list) {
3868 if (cur == cur2)
3869 break;
3870 if (cur2->dir == cur->dir) {
3871 ret = 1;
3872 break;
3873 }
3874 }
3875
3876
3877
3878
3879
3880 if (!ret)
3881 ret = did_create_dir(sctx, cur->dir);
3882 if (ret < 0)
3883 goto out;
3884 if (!ret) {
3885 ret = send_create_inode(sctx, cur->dir);
3886 if (ret < 0)
3887 goto out;
3888 }
3889 }
3890
3891
3892
3893
3894
3895
3896
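/*
 * Check if this new ref would overwrite the first ref of another, not yet
 * processed, inode. If so, orphanize that inode so the ref can take over its
 * path; if the overwritten ref is not the first ref of the other inode,
 * simply unlink it.
 */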
3897 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3898 cur->name, cur->name_len,
3899 &ow_inode, &ow_gen, &ow_mode);
3900 if (ret < 0)
3901 goto out;
3902 if (ret) {
3903 ret = is_first_ref(sctx->parent_root,
3904 ow_inode, cur->dir, cur->name,
3905 cur->name_len);
3906 if (ret < 0)
3907 goto out;
3908 if (ret) {
3909 struct name_cache_entry *nce;
3910 struct waiting_dir_move *wdm;
3911
3912 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3913 cur->full_path);
3914 if (ret < 0)
3915 goto out;
3916 if (S_ISDIR(ow_mode))
3917 orphanized_dir = true;
3918
3919
3920
3921
3922
3923
3924
3925 if (is_waiting_for_move(sctx, ow_inode)) {
3926 wdm = get_waiting_dir_move(sctx,
3927 ow_inode);
3928 ASSERT(wdm);
3929 wdm->orphanized = true;
3930 }
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942 nce = name_cache_search(sctx, ow_inode, ow_gen);
3943 if (nce) {
3944 name_cache_delete(sctx, nce);
3945 kfree(nce);
3946 }
3947
3948
3949
3950
3951
3952
3953
3954
3955 ret = is_ancestor(sctx->parent_root,
3956 ow_inode, ow_gen,
3957 sctx->cur_ino, NULL);
3958 if (ret > 0) {
3959 orphanized_ancestor = true;
3960 fs_path_reset(valid_path);
3961 ret = get_cur_path(sctx, sctx->cur_ino,
3962 sctx->cur_inode_gen,
3963 valid_path);
3964 }
3965 if (ret < 0)
3966 goto out;
3967 } else {
3968 ret = send_unlink(sctx, cur->full_path);
3969 if (ret < 0)
3970 goto out;
3971 }
3972 }
3973
3974 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
3975 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
3976 if (ret < 0)
3977 goto out;
3978 if (ret == 1) {
3979 can_rename = false;
3980 *pending_move = 1;
3981 }
3982 }
3983
3984 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
3985 can_rename) {
3986 ret = wait_for_parent_move(sctx, cur, is_orphan);
3987 if (ret < 0)
3988 goto out;
3989 if (ret == 1) {
3990 can_rename = false;
3991 *pending_move = 1;
3992 }
3993 }
3994
3995
3996
3997
3998
3999
4000 if (is_orphan && can_rename) {
4001 ret = send_rename(sctx, valid_path, cur->full_path);
4002 if (ret < 0)
4003 goto out;
4004 is_orphan = 0;
4005 ret = fs_path_copy(valid_path, cur->full_path);
4006 if (ret < 0)
4007 goto out;
4008 } else if (can_rename) {
4009 if (S_ISDIR(sctx->cur_inode_mode)) {
4010
4011
4012
4013
4014
4015 ret = send_rename(sctx, valid_path,
4016 cur->full_path);
4017 if (!ret)
4018 ret = fs_path_copy(valid_path,
4019 cur->full_path);
4020 if (ret < 0)
4021 goto out;
4022 } else {
4023
4024
4025
4026
4027
4028
4029
4030 if (orphanized_dir) {
4031 ret = update_ref_path(sctx, cur);
4032 if (ret < 0)
4033 goto out;
4034 }
4035 ret = send_link(sctx, cur->full_path,
4036 valid_path);
4037 if (ret < 0)
4038 goto out;
4039 }
4040 }
4041 ret = dup_ref(cur, &check_dirs);
4042 if (ret < 0)
4043 goto out;
4044 }
4045
4046 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4047
4048
4049
4050
4051
4052
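/*
 * The directory was deleted. Check whether it can be rmdir'ed already; if
 * not, move it to its orphan name and leave the rmdir for later, when the
 * last entry blocking it has been processed.
 */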
4053 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4054 sctx->cur_ino);
4055 if (ret < 0)
4056 goto out;
4057 if (ret) {
4058 ret = send_rmdir(sctx, valid_path);
4059 if (ret < 0)
4060 goto out;
4061 } else if (!is_orphan) {
4062 ret = orphanize_inode(sctx, sctx->cur_ino,
4063 sctx->cur_inode_gen, valid_path);
4064 if (ret < 0)
4065 goto out;
4066 is_orphan = 1;
4067 }
4068
4069 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4070 ret = dup_ref(cur, &check_dirs);
4071 if (ret < 0)
4072 goto out;
4073 }
4074 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4075 !list_empty(&sctx->deleted_refs)) {
4076
4077
4078
4079 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4080 list);
4081 ret = dup_ref(cur, &check_dirs);
4082 if (ret < 0)
4083 goto out;
4084 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4085
4086
4087
4088
4089
4090 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4091 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4092 sctx->cur_ino, sctx->cur_inode_gen,
4093 cur->name, cur->name_len);
4094 if (ret < 0)
4095 goto out;
4096 if (!ret) {
4097
4098
4099
4100
4101
4102
4103
4104 if (orphanized_ancestor) {
4105 ret = update_ref_path(sctx, cur);
4106 if (ret < 0)
4107 goto out;
4108 }
4109 ret = send_unlink(sctx, cur->full_path);
4110 if (ret < 0)
4111 goto out;
4112 }
4113 ret = dup_ref(cur, &check_dirs);
4114 if (ret < 0)
4115 goto out;
4116 }
4117
4118
4119
4120
4121
4122
4123
4124
4125 if (is_orphan) {
4126 ret = send_unlink(sctx, valid_path);
4127 if (ret < 0)
4128 goto out;
4129 }
4130 }
4131
4132
4133
4134
4135
4136
4137
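/*
 * Go through all directories that the current inode was referenced from.
 * Update their utimes if they still exist and, for directories pending
 * deletion, check whether the rmdir can finally be sent. Directories that
 * were not processed yet (higher inode number) are skipped here and handled
 * when their own inode is processed.
 */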
4138 list_for_each_entry(cur, &check_dirs, list) {
4139
4140
4141
4142
4143
4144 if (cur->dir > sctx->cur_ino)
4145 continue;
4146
4147 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4148 if (ret < 0)
4149 goto out;
4150
4151 if (ret == inode_state_did_create ||
4152 ret == inode_state_no_change) {
4153
4154 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4155 if (ret < 0)
4156 goto out;
4157 } else if (ret == inode_state_did_delete &&
4158 cur->dir != last_dir_ino_rm) {
4159 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4160 sctx->cur_ino);
4161 if (ret < 0)
4162 goto out;
4163 if (ret) {
4164 ret = get_cur_path(sctx, cur->dir,
4165 cur->dir_gen, valid_path);
4166 if (ret < 0)
4167 goto out;
4168 ret = send_rmdir(sctx, valid_path);
4169 if (ret < 0)
4170 goto out;
4171 last_dir_ino_rm = cur->dir;
4172 }
4173 }
4174 }
4175
4176 ret = 0;
4177
4178out:
4179 __free_recorded_refs(&check_dirs);
4180 free_recorded_refs(sctx);
4181 fs_path_free(valid_path);
4182 return ret;
4183}
4184
4185static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4186 void *ctx, struct list_head *refs)
4187{
4188 int ret = 0;
4189 struct send_ctx *sctx = ctx;
4190 struct fs_path *p;
4191 u64 gen;
4192
4193 p = fs_path_alloc();
4194 if (!p)
4195 return -ENOMEM;
4196
4197 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4198 NULL, NULL);
4199 if (ret < 0)
4200 goto out;
4201
4202 ret = get_cur_path(sctx, dir, gen, p);
4203 if (ret < 0)
4204 goto out;
4205 ret = fs_path_add_path(p, name);
4206 if (ret < 0)
4207 goto out;
4208
4209 ret = __record_ref(refs, dir, gen, p);
4210
4211out:
4212 if (ret)
4213 fs_path_free(p);
4214 return ret;
4215}
4216
4217static int __record_new_ref(int num, u64 dir, int index,
4218 struct fs_path *name,
4219 void *ctx)
4220{
4221 struct send_ctx *sctx = ctx;
4222 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4223}
4224
4225
4226static int __record_deleted_ref(int num, u64 dir, int index,
4227 struct fs_path *name,
4228 void *ctx)
4229{
4230 struct send_ctx *sctx = ctx;
4231 return record_ref(sctx->parent_root, dir, name, ctx,
4232 &sctx->deleted_refs);
4233}
4234
4235static int record_new_ref(struct send_ctx *sctx)
4236{
4237 int ret;
4238
4239 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4240 sctx->cmp_key, 0, __record_new_ref, sctx);
4241 if (ret < 0)
4242 goto out;
4243 ret = 0;
4244
4245out:
4246 return ret;
4247}
4248
4249static int record_deleted_ref(struct send_ctx *sctx)
4250{
4251 int ret;
4252
4253 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4254 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4255 if (ret < 0)
4256 goto out;
4257 ret = 0;
4258
4259out:
4260 return ret;
4261}
4262
4263struct find_ref_ctx {
4264 u64 dir;
4265 u64 dir_gen;
4266 struct btrfs_root *root;
4267 struct fs_path *name;
4268 int found_idx;
4269};
4270
4271static int __find_iref(int num, u64 dir, int index,
4272 struct fs_path *name,
4273 void *ctx_)
4274{
4275 struct find_ref_ctx *ctx = ctx_;
4276 u64 dir_gen;
4277 int ret;
4278
4279 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4280 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4281
4282
4283
4284
4285 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4286 NULL, NULL, NULL);
4287 if (ret)
4288 return ret;
4289 if (dir_gen != ctx->dir_gen)
4290 return 0;
4291 ctx->found_idx = num;
4292 return 1;
4293 }
4294 return 0;
4295}
4296
4297static int find_iref(struct btrfs_root *root,
4298 struct btrfs_path *path,
4299 struct btrfs_key *key,
4300 u64 dir, u64 dir_gen, struct fs_path *name)
4301{
4302 int ret;
4303 struct find_ref_ctx ctx;
4304
4305 ctx.dir = dir;
4306 ctx.name = name;
4307 ctx.dir_gen = dir_gen;
4308 ctx.found_idx = -1;
4309 ctx.root = root;
4310
4311 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4312 if (ret < 0)
4313 return ret;
4314
4315 if (ctx.found_idx == -1)
4316 return -ENOENT;
4317
4318 return ctx.found_idx;
4319}
4320
4321static int __record_changed_new_ref(int num, u64 dir, int index,
4322 struct fs_path *name,
4323 void *ctx)
4324{
4325 u64 dir_gen;
4326 int ret;
4327 struct send_ctx *sctx = ctx;
4328
4329 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4330 NULL, NULL, NULL);
4331 if (ret)
4332 return ret;
4333
4334 ret = find_iref(sctx->parent_root, sctx->right_path,
4335 sctx->cmp_key, dir, dir_gen, name);
4336 if (ret == -ENOENT)
4337 ret = __record_new_ref(num, dir, index, name, sctx);
4338 else if (ret > 0)
4339 ret = 0;
4340
4341 return ret;
4342}
4343
4344static int __record_changed_deleted_ref(int num, u64 dir, int index,
4345 struct fs_path *name,
4346 void *ctx)
4347{
4348 u64 dir_gen;
4349 int ret;
4350 struct send_ctx *sctx = ctx;
4351
4352 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4353 NULL, NULL, NULL);
4354 if (ret)
4355 return ret;
4356
4357 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4358 dir, dir_gen, name);
4359 if (ret == -ENOENT)
4360 ret = __record_deleted_ref(num, dir, index, name, sctx);
4361 else if (ret > 0)
4362 ret = 0;
4363
4364 return ret;
4365}
4366
4367static int record_changed_ref(struct send_ctx *sctx)
4368{
4369 int ret = 0;
4370
4371 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4372 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4373 if (ret < 0)
4374 goto out;
4375 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4376 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4377 if (ret < 0)
4378 goto out;
4379 ret = 0;
4380
4381out:
4382 return ret;
4383}
4384
4385
4386
4387
4388
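/*
 * Record and process all references of an inode at once. This is used for
 * inodes that only exist in one of the two snapshots, including the case
 * where an inode was deleted and recreated with a new generation.
 */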
4389static int process_all_refs(struct send_ctx *sctx,
4390 enum btrfs_compare_tree_result cmd)
4391{
4392 int ret;
4393 struct btrfs_root *root;
4394 struct btrfs_path *path;
4395 struct btrfs_key key;
4396 struct btrfs_key found_key;
4397 struct extent_buffer *eb;
4398 int slot;
4399 iterate_inode_ref_t cb;
4400 int pending_move = 0;
4401
4402 path = alloc_path_for_send();
4403 if (!path)
4404 return -ENOMEM;
4405
4406 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4407 root = sctx->send_root;
4408 cb = __record_new_ref;
4409 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4410 root = sctx->parent_root;
4411 cb = __record_deleted_ref;
4412 } else {
4413 btrfs_err(sctx->send_root->fs_info,
4414 "Wrong command %d in process_all_refs", cmd);
4415 ret = -EINVAL;
4416 goto out;
4417 }
4418
4419 key.objectid = sctx->cmp_key->objectid;
4420 key.type = BTRFS_INODE_REF_KEY;
4421 key.offset = 0;
4422 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4423 if (ret < 0)
4424 goto out;
4425
4426 while (1) {
4427 eb = path->nodes[0];
4428 slot = path->slots[0];
4429 if (slot >= btrfs_header_nritems(eb)) {
4430 ret = btrfs_next_leaf(root, path);
4431 if (ret < 0)
4432 goto out;
4433 else if (ret > 0)
4434 break;
4435 continue;
4436 }
4437
4438 btrfs_item_key_to_cpu(eb, &found_key, slot);
4439
4440 if (found_key.objectid != key.objectid ||
4441 (found_key.type != BTRFS_INODE_REF_KEY &&
4442 found_key.type != BTRFS_INODE_EXTREF_KEY))
4443 break;
4444
4445 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4446 if (ret < 0)
4447 goto out;
4448
4449 path->slots[0]++;
4450 }
4451 btrfs_release_path(path);
4452
4453
4454
4455
4456
4457
4458 ret = process_recorded_refs(sctx, &pending_move);
4459out:
4460 btrfs_free_path(path);
4461 return ret;
4462}
4463
4464static int send_set_xattr(struct send_ctx *sctx,
4465 struct fs_path *path,
4466 const char *name, int name_len,
4467 const char *data, int data_len)
4468{
4469 int ret = 0;
4470
4471 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4472 if (ret < 0)
4473 goto out;
4474
4475 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4476 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4477 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4478
4479 ret = send_cmd(sctx);
4480
4481tlv_put_failure:
4482out:
4483 return ret;
4484}
4485
4486static int send_remove_xattr(struct send_ctx *sctx,
4487 struct fs_path *path,
4488 const char *name, int name_len)
4489{
4490 int ret = 0;
4491
4492 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4493 if (ret < 0)
4494 goto out;
4495
4496 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4497 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4498
4499 ret = send_cmd(sctx);
4500
4501tlv_put_failure:
4502out:
4503 return ret;
4504}
4505
4506static int __process_new_xattr(int num, struct btrfs_key *di_key,
4507 const char *name, int name_len,
4508 const char *data, int data_len,
4509 u8 type, void *ctx)
4510{
4511 int ret;
4512 struct send_ctx *sctx = ctx;
4513 struct fs_path *p;
4514 struct posix_acl_xattr_header dummy_acl;
4515
4516 p = fs_path_alloc();
4517 if (!p)
4518 return -ENOMEM;
4519
4520
4521
4522
4523
4524
4525
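/*
 * POSIX ACL xattrs may be stored with zero length data, which the receiver
 * can not apply via setxattr. Send a minimal ACL header instead in that case.
 */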
4526 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4527 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4528 if (data_len == 0) {
4529 dummy_acl.a_version =
4530 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4531 data = (char *)&dummy_acl;
4532 data_len = sizeof(dummy_acl);
4533 }
4534 }
4535
4536 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4537 if (ret < 0)
4538 goto out;
4539
4540 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4541
4542out:
4543 fs_path_free(p);
4544 return ret;
4545}
4546
4547static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4548 const char *name, int name_len,
4549 const char *data, int data_len,
4550 u8 type, void *ctx)
4551{
4552 int ret;
4553 struct send_ctx *sctx = ctx;
4554 struct fs_path *p;
4555
4556 p = fs_path_alloc();
4557 if (!p)
4558 return -ENOMEM;
4559
4560 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4561 if (ret < 0)
4562 goto out;
4563
4564 ret = send_remove_xattr(sctx, p, name, name_len);
4565
4566out:
4567 fs_path_free(p);
4568 return ret;
4569}
4570
4571static int process_new_xattr(struct send_ctx *sctx)
4572{
4573 int ret = 0;
4574
4575 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4576 __process_new_xattr, sctx);
4577
4578 return ret;
4579}
4580
4581static int process_deleted_xattr(struct send_ctx *sctx)
4582{
4583 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4584 __process_deleted_xattr, sctx);
4585}
4586
4587struct find_xattr_ctx {
4588 const char *name;
4589 int name_len;
4590 int found_idx;
4591 char *found_data;
4592 int found_data_len;
4593};
4594
4595static int __find_xattr(int num, struct btrfs_key *di_key,
4596 const char *name, int name_len,
4597 const char *data, int data_len,
4598 u8 type, void *vctx)
4599{
4600 struct find_xattr_ctx *ctx = vctx;
4601
4602 if (name_len == ctx->name_len &&
4603 strncmp(name, ctx->name, name_len) == 0) {
4604 ctx->found_idx = num;
4605 ctx->found_data_len = data_len;
4606 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4607 if (!ctx->found_data)
4608 return -ENOMEM;
4609 return 1;
4610 }
4611 return 0;
4612}
4613
4614static int find_xattr(struct btrfs_root *root,
4615 struct btrfs_path *path,
4616 struct btrfs_key *key,
4617 const char *name, int name_len,
4618 char **data, int *data_len)
4619{
4620 int ret;
4621 struct find_xattr_ctx ctx;
4622
4623 ctx.name = name;
4624 ctx.name_len = name_len;
4625 ctx.found_idx = -1;
4626 ctx.found_data = NULL;
4627 ctx.found_data_len = 0;
4628
4629 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4630 if (ret < 0)
4631 return ret;
4632
4633 if (ctx.found_idx == -1)
4634 return -ENOENT;
4635 if (data) {
4636 *data = ctx.found_data;
4637 *data_len = ctx.found_data_len;
4638 } else {
4639 kfree(ctx.found_data);
4640 }
4641 return ctx.found_idx;
4642}
4643
4644
4645static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4646 const char *name, int name_len,
4647 const char *data, int data_len,
4648 u8 type, void *ctx)
4649{
4650 int ret;
4651 struct send_ctx *sctx = ctx;
4652 char *found_data = NULL;
4653 int found_data_len = 0;
4654
4655 ret = find_xattr(sctx->parent_root, sctx->right_path,
4656 sctx->cmp_key, name, name_len, &found_data,
4657 &found_data_len);
4658 if (ret == -ENOENT) {
4659 ret = __process_new_xattr(num, di_key, name, name_len, data,
4660 data_len, type, ctx);
4661 } else if (ret >= 0) {
4662 if (data_len != found_data_len ||
4663 memcmp(data, found_data, data_len)) {
4664 ret = __process_new_xattr(num, di_key, name, name_len,
4665 data, data_len, type, ctx);
4666 } else {
4667 ret = 0;
4668 }
4669 }
4670
4671 kfree(found_data);
4672 return ret;
4673}
4674
4675static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4676 const char *name, int name_len,
4677 const char *data, int data_len,
4678 u8 type, void *ctx)
4679{
4680 int ret;
4681 struct send_ctx *sctx = ctx;
4682
4683 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4684 name, name_len, NULL, NULL);
4685 if (ret == -ENOENT)
4686 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4687 data_len, type, ctx);
4688 else if (ret >= 0)
4689 ret = 0;
4690
4691 return ret;
4692}
4693
4694static int process_changed_xattr(struct send_ctx *sctx)
4695{
4696 int ret = 0;
4697
4698 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4699 __process_changed_new_xattr, sctx);
4700 if (ret < 0)
4701 goto out;
4702 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4703 __process_changed_deleted_xattr, sctx);
4704
4705out:
4706 return ret;
4707}
4708
4709static int process_all_new_xattrs(struct send_ctx *sctx)
4710{
4711 int ret;
4712 struct btrfs_root *root;
4713 struct btrfs_path *path;
4714 struct btrfs_key key;
4715 struct btrfs_key found_key;
4716 struct extent_buffer *eb;
4717 int slot;
4718
4719 path = alloc_path_for_send();
4720 if (!path)
4721 return -ENOMEM;
4722
4723 root = sctx->send_root;
4724
4725 key.objectid = sctx->cmp_key->objectid;
4726 key.type = BTRFS_XATTR_ITEM_KEY;
4727 key.offset = 0;
4728 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4729 if (ret < 0)
4730 goto out;
4731
4732 while (1) {
4733 eb = path->nodes[0];
4734 slot = path->slots[0];
4735 if (slot >= btrfs_header_nritems(eb)) {
4736 ret = btrfs_next_leaf(root, path);
4737 if (ret < 0) {
4738 goto out;
4739 } else if (ret > 0) {
4740 ret = 0;
4741 break;
4742 }
4743 continue;
4744 }
4745
4746 btrfs_item_key_to_cpu(eb, &found_key, slot);
4747 if (found_key.objectid != key.objectid ||
4748 found_key.type != key.type) {
4749 ret = 0;
4750 goto out;
4751 }
4752
4753 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4754 if (ret < 0)
4755 goto out;
4756
4757 path->slots[0]++;
4758 }
4759
4760out:
4761 btrfs_free_path(path);
4762 return ret;
4763}
4764
4765static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4766{
4767 struct btrfs_root *root = sctx->send_root;
4768 struct btrfs_fs_info *fs_info = root->fs_info;
4769 struct inode *inode;
4770 struct page *page;
4771 char *addr;
4772 struct btrfs_key key;
4773 pgoff_t index = offset >> PAGE_SHIFT;
4774 pgoff_t last_index;
4775 unsigned pg_offset = offset_in_page(offset);
4776 ssize_t ret = 0;
4777
4778 key.objectid = sctx->cur_ino;
4779 key.type = BTRFS_INODE_ITEM_KEY;
4780 key.offset = 0;
4781
4782 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4783 if (IS_ERR(inode))
4784 return PTR_ERR(inode);
4785
4786 if (offset + len > i_size_read(inode)) {
4787 if (offset > i_size_read(inode))
4788 len = 0;
4789 else
4790 len = i_size_read(inode) - offset;
4791 }
4792 if (len == 0)
4793 goto out;
4794
4795 last_index = (offset + len - 1) >> PAGE_SHIFT;
4796
4797
4798 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4799 file_ra_state_init(&sctx->ra, inode->i_mapping);
4800
4801 while (index <= last_index) {
4802 unsigned cur_len = min_t(unsigned, len,
4803 PAGE_SIZE - pg_offset);
4804
4805 page = find_lock_page(inode->i_mapping, index);
4806 if (!page) {
4807 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4808 NULL, index, last_index + 1 - index);
4809
4810 page = find_or_create_page(inode->i_mapping, index,
4811 GFP_KERNEL);
4812 if (!page) {
4813 ret = -ENOMEM;
4814 break;
4815 }
4816 }
4817
4818 if (PageReadahead(page)) {
4819 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4820 NULL, page, index, last_index + 1 - index);
4821 }
4822
4823 if (!PageUptodate(page)) {
4824 btrfs_readpage(NULL, page);
4825 lock_page(page);
4826 if (!PageUptodate(page)) {
4827 unlock_page(page);
4828 put_page(page);
4829 ret = -EIO;
4830 break;
4831 }
4832 }
4833
4834 addr = kmap(page);
4835 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4836 kunmap(page);
4837 unlock_page(page);
4838 put_page(page);
4839 index++;
4840 pg_offset = 0;
4841 len -= cur_len;
4842 ret += cur_len;
4843 }
4844out:
4845 iput(inode);
4846 return ret;
4847}
4848
4849
4850
4851
4852
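/*
 * Read len bytes of the current inode's data at the given offset into
 * sctx->read_buf and send them as a write command. Returns the number of
 * bytes sent, 0 at end of file, or a negative error.
 */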
4853static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4854{
4855 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4856 int ret = 0;
4857 struct fs_path *p;
4858 ssize_t num_read = 0;
4859
4860 p = fs_path_alloc();
4861 if (!p)
4862 return -ENOMEM;
4863
4864 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
4865
4866 num_read = fill_read_buf(sctx, offset, len);
4867 if (num_read <= 0) {
4868 if (num_read < 0)
4869 ret = num_read;
4870 goto out;
4871 }
4872
4873 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4874 if (ret < 0)
4875 goto out;
4876
4877 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4878 if (ret < 0)
4879 goto out;
4880
4881 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4882 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4883 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4884
4885 ret = send_cmd(sctx);
4886
4887tlv_put_failure:
4888out:
4889 fs_path_free(p);
4890 if (ret < 0)
4891 return ret;
4892 return num_read;
4893}
4894
4895
4896
4897
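/*
 * Send a clone command, instructing the receiver to clone len bytes from the
 * clone source (root, inode and offset described by clone_root) into the
 * current inode at the given offset.
 */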
4898static int send_clone(struct send_ctx *sctx,
4899 u64 offset, u32 len,
4900 struct clone_root *clone_root)
4901{
4902 int ret = 0;
4903 struct fs_path *p;
4904 u64 gen;
4905
4906 btrfs_debug(sctx->send_root->fs_info,
4907 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
4908 offset, len, clone_root->root->root_key.objectid,
4909 clone_root->ino, clone_root->offset);
4910
4911 p = fs_path_alloc();
4912 if (!p)
4913 return -ENOMEM;
4914
4915 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4916 if (ret < 0)
4917 goto out;
4918
4919 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4920 if (ret < 0)
4921 goto out;
4922
4923 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4924 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4925 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4926
4927 if (clone_root->root == sctx->send_root) {
4928 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4929 &gen, NULL, NULL, NULL, NULL);
4930 if (ret < 0)
4931 goto out;
4932 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4933 } else {
4934 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4935 }
4936 if (ret < 0)
4937 goto out;
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
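/*
 * Identify the clone source subvolume by its received_uuid when one is set,
 * since that is the UUID the receiving side knows it by; fall back to the
 * subvolume's own UUID otherwise.
 */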
4948 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
4949 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4950 clone_root->root->root_item.received_uuid);
4951 else
4952 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4953 clone_root->root->root_item.uuid);
4954 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4955 le64_to_cpu(clone_root->root->root_item.ctransid));
4956 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4957 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4958 clone_root->offset);
4959
4960 ret = send_cmd(sctx);
4961
4962tlv_put_failure:
4963out:
4964 fs_path_free(p);
4965 return ret;
4966}
4967
4968
4969
4970
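/*
 * Send an update_extent command, used with BTRFS_SEND_FLAG_NO_FILE_DATA to
 * describe only the offset and length of changed file data instead of the
 * data itself.
 */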
4971static int send_update_extent(struct send_ctx *sctx,
4972 u64 offset, u32 len)
4973{
4974 int ret = 0;
4975 struct fs_path *p;
4976
4977 p = fs_path_alloc();
4978 if (!p)
4979 return -ENOMEM;
4980
4981 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4982 if (ret < 0)
4983 goto out;
4984
4985 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4986 if (ret < 0)
4987 goto out;
4988
4989 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4990 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4991 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4992
4993 ret = send_cmd(sctx);
4994
4995tlv_put_failure:
4996out:
4997 fs_path_free(p);
4998 return ret;
4999}
5000
5001static int send_hole(struct send_ctx *sctx, u64 end)
5002{
5003 struct fs_path *p = NULL;
5004 u64 offset = sctx->cur_inode_last_extent;
5005 u64 len;
5006 int ret = 0;
5007
5008
5009
5010
5011
5012
5013
5014 if (offset >= sctx->cur_inode_size)
5015 return 0;
5016
5017
5018
5019
5020
5021 end = min_t(u64, end, sctx->cur_inode_size);
5022
5023 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5024 return send_update_extent(sctx, offset, end - offset);
5025
5026 p = fs_path_alloc();
5027 if (!p)
5028 return -ENOMEM;
5029 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5030 if (ret < 0)
5031 goto tlv_put_failure;
5032 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
5033 while (offset < end) {
5034 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
5035
5036 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5037 if (ret < 0)
5038 break;
5039 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5040 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5041 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
5042 ret = send_cmd(sctx);
5043 if (ret < 0)
5044 break;
5045 offset += len;
5046 }
5047 sctx->cur_inode_next_write_offset = offset;
5048tlv_put_failure:
5049 fs_path_free(p);
5050 return ret;
5051}
5052
5053static int send_extent_data(struct send_ctx *sctx,
5054 const u64 offset,
5055 const u64 len)
5056{
5057 u64 sent = 0;
5058
5059 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5060 return send_update_extent(sctx, offset, len);
5061
5062 while (sent < len) {
5063 u64 size = len - sent;
5064 int ret;
5065
5066 if (size > BTRFS_SEND_READ_SIZE)
5067 size = BTRFS_SEND_READ_SIZE;
5068 ret = send_write(sctx, offset + sent, size);
5069 if (ret < 0)
5070 return ret;
5071 if (!ret)
5072 break;
5073 sent += ret;
5074 }
5075 return 0;
5076}
5077
5078static int clone_range(struct send_ctx *sctx,
5079 struct clone_root *clone_root,
5080 const u64 disk_byte,
5081 u64 data_offset,
5082 u64 offset,
5083 u64 len)
5084{
5085 struct btrfs_path *path;
5086 struct btrfs_key key;
5087 int ret;
5088 u64 clone_src_i_size = 0;
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102
5103
5104
5105 if (clone_root->offset == 0 &&
5106 len == sctx->send_root->fs_info->sectorsize)
5107 return send_extent_data(sctx, offset, len);
5108
5109 path = alloc_path_for_send();
5110 if (!path)
5111 return -ENOMEM;
5112
5113
5114
5115
5116
5117 ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5118 &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
5119 btrfs_release_path(path);
5120 if (ret < 0)
5121 goto out;
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145 key.objectid = clone_root->ino;
5146 key.type = BTRFS_EXTENT_DATA_KEY;
5147 key.offset = clone_root->offset;
5148 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5149 if (ret < 0)
5150 goto out;
5151 if (ret > 0 && path->slots[0] > 0) {
5152 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5153 if (key.objectid == clone_root->ino &&
5154 key.type == BTRFS_EXTENT_DATA_KEY)
5155 path->slots[0]--;
5156 }
5157
5158 while (true) {
5159 struct extent_buffer *leaf = path->nodes[0];
5160 int slot = path->slots[0];
5161 struct btrfs_file_extent_item *ei;
5162 u8 type;
5163 u64 ext_len;
5164 u64 clone_len;
5165 u64 clone_data_offset;
5166
5167 if (slot >= btrfs_header_nritems(leaf)) {
5168 ret = btrfs_next_leaf(clone_root->root, path);
5169 if (ret < 0)
5170 goto out;
5171 else if (ret > 0)
5172 break;
5173 continue;
5174 }
5175
5176 btrfs_item_key_to_cpu(leaf, &key, slot);
5177
5178
5179
5180
5181
5182 if (key.objectid != clone_root->ino ||
5183 key.type != BTRFS_EXTENT_DATA_KEY)
5184 break;
5185
5186 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5187 type = btrfs_file_extent_type(leaf, ei);
5188 if (type == BTRFS_FILE_EXTENT_INLINE) {
5189 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5190 ext_len = PAGE_ALIGN(ext_len);
5191 } else {
5192 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5193 }
5194
5195 if (key.offset + ext_len <= clone_root->offset)
5196 goto next;
5197
5198 if (key.offset > clone_root->offset) {
5199
5200 u64 hole_len = key.offset - clone_root->offset;
5201
5202 if (hole_len > len)
5203 hole_len = len;
5204 ret = send_extent_data(sctx, offset, hole_len);
5205 if (ret < 0)
5206 goto out;
5207
5208 len -= hole_len;
5209 if (len == 0)
5210 break;
5211 offset += hole_len;
5212 clone_root->offset += hole_len;
5213 data_offset += hole_len;
5214 }
5215
5216 if (key.offset >= clone_root->offset + len)
5217 break;
5218
5219 if (key.offset >= clone_src_i_size)
5220 break;
5221
5222 if (key.offset + ext_len > clone_src_i_size)
5223 ext_len = clone_src_i_size - key.offset;
5224
5225 clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5226 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5227 clone_root->offset = key.offset;
5228 if (clone_data_offset < data_offset &&
5229 clone_data_offset + ext_len > data_offset) {
5230 u64 extent_offset;
5231
5232 extent_offset = data_offset - clone_data_offset;
5233 ext_len -= extent_offset;
5234 clone_data_offset += extent_offset;
5235 clone_root->offset += extent_offset;
5236 }
5237 }
5238
5239 clone_len = min_t(u64, ext_len, len);
5240
5241 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5242 clone_data_offset == data_offset) {
5243 const u64 src_end = clone_root->offset + clone_len;
5244 const u64 sectorsize = SZ_64K;
5245
   /*
    * We can not clone the tail of a file when that tail is not aligned
    * to the sector size and the destination offset is not at the end of
    * the destination file: cloning an unaligned eof block into the
    * middle of a file is rejected (or may corrupt data) on the receiving
    * side. In that case clone only the part that is aligned down to the
    * sector size and send the remainder as a regular write.
    *
    * SZ_64K is used here because we do not know the sector size of the
    * filesystem that will receive the stream, so assume the largest
    * possible sector size.
    */
5264 if (src_end == clone_src_i_size &&
5265 !IS_ALIGNED(src_end, sectorsize) &&
5266 offset + clone_len < sctx->cur_inode_size) {
5267 u64 slen;
5268
5269 slen = ALIGN_DOWN(src_end - clone_root->offset,
5270 sectorsize);
5271 if (slen > 0) {
5272 ret = send_clone(sctx, offset, slen,
5273 clone_root);
5274 if (ret < 0)
5275 goto out;
5276 }
5277 ret = send_extent_data(sctx, offset + slen,
5278 clone_len - slen);
5279 } else {
5280 ret = send_clone(sctx, offset, clone_len,
5281 clone_root);
5282 }
5283 } else {
5284 ret = send_extent_data(sctx, offset, clone_len);
5285 }
5286
5287 if (ret < 0)
5288 goto out;
5289
5290 len -= clone_len;
5291 if (len == 0)
5292 break;
5293 offset += clone_len;
5294 clone_root->offset += clone_len;
5295 data_offset += clone_len;
5296next:
5297 path->slots[0]++;
5298 }
5299
5300 if (len > 0)
5301 ret = send_extent_data(sctx, offset, len);
5302 else
5303 ret = 0;
5304out:
5305 btrfs_free_path(path);
5306 return ret;
5307}
5308
5309static int send_write_or_clone(struct send_ctx *sctx,
5310 struct btrfs_path *path,
5311 struct btrfs_key *key,
5312 struct clone_root *clone_root)
5313{
5314 int ret = 0;
5315 struct btrfs_file_extent_item *ei;
5316 u64 offset = key->offset;
5317 u64 len;
5318 u8 type;
5319 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5320
5321 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5322 struct btrfs_file_extent_item);
5323 type = btrfs_file_extent_type(path->nodes[0], ei);
5324 if (type == BTRFS_FILE_EXTENT_INLINE) {
5325 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
  /*
   * The inline item may not cover the whole page, but there may be
   * valid data up to the aligned end, so round the length up to make
   * sure the whole thing is sent.
   */
5331 len = PAGE_ALIGN(len);
5332 } else {
5333 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5334 }
5335
5336 if (offset >= sctx->cur_inode_size) {
5337 ret = 0;
5338 goto out;
5339 }
5340 if (offset + len > sctx->cur_inode_size)
5341 len = sctx->cur_inode_size - offset;
5342 if (len == 0) {
5343 ret = 0;
5344 goto out;
5345 }
5346
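 /*
  * Cloning is only attempted if the end of the range is aligned to the
  * block size; otherwise the data is sent with regular writes.
  */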
5347 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5348 u64 disk_byte;
5349 u64 data_offset;
5350
5351 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5352 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5353 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5354 offset, len);
5355 } else {
5356 ret = send_extent_data(sctx, offset, len);
5357 }
5358 sctx->cur_inode_next_write_offset = offset + len;
5359out:
5360 return ret;
5361}
5362
5363static int is_extent_unchanged(struct send_ctx *sctx,
5364 struct btrfs_path *left_path,
5365 struct btrfs_key *ekey)
5366{
5367 int ret = 0;
5368 struct btrfs_key key;
5369 struct btrfs_path *path = NULL;
5370 struct extent_buffer *eb;
5371 int slot;
5372 struct btrfs_key found_key;
5373 struct btrfs_file_extent_item *ei;
5374 u64 left_disknr;
5375 u64 right_disknr;
5376 u64 left_offset;
5377 u64 right_offset;
5378 u64 left_offset_fixed;
5379 u64 left_len;
5380 u64 right_len;
5381 u64 left_gen;
5382 u64 right_gen;
5383 u8 left_type;
5384 u8 right_type;
5385
5386 path = alloc_path_for_send();
5387 if (!path)
5388 return -ENOMEM;
5389
5390 eb = left_path->nodes[0];
5391 slot = left_path->slots[0];
5392 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5393 left_type = btrfs_file_extent_type(eb, ei);
5394
5395 if (left_type != BTRFS_FILE_EXTENT_REG) {
5396 ret = 0;
5397 goto out;
5398 }
5399 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5400 left_len = btrfs_file_extent_num_bytes(eb, ei);
5401 left_offset = btrfs_file_extent_offset(eb, ei);
5402 left_gen = btrfs_file_extent_generation(eb, ei);
5403
 /*
  * Walk the file extent items of the parent snapshot that overlap the
  * extent we got from the send snapshot (the "left" extent) and check
  * that every overlapping part refers to the same disk extent, with
  * matching offsets and generation. Only then is the extent considered
  * unchanged (return 1), otherwise it is treated as changed (return 0).
  */
5425 key.objectid = ekey->objectid;
5426 key.type = BTRFS_EXTENT_DATA_KEY;
5427 key.offset = ekey->offset;
5428 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5429 if (ret < 0)
5430 goto out;
5431 if (ret) {
5432 ret = 0;
5433 goto out;
5434 }
5435
 /*
  * Handle the special case where the right side (parent snapshot) has no
  * extent items at all for this range.
  */
5439 eb = path->nodes[0];
5440 slot = path->slots[0];
5441 btrfs_item_key_to_cpu(eb, &found_key, slot);
5442 if (found_key.objectid != key.objectid ||
5443 found_key.type != key.type) {
  /* If the left extent is a hole, just pretend nothing changed. */
5445 ret = (left_disknr) ? 0 : 1;
5446 goto out;
5447 }
5448
 /*
  * Loop over all parent extent items that overlap the left extent's
  * range, starting with the one found by the search above.
  */
5452 key = found_key;
5453 while (key.offset < ekey->offset + left_len) {
5454 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5455 right_type = btrfs_file_extent_type(eb, ei);
5456 if (right_type != BTRFS_FILE_EXTENT_REG &&
5457 right_type != BTRFS_FILE_EXTENT_INLINE) {
5458 ret = 0;
5459 goto out;
5460 }
5461
5462 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5463 right_len = btrfs_file_extent_ram_bytes(eb, ei);
5464 right_len = PAGE_ALIGN(right_len);
5465 } else {
5466 right_len = btrfs_file_extent_num_bytes(eb, ei);
5467 }
5468
  /*
   * The right extent ends at or before the start of the left extent,
   * so it does not overlap it. This can only happen on the first
   * iteration, when the search landed on the previous item.
   */
5473 if (found_key.offset + right_len <= ekey->offset) {
   /* If the left extent is a hole, just pretend nothing changed. */
5475 ret = (left_disknr) ? 0 : 1;
5476 goto out;
5477 }
5478
  /*
   * An inline extent in the parent snapshot can not be compared against
   * the regular extent from the send snapshot, so consider the extent
   * as changed. This is not expected for a regular left extent, but can
   * happen in rare cases (e.g. compressed inline extents).
   */
5487 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5488 ret = 0;
5489 goto out;
5490 }
5491
5492 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5493 right_offset = btrfs_file_extent_offset(eb, ei);
5494 right_gen = btrfs_file_extent_generation(eb, ei);
5495
5496 left_offset_fixed = left_offset;
5497 if (key.offset < ekey->offset) {
   /* The right extent starts before the left one: fix up the right offset. */
5499 right_offset += ekey->offset - key.offset;
5500 } else {
   /* The right extent starts after the left one: fix up the left offset instead. */
5502 left_offset_fixed += key.offset - ekey->offset;
5503 }
5504
  /*
   * Check if both extents refer to the same data on disk.
   */
5508 if (left_disknr != right_disknr ||
5509 left_offset_fixed != right_offset ||
5510 left_gen != right_gen) {
5511 ret = 0;
5512 goto out;
5513 }
5514
  /*
   * Go to the next extent item in the parent snapshot.
   */
5518 ret = btrfs_next_item(sctx->parent_root, path);
5519 if (ret < 0)
5520 goto out;
5521 if (!ret) {
5522 eb = path->nodes[0];
5523 slot = path->slots[0];
5524 btrfs_item_key_to_cpu(eb, &found_key, slot);
5525 }
5526 if (ret || found_key.objectid != key.objectid ||
5527 found_key.type != key.type) {
5528 key.offset += right_len;
5529 break;
5530 }
5531 if (found_key.offset != key.offset + right_len) {
5532 ret = 0;
5533 goto out;
5534 }
5535 key = found_key;
5536 }
5537
 /*
  * If we walked past the end of the left extent, every overlapping part
  * matched and the extent is unchanged. Otherwise we ran out of items on
  * the right side before covering it, so it changed.
  */
5542 if (key.offset >= ekey->offset + left_len)
5543 ret = 1;
5544 else
5545 ret = 0;
5546
5547
5548out:
5549 btrfs_free_path(path);
5550 return ret;
5551}
5552
5553static int get_last_extent(struct send_ctx *sctx, u64 offset)
5554{
5555 struct btrfs_path *path;
5556 struct btrfs_root *root = sctx->send_root;
5557 struct btrfs_file_extent_item *fi;
5558 struct btrfs_key key;
5559 u64 extent_end;
5560 u8 type;
5561 int ret;
5562
5563 path = alloc_path_for_send();
5564 if (!path)
5565 return -ENOMEM;
5566
5567 sctx->cur_inode_last_extent = 0;
5568
5569 key.objectid = sctx->cur_ino;
5570 key.type = BTRFS_EXTENT_DATA_KEY;
5571 key.offset = offset;
5572 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5573 if (ret < 0)
5574 goto out;
5575 ret = 0;
5576 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5577 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5578 goto out;
5579
5580 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5581 struct btrfs_file_extent_item);
5582 type = btrfs_file_extent_type(path->nodes[0], fi);
5583 if (type == BTRFS_FILE_EXTENT_INLINE) {
5584 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
5585 extent_end = ALIGN(key.offset + size,
5586 sctx->send_root->fs_info->sectorsize);
5587 } else {
5588 extent_end = key.offset +
5589 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5590 }
5591 sctx->cur_inode_last_extent = extent_end;
5592out:
5593 btrfs_free_path(path);
5594 return ret;
5595}
5596
5597static int range_is_hole_in_parent(struct send_ctx *sctx,
5598 const u64 start,
5599 const u64 end)
5600{
5601 struct btrfs_path *path;
5602 struct btrfs_key key;
5603 struct btrfs_root *root = sctx->parent_root;
5604 u64 search_start = start;
5605 int ret;
5606
5607 path = alloc_path_for_send();
5608 if (!path)
5609 return -ENOMEM;
5610
5611 key.objectid = sctx->cur_ino;
5612 key.type = BTRFS_EXTENT_DATA_KEY;
5613 key.offset = search_start;
5614 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5615 if (ret < 0)
5616 goto out;
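
 /*
  * The previous item may be an extent that covers the start of the range,
  * so step back one slot if the exact key was not found.
  */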
5617 if (ret > 0 && path->slots[0] > 0)
5618 path->slots[0]--;
5619
5620 while (search_start < end) {
5621 struct extent_buffer *leaf = path->nodes[0];
5622 int slot = path->slots[0];
5623 struct btrfs_file_extent_item *fi;
5624 u64 extent_end;
5625
5626 if (slot >= btrfs_header_nritems(leaf)) {
5627 ret = btrfs_next_leaf(root, path);
5628 if (ret < 0)
5629 goto out;
5630 else if (ret > 0)
5631 break;
5632 continue;
5633 }
5634
5635 btrfs_item_key_to_cpu(leaf, &key, slot);
5636 if (key.objectid < sctx->cur_ino ||
5637 key.type < BTRFS_EXTENT_DATA_KEY)
5638 goto next;
5639 if (key.objectid > sctx->cur_ino ||
5640 key.type > BTRFS_EXTENT_DATA_KEY ||
5641 key.offset >= end)
5642 break;
5643
5644 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5645 if (btrfs_file_extent_type(leaf, fi) ==
5646 BTRFS_FILE_EXTENT_INLINE) {
5647 u64 size = btrfs_file_extent_ram_bytes(leaf, fi);
5648
5649 extent_end = ALIGN(key.offset + size,
5650 root->fs_info->sectorsize);
5651 } else {
5652 extent_end = key.offset +
5653 btrfs_file_extent_num_bytes(leaf, fi);
5654 }
5655 if (extent_end <= start)
5656 goto next;
5657 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5658 search_start = extent_end;
5659 goto next;
5660 }
5661 ret = 0;
5662 goto out;
5663next:
5664 path->slots[0]++;
5665 }
5666 ret = 1;
5667out:
5668 btrfs_free_path(path);
5669 return ret;
5670}
5671
5672static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5673 struct btrfs_key *key)
5674{
5675 struct btrfs_file_extent_item *fi;
5676 u64 extent_end;
5677 u8 type;
5678 int ret = 0;
5679
5680 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5681 return 0;
5682
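 /* Lazily look up the end offset of the extent that precedes this item. */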
5683 if (sctx->cur_inode_last_extent == (u64)-1) {
5684 ret = get_last_extent(sctx, key->offset - 1);
5685 if (ret)
5686 return ret;
5687 }
5688
5689 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5690 struct btrfs_file_extent_item);
5691 type = btrfs_file_extent_type(path->nodes[0], fi);
5692 if (type == BTRFS_FILE_EXTENT_INLINE) {
5693 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
5694 extent_end = ALIGN(key->offset + size,
5695 sctx->send_root->fs_info->sectorsize);
5696 } else {
5697 extent_end = key->offset +
5698 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5699 }
5700
5701 if (path->slots[0] == 0 &&
5702 sctx->cur_inode_last_extent < key->offset) {
  /*
   * The tree compare can skip entire shared leafs that contain only
   * file extent items for our current inode, so the last extent end we
   * have recorded may be stale. Refresh it from the item that precedes
   * the current one before deciding whether there is a hole.
   */
5710 ret = get_last_extent(sctx, key->offset - 1);
5711 if (ret)
5712 return ret;
5713 }
5714
5715 if (sctx->cur_inode_last_extent < key->offset) {
5716 ret = range_is_hole_in_parent(sctx,
5717 sctx->cur_inode_last_extent,
5718 key->offset);
5719 if (ret < 0)
5720 return ret;
5721 else if (ret == 0)
5722 ret = send_hole(sctx, key->offset);
5723 else
5724 ret = 0;
5725 }
5726 sctx->cur_inode_last_extent = extent_end;
5727 return ret;
5728}
5729
5730static int process_extent(struct send_ctx *sctx,
5731 struct btrfs_path *path,
5732 struct btrfs_key *key)
5733{
5734 struct clone_root *found_clone = NULL;
5735 int ret = 0;
5736
5737 if (S_ISLNK(sctx->cur_inode_mode))
5738 return 0;
5739
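 /*
  * For an incremental send of an inode that is not new, check if the
  * extent is unchanged relative to the parent snapshot; if so there is
  * nothing to send for it, besides a possible hole handled below.
  */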
5740 if (sctx->parent_root && !sctx->cur_inode_new) {
5741 ret = is_extent_unchanged(sctx, path, key);
5742 if (ret < 0)
5743 goto out;
5744 if (ret) {
5745 ret = 0;
5746 goto out_hole;
5747 }
5748 } else {
5749 struct btrfs_file_extent_item *ei;
5750 u8 type;
5751
5752 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5753 struct btrfs_file_extent_item);
5754 type = btrfs_file_extent_type(path->nodes[0], ei);
5755 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5756 type == BTRFS_FILE_EXTENT_REG) {
   /*
    * The send stream does not have a command for preallocated
    * extents, so just leave a hole for them until a new stream
    * version adds one.
    */
5763 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5764 ret = 0;
5765 goto out;
5766 }
5767
   /* Have a hole (disk_bytenr == 0), just skip it. */
5769 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5770 ret = 0;
5771 goto out;
5772 }
5773 }
5774 }
5775
5776 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5777 sctx->cur_inode_size, &found_clone);
5778 if (ret != -ENOENT && ret < 0)
5779 goto out;
5780
5781 ret = send_write_or_clone(sctx, path, key, found_clone);
5782 if (ret)
5783 goto out;
5784out_hole:
5785 ret = maybe_send_hole(sctx, path, key);
5786out:
5787 return ret;
5788}
5789
5790static int process_all_extents(struct send_ctx *sctx)
5791{
5792 int ret;
5793 struct btrfs_root *root;
5794 struct btrfs_path *path;
5795 struct btrfs_key key;
5796 struct btrfs_key found_key;
5797 struct extent_buffer *eb;
5798 int slot;
5799
5800 root = sctx->send_root;
5801 path = alloc_path_for_send();
5802 if (!path)
5803 return -ENOMEM;
5804
5805 key.objectid = sctx->cmp_key->objectid;
5806 key.type = BTRFS_EXTENT_DATA_KEY;
5807 key.offset = 0;
5808 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5809 if (ret < 0)
5810 goto out;
5811
5812 while (1) {
5813 eb = path->nodes[0];
5814 slot = path->slots[0];
5815
5816 if (slot >= btrfs_header_nritems(eb)) {
5817 ret = btrfs_next_leaf(root, path);
5818 if (ret < 0) {
5819 goto out;
5820 } else if (ret > 0) {
5821 ret = 0;
5822 break;
5823 }
5824 continue;
5825 }
5826
5827 btrfs_item_key_to_cpu(eb, &found_key, slot);
5828
5829 if (found_key.objectid != key.objectid ||
5830 found_key.type != key.type) {
5831 ret = 0;
5832 goto out;
5833 }
5834
5835 ret = process_extent(sctx, path, &found_key);
5836 if (ret < 0)
5837 goto out;
5838
5839 path->slots[0]++;
5840 }
5841
5842out:
5843 btrfs_free_path(path);
5844 return ret;
5845}
5846
5847static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
5848 int *pending_move,
5849 int *refs_processed)
5850{
5851 int ret = 0;
5852
5853 if (sctx->cur_ino == 0)
5854 goto out;
5855 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
5856 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
5857 goto out;
5858 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
5859 goto out;
5860
5861 ret = process_recorded_refs(sctx, pending_move);
5862 if (ret < 0)
5863 goto out;
5864
5865 *refs_processed = 1;
5866out:
5867 return ret;
5868}
5869
5870static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
5871{
5872 int ret = 0;
5873 u64 left_mode;
5874 u64 left_uid;
5875 u64 left_gid;
5876 u64 right_mode;
5877 u64 right_uid;
5878 u64 right_gid;
5879 int need_chmod = 0;
5880 int need_chown = 0;
5881 int need_truncate = 1;
5882 int pending_move = 0;
5883 int refs_processed = 0;
5884
5885 if (sctx->ignore_cur_inode)
5886 return 0;
5887
5888 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
5889 &refs_processed);
5890 if (ret < 0)
5891 goto out;
5892
 /*
  * We have processed the recorded references, so advance send_progress
  * past the current inode: name and path lookups will then take the
  * updated references into account. If the inode is a directory with a
  * pending move/rename we can not advance yet, because its final path is
  * not determined.
  */
5905 if (refs_processed && !pending_move)
5906 sctx->send_progress = sctx->cur_ino + 1;
5907
5908 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
5909 goto out;
5910 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
5911 goto out;
5912
5913 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
5914 &left_mode, &left_uid, &left_gid, NULL);
5915 if (ret < 0)
5916 goto out;
5917
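 /*
  * Decide whether chown, chmod and truncate commands are needed, either
  * because the inode is new or because the respective attributes differ
  * from the parent snapshot.
  */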
5918 if (!sctx->parent_root || sctx->cur_inode_new) {
5919 need_chown = 1;
5920 if (!S_ISLNK(sctx->cur_inode_mode))
5921 need_chmod = 1;
5922 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
5923 need_truncate = 0;
5924 } else {
5925 u64 old_size;
5926
5927 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
5928 &old_size, NULL, &right_mode, &right_uid,
5929 &right_gid, NULL);
5930 if (ret < 0)
5931 goto out;
5932
5933 if (left_uid != right_uid || left_gid != right_gid)
5934 need_chown = 1;
5935 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
5936 need_chmod = 1;
5937 if ((old_size == sctx->cur_inode_size) ||
5938 (sctx->cur_inode_size > old_size &&
5939 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
5940 need_truncate = 0;
5941 }
5942
5943 if (S_ISREG(sctx->cur_inode_mode)) {
5944 if (need_send_hole(sctx)) {
5945 if (sctx->cur_inode_last_extent == (u64)-1 ||
5946 sctx->cur_inode_last_extent <
5947 sctx->cur_inode_size) {
5948 ret = get_last_extent(sctx, (u64)-1);
5949 if (ret)
5950 goto out;
5951 }
5952 if (sctx->cur_inode_last_extent <
5953 sctx->cur_inode_size) {
5954 ret = send_hole(sctx, sctx->cur_inode_size);
5955 if (ret)
5956 goto out;
5957 }
5958 }
5959 if (need_truncate) {
5960 ret = send_truncate(sctx, sctx->cur_ino,
5961 sctx->cur_inode_gen,
5962 sctx->cur_inode_size);
5963 if (ret < 0)
5964 goto out;
5965 }
5966 }
5967
5968 if (need_chown) {
5969 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5970 left_uid, left_gid);
5971 if (ret < 0)
5972 goto out;
5973 }
5974 if (need_chmod) {
5975 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5976 left_mode);
5977 if (ret < 0)
5978 goto out;
5979 }
5980
 /*
  * If other directory inodes depended on our current directory inode's
  * move/rename, now do their move/rename operations.
  */
5985 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
5986 ret = apply_children_dir_moves(sctx);
5987 if (ret)
5988 goto out;
5989
  /*
   * Send the utimes unconditionally, as earlier commands may have
   * touched the inode. Advance send_progress first so path lookups for
   * the utimes command see the updated references. If the inode is a
   * directory waiting to be moved/renamed, its utimes are sent after
   * the move instead, hence the is_waiting_for_move() check above.
   */
5996 sctx->send_progress = sctx->cur_ino + 1;
5997 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
5998 if (ret < 0)
5999 goto out;
6000 }
6001
6002out:
6003 return ret;
6004}
6005
6006struct parent_paths_ctx {
6007 struct list_head *refs;
6008 struct send_ctx *sctx;
6009};
6010
6011static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6012 void *ctx)
6013{
6014 struct parent_paths_ctx *ppctx = ctx;
6015
6016 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6017 ppctx->refs);
6018}
6019
/*
 * Issue unlink operations for every path of the current inode found in the
 * parent snapshot.
 */
6024static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6025{
6026 LIST_HEAD(deleted_refs);
6027 struct btrfs_path *path;
6028 struct btrfs_key key;
6029 struct parent_paths_ctx ctx;
6030 int ret;
6031
6032 path = alloc_path_for_send();
6033 if (!path)
6034 return -ENOMEM;
6035
6036 key.objectid = sctx->cur_ino;
6037 key.type = BTRFS_INODE_REF_KEY;
6038 key.offset = 0;
6039 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6040 if (ret < 0)
6041 goto out;
6042
6043 ctx.refs = &deleted_refs;
6044 ctx.sctx = sctx;
6045
6046 while (true) {
6047 struct extent_buffer *eb = path->nodes[0];
6048 int slot = path->slots[0];
6049
6050 if (slot >= btrfs_header_nritems(eb)) {
6051 ret = btrfs_next_leaf(sctx->parent_root, path);
6052 if (ret < 0)
6053 goto out;
6054 else if (ret > 0)
6055 break;
6056 continue;
6057 }
6058
6059 btrfs_item_key_to_cpu(eb, &key, slot);
6060 if (key.objectid != sctx->cur_ino)
6061 break;
6062 if (key.type != BTRFS_INODE_REF_KEY &&
6063 key.type != BTRFS_INODE_EXTREF_KEY)
6064 break;
6065
6066 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6067 record_parent_ref, &ctx);
6068 if (ret < 0)
6069 goto out;
6070
6071 path->slots[0]++;
6072 }
6073
6074 while (!list_empty(&deleted_refs)) {
6075 struct recorded_ref *ref;
6076
6077 ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6078 ret = send_unlink(sctx, ref->full_path);
6079 if (ret < 0)
6080 goto out;
6081 fs_path_free(ref->full_path);
6082 list_del(&ref->list);
6083 kfree(ref);
6084 }
6085 ret = 0;
6086out:
6087 btrfs_free_path(path);
6088 if (ret)
6089 __free_recorded_refs(&deleted_refs);
6090 return ret;
6091}
6092
6093static int changed_inode(struct send_ctx *sctx,
6094 enum btrfs_compare_tree_result result)
6095{
6096 int ret = 0;
6097 struct btrfs_key *key = sctx->cmp_key;
6098 struct btrfs_inode_item *left_ii = NULL;
6099 struct btrfs_inode_item *right_ii = NULL;
6100 u64 left_gen = 0;
6101 u64 right_gen = 0;
6102
6103 sctx->cur_ino = key->objectid;
6104 sctx->cur_inode_new_gen = 0;
6105 sctx->cur_inode_last_extent = (u64)-1;
6106 sctx->cur_inode_next_write_offset = 0;
6107 sctx->ignore_cur_inode = false;
6108
 /*
  * Set send_progress to the current inode. This tells path/name lookups
  * that the current inode's references are not processed yet. It is set
  * to cur_ino + 1 once the recorded references have been processed.
  */
6114 sctx->send_progress = sctx->cur_ino;
6115
6116 if (result == BTRFS_COMPARE_TREE_NEW ||
6117 result == BTRFS_COMPARE_TREE_CHANGED) {
6118 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6119 sctx->left_path->slots[0],
6120 struct btrfs_inode_item);
6121 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6122 left_ii);
6123 } else {
6124 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6125 sctx->right_path->slots[0],
6126 struct btrfs_inode_item);
6127 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6128 right_ii);
6129 }
6130 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6131 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6132 sctx->right_path->slots[0],
6133 struct btrfs_inode_item);
6134
6135 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6136 right_ii);
6137
  /*
   * The root directory is special: even if its generation differs we
   * must not treat it as deleted + recreated, since that would produce
   * a stream trying to delete/mkdir the root directory itself.
   */
6143 if (left_gen != right_gen &&
6144 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6145 sctx->cur_inode_new_gen = 1;
6146 }
6147
 /*
  * Inodes with a link count of zero (orphans) can show up when sending a
  * subvolume that was switched to read-only while files on it were still
  * held open and unlinked. For such inodes ignore all their changes and,
  * if the inode also exists in the parent snapshot, unlink all the paths
  * it had there.
  */
6162 if (result == BTRFS_COMPARE_TREE_NEW ||
6163 result == BTRFS_COMPARE_TREE_CHANGED) {
6164 u32 nlinks;
6165
6166 nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6167 if (nlinks == 0) {
6168 sctx->ignore_cur_inode = true;
6169 if (result == BTRFS_COMPARE_TREE_CHANGED)
6170 ret = btrfs_unlink_all_paths(sctx);
6171 goto out;
6172 }
6173 }
6174
6175 if (result == BTRFS_COMPARE_TREE_NEW) {
6176 sctx->cur_inode_gen = left_gen;
6177 sctx->cur_inode_new = 1;
6178 sctx->cur_inode_deleted = 0;
6179 sctx->cur_inode_size = btrfs_inode_size(
6180 sctx->left_path->nodes[0], left_ii);
6181 sctx->cur_inode_mode = btrfs_inode_mode(
6182 sctx->left_path->nodes[0], left_ii);
6183 sctx->cur_inode_rdev = btrfs_inode_rdev(
6184 sctx->left_path->nodes[0], left_ii);
6185 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6186 ret = send_create_inode_if_needed(sctx);
6187 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6188 sctx->cur_inode_gen = right_gen;
6189 sctx->cur_inode_new = 0;
6190 sctx->cur_inode_deleted = 1;
6191 sctx->cur_inode_size = btrfs_inode_size(
6192 sctx->right_path->nodes[0], right_ii);
6193 sctx->cur_inode_mode = btrfs_inode_mode(
6194 sctx->right_path->nodes[0], right_ii);
6195 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
  /*
   * The generation changed, which means the inode number was reused for
   * a different inode. Handle this by treating the old inode as deleted
   * and the new one as newly created.
   */
6203 if (sctx->cur_inode_new_gen) {
   /*
    * First, process the inode as if it was deleted.
    */
6207 sctx->cur_inode_gen = right_gen;
6208 sctx->cur_inode_new = 0;
6209 sctx->cur_inode_deleted = 1;
6210 sctx->cur_inode_size = btrfs_inode_size(
6211 sctx->right_path->nodes[0], right_ii);
6212 sctx->cur_inode_mode = btrfs_inode_mode(
6213 sctx->right_path->nodes[0], right_ii);
6214 ret = process_all_refs(sctx,
6215 BTRFS_COMPARE_TREE_DELETED);
6216 if (ret < 0)
6217 goto out;
6218
   /*
    * Now process the inode as if it was new.
    */
6222 sctx->cur_inode_gen = left_gen;
6223 sctx->cur_inode_new = 1;
6224 sctx->cur_inode_deleted = 0;
6225 sctx->cur_inode_size = btrfs_inode_size(
6226 sctx->left_path->nodes[0], left_ii);
6227 sctx->cur_inode_mode = btrfs_inode_mode(
6228 sctx->left_path->nodes[0], left_ii);
6229 sctx->cur_inode_rdev = btrfs_inode_rdev(
6230 sctx->left_path->nodes[0], left_ii);
6231 ret = send_create_inode_if_needed(sctx);
6232 if (ret < 0)
6233 goto out;
6234
6235 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6236 if (ret < 0)
6237 goto out;
6238
   /*
    * Advance send_progress now, as we do not go through
    * process_recorded_refs_if_needed() in the new_gen case.
    */
6242 sctx->send_progress = sctx->cur_ino + 1;
6243
   /*
    * Now process all extents and xattrs of the inode as if they
    * were all new.
    */
6248 ret = process_all_extents(sctx);
6249 if (ret < 0)
6250 goto out;
6251 ret = process_all_new_xattrs(sctx);
6252 if (ret < 0)
6253 goto out;
6254 } else {
6255 sctx->cur_inode_gen = left_gen;
6256 sctx->cur_inode_new = 0;
6257 sctx->cur_inode_new_gen = 0;
6258 sctx->cur_inode_deleted = 0;
6259 sctx->cur_inode_size = btrfs_inode_size(
6260 sctx->left_path->nodes[0], left_ii);
6261 sctx->cur_inode_mode = btrfs_inode_mode(
6262 sctx->left_path->nodes[0], left_ii);
6263 }
6264 }
6265
6266out:
6267 return ret;
6268}
6269
/*
 * New references have to be processed before deleted references, but the tree
 * compare delivers them mixed. So references are only recorded here (in
 * new_refs/deleted_refs) and processed later by process_recorded_refs(). In
 * the cur_inode_new_gen case recording is skipped entirely, because
 * changed_inode() already handled all references of the old and new inode via
 * process_all_refs().
 */
6280static int changed_ref(struct send_ctx *sctx,
6281 enum btrfs_compare_tree_result result)
6282{
6283 int ret = 0;
6284
6285 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6286 inconsistent_snapshot_error(sctx, result, "reference");
6287 return -EIO;
6288 }
6289
6290 if (!sctx->cur_inode_new_gen &&
6291 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6292 if (result == BTRFS_COMPARE_TREE_NEW)
6293 ret = record_new_ref(sctx);
6294 else if (result == BTRFS_COMPARE_TREE_DELETED)
6295 ret = record_deleted_ref(sctx);
6296 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6297 ret = record_changed_ref(sctx);
6298 }
6299
6300 return ret;
6301}
6302
/*
 * Process new/deleted/changed xattrs. Skipped in the cur_inode_new_gen case,
 * because changed_inode() already initiated processing of all xattrs (same
 * reasoning as for changed_ref()).
 */
6308static int changed_xattr(struct send_ctx *sctx,
6309 enum btrfs_compare_tree_result result)
6310{
6311 int ret = 0;
6312
6313 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6314 inconsistent_snapshot_error(sctx, result, "xattr");
6315 return -EIO;
6316 }
6317
6318 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6319 if (result == BTRFS_COMPARE_TREE_NEW)
6320 ret = process_new_xattr(sctx);
6321 else if (result == BTRFS_COMPARE_TREE_DELETED)
6322 ret = process_deleted_xattr(sctx);
6323 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6324 ret = process_changed_xattr(sctx);
6325 }
6326
6327 return ret;
6328}
6329
/*
 * Process new/deleted/changed extents. Skipped in the cur_inode_new_gen case,
 * because changed_inode() already initiated processing of all extents (same
 * reasoning as for changed_ref()).
 */
6335static int changed_extent(struct send_ctx *sctx,
6336 enum btrfs_compare_tree_result result)
6337{
6338 int ret = 0;
6339
 /*
  * An extent item may change without the corresponding inode item having
  * changed, for example after relocation (which rewrites disk_bytenr in
  * file extent items) or after deduplication against a file that exists
  * in both snapshots. In that case the key refers to an inode we are not
  * currently processing, and there is nothing to do here.
  */
6353 if (sctx->cur_ino != sctx->cmp_key->objectid)
6354 return 0;
6355
6356 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6357 if (result != BTRFS_COMPARE_TREE_DELETED)
6358 ret = process_extent(sctx, sctx->left_path,
6359 sctx->cmp_key);
6360 }
6361
6362 return ret;
6363}
6364
6365static int dir_changed(struct send_ctx *sctx, u64 dir)
6366{
6367 u64 orig_gen, new_gen;
6368 int ret;
6369
6370 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6371 NULL, NULL);
6372 if (ret)
6373 return ret;
6374
6375 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6376 NULL, NULL, NULL);
6377 if (ret)
6378 return ret;
6379
6380 return (orig_gen != new_gen) ? 1 : 0;
6381}
6382
6383static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6384 struct btrfs_key *key)
6385{
6386 struct btrfs_inode_extref *extref;
6387 struct extent_buffer *leaf;
6388 u64 dirid = 0, last_dirid = 0;
6389 unsigned long ptr;
6390 u32 item_size;
6391 u32 cur_offset = 0;
6392 int ref_name_len;
6393 int ret = 0;
6394
 /* Easy case: a plain inode ref, just check this one dirid. */
6396 if (key->type == BTRFS_INODE_REF_KEY) {
6397 dirid = key->offset;
6398
6399 ret = dir_changed(sctx, dirid);
6400 goto out;
6401 }
6402
6403 leaf = path->nodes[0];
6404 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6405 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6406 while (cur_offset < item_size) {
6407 extref = (struct btrfs_inode_extref *)(ptr +
6408 cur_offset);
6409 dirid = btrfs_inode_extref_parent(leaf, extref);
6410 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6411 cur_offset += ref_name_len + sizeof(*extref);
6412 if (dirid == last_dirid)
6413 continue;
6414 ret = dir_changed(sctx, dirid);
6415 if (ret)
6416 break;
6417 last_dirid = dirid;
6418 }
6419out:
6420 return ret;
6421}
6422
/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
6427static int changed_cb(struct btrfs_path *left_path,
6428 struct btrfs_path *right_path,
6429 struct btrfs_key *key,
6430 enum btrfs_compare_tree_result result,
6431 void *ctx)
6432{
6433 int ret = 0;
6434 struct send_ctx *sctx = ctx;
6435
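 /*
  * Items reported as identical may still require work: an inode ref whose
  * parent directory changed generation must be reprocessed, and an extent
  * item may imply a hole that has to be sent.
  */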
6436 if (result == BTRFS_COMPARE_TREE_SAME) {
6437 if (key->type == BTRFS_INODE_REF_KEY ||
6438 key->type == BTRFS_INODE_EXTREF_KEY) {
6439 ret = compare_refs(sctx, left_path, key);
6440 if (!ret)
6441 return 0;
6442 if (ret < 0)
6443 return ret;
6444 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6445 return maybe_send_hole(sctx, left_path, key);
6446 } else {
6447 return 0;
6448 }
6449 result = BTRFS_COMPARE_TREE_CHANGED;
6450 ret = 0;
6451 }
6452
6453 sctx->left_path = left_path;
6454 sctx->right_path = right_path;
6455 sctx->cmp_key = key;
6456
6457 ret = finish_inode_if_needed(sctx, 0);
6458 if (ret < 0)
6459 goto out;
6460
 /* Ignore non-FS objects (free inode and free space cache items). */
6462 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6463 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6464 goto out;
6465
6466 if (key->type == BTRFS_INODE_ITEM_KEY) {
6467 ret = changed_inode(sctx, result);
6468 } else if (!sctx->ignore_cur_inode) {
6469 if (key->type == BTRFS_INODE_REF_KEY ||
6470 key->type == BTRFS_INODE_EXTREF_KEY)
6471 ret = changed_ref(sctx, result);
6472 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6473 ret = changed_xattr(sctx, result);
6474 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6475 ret = changed_extent(sctx, result);
6476 }
6477
6478out:
6479 return ret;
6480}
6481
6482static int full_send_tree(struct send_ctx *sctx)
6483{
6484 int ret;
6485 struct btrfs_root *send_root = sctx->send_root;
6486 struct btrfs_key key;
6487 struct btrfs_path *path;
6488 struct extent_buffer *eb;
6489 int slot;
6490
6491 path = alloc_path_for_send();
6492 if (!path)
6493 return -ENOMEM;
6494
6495 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6496 key.type = BTRFS_INODE_ITEM_KEY;
6497 key.offset = 0;
6498
6499 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6500 if (ret < 0)
6501 goto out;
6502 if (ret)
6503 goto out_finish;
6504
6505 while (1) {
6506 eb = path->nodes[0];
6507 slot = path->slots[0];
6508 btrfs_item_key_to_cpu(eb, &key, slot);
6509
6510 ret = changed_cb(path, NULL, &key,
6511 BTRFS_COMPARE_TREE_NEW, sctx);
6512 if (ret < 0)
6513 goto out;
6514
6515 ret = btrfs_next_item(send_root, path);
6516 if (ret < 0)
6517 goto out;
6518 if (ret) {
6519 ret = 0;
6520 break;
6521 }
6522 }
6523
6524out_finish:
6525 ret = finish_inode_if_needed(sctx, 1);
6526
6527out:
6528 btrfs_free_path(path);
6529 return ret;
6530}
6531
6532static int tree_move_down(struct btrfs_path *path, int *level)
6533{
6534 struct extent_buffer *eb;
6535
6536 BUG_ON(*level == 0);
6537 eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]);
6538 if (IS_ERR(eb))
6539 return PTR_ERR(eb);
6540
6541 path->nodes[*level - 1] = eb;
6542 path->slots[*level - 1] = 0;
6543 (*level)--;
6544 return 0;
6545}
6546
6547static int tree_move_next_or_upnext(struct btrfs_path *path,
6548 int *level, int root_level)
6549{
6550 int ret = 0;
6551 int nritems;
6552 nritems = btrfs_header_nritems(path->nodes[*level]);
6553
6554 path->slots[*level]++;
6555
6556 while (path->slots[*level] >= nritems) {
6557 if (*level == root_level)
6558 return -1;
6559
  /* Nothing left in this node: move up one level and to the next slot. */
6561 path->slots[*level] = 0;
6562 free_extent_buffer(path->nodes[*level]);
6563 path->nodes[*level] = NULL;
6564 (*level)++;
6565 path->slots[*level]++;
6566
6567 nritems = btrfs_header_nritems(path->nodes[*level]);
6568 ret = 1;
6569 }
6570 return ret;
6571}
6572
/*
 * Returns 1 if it had to move up and next, 0 if it moved only next or down,
 * and -1 when the end of the tree (root level) was reached.
 */
6577static int tree_advance(struct btrfs_path *path,
6578 int *level, int root_level,
6579 int allow_down,
6580 struct btrfs_key *key)
6581{
6582 int ret;
6583
6584 if (*level == 0 || !allow_down) {
6585 ret = tree_move_next_or_upnext(path, level, root_level);
6586 } else {
6587 ret = tree_move_down(path, level);
6588 }
6589 if (ret >= 0) {
6590 if (*level == 0)
6591 btrfs_item_key_to_cpu(path->nodes[*level], key,
6592 path->slots[*level]);
6593 else
6594 btrfs_node_key_to_cpu(path->nodes[*level], key,
6595 path->slots[*level]);
6596 }
6597 return ret;
6598}
6599
6600static int tree_compare_item(struct btrfs_path *left_path,
6601 struct btrfs_path *right_path,
6602 char *tmp_buf)
6603{
6604 int cmp;
6605 int len1, len2;
6606 unsigned long off1, off2;
6607
6608 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
6609 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
6610 if (len1 != len2)
6611 return 1;
6612
6613 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
6614 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
6615 right_path->slots[0]);
6616
6617 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
6618
6619 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
6620 if (cmp)
6621 return 1;
6622 return 0;
6623}
6624
/*
 * Compare two trees and call @changed_cb for every new, deleted or changed
 * item found. Whenever both trees point to the same (shared) tree block, the
 * whole subtree below it is skipped, which makes the comparison fast for
 * snapshotted subvolumes.
 *
 * This works on the commit roots only. Commit roots are read only, so no
 * locking is done on the tree blocks while walking them.
 */
6638static int btrfs_compare_trees(struct btrfs_root *left_root,
6639 struct btrfs_root *right_root,
6640 btrfs_changed_cb_t changed_cb, void *ctx)
6641{
6642 struct btrfs_fs_info *fs_info = left_root->fs_info;
6643 int ret;
6644 int cmp;
6645 struct btrfs_path *left_path = NULL;
6646 struct btrfs_path *right_path = NULL;
6647 struct btrfs_key left_key;
6648 struct btrfs_key right_key;
6649 char *tmp_buf = NULL;
6650 int left_root_level;
6651 int right_root_level;
6652 int left_level;
6653 int right_level;
6654 int left_end_reached;
6655 int right_end_reached;
6656 int advance_left;
6657 int advance_right;
6658 u64 left_blockptr;
6659 u64 right_blockptr;
6660 u64 left_gen;
6661 u64 right_gen;
6662
6663 left_path = btrfs_alloc_path();
6664 if (!left_path) {
6665 ret = -ENOMEM;
6666 goto out;
6667 }
6668 right_path = btrfs_alloc_path();
6669 if (!right_path) {
6670 ret = -ENOMEM;
6671 goto out;
6672 }
6673
6674 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
6675 if (!tmp_buf) {
6676 ret = -ENOMEM;
6677 goto out;
6678 }
6679
6680 left_path->search_commit_root = 1;
6681 left_path->skip_locking = 1;
6682 right_path->search_commit_root = 1;
6683 right_path->skip_locking = 1;
6684
 /*
  * Strategy: start at the first item of both (commit root) trees, then:
  *
  * - If both trees are at level 0, compare the keys of the current items:
  *     left key < right key : the item is new, advance the left tree;
  *     left key > right key : the item was deleted, advance the right tree;
  *     keys are equal       : deep-compare the items and report them as
  *                            changed or same, then advance both trees.
  *
  * - If both trees are at the same level above 0, compare the keys of the
  *   current nodes:
  *     keys differ : advance the tree with the smaller key;
  *     keys equal  : if the child block pointer and generation also match,
  *                   the subtree below is shared, so skip it on both sides
  *                   (advance without going down); otherwise advance both
  *                   trees, allowing descent into the children.
  *
  * - If the trees are at different levels, advance the tree that is still
  *   at the higher level until the levels match again.
  *
  * Advancing means: at level 0 go to the next slot, walking up as long as
  * a node is exhausted; above level 0 either go down one level or, when
  * descent is not allowed (shared block), go to the next slot or up.
  */
6721 down_read(&fs_info->commit_root_sem);
6722 left_level = btrfs_header_level(left_root->commit_root);
6723 left_root_level = left_level;
6724 left_path->nodes[left_level] =
6725 btrfs_clone_extent_buffer(left_root->commit_root);
6726 if (!left_path->nodes[left_level]) {
6727 up_read(&fs_info->commit_root_sem);
6728 ret = -ENOMEM;
6729 goto out;
6730 }
6731
6732 right_level = btrfs_header_level(right_root->commit_root);
6733 right_root_level = right_level;
6734 right_path->nodes[right_level] =
6735 btrfs_clone_extent_buffer(right_root->commit_root);
6736 if (!right_path->nodes[right_level]) {
6737 up_read(&fs_info->commit_root_sem);
6738 ret = -ENOMEM;
6739 goto out;
6740 }
6741 up_read(&fs_info->commit_root_sem);
6742
6743 if (left_level == 0)
6744 btrfs_item_key_to_cpu(left_path->nodes[left_level],
6745 &left_key, left_path->slots[left_level]);
6746 else
6747 btrfs_node_key_to_cpu(left_path->nodes[left_level],
6748 &left_key, left_path->slots[left_level]);
6749 if (right_level == 0)
6750 btrfs_item_key_to_cpu(right_path->nodes[right_level],
6751 &right_key, right_path->slots[right_level]);
6752 else
6753 btrfs_node_key_to_cpu(right_path->nodes[right_level],
6754 &right_key, right_path->slots[right_level]);
6755
6756 left_end_reached = right_end_reached = 0;
6757 advance_left = advance_right = 0;
6758
6759 while (1) {
6760 cond_resched();
6761 if (advance_left && !left_end_reached) {
6762 ret = tree_advance(left_path, &left_level,
6763 left_root_level,
6764 advance_left != ADVANCE_ONLY_NEXT,
6765 &left_key);
6766 if (ret == -1)
6767 left_end_reached = ADVANCE;
6768 else if (ret < 0)
6769 goto out;
6770 advance_left = 0;
6771 }
6772 if (advance_right && !right_end_reached) {
6773 ret = tree_advance(right_path, &right_level,
6774 right_root_level,
6775 advance_right != ADVANCE_ONLY_NEXT,
6776 &right_key);
6777 if (ret == -1)
6778 right_end_reached = ADVANCE;
6779 else if (ret < 0)
6780 goto out;
6781 advance_right = 0;
6782 }
6783
6784 if (left_end_reached && right_end_reached) {
6785 ret = 0;
6786 goto out;
6787 } else if (left_end_reached) {
6788 if (right_level == 0) {
6789 ret = changed_cb(left_path, right_path,
6790 &right_key,
6791 BTRFS_COMPARE_TREE_DELETED,
6792 ctx);
6793 if (ret < 0)
6794 goto out;
6795 }
6796 advance_right = ADVANCE;
6797 continue;
6798 } else if (right_end_reached) {
6799 if (left_level == 0) {
6800 ret = changed_cb(left_path, right_path,
6801 &left_key,
6802 BTRFS_COMPARE_TREE_NEW,
6803 ctx);
6804 if (ret < 0)
6805 goto out;
6806 }
6807 advance_left = ADVANCE;
6808 continue;
6809 }
6810
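  /* Both trees are positioned on leaves: compare the current items. */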
6811 if (left_level == 0 && right_level == 0) {
6812 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
6813 if (cmp < 0) {
6814 ret = changed_cb(left_path, right_path,
6815 &left_key,
6816 BTRFS_COMPARE_TREE_NEW,
6817 ctx);
6818 if (ret < 0)
6819 goto out;
6820 advance_left = ADVANCE;
6821 } else if (cmp > 0) {
6822 ret = changed_cb(left_path, right_path,
6823 &right_key,
6824 BTRFS_COMPARE_TREE_DELETED,
6825 ctx);
6826 if (ret < 0)
6827 goto out;
6828 advance_right = ADVANCE;
6829 } else {
6830 enum btrfs_compare_tree_result result;
6831
6832 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
6833 ret = tree_compare_item(left_path, right_path,
6834 tmp_buf);
6835 if (ret)
6836 result = BTRFS_COMPARE_TREE_CHANGED;
6837 else
6838 result = BTRFS_COMPARE_TREE_SAME;
6839 ret = changed_cb(left_path, right_path,
6840 &left_key, result, ctx);
6841 if (ret < 0)
6842 goto out;
6843 advance_left = ADVANCE;
6844 advance_right = ADVANCE;
6845 }
6846 } else if (left_level == right_level) {
6847 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
6848 if (cmp < 0) {
6849 advance_left = ADVANCE;
6850 } else if (cmp > 0) {
6851 advance_right = ADVANCE;
6852 } else {
6853 left_blockptr = btrfs_node_blockptr(
6854 left_path->nodes[left_level],
6855 left_path->slots[left_level]);
6856 right_blockptr = btrfs_node_blockptr(
6857 right_path->nodes[right_level],
6858 right_path->slots[right_level]);
6859 left_gen = btrfs_node_ptr_generation(
6860 left_path->nodes[left_level],
6861 left_path->slots[left_level]);
6862 right_gen = btrfs_node_ptr_generation(
6863 right_path->nodes[right_level],
6864 right_path->slots[right_level]);
6865 if (left_blockptr == right_blockptr &&
6866 left_gen == right_gen) {
     /*
      * The child block is shared between both trees, so do
      * not allow descending into it.
      */
6871 advance_left = ADVANCE_ONLY_NEXT;
6872 advance_right = ADVANCE_ONLY_NEXT;
6873 } else {
6874 advance_left = ADVANCE;
6875 advance_right = ADVANCE;
6876 }
6877 }
6878 } else if (left_level < right_level) {
6879 advance_right = ADVANCE;
6880 } else {
6881 advance_left = ADVANCE;
6882 }
6883 }
6884
6885out:
6886 btrfs_free_path(left_path);
6887 btrfs_free_path(right_path);
6888 kvfree(tmp_buf);
6889 return ret;
6890}
6891
6892static int send_subvol(struct send_ctx *sctx)
6893{
6894 int ret;
6895
6896 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
6897 ret = send_header(sctx);
6898 if (ret < 0)
6899 goto out;
6900 }
6901
6902 ret = send_subvol_begin(sctx);
6903 if (ret < 0)
6904 goto out;
6905
6906 if (sctx->parent_root) {
6907 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
6908 changed_cb, sctx);
6909 if (ret < 0)
6910 goto out;
6911 ret = finish_inode_if_needed(sctx, 1);
6912 if (ret < 0)
6913 goto out;
6914 } else {
6915 ret = full_send_tree(sctx);
6916 if (ret < 0)
6917 goto out;
6918 }
6919
6920out:
6921 free_recorded_refs(sctx);
6922 return ret;
6923}
6924
/*
 * Send processes the commit roots of the involved subvolumes. If any of those
 * roots was changed after the last transaction commit (its current node
 * differs from its commit root), commit a transaction first so that send sees
 * a consistent, up to date view of every root it uses.
 */
6938static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
6939{
6940 int i;
6941 struct btrfs_trans_handle *trans = NULL;
6942
6943again:
6944 if (sctx->parent_root &&
6945 sctx->parent_root->node != sctx->parent_root->commit_root)
6946 goto commit_trans;
6947
6948 for (i = 0; i < sctx->clone_roots_cnt; i++)
6949 if (sctx->clone_roots[i].root->node !=
6950 sctx->clone_roots[i].root->commit_root)
6951 goto commit_trans;
6952
6953 if (trans)
6954 return btrfs_end_transaction(trans);
6955
6956 return 0;
6957
6958commit_trans:
 /* Use any root, all fs roots will be committed with the transaction. */
6960 if (!trans) {
6961 trans = btrfs_join_transaction(sctx->send_root);
6962 if (IS_ERR(trans))
6963 return PTR_ERR(trans);
6964 goto again;
6965 }
6966
6967 return btrfs_commit_transaction(trans);
6968}
6969
/*
 * Flush all delalloc and wait for ordered extents on every root used by this
 * send operation (parent and clone roots), so that the commit roots used for
 * the send contain all the written data. Must be followed by a call to
 * ensure_commit_roots_uptodate().
 */
6978static int flush_delalloc_roots(struct send_ctx *sctx)
6979{
6980 struct btrfs_root *root = sctx->parent_root;
6981 int ret;
6982 int i;
6983
6984 if (root) {
6985 ret = btrfs_start_delalloc_snapshot(root);
6986 if (ret)
6987 return ret;
6988 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
6989 }
6990
6991 for (i = 0; i < sctx->clone_roots_cnt; i++) {
6992 root = sctx->clone_roots[i].root;
6993 ret = btrfs_start_delalloc_snapshot(root);
6994 if (ret)
6995 return ret;
6996 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
6997 }
6998
6999 return 0;
7000}
7001
7002static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
7003{
7004 spin_lock(&root->root_item_lock);
7005 root->send_in_progress--;
7006
 /*
  * Not much left to do if the counter went negative: we do not know why
  * it is unbalanced and can not blindly reset it to 0.
  */
7010 if (root->send_in_progress < 0)
7011 btrfs_err(root->fs_info,
7012 "send_in_progress unbalanced %d root %llu",
7013 root->send_in_progress, root->root_key.objectid);
7014 spin_unlock(&root->root_item_lock);
7015}
7016
7017static void dedupe_in_progress_warn(const struct btrfs_root *root)
7018{
7019 btrfs_warn_rl(root->fs_info,
7020"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
7021 root->root_key.objectid, root->dedupe_in_progress);
7022}
7023
7024long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
7025{
7026 int ret = 0;
7027 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
7028 struct btrfs_fs_info *fs_info = send_root->fs_info;
7029 struct btrfs_root *clone_root;
7030 struct btrfs_key key;
7031 struct send_ctx *sctx = NULL;
7032 u32 i;
7033 u64 *clone_sources_tmp = NULL;
7034 int clone_sources_to_rollback = 0;
7035 unsigned alloc_size;
7036 int sort_clone_roots = 0;
7037 int index;
7038
7039 if (!capable(CAP_SYS_ADMIN))
7040 return -EPERM;
7041
 /*
  * The subvolume must remain read-only during send, protect against
  * making it RW. This also protects against deletion.
  */
7046 spin_lock(&send_root->root_item_lock);
7047 if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
7048 dedupe_in_progress_warn(send_root);
7049 spin_unlock(&send_root->root_item_lock);
7050 return -EAGAIN;
7051 }
7052 send_root->send_in_progress++;
7053 spin_unlock(&send_root->root_item_lock);
7054
 /*
  * Orphan cleanup is done when the root is looked up, so it should
  * already be complete by the time we get here.
  */
7059 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
7060
 /*
  * Userspace tools do the checks and warn the user if the subvolume is
  * not read-only.
  */
7065 if (!btrfs_root_readonly(send_root)) {
7066 ret = -EPERM;
7067 goto out;
7068 }
7069
 /*
  * Check that we do not overflow the later allocation of
  * clone_sources_count + 1 clone_root entries.
  */
7075 if (arg->clone_sources_count >
7076 ULONG_MAX / sizeof(struct clone_root) - 1) {
7077 ret = -EINVAL;
7078 goto out;
7079 }
7080
7081 if (!access_ok(arg->clone_sources,
7082 sizeof(*arg->clone_sources) *
7083 arg->clone_sources_count)) {
7084 ret = -EFAULT;
7085 goto out;
7086 }
7087
7088 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
7089 ret = -EINVAL;
7090 goto out;
7091 }
7092
7093 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
7094 if (!sctx) {
7095 ret = -ENOMEM;
7096 goto out;
7097 }
7098
7099 INIT_LIST_HEAD(&sctx->new_refs);
7100 INIT_LIST_HEAD(&sctx->deleted_refs);
7101 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
7102 INIT_LIST_HEAD(&sctx->name_cache_list);
7103
7104 sctx->flags = arg->flags;
7105
7106 sctx->send_filp = fget(arg->send_fd);
7107 if (!sctx->send_filp) {
7108 ret = -EBADF;
7109 goto out;
7110 }
7111
7112 sctx->send_root = send_root;
 /*
  * Unlikely but possible: the subvolume may already be marked for
  * deletion (its directory entry removal can lag behind), in which case
  * we must not start a send on it.
  */
7117 if (btrfs_root_dead(sctx->send_root)) {
7118 ret = -EPERM;
7119 goto out;
7120 }
7121
7122 sctx->clone_roots_cnt = arg->clone_sources_count;
7123
7124 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
7125 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
7126 if (!sctx->send_buf) {
7127 ret = -ENOMEM;
7128 goto out;
7129 }
7130
7131 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
7132 if (!sctx->read_buf) {
7133 ret = -ENOMEM;
7134 goto out;
7135 }
7136
7137 sctx->pending_dir_moves = RB_ROOT;
7138 sctx->waiting_dir_moves = RB_ROOT;
7139 sctx->orphan_dirs = RB_ROOT;
7140
7141 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
7142
7143 sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
7144 if (!sctx->clone_roots) {
7145 ret = -ENOMEM;
7146 goto out;
7147 }
7148
7149 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
7150
7151 if (arg->clone_sources_count) {
7152 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
7153 if (!clone_sources_tmp) {
7154 ret = -ENOMEM;
7155 goto out;
7156 }
7157
7158 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
7159 alloc_size);
7160 if (ret) {
7161 ret = -EFAULT;
7162 goto out;
7163 }
7164
7165 for (i = 0; i < arg->clone_sources_count; i++) {
7166 key.objectid = clone_sources_tmp[i];
7167 key.type = BTRFS_ROOT_ITEM_KEY;
7168 key.offset = (u64)-1;
7169
7170 index = srcu_read_lock(&fs_info->subvol_srcu);
7171
7172 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
7173 if (IS_ERR(clone_root)) {
7174 srcu_read_unlock(&fs_info->subvol_srcu, index);
7175 ret = PTR_ERR(clone_root);
7176 goto out;
7177 }
7178 spin_lock(&clone_root->root_item_lock);
7179 if (!btrfs_root_readonly(clone_root) ||
7180 btrfs_root_dead(clone_root)) {
7181 spin_unlock(&clone_root->root_item_lock);
7182 srcu_read_unlock(&fs_info->subvol_srcu, index);
7183 ret = -EPERM;
7184 goto out;
7185 }
7186 if (clone_root->dedupe_in_progress) {
7187 dedupe_in_progress_warn(clone_root);
7188 spin_unlock(&clone_root->root_item_lock);
7189 srcu_read_unlock(&fs_info->subvol_srcu, index);
7190 ret = -EAGAIN;
7191 goto out;
7192 }
7193 clone_root->send_in_progress++;
7194 spin_unlock(&clone_root->root_item_lock);
7195 srcu_read_unlock(&fs_info->subvol_srcu, index);
7196
7197 sctx->clone_roots[i].root = clone_root;
7198 clone_sources_to_rollback = i + 1;
7199 }
7200 kvfree(clone_sources_tmp);
7201 clone_sources_tmp = NULL;
7202 }
7203
7204 if (arg->parent_root) {
7205 key.objectid = arg->parent_root;
7206 key.type = BTRFS_ROOT_ITEM_KEY;
7207 key.offset = (u64)-1;
7208
7209 index = srcu_read_lock(&fs_info->subvol_srcu);
7210
7211 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
7212 if (IS_ERR(sctx->parent_root)) {
7213 srcu_read_unlock(&fs_info->subvol_srcu, index);
7214 ret = PTR_ERR(sctx->parent_root);
7215 goto out;
7216 }
7217
7218 spin_lock(&sctx->parent_root->root_item_lock);
7219 sctx->parent_root->send_in_progress++;
7220 if (!btrfs_root_readonly(sctx->parent_root) ||
7221 btrfs_root_dead(sctx->parent_root)) {
7222 spin_unlock(&sctx->parent_root->root_item_lock);
7223 srcu_read_unlock(&fs_info->subvol_srcu, index);
7224 ret = -EPERM;
7225 goto out;
7226 }
7227 if (sctx->parent_root->dedupe_in_progress) {
7228 dedupe_in_progress_warn(sctx->parent_root);
7229 spin_unlock(&sctx->parent_root->root_item_lock);
7230 srcu_read_unlock(&fs_info->subvol_srcu, index);
7231 ret = -EAGAIN;
7232 goto out;
7233 }
7234 spin_unlock(&sctx->parent_root->root_item_lock);
7235
7236 srcu_read_unlock(&fs_info->subvol_srcu, index);
7237 }
7238
 /*
  * Clones from send_root itself are allowed, but only if the clone
  * source is behind the current send position. This is checked while
  * searching for possible clone sources.
  */
7244 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
7245
 /* Sort now, we do a bsearch on this array later on. */
7247 sort(sctx->clone_roots, sctx->clone_roots_cnt,
7248 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
7249 NULL);
7250 sort_clone_roots = 1;
7251
7252 ret = flush_delalloc_roots(sctx);
7253 if (ret)
7254 goto out;
7255
7256 ret = ensure_commit_roots_uptodate(sctx);
7257 if (ret)
7258 goto out;
7259
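 /*
  * Send and balance must not run at the same time: bail out if a balance
  * is in progress, otherwise record that a send is in progress.
  */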
7260 mutex_lock(&fs_info->balance_mutex);
7261 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
7262 mutex_unlock(&fs_info->balance_mutex);
7263 btrfs_warn_rl(fs_info,
7264 "cannot run send because a balance operation is in progress");
7265 ret = -EAGAIN;
7266 goto out;
7267 }
7268 fs_info->send_in_progress++;
7269 mutex_unlock(&fs_info->balance_mutex);
7270
7271 current->journal_info = BTRFS_SEND_TRANS_STUB;
7272 ret = send_subvol(sctx);
7273 current->journal_info = NULL;
7274 mutex_lock(&fs_info->balance_mutex);
7275 fs_info->send_in_progress--;
7276 mutex_unlock(&fs_info->balance_mutex);
7277 if (ret < 0)
7278 goto out;
7279
7280 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
7281 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
7282 if (ret < 0)
7283 goto out;
7284 ret = send_cmd(sctx);
7285 if (ret < 0)
7286 goto out;
7287 }
7288
7289out:
7290 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
7291 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
7292 struct rb_node *n;
7293 struct pending_dir_move *pm;
7294
7295 n = rb_first(&sctx->pending_dir_moves);
7296 pm = rb_entry(n, struct pending_dir_move, node);
7297 while (!list_empty(&pm->list)) {
7298 struct pending_dir_move *pm2;
7299
7300 pm2 = list_first_entry(&pm->list,
7301 struct pending_dir_move, list);
7302 free_pending_move(sctx, pm2);
7303 }
7304 free_pending_move(sctx, pm);
7305 }
7306
7307 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
7308 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
7309 struct rb_node *n;
7310 struct waiting_dir_move *dm;
7311
7312 n = rb_first(&sctx->waiting_dir_moves);
7313 dm = rb_entry(n, struct waiting_dir_move, node);
7314 rb_erase(&dm->node, &sctx->waiting_dir_moves);
7315 kfree(dm);
7316 }
7317
7318 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
7319 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
7320 struct rb_node *n;
7321 struct orphan_dir_info *odi;
7322
7323 n = rb_first(&sctx->orphan_dirs);
7324 odi = rb_entry(n, struct orphan_dir_info, node);
7325 free_orphan_dir_info(sctx, odi);
7326 }
7327
7328 if (sort_clone_roots) {
7329 for (i = 0; i < sctx->clone_roots_cnt; i++)
7330 btrfs_root_dec_send_in_progress(
7331 sctx->clone_roots[i].root);
7332 } else {
7333 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
7334 btrfs_root_dec_send_in_progress(
7335 sctx->clone_roots[i].root);
7336
7337 btrfs_root_dec_send_in_progress(send_root);
7338 }
7339 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
7340 btrfs_root_dec_send_in_progress(sctx->parent_root);
7341
7342 kvfree(clone_sources_tmp);
7343
7344 if (sctx) {
7345 if (sctx->send_filp)
7346 fput(sctx->send_filp);
7347
7348 kvfree(sctx->clone_roots);
7349 kvfree(sctx->send_buf);
7350 kvfree(sctx->read_buf);
7351
7352 name_cache_free(sctx);
7353
7354 kfree(sctx);
7355 }
7356
7357 return ret;
7358}
7359