1
2
3
4
5
6#include <linux/sched.h>
7#include <linux/slab.h>
8#include <linux/blkdev.h>
9#include <linux/list_sort.h>
10#include <linux/iversion.h>
11#include "misc.h"
12#include "ctree.h"
13#include "tree-log.h"
14#include "disk-io.h"
15#include "locking.h"
16#include "print-tree.h"
17#include "backref.h"
18#include "compression.h"
19#include "qgroup.h"
20#include "block-group.h"
21#include "space-info.h"
22#include "zoned.h"
23
24
25
26
27
28
29
/*
 * Logging modes (the "inode_only" argument of btrfs_log_inode()).  The
 * precise semantics of each mode are established by the code that tests
 * these values later in this file; the names are the contract visible here.
 */
enum {
	LOG_INODE_ALL,		/* log the inode together with all of its items */
	LOG_INODE_EXISTS,	/* log only enough to record the inode's existence */
	LOG_OTHER_INODE,	/* logging an inode other than the fsync target */
	LOG_OTHER_INODE_ALL,	/* like LOG_OTHER_INODE, but logging everything */
};
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
/*
 * Stages of a log tree walk during replay.  A log tree is walked multiple
 * times, once per stage, so that e.g. all extents are pinned before any
 * inode items are replayed.
 */
enum {
	LOG_WALK_PIN_ONLY,	  /* only pin down the tree blocks of the log */
	LOG_WALK_REPLAY_INODES,	  /* replay inode items */
	LOG_WALK_REPLAY_DIR_INDEX,/* replay directory index items */
	LOG_WALK_REPLAY_ALL,	  /* replay everything else */
};
95
/*
 * Forward declarations for helpers defined later in this file; the log
 * writing and log replay paths call into each other.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
static void wait_log_commit(struct btrfs_root *root, int transid);
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
/*
 * Start a log transaction on @root: lazily create the fs-wide log root tree
 * and this root's log tree if they do not exist yet, bump the log writer
 * count and, if @ctx is given (and we are not just logging a new name),
 * register it on the current log transaction's context list.
 *
 * Returns 0 on success, -EAGAIN when the caller must fall back to a full
 * transaction commit (full-commit requested, or zoned-mode restrictions),
 * or another negative errno on failure.
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;
	const bool zoned = btrfs_is_zoned(fs_info);
	int ret = 0;
	bool created = false;

	/*
	 * Create the log root tree lazily, serialized by the tree root's
	 * log_mutex so only one task initializes it.  The HAS_LOG_TREE bit
	 * lets later callers skip taking the mutex at all.
	 */
	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
		mutex_lock(&tree_root->log_mutex);
		if (!fs_info->log_root_tree) {
			ret = btrfs_init_log_root_tree(trans, fs_info);
			if (!ret) {
				set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
				created = true;
			}
		}
		mutex_unlock(&tree_root->log_mutex);
		if (ret)
			return ret;
	}

	mutex_lock(&root->log_mutex);

again:
	if (root->log_root) {
		/* index of the previous log transaction for this root */
		int index = (root->log_transid + 1) % 2;

		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * On zoned filesystems, wait for any in-flight commit of the
		 * previous log transaction before joining this one.
		 */
		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}

		/* track whether more than one task is writing to this log */
		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		/*
		 * On zoned filesystems, refuse to attach to a log root tree
		 * we did not create ourselves in this call — presumably to
		 * avoid racing with a concurrent commit of it.
		 * NOTE(review): rationale inferred from the zoned checks
		 * above; confirm against the zoned-mode design notes.
		 */
		if (zoned && !created) {
			ret = -EAGAIN;
			goto out;
		}

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_writers);
	if (ctx && !ctx->logging_new_name) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
220
221
222
223
224
225
/*
 * Join an already-running log transaction on @root, if there is one, by
 * bumping the log writer count.  Unlike start_log_trans() this never creates
 * a log tree.
 *
 * Returns 0 if joined, -ENOENT if the root currently has no log tree.
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	const bool zoned = btrfs_is_zoned(root->fs_info);
	int ret = -ENOENT;

	/* cheap unlocked check: no log tree was ever created for this root */
	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
		return ret;

	mutex_lock(&root->log_mutex);
again:
	if (root->log_root) {
		/* index of the previous log transaction for this root */
		int index = (root->log_transid + 1) % 2;

		ret = 0;
		/*
		 * On zoned filesystems, wait out any in-flight commit of the
		 * previous log transaction before joining (same dance as in
		 * start_log_trans()).
		 */
		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
249
250
251
252
253
254
/*
 * Pin the current log transaction of @root by taking an extra log writer
 * reference, preventing the log commit from finishing until
 * btrfs_end_log_trans() drops it.  The caller must already be attached to
 * a running log transaction.
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	atomic_inc(&root->log_writers);
}
259
260
261
262
263
/*
 * Drop one log writer reference on @root and, when it was the last one,
 * wake up anybody waiting in the log commit path for writers to finish.
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
271
272static int btrfs_write_tree_block(struct extent_buffer *buf)
273{
274 return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
275 buf->start + buf->len - 1);
276}
277
278static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
279{
280 filemap_fdatawait_range(buf->pages[0]->mapping,
281 buf->start, buf->start + buf->len - 1);
282}
283
284
285
286
287
288
289
/*
 * Control structure handed to the log tree walk routines; the flags select
 * what process_func (e.g. process_one_buffer()) does with each block.
 */
struct walk_control {
	/*
	 * free the extent buffers when done walking
	 * NOTE(review): the code acting on this flag is outside this chunk.
	 */
	int free;

	/* start writeback on each block we visit (see process_one_buffer) */
	int write;

	/* wait for writeback of each block to finish */
	int wait;

	/* pin the blocks down for log replay (btrfs_pin_extent_for_log_replay) */
	int pin;

	/* current replay stage, one of the LOG_WALK_* values */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed.
	 * NOTE(review): consumers of this flag are outside this chunk.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying into */
	struct btrfs_root *replay_dest;

	/* the transaction handle used for all replay operations */
	struct btrfs_trans_handle *trans;

	/*
	 * Callback invoked for every extent buffer visited during the walk;
	 * its behavior is governed by the flags above.
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
335
336
337
338
/*
 * process_func callback used while walking a log tree: depending on the
 * walk_control flags, pin the block for replay, exclude its logged extents,
 * start writeback and/or wait for writeback.
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * On mixed block group filesystems the buffer must be read (and thus
	 * verified) up front, before it may be pinned below.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		/* leaves (level 0) may reference data extents to exclude */
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
/*
 * Copy a single item from the log tree leaf @eb/@slot into @root at @key,
 * overwriting any existing item there.  If an identical item already exists
 * nothing is done; otherwise the destination item is created or resized to
 * match and the bytes are copied over, with special-casing for inode items
 * (nbytes/isize are maintained by replay itself, not taken from the log).
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	/* replaying into a subvolume (not rewriting the log tree itself)? */
	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);

		/* identical items: nothing to do */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * An inode item exists but its content differs.  nbytes is
		 * tracked by replay itself, so carry the existing value over
		 * into the source (log) copy before it is written below.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * Directory sizes are rebuilt from the replayed dir
			 * items, so start from 0 rather than the logged size.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * No inode item exists yet: nbytes starts at 0 and is updated
		 * as extents are replayed.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/* same reasoning as above for directory sizes */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* an item already exists: make sure it has the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/*
	 * A logged inode item with generation 0 means the inode was only
	 * logged to record its existence (LOG_INODE_EXISTS); don't clobber
	 * the existing item's fields with it, except possibly i_size for
	 * regular files.
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files, take the logged i_size when it
			 * is non-zero so an fsynced size is not lost.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0)
				btrfs_set_inode_size(dst_eb, dst_item, ino_size);
			goto no_copy;
		}

		/* keep the directory size replay has rebuilt so far */
		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
586
587
588
589
590
591static noinline struct inode *read_one_inode(struct btrfs_root *root,
592 u64 objectid)
593{
594 struct inode *inode;
595
596 inode = btrfs_iget(root->fs_info->sb, objectid, root);
597 if (IS_ERR(inode))
598 inode = NULL;
599 return inode;
600}
601
602
603
604
605
606
607
608
609
610
611
612
613
/*
 * Replay a single file extent item from the log tree leaf @eb/@slot into
 * @root: drop any overlapping extents in the subvolume, insert the logged
 * extent item, fix up extent references and checksums for real (non-hole)
 * extents, and finally update the inode's byte accounting.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * A hole extent (disk_bytenr == 0) takes no space, so it
		 * must not be counted in the inode's nbytes.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		/* unknown extent type: silently skip it */
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Check whether an identical extent item already exists at this file
	 * offset; if so the extent was already replayed (or never went away)
	 * and there is nothing to do.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
				       btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/* byte-identical items: nothing to replay */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any extents overlapping the logged range */
	drop_args.start = start;
	drop_args.end = extent_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		/* with NO_HOLES there are no explicit hole extent items */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Record the extent for qgroup accounting so the replayed
		 * reference is charged to the right quota group.
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * Does the extent item for this data extent already
			 * exist in the extent tree?
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret < 0) {
				goto out;
			} else if (ret == 0) {
				/* it exists: just add one more reference */
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * It does not exist: insert the extent item
				 * and its reference as logged.
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			/*
			 * Compressed extents are checksummed over the whole
			 * on-disk extent; otherwise only over the referenced
			 * part of it.
			 */
			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;

			/*
			 * Delete any existing csums in the replayed range
			 * before inserting the ones from the log, so stale
			 * or overlapping checksum items cannot survive.
			 * The list entries are freed on all paths, even
			 * after an error, to avoid leaking them.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans,
							      fs_info->csum_root,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
							fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			/* hole extent: nothing to reference or checksum */
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy: just overwrite the item */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;

update_inode:
	btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
	if (inode)
		iput(inode);
	return ret;
}
886
887
888
889
890
891
892
893
894
/*
 * Remove the directory entry described by @di (an item in path->nodes[0])
 * from @dir, unlinking the target inode.  The target is first linked into
 * the fixup dir so its link count is corrected later if it ends up with no
 * other names.
 *
 * @path is released as a side effect.  Returns 0 on success or a negative
 * errno.
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	/* copy the name out before the path (and thus @di) is released */
	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		/* inode is NULL here; iput(NULL) below is a no-op */
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}
940
941
942
943
944
945
946
947
/*
 * Check whether directory @dirid contains an entry @name pointing at
 * @objectid, both as a dir index item at @index and as a dir item.
 *
 * Returns 1 if both entries exist and point at @objectid, 0 otherwise,
 * or a negative errno on lookup failure.  @path is released on return.
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int ret = 0;

	/* first: the index entry (DIR_INDEX) must exist and match */
	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}

	/* second: the name entry (DIR_ITEM) must exist and match too */
	btrfs_release_path(path);
	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid == objectid)
			ret = 1;
	}
out:
	btrfs_release_path(path);
	return ret;
}
984
985
986
987
988
989
990
991
992
993
994
995static noinline int backref_in_log(struct btrfs_root *log,
996 struct btrfs_key *key,
997 u64 ref_objectid,
998 const char *name, int namelen)
999{
1000 struct btrfs_path *path;
1001 int ret;
1002
1003 path = btrfs_alloc_path();
1004 if (!path)
1005 return -ENOMEM;
1006
1007 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
1008 if (ret < 0) {
1009 goto out;
1010 } else if (ret == 1) {
1011 ret = 0;
1012 goto out;
1013 }
1014
1015 if (key->type == BTRFS_INODE_EXTREF_KEY)
1016 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1017 path->slots[0],
1018 ref_objectid,
1019 name, namelen);
1020 else
1021 ret = !!btrfs_find_name_in_backref(path->nodes[0],
1022 path->slots[0],
1023 name, namelen);
1024out:
1025 btrfs_free_path(path);
1026 return ret;
1027}
1028
1029static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
1030 struct btrfs_root *root,
1031 struct btrfs_path *path,
1032 struct btrfs_root *log_root,
1033 struct btrfs_inode *dir,
1034 struct btrfs_inode *inode,
1035 u64 inode_objectid, u64 parent_objectid,
1036 u64 ref_index, char *name, int namelen,
1037 int *search_done)
1038{
1039 int ret;
1040 char *victim_name;
1041 int victim_name_len;
1042 struct extent_buffer *leaf;
1043 struct btrfs_dir_item *di;
1044 struct btrfs_key search_key;
1045 struct btrfs_inode_extref *extref;
1046
1047again:
1048
1049 search_key.objectid = inode_objectid;
1050 search_key.type = BTRFS_INODE_REF_KEY;
1051 search_key.offset = parent_objectid;
1052 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1053 if (ret == 0) {
1054 struct btrfs_inode_ref *victim_ref;
1055 unsigned long ptr;
1056 unsigned long ptr_end;
1057
1058 leaf = path->nodes[0];
1059
1060
1061
1062
1063 if (search_key.objectid == search_key.offset)
1064 return 1;
1065
1066
1067
1068
1069
1070 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1071 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1072 while (ptr < ptr_end) {
1073 victim_ref = (struct btrfs_inode_ref *)ptr;
1074 victim_name_len = btrfs_inode_ref_name_len(leaf,
1075 victim_ref);
1076 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1077 if (!victim_name)
1078 return -ENOMEM;
1079
1080 read_extent_buffer(leaf, victim_name,
1081 (unsigned long)(victim_ref + 1),
1082 victim_name_len);
1083
1084 ret = backref_in_log(log_root, &search_key,
1085 parent_objectid, victim_name,
1086 victim_name_len);
1087 if (ret < 0) {
1088 kfree(victim_name);
1089 return ret;
1090 } else if (!ret) {
1091 inc_nlink(&inode->vfs_inode);
1092 btrfs_release_path(path);
1093
1094 ret = btrfs_unlink_inode(trans, root, dir, inode,
1095 victim_name, victim_name_len);
1096 kfree(victim_name);
1097 if (ret)
1098 return ret;
1099 ret = btrfs_run_delayed_items(trans);
1100 if (ret)
1101 return ret;
1102 *search_done = 1;
1103 goto again;
1104 }
1105 kfree(victim_name);
1106
1107 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1108 }
1109
1110
1111
1112
1113
1114 *search_done = 1;
1115 }
1116 btrfs_release_path(path);
1117
1118
1119 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1120 inode_objectid, parent_objectid, 0,
1121 0);
1122 if (!IS_ERR_OR_NULL(extref)) {
1123 u32 item_size;
1124 u32 cur_offset = 0;
1125 unsigned long base;
1126 struct inode *victim_parent;
1127
1128 leaf = path->nodes[0];
1129
1130 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1131 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1132
1133 while (cur_offset < item_size) {
1134 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1135
1136 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1137
1138 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1139 goto next;
1140
1141 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1142 if (!victim_name)
1143 return -ENOMEM;
1144 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1145 victim_name_len);
1146
1147 search_key.objectid = inode_objectid;
1148 search_key.type = BTRFS_INODE_EXTREF_KEY;
1149 search_key.offset = btrfs_extref_hash(parent_objectid,
1150 victim_name,
1151 victim_name_len);
1152 ret = backref_in_log(log_root, &search_key,
1153 parent_objectid, victim_name,
1154 victim_name_len);
1155 if (ret < 0) {
1156 return ret;
1157 } else if (!ret) {
1158 ret = -ENOENT;
1159 victim_parent = read_one_inode(root,
1160 parent_objectid);
1161 if (victim_parent) {
1162 inc_nlink(&inode->vfs_inode);
1163 btrfs_release_path(path);
1164
1165 ret = btrfs_unlink_inode(trans, root,
1166 BTRFS_I(victim_parent),
1167 inode,
1168 victim_name,
1169 victim_name_len);
1170 if (!ret)
1171 ret = btrfs_run_delayed_items(
1172 trans);
1173 }
1174 iput(victim_parent);
1175 kfree(victim_name);
1176 if (ret)
1177 return ret;
1178 *search_done = 1;
1179 goto again;
1180 }
1181 kfree(victim_name);
1182next:
1183 cur_offset += victim_name_len + sizeof(*extref);
1184 }
1185 *search_done = 1;
1186 }
1187 btrfs_release_path(path);
1188
1189
1190 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1191 ref_index, name, namelen, 0);
1192 if (IS_ERR(di)) {
1193 return PTR_ERR(di);
1194 } else if (di) {
1195 ret = drop_one_dir_item(trans, root, path, dir, di);
1196 if (ret)
1197 return ret;
1198 }
1199 btrfs_release_path(path);
1200
1201
1202 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1203 name, namelen, 0);
1204 if (IS_ERR(di)) {
1205 return PTR_ERR(di);
1206 } else if (di) {
1207 ret = drop_one_dir_item(trans, root, path, dir, di);
1208 if (ret)
1209 return ret;
1210 }
1211 btrfs_release_path(path);
1212
1213 return 0;
1214}
1215
1216static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1217 u32 *namelen, char **name, u64 *index,
1218 u64 *parent_objectid)
1219{
1220 struct btrfs_inode_extref *extref;
1221
1222 extref = (struct btrfs_inode_extref *)ref_ptr;
1223
1224 *namelen = btrfs_inode_extref_name_len(eb, extref);
1225 *name = kmalloc(*namelen, GFP_NOFS);
1226 if (*name == NULL)
1227 return -ENOMEM;
1228
1229 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1230 *namelen);
1231
1232 if (index)
1233 *index = btrfs_inode_extref_index(eb, extref);
1234 if (parent_objectid)
1235 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1236
1237 return 0;
1238}
1239
1240static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1241 u32 *namelen, char **name, u64 *index)
1242{
1243 struct btrfs_inode_ref *ref;
1244
1245 ref = (struct btrfs_inode_ref *)ref_ptr;
1246
1247 *namelen = btrfs_inode_ref_name_len(eb, ref);
1248 *name = kmalloc(*namelen, GFP_NOFS);
1249 if (*name == NULL)
1250 return -ENOMEM;
1251
1252 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1253
1254 if (index)
1255 *index = btrfs_inode_ref_index(eb, ref);
1256
1257 return 0;
1258}
1259
1260
1261
1262
1263
1264
1265
1266
/*
 * While replaying the ref/extref item at @log_eb/@log_slot for @inode,
 * remove from the subvolume any name (under the same key) that is NOT
 * present in the logged item — such names were removed before the fsync
 * and must not survive replay.
 *
 * Returns 0 on success or a negative errno.  @path is released on return.
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	/* re-find the subvolume's item; it changes after each unlink */
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		/* no such item in the subvolume: nothing to prune */
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			/* for INODE_REF the parent is encoded in the key */
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		/* is this name also present in the logged item? */
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			/* the name is gone from the log: unlink it */
			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			/* the item was modified; restart from scratch */
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
 out:
	btrfs_release_path(path);
	return ret;
}
1348
1349static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
1350 const u8 ref_type, const char *name,
1351 const int namelen)
1352{
1353 struct btrfs_key key;
1354 struct btrfs_path *path;
1355 const u64 parent_id = btrfs_ino(BTRFS_I(dir));
1356 int ret;
1357
1358 path = btrfs_alloc_path();
1359 if (!path)
1360 return -ENOMEM;
1361
1362 key.objectid = btrfs_ino(BTRFS_I(inode));
1363 key.type = ref_type;
1364 if (key.type == BTRFS_INODE_REF_KEY)
1365 key.offset = parent_id;
1366 else
1367 key.offset = btrfs_extref_hash(parent_id, name, namelen);
1368
1369 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
1370 if (ret < 0)
1371 goto out;
1372 if (ret > 0) {
1373 ret = 0;
1374 goto out;
1375 }
1376 if (key.type == BTRFS_INODE_EXTREF_KEY)
1377 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1378 path->slots[0], parent_id, name, namelen);
1379 else
1380 ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
1381 name, namelen);
1382
1383out:
1384 btrfs_free_path(path);
1385 return ret;
1386}
1387
/*
 * Add the link (@name in @dir -> @inode at @ref_index), first unlinking any
 * existing entry in @dir with the same name that points at a different
 * inode (a name that was reused before the fsync being replayed).
 *
 * Returns 0 on success or a negative errno.
 */
static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		/* no conflicting entry: just add the link */
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * The name already exists but points at some other inode: unlink
	 * that entry so ours can take its place.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;

	/*
	 * If the other inode lost its last link, keep nlink at 1 so later
	 * replay stages can still operate on it (the fixup dir handles the
	 * final link count).
	 * NOTE(review): exact rationale lives with the fixup-dir code,
	 * which is outside this chunk — confirm there.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}
1448
1449
1450
1451
1452
1453
1454
/*
 * Replay one INODE_REF or INODE_EXTREF item from the log tree leaf
 * @eb/@slot: for every name in the item make sure the corresponding
 * directory entry exists in the subvolume, removing any conflicting old
 * names/entries first, then prune names no longer in the log and finally
 * copy the ref item itself into the subvolume.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;	/* 1 when replaying extrefs */
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		/* for extrefs the parent is stored in the item itself */
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		/* for old-style refs the parent is the key offset */
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories, but
	 * having them in the subvolume is required for the replayed link to
	 * resolve; fail if the parent dir cannot be read.
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent_objectid may have changed for this extref,
			 * so the dir may need to be re-read (it is released
			 * at the bottom of the loop when log_ref_ver).
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
				   btrfs_ino(BTRFS_I(inode)), ref_index,
				   name, namelen);
		if (ret < 0) {
			goto out;
		} else if (ret == 0) {
			/*
			 * The link does not exist yet.  First clean out any
			 * conflicting old refs / dir entries; this search
			 * only needs to happen once per item.
			 */
			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					/* ret == 1: ref to self, all done */
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If the ref (but not the dir entry) already exists,
			 * unlink it first so btrfs_add_link() below starts
			 * from a clean state.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * keep nlink at 1 if the unlink dropped it
				 * to 0 so the inode survives until the link
				 * is re-added just below
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
			if (ret)
				goto out;
		}

		/* advance to the next (ext)ref inside the item */
		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before copying the ref item, prune names in the subvolume's item
	 * that are not present in the logged one — they were removed before
	 * the fsync and must not survive replay.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
1629
/*
 * Count how many directory links the inode's extended ref (extref) items
 * describe.  Returns the number of names found, or a negative errno on
 * search failure (a final -ENOENT from btrfs_find_one_extref() just means
 * "no more extref items" and is not treated as an error).
 */
static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		/* One extref item can pack several (parent, name) entries. */
		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		/* Resume the search just past the item we processed. */
		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
1673
1674static int count_inode_refs(struct btrfs_root *root,
1675 struct btrfs_inode *inode, struct btrfs_path *path)
1676{
1677 int ret;
1678 struct btrfs_key key;
1679 unsigned int nlink = 0;
1680 unsigned long ptr;
1681 unsigned long ptr_end;
1682 int name_len;
1683 u64 ino = btrfs_ino(inode);
1684
1685 key.objectid = ino;
1686 key.type = BTRFS_INODE_REF_KEY;
1687 key.offset = (u64)-1;
1688
1689 while (1) {
1690 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1691 if (ret < 0)
1692 break;
1693 if (ret > 0) {
1694 if (path->slots[0] == 0)
1695 break;
1696 path->slots[0]--;
1697 }
1698process_slot:
1699 btrfs_item_key_to_cpu(path->nodes[0], &key,
1700 path->slots[0]);
1701 if (key.objectid != ino ||
1702 key.type != BTRFS_INODE_REF_KEY)
1703 break;
1704 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1705 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1706 path->slots[0]);
1707 while (ptr < ptr_end) {
1708 struct btrfs_inode_ref *ref;
1709
1710 ref = (struct btrfs_inode_ref *)ptr;
1711 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1712 ref);
1713 ptr = (unsigned long)(ref + 1) + name_len;
1714 nlink++;
1715 }
1716
1717 if (key.offset == 0)
1718 break;
1719 if (path->slots[0] > 0) {
1720 path->slots[0]--;
1721 goto process_slot;
1722 }
1723 key.offset--;
1724 btrfs_release_path(path);
1725 }
1726 btrfs_release_path(path);
1727
1728 return nlink;
1729}
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
/*
 * Log replay can leave an inode's i_nlink out of sync with the actual ref
 * items in the subvolume tree.  Recount the links from the INODE_REF and
 * INODE_EXTREF items and write the corrected count back.  If the count is
 * zero, replay any pending directory deletes (for directories) and insert
 * an orphan item so the inode gets cleaned up later.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	/* Total links = regular refs + extended refs. */
	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (ret)
			goto out;
	}
	/* Force the next dir-index allocation to re-derive the counter. */
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			/* Unreferenced dir: wipe all of its entries. */
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = btrfs_insert_orphan_item(trans, root, ino);
		/* Already orphaned is fine. */
		if (ret == -EEXIST)
			ret = 0;
	}

out:
	btrfs_free_path(path);
	return ret;
}
1792
/*
 * Process every pending link-count fixup recorded during replay: walk all
 * BTRFS_ORPHAN_ITEM_KEY items under the special
 * BTRFS_TREE_LOG_FIXUP_OBJECTID, delete each one, and fix up the link
 * count of the inode it names (key.offset holds the inode number).
 */
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			ret = 0;
			/* Slot 0 with no match: nothing left to process. */
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* Consume the fixup record before acting on it. */
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			break;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode) {
			ret = -EIO;
			break;
		}

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			break;

		/*
		 * The fixup may have modified the tree (e.g. replayed dir
		 * deletes), so restart the search from the highest offset
		 * rather than trusting the current path position.
		 */
		key.offset = (u64)-1;
	}
	btrfs_release_path(path);
	return ret;
}
1847
1848
1849
1850
1851
1852
1853
/*
 * Record that inode @objectid needs a link-count fixup after replay by
 * inserting an orphan item keyed under BTRFS_TREE_LOG_FIXUP_OBJECTID.
 * On first insertion the inode's link count is bumped (or set to 1) so the
 * inode survives until fixup_inode_link_counts() recomputes the real
 * count; -EEXIST means the fixup was already recorded and is not an error.
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	/* Zero-sized item: its existence alone marks the pending fixup. */
	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	} else if (ret == -EEXIST) {
		ret = 0;
	}
	iput(inode);

	return ret;
}
1887
1888
1889
1890
1891
1892
1893static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1894 struct btrfs_root *root,
1895 u64 dirid, u64 index,
1896 char *name, int name_len,
1897 struct btrfs_key *location)
1898{
1899 struct inode *inode;
1900 struct inode *dir;
1901 int ret;
1902
1903 inode = read_one_inode(root, location->objectid);
1904 if (!inode)
1905 return -ENOENT;
1906
1907 dir = read_one_inode(root, dirid);
1908 if (!dir) {
1909 iput(inode);
1910 return -EIO;
1911 }
1912
1913 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1914 name_len, 1, index);
1915
1916
1917
1918 iput(inode);
1919 iput(dir);
1920 return ret;
1921}
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
/*
 * Replay one directory entry (BTRFS_DIR_ITEM_KEY or BTRFS_DIR_INDEX_KEY)
 * from the log tree into the subvolume tree.
 *
 * Returns < 0 on error, 0 when the entry already matched / needed no new
 * link, and 1 when a new name was added (the caller may then queue a
 * link-count fixup for the target).
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	bool exists;
	int ret;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
		   name_len);

	/* Does the inode the logged entry points to exist in the fs tree? */
	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	btrfs_release_path(path);
	if (ret < 0)
		goto out;
	exists = (ret == 0);
	ret = 0;

	/* Look up any existing entry for this name in the fs tree. */
	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
				       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption: only dir item/index keys are valid here. */
		ret = -EINVAL;
		goto out;
	}

	if (IS_ERR(dst_di)) {
		ret = PTR_ERR(dst_di);
		goto out;
	} else if (!dst_di) {
		/*
		 * No existing entry with this name.  Only DIR_INDEX keys
		 * trigger the insert path; the corresponding DIR_ITEM is
		 * created along with it by the link insertion.
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* Existing entry already matches the log exactly: nothing to do. */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * The existing entry points at a different inode.  If the logged
	 * target inode doesn't exist, keep the current entry as-is —
	 * NOTE(review): presumably to avoid losing the only remaining
	 * entry for the name; confirm against the upstream comment.
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		/* Directory i_size accounts 2 bytes per name byte. */
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	/*
	 * If the name is also recorded in the log as an INODE_REF for this
	 * (inode, dir) pair, the link will be created during ref replay;
	 * skip adding it here to avoid a duplicate entry.
	 */
	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_REF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* Ref replay will instantiate the dentry. */
		ret = 0;
		update_size = false;
		goto out;
	}

	/* Same check for the extended ref (extref) format. */
	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_EXTREF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
			     name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* Extref replay will instantiate the dentry. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	/* Missing target (-ENOENT) or already present (-EEXIST) are OK. */
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
2084
2085
2086
2087
2088
2089
2090
/*
 * Replay every directory entry packed in the dir item at @slot of @eb.
 * For each name that replay_one_name() newly added (return value 1) and
 * whose target is not a directory, queue a link-count fixup for the
 * target inode, since its refs may never be replayed.
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;	/* allocated lazily below */

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		/* Step past this entry: struct header plus inline name. */
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * A new link was added for this name (ret == 1).  The
		 * target's i_nlink may be stale until / unless its inode
		 * refs are replayed, so record it in the fixup dir; the
		 * link count is recomputed at the end of replay.
		 * Directory targets are excluded — NOTE(review):
		 * presumably because a directory has a single link and
		 * its count is handled elsewhere; confirm upstream.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
/*
 * Find a dir log range item (of @key_type) for directory @dirid that
 * covers or follows *start_ret.
 *
 * Returns 0 and sets *start_ret/*end_ret to the range found, 1 when no
 * such range exists at or after *start_ret, or a negative errno on
 * search failure.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	/* (u64)-1 marks that the whole key space was already covered. */
	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* No exact match: look at the preceding item. */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	/* On an exact match "key" is already correct; only re-read otherwise. */
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	/* Does the range [key.offset, found_end] contain *start_ret? */
	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* The previous item didn't cover us; try the next one. */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
2247
2248
2249
2250
2251
2252
/*
 * Check whether every name packed in the dir item at path/slots[0] (in the
 * subvolume tree) still exists in the log for directory @dir.  A name
 * missing from the log was deleted before the fsync, so it is unlinked
 * here and its target inode queued for a link-count fixup.  When @log is
 * NULL every name is treated as deleted (used to wipe whole directories).
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				  name_len);
		log_di = NULL;
		/* Look the name up in the log (by item key or index key). */
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (!log_di) {
			/* Name not in the log: it was deleted, so unlink it. */
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			/* Drop both paths before touching inodes/trees. */
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			/* Balance the nlink drop done by the unlink below. */
			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
					BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/*
			 * The unlink changed the tree and we released the
			 * path, so re-search for the dir item and restart
			 * the scan of its (possibly shifted) contents.
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		/* Name exists in the log: keep it and move to the next. */
		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
2355
/*
 * Delete from the subvolume tree every xattr of inode @ino that is not
 * present in the log tree — those xattrs were removed before the fsync.
 * The scan restarts from the current key after each deletion because the
 * deletion may shift leaf contents.
 */
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_root *log,
			      struct btrfs_path *path,
			      const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		/* Past the inode's xattr range: done. */
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		/* One xattr item can pack multiple name/value entries. */
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				/* We found it above, so it must be here. */
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				/* Resume the walk from the deleted key. */
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
/*
 * Remove from directory @dirid every entry that falls inside a logged
 * dir-log range but is no longer present in the log — such entries were
 * deleted before the fsync.  Both DIR_ITEM and DIR_INDEX key spaces are
 * processed (in two passes).  When @del_all is set the whole key space is
 * treated as one range and every entry is removed (used when wiping an
 * unreferenced directory; @log may then be NULL).
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/*
	 * A missing directory means there's nothing to clean up; treat it
	 * as success rather than an error.
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			/* Next logged range that could contain deletions. */
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		/* Walk the subvolume's dir entries inside this range. */
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 1)
					break;
				else if (ret < 0)
					goto out;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			/* Unlink any names in this item missing from the log. */
			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	/* First pass covered DIR_ITEMs; repeat for DIR_INDEX entries. */
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
/*
 * walk_log_tree() callback for replay: process one log tree buffer.  Only
 * leaves are replayed; which item types are handled depends on the current
 * walk stage (inode items first, then dir indexes, then everything else),
 * so the log is walked multiple times with increasing wc->stage.
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen, int level)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen, level, NULL);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	/* Interior nodes carry no items to replay. */
	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* Stage 1: inode items. */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			/*
			 * A logged inode with zero nlink is not linked
			 * anywhere; skip it and ignore all of its other
			 * items for the rest of this leaf (see the
			 * ignore_cur_inode checks below).
			 */
			if (btrfs_inode_nlink(eb, inode_item) == 0) {
				wc->ignore_cur_inode = true;
				continue;
			} else {
				wc->ignore_cur_inode = false;
			}
			/* Remove xattrs deleted before the fsync. */
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				/* Remove dir entries deleted before fsync. */
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/*
			 * For regular files, drop any extents beyond the
			 * logged i_size (alignment rounded up to the
			 * sector size): the file may have been truncated
			 * and the stale extents must not survive replay.
			 */
			if (S_ISREG(mode)) {
				struct btrfs_drop_extents_args drop_args = { 0 };
				struct inode *inode;
				u64 from;

				inode = read_one_inode(root, key.objectid);
				if (!inode) {
					ret = -EIO;
					break;
				}
				from = ALIGN(i_size_read(inode),
					     root->fs_info->sectorsize);
				drop_args.start = from;
				drop_args.end = (u64)-1;
				drop_args.drop_cache = true;
				ret = btrfs_drop_extents(wc->trans, root,
							 BTRFS_I(inode),
							 &drop_args);
				if (!ret) {
					inode_sub_bytes(inode,
							drop_args.bytes_found);
					/* Update the inode's disk_i_size */
					ret = btrfs_update_inode(wc->trans,
								 root, BTRFS_I(inode));
				}
				iput(inode);
				if (ret)
					break;
			}

			/* Link counts get fixed up after all replay stages. */
			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		/* All items of a zero-nlink inode are skipped. */
		if (wc->ignore_cur_inode)
			continue;

		/* Stage 2: directory index entries. */
		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* Stage 3 (LOG_WALK_REPLAY_ALL): everything else. */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			/* -ENOENT (missing parent dir) is not fatal. */
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
2724
2725
2726
2727
2728static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
2729{
2730 struct btrfs_block_group *cache;
2731
2732 cache = btrfs_lookup_block_group(fs_info, start);
2733 if (!cache) {
2734 btrfs_err(fs_info, "unable to find block group for %llu", start);
2735 return;
2736 }
2737
2738 spin_lock(&cache->space_info->lock);
2739 spin_lock(&cache->lock);
2740 cache->reserved -= fs_info->nodesize;
2741 cache->space_info->bytes_reserved -= fs_info->nodesize;
2742 spin_unlock(&cache->lock);
2743 spin_unlock(&cache->space_info->lock);
2744
2745 btrfs_put_block_group(cache);
2746}
2747
/*
 * Descend the log tree from the current path position, invoking
 * wc->process_func on each level-0 child and, when wc->free is set,
 * cleaning up each visited leaf (pinning its extent when a transaction
 * is available, otherwise just dropping dirty state and reservation).
 * Returns 0 when the bottom of the current subtree is reached.
 */
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	u32 blocksize;
	int ret = 0;

	while (*level > 0) {
		struct btrfs_key first_key;

		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		/* Exhausted this node's slots: let walk_up handle it. */
		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
		blocksize = fs_info->nodesize;

		next = btrfs_find_create_tree_block(fs_info, bytenr,
						    btrfs_header_owner(cur),
						    *level - 1);
		if (IS_ERR(next))
			return PTR_ERR(next);

		if (*level == 1) {
			/* Child is a leaf: process it without descending. */
			ret = wc->process_func(root, next, wc, ptr_gen,
					       *level - 1);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen,
							*level - 1, &first_key);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				if (trans) {
					/*
					 * Clean the block and pin its extent;
					 * it is released when the transaction
					 * commits.
					 */
					btrfs_tree_lock(next);
					btrfs_clean_tree_block(next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
					ret = btrfs_pin_reserved_extent(trans,
							bytenr, blocksize);
					if (ret) {
						free_extent_buffer(next);
						return ret;
					}
					btrfs_redirty_list_add(
						trans->transaction, next);
				} else {
					/* No transaction: just drop state. */
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);
					unaccount_log_buffer(fs_info, bytenr);
				}
			}
			free_extent_buffer(next);
			continue;
		}
		/* Interior child: read it and step down a level. */
		ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
2840
/*
 * Ascend the log tree from *level, processing (and optionally freeing,
 * when wc->free is set) each fully-visited node on the way up.  Returns 0
 * when a sibling slot is found to continue the walk at, or 1 when the
 * whole tree above the starting level has been consumed.
 */
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			/* More siblings to visit: resume the down-walk here. */
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			/* Node fully visited: process it before moving up. */
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]),
				 *level);
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				if (trans) {
					/*
					 * Clean the block and pin its extent;
					 * freed when the transaction commits.
					 */
					btrfs_tree_lock(next);
					btrfs_clean_tree_block(next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
					ret = btrfs_pin_reserved_extent(trans,
						     path->nodes[*level]->start,
						     path->nodes[*level]->len);
					if (ret)
						return ret;
				} else {
					/* No transaction: just drop state. */
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);

					unaccount_log_buffer(fs_info,
						path->nodes[*level]->start);
				}
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
2895
2896
2897
2898
2899
2900
/*
 * Walk the entire log tree, calling wc->process_func on every node and
 * leaf.  When wc->free is set, each block is also cleaned up after
 * processing (pinned for release at transaction commit when @trans is
 * non-NULL, otherwise unaccounted directly).  The root node itself is
 * handled last, outside the down/up loop.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	/* The path now holds an extra reference on the root node. */
	atomic_inc(&log->node->refs);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* Was the root node processed? (It is not covered by the loop.) */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]),
			 orig_level);
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			if (trans) {
				/* Pin the root block for commit-time release. */
				btrfs_tree_lock(next);
				btrfs_clean_tree_block(next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);
				ret = btrfs_pin_reserved_extent(trans,
						next->start, next->len);
				if (ret)
					goto out;
			} else {
				/* No transaction: just drop dirty state. */
				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
					clear_extent_buffer_dirty(next);
				unaccount_log_buffer(fs_info, next->start);
			}
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
2972
2973
2974
2975
2976
2977static int update_log_root(struct btrfs_trans_handle *trans,
2978 struct btrfs_root *log,
2979 struct btrfs_root_item *root_item)
2980{
2981 struct btrfs_fs_info *fs_info = log->fs_info;
2982 int ret;
2983
2984 if (log->log_transid == 1) {
2985
2986 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2987 &log->root_key, root_item);
2988 } else {
2989 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2990 &log->root_key, root_item);
2991 }
2992 return ret;
2993}
2994
/*
 * Wait until the log commit for @transid has finished.  Must be called with
 * root->log_mutex held; the mutex is dropped while sleeping and re-taken
 * before returning, so callers must revalidate any state they cached.
 */
static void wait_log_commit(struct btrfs_root *root, int transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * Sleep only while @transid has not yet committed AND a commit for
	 * its slot is actually running — if no commit is in flight there is
	 * nobody who would ever wake us.
	 */
	for (;;) {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);

		if (!(root->log_transid_committed < transid &&
		      atomic_read(&root->log_commit[index])))
			break;

		/* Drop the mutex so the committer can make progress. */
		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_commit_wait[index], &wait);
}
3019
/*
 * Wait until there are no more tasks writing into the current log transaction
 * (root->log_writers reaches zero).  Called with root->log_mutex held; the
 * mutex is released around the sleep so writers can finish and drop their
 * count.
 */
static void wait_for_writer(struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&root->log_writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&root->log_writers))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_writer_wait, &wait);
}
3036
3037static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
3038 struct btrfs_log_ctx *ctx)
3039{
3040 if (!ctx)
3041 return;
3042
3043 mutex_lock(&root->log_mutex);
3044 list_del_init(&ctx->list);
3045 mutex_unlock(&root->log_mutex);
3046}
3047
3048
3049
3050
3051
3052static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3053 int index, int error)
3054{
3055 struct btrfs_log_ctx *ctx;
3056 struct btrfs_log_ctx *safe;
3057
3058 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3059 list_del_init(&ctx->list);
3060 ctx->log_ret = error;
3061 }
3062}
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
/*
 * Commit the log tree of @root for an fsync: write out its dirty blocks,
 * link its root item into the log root tree, write that tree out too, and
 * finally update the super block so the logs are discoverable after a crash.
 *
 * Log commits are double buffered: log_commit[transid % 2] tracks whether a
 * commit for that slot is running, which lets a new log transaction start
 * while the previous one is still committing, and lets concurrent fsyncs of
 * the same transid batch onto a single commit.
 *
 * Returns 0 on success, -EAGAIN when the caller must fall back to a full
 * transaction commit, or another negative errno (the transaction is aborted
 * for unrecoverable failures).
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
	struct btrfs_root_item new_root_item;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;
	u64 log_root_start;
	u64 log_root_level;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	/* Someone already committed our log transid; reuse its result. */
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		/* A commit of this transid is in flight: wait for it. */
		wait_log_commit(root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	atomic_set(&root->log_commit[index1], 1);

	/* Wait for the previous log transaction to fully commit first. */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, log_transid - 1);

	while (1) {
		int batch = atomic_read(&root->log_batch);

		/*
		 * On non-SSD media with several tasks logging, briefly yield
		 * so more work can batch into this single commit.
		 */
		if (!btrfs_test_opt(fs_info, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(root);
		/* No new work arrived while waiting; the log is stable. */
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* Something forced a full transaction commit; bail out with -EAGAIN. */
	if (btrfs_need_log_full_commit(trans)) {
		ret = -EAGAIN;
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	/* Each half of the double-buffered log uses its own dirty bit. */
	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/*
	 * Start IO on all dirty log tree blocks under a plug so the writes
	 * can be merged.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
	/*
	 * On zoned filesystems -EAGAIN from btrfs_write_marked_extents() is
	 * treated as success here; the full-commit fallback is handled at the
	 * log-root-tree write below instead.  NOTE(review): semantics come
	 * from btrfs_write_marked_extents()'s zoned handling — confirm there.
	 */
	if (ret == -EAGAIN && btrfs_is_zoned(fs_info))
		ret = 0;
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, ret);
		btrfs_set_log_full_commit(trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	/*
	 * Snapshot the log root item now, while still holding log_mutex, so
	 * the copy we insert into the log root tree is consistent even if a
	 * new log transaction starts modifying log->root_item afterwards.
	 */
	btrfs_set_root_node(&log->root_item, log->node);
	memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));

	/* Open the next log transaction for new writers. */
	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;

	mutex_unlock(&root->log_mutex);

	/*
	 * Zoned mode allocates the log root tree's node lazily; make sure it
	 * exists before we try to link our log root into it.
	 */
	if (btrfs_is_zoned(fs_info)) {
		mutex_lock(&fs_info->tree_root->log_mutex);
		if (!log_root_tree->node) {
			ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
			if (ret) {
				mutex_unlock(&fs_info->tree_root->log_mutex);
				goto out;
			}
		}
		mutex_unlock(&fs_info->tree_root->log_mutex);
	}

	btrfs_init_log_ctx(&root_log_ctx, NULL);

	mutex_lock(&log_root_tree->log_mutex);

	/* Queue ourselves on the log root tree's current commit. */
	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	/*
	 * Insert/update our log root item in the log root tree while holding
	 * its log_mutex, so it lands in the commit we just queued on.
	 */
	ret = update_log_root(trans, log, &new_root_item);
	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(trans);

		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		/* ENOSPC: wait out our IO, then fall back to a full commit. */
		btrfs_wait_tree_log_extents(log, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	/* The log root tree commit we joined already finished. */
	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		blk_finish_plug(&plug);
		list_del_init(&root_log_ctx.list);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		/* Someone else is committing it; wait for our IO and them. */
		blk_finish_plug(&plug);
		ret = btrfs_wait_tree_log_extents(log, mark);
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		if (!ret)
			ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	/* Wait for the previous log root tree commit, if still running. */
	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	/*
	 * A full transaction commit became necessary while we were setting
	 * up; don't bother writing the log root tree.
	 */
	if (btrfs_need_log_full_commit(trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_tree_log_extents(log, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(fs_info,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	/*
	 * On zoned mode -EAGAIN here means we cannot safely finish the log
	 * write; force a full transaction commit instead.
	 */
	if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) {
		btrfs_set_log_full_commit(trans);
		btrfs_wait_tree_log_extents(log, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	} else if (ret) {
		btrfs_set_log_full_commit(trans);
		btrfs_abort_transaction(trans, ret);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	/* Wait for both the log tree and the log root tree IO to finish. */
	ret = btrfs_wait_tree_log_extents(log, mark);
	if (!ret)
		ret = btrfs_wait_tree_log_extents(log_root_tree,
						  EXTENT_NEW | EXTENT_DIRTY);
	if (ret) {
		btrfs_set_log_full_commit(trans);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}

	/*
	 * Capture the root location before releasing the mutex — a later log
	 * commit may change log_root_tree->node underneath us.
	 */
	log_root_start = log_root_tree->node->start;
	log_root_level = btrfs_header_level(log_root_tree->node);
	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * tree_log_mutex serializes the super block update against
	 * concurrent log commits and transaction commits.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	/*
	 * If the filesystem went into an error state, do not point the super
	 * block at a log tree that may be inconsistent.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		ret = -EIO;
		btrfs_set_log_full_commit(trans);
		btrfs_abort_transaction(trans, ret);
		mutex_unlock(&fs_info->tree_log_mutex);
		goto out_wake_log_root;
	}

	/* Point the super block at the new log root tree and write it out. */
	btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
	btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
	ret = write_all_supers(fs_info, 1);
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret) {
		btrfs_set_log_full_commit(trans);
		btrfs_abort_transaction(trans, ret);
		goto out_wake_log_root;
	}

	/* The log for @log_transid is now durable on disk. */
	ASSERT(root->last_log_commit <= log_transid);
	root->last_log_commit = log_transid;

out_wake_log_root:
	mutex_lock(&log_root_tree->log_mutex);
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * Wake anyone waiting in wait_log_commit() on the log root tree.
	 * cond_wake_up avoids a spurious wakeup when nobody waits.
	 */
	cond_wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	mutex_lock(&root->log_mutex);
	btrfs_remove_all_log_ctxs(root, index1, ret);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	/* And wake waiters on this subvolume's log commit. */
	cond_wake_up(&root->log_commit_wait[index1]);
	return ret;
}
3389
/*
 * Free a log tree: walk it (pinning or unaccounting every block via
 * process_one_buffer with wc.free set), clear any leftover dirty-page state
 * and csum-range locks, then drop the final root reference.
 *
 * @trans may be NULL when called outside a transaction (e.g. during
 * unmount/replay cleanup); errors are then reported via
 * btrfs_handle_fs_error() instead of aborting a transaction.
 */
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	if (log->node) {
		ret = walk_log_tree(trans, log, &wc);
		if (ret) {
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(log->fs_info, ret, NULL);
		}
	}

	clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
			  EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
	extent_io_tree_release(&log->log_csum_range);

	/* Zoned support: queue the root node for redirtying on reuse. */
	if (trans && log->node)
		btrfs_redirty_list_add(trans->transaction, log->node);
	btrfs_put_root(log);
}
3417
3418
3419
3420
3421
3422int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3423{
3424 if (root->log_root) {
3425 free_log_tree(trans, root->log_root);
3426 root->log_root = NULL;
3427 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3428 }
3429 return 0;
3430}
3431
3432int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3433 struct btrfs_fs_info *fs_info)
3434{
3435 if (fs_info->log_root_tree) {
3436 free_log_tree(trans, fs_info->log_root_tree);
3437 fs_info->log_root_tree = NULL;
3438 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state);
3439 }
3440 return 0;
3441}
3442
3443
3444
3445
3446
3447
3448
3449static bool inode_logged(struct btrfs_trans_handle *trans,
3450 struct btrfs_inode *inode)
3451{
3452 if (inode->logged_trans == trans->transid)
3453 return true;
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464 if (inode->logged_trans == 0 &&
3465 inode->last_trans == trans->transid &&
3466 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3467 return true;
3468
3469 return false;
3470}
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
/*
 * Remove the dir item and dir index item for @name from @dir's entries in
 * the log tree, so replaying the log after a crash will not recreate the
 * deleted entry.  No-op if the directory was never logged in this
 * transaction or if no log transaction is running.
 *
 * -ENOSPC is converted into a forced full transaction commit rather than an
 * error; other failures abort the transaction.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct btrfs_inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (!inode_logged(trans, dir))
		return 0;

	/* Nothing to do when there is no running log transaction to join. */
	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&dir->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/* Delete the BTRFS_DIR_ITEM_KEY entry (keyed by the name hash). */
	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	/* Delete the BTRFS_DIR_INDEX_KEY entry (keyed by @index). */
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		if (ret) {
			err = ret;
			goto fail;
		}
	}

fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&dir->log_mutex);
	if (err == -ENOSPC) {
		/* Out of log space: force a full commit instead of failing. */
		btrfs_set_log_full_commit(trans);
		err = 0;
	} else if (err < 0) {
		btrfs_abort_transaction(trans, err);
	}

	btrfs_end_log_trans(root);

	return err;
}
3569
3570
/* see comments for btrfs_del_dir_entries_in_log */
/*
 * Remove the inode back reference for (@name, @dirid) from @inode's items in
 * the log tree so replay will not restore the link.  No-op if the inode was
 * never logged or no log transaction is running.  -ENOSPC forces a full
 * transaction commit; -ENOENT (ref not in the log) is not treated as an
 * abort-worthy error, but note it is still propagated to the caller.
 */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct btrfs_inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (!inode_logged(trans, inode))
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&inode->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&inode->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(trans);
		ret = 0;
	} else if (ret < 0 && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);
	btrfs_end_log_trans(root);

	return ret;
}
3601
3602
3603
3604
3605
3606
3607static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3608 struct btrfs_root *log,
3609 struct btrfs_path *path,
3610 int key_type, u64 dirid,
3611 u64 first_offset, u64 last_offset)
3612{
3613 int ret;
3614 struct btrfs_key key;
3615 struct btrfs_dir_log_item *item;
3616
3617 key.objectid = dirid;
3618 key.offset = first_offset;
3619 if (key_type == BTRFS_DIR_ITEM_KEY)
3620 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3621 else
3622 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3623 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3624 if (ret)
3625 return ret;
3626
3627 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3628 struct btrfs_dir_log_item);
3629 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3630 btrfs_mark_buffer_dirty(path->nodes[0]);
3631 btrfs_release_path(path);
3632 return 0;
3633}
3634
3635
3636
3637
3638
3639
3640static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3641 struct btrfs_root *root, struct btrfs_inode *inode,
3642 struct btrfs_path *path,
3643 struct btrfs_path *dst_path, int key_type,
3644 struct btrfs_log_ctx *ctx,
3645 u64 min_offset, u64 *last_offset_ret)
3646{
3647 struct btrfs_key min_key;
3648 struct btrfs_root *log = root->log_root;
3649 struct extent_buffer *src;
3650 int err = 0;
3651 int ret;
3652 int i;
3653 int nritems;
3654 u64 first_offset = min_offset;
3655 u64 last_offset = (u64)-1;
3656 u64 ino = btrfs_ino(inode);
3657
3658 log = root->log_root;
3659
3660 min_key.objectid = ino;
3661 min_key.type = key_type;
3662 min_key.offset = min_offset;
3663
3664 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3665
3666
3667
3668
3669
3670 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3671 min_key.objectid = ino;
3672 min_key.type = key_type;
3673 min_key.offset = (u64)-1;
3674 btrfs_release_path(path);
3675 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3676 if (ret < 0) {
3677 btrfs_release_path(path);
3678 return ret;
3679 }
3680 ret = btrfs_previous_item(root, path, ino, key_type);
3681
3682
3683
3684
3685
3686
3687 if (ret == 0) {
3688 struct btrfs_key tmp;
3689 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3690 path->slots[0]);
3691 if (key_type == tmp.type)
3692 first_offset = max(min_offset, tmp.offset) + 1;
3693 }
3694 goto done;
3695 }
3696
3697
3698 ret = btrfs_previous_item(root, path, ino, key_type);
3699 if (ret == 0) {
3700 struct btrfs_key tmp;
3701 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3702 if (key_type == tmp.type) {
3703 first_offset = tmp.offset;
3704 ret = overwrite_item(trans, log, dst_path,
3705 path->nodes[0], path->slots[0],
3706 &tmp);
3707 if (ret) {
3708 err = ret;
3709 goto done;
3710 }
3711 }
3712 }
3713 btrfs_release_path(path);
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723search:
3724 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3725 if (ret != 0)
3726 goto done;
3727
3728
3729
3730
3731
3732 while (1) {
3733 struct btrfs_key tmp;
3734 src = path->nodes[0];
3735 nritems = btrfs_header_nritems(src);
3736 for (i = path->slots[0]; i < nritems; i++) {
3737 struct btrfs_dir_item *di;
3738
3739 btrfs_item_key_to_cpu(src, &min_key, i);
3740
3741 if (min_key.objectid != ino || min_key.type != key_type)
3742 goto done;
3743
3744 if (need_resched()) {
3745 btrfs_release_path(path);
3746 cond_resched();
3747 goto search;
3748 }
3749
3750 ret = overwrite_item(trans, log, dst_path, src, i,
3751 &min_key);
3752 if (ret) {
3753 err = ret;
3754 goto done;
3755 }
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3781 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3782 if (ctx &&
3783 (btrfs_dir_transid(src, di) == trans->transid ||
3784 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3785 tmp.type != BTRFS_ROOT_ITEM_KEY)
3786 ctx->log_new_dentries = true;
3787 }
3788 path->slots[0] = nritems;
3789
3790
3791
3792
3793
3794 ret = btrfs_next_leaf(root, path);
3795 if (ret) {
3796 if (ret == 1)
3797 last_offset = (u64)-1;
3798 else
3799 err = ret;
3800 goto done;
3801 }
3802 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3803 if (tmp.objectid != ino || tmp.type != key_type) {
3804 last_offset = (u64)-1;
3805 goto done;
3806 }
3807 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3808 ret = overwrite_item(trans, log, dst_path,
3809 path->nodes[0], path->slots[0],
3810 &tmp);
3811 if (ret)
3812 err = ret;
3813 else
3814 last_offset = tmp.offset;
3815 goto done;
3816 }
3817 }
3818done:
3819 btrfs_release_path(path);
3820 btrfs_release_path(dst_path);
3821
3822 if (err == 0) {
3823 *last_offset_ret = last_offset;
3824
3825
3826
3827
3828 ret = insert_dir_log_key(trans, log, path, key_type,
3829 ino, first_offset, last_offset);
3830 if (ret)
3831 err = ret;
3832 }
3833 return err;
3834}
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3849 struct btrfs_root *root, struct btrfs_inode *inode,
3850 struct btrfs_path *path,
3851 struct btrfs_path *dst_path,
3852 struct btrfs_log_ctx *ctx)
3853{
3854 u64 min_key;
3855 u64 max_key;
3856 int ret;
3857 int key_type = BTRFS_DIR_ITEM_KEY;
3858
3859again:
3860 min_key = 0;
3861 max_key = 0;
3862 while (1) {
3863 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3864 ctx, min_key, &max_key);
3865 if (ret)
3866 return ret;
3867 if (max_key == (u64)-1)
3868 break;
3869 min_key = max_key + 1;
3870 }
3871
3872 if (key_type == BTRFS_DIR_ITEM_KEY) {
3873 key_type = BTRFS_DIR_INDEX_KEY;
3874 goto again;
3875 }
3876 return 0;
3877}
3878
3879
3880
3881
3882
3883
3884
/*
 * Delete all items in the log tree belonging to @objectid whose key type is
 * <= @max_key_type.  Used when re-logging an inode to clear its stale items.
 *
 * Each pass searches from (objectid, max_key_type, -1ULL) backwards; because
 * keys sort by (objectid, type, offset), everything for the object from slot
 * start_slot through the found slot can be deleted in one btrfs_del_items()
 * call per leaf.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		/* offset (u64)-1 can never be an exact match. */
		BUG_ON(ret == 0);
		if (ret < 0)
			break;

		/* Nothing sorts before the search key in this leaf. */
		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		/* Find the first slot in this leaf belonging to @objectid. */
		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
		if (ret < 0)
			break;

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start_slot != 0 the object's first item is in this leaf,
		 * so everything has been deleted and we are done.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
3936
/*
 * Fill a btrfs_inode_item in @leaf from the in-memory @inode, using a map
 * token for efficient repeated accesses to the same leaf.
 *
 * @log_inode_only: when set, the inode is only being logged so it exists at
 *                  replay time (LOG_INODE_EXISTS); generation is written as
 *                  0 and the size as @logged_isize so replay does not
 *                  truncate or treat the inode as fully valid.
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only,
			    u64 logged_isize)
{
	struct btrfs_map_token token;
	u64 flags;

	btrfs_init_map_token(&token, leaf);

	if (log_inode_only) {
		/*
		 * Clear the generation and use the provisional size so log
		 * replay treats this as a placeholder entry, not a full
		 * inode.
		 */
		btrfs_set_token_inode_generation(&token, item, 0);
		btrfs_set_token_inode_size(&token, item, logged_isize);
	} else {
		btrfs_set_token_inode_generation(&token, item,
						 BTRFS_I(inode)->generation);
		btrfs_set_token_inode_size(&token, item, inode->i_size);
	}

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode->i_ctime.tv_nsec);

	/*
	 * The sequence number (iversion) and transid are recorded so replay
	 * can tell how fresh this copy of the inode is.
	 */
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_token_inode_flags(&token, item, flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}
3999
/*
 * Write @inode's inode item into the log tree.
 *
 * If the inode was already logged in this transaction and its item was not
 * dropped, the item must still exist in the log, so search for it and update
 * it in place (cheaper than the insert path).  Otherwise insert a fresh item
 * — it cannot already exist, hence the ASSERT on -EEXIST.
 */
static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct btrfs_inode *inode, bool inode_item_dropped)
{
	struct btrfs_inode_item *inode_item;
	int ret;

	/*
	 * Already logged this transaction and not dropped: the item must be
	 * found; treat "not found" (ret > 0) as -ENOENT.
	 */
	if (!inode_item_dropped && inode->logged_trans == trans->transid) {
		ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
		ASSERT(ret <= 0);
		if (ret > 0)
			ret = -ENOENT;
	} else {
		/*
		 * First time logging the inode in this transaction, or its
		 * previous item was dropped: the key cannot exist yet, so an
		 * insert must succeed (never -EEXIST).
		 */
		ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
					      sizeof(*inode_item));
		ASSERT(ret != -EEXIST);
	}
	if (ret)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
			0, 0);
	btrfs_release_path(path);
	return 0;
}
4045
/*
 * Insert a set of checksums (@sums) into the log tree.
 *
 * For inodes that were the target of a reflink/clone in the current
 * transaction, overlapping csum ranges for the same extent can be logged
 * concurrently by different tasks; serialize per byte range via the
 * log_csum_range io tree and delete any previously logged csums for the
 * range first, so the log never ends up with overlapping csum items.
 */
static int log_csums(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode,
		     struct btrfs_root *log_root,
		     struct btrfs_ordered_sum *sums)
{
	const u64 lock_end = sums->bytenr + sums->len - 1;
	struct extent_state *cached_state = NULL;
	int ret;

	/*
	 * No reflink into this inode during the current transaction: no risk
	 * of overlap, insert directly.
	 */
	if (inode->last_reflink_trans < trans->transid)
		return btrfs_csum_file_blocks(trans, log_root, sums);

	/*
	 * Serialize against other tasks logging csums for an overlapping
	 * range of the same extent (possible after reflink/clone).
	 */
	ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
			       lock_end, &cached_state);
	if (ret)
		return ret;

	/*
	 * Drop any csums already in the log for this range before inserting
	 * ours, so the log never contains overlapping csum items (replay
	 * could otherwise pick up stale sums).
	 */
	ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
	if (!ret)
		ret = btrfs_csum_file_blocks(trans, log_root, sums);

	unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
			     &cached_state);

	return ret;
}
4091
/*
 * Copy @nr items starting at @start_slot from the subvolume leaf in
 * @src_path into the log tree.  Inode items are regenerated via
 * fill_inode_item() (so size/generation can be adjusted per @inode_only);
 * all other items are copied verbatim.  For regular file extents written in
 * this transaction, the corresponding data checksums are looked up and also
 * logged.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path,
			       int start_slot, int nr, int inode_only,
			       u64 logged_isize)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = inode->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;

	INIT_LIST_HEAD(&ordered_sums);

	/* One scratch buffer holding both the keys and the item sizes. */
	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	/* Reserve all destination items in the log leaf in one batch. */
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			/* Regenerate the inode item instead of raw-copying. */
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					&inode->vfs_inode,
					inode_only == LOG_INODE_EXISTS,
					logged_isize);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/*
		 * For regular extents written in this transaction, collect
		 * the data checksums covering the extent so they can be
		 * inserted into the log after the item copy.
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			/* Older extents already have their csums on disk. */
			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				/*
				 * Compressed extents are checksummed over the
				 * whole on-disk (compressed) range.
				 */
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret)
					break;
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * Insert the collected csums into the log.  On a prior error, still
	 * walk the list to free every entry, but skip the inserts.
	 */
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = log_csums(trans, inode, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
4217
4218static int extent_cmp(void *priv, const struct list_head *a,
4219 const struct list_head *b)
4220{
4221 const struct extent_map *em1, *em2;
4222
4223 em1 = list_entry(a, struct extent_map, list);
4224 em2 = list_entry(b, struct extent_map, list);
4225
4226 if (em1->start < em2->start)
4227 return -1;
4228 else if (em1->start > em2->start)
4229 return 1;
4230 return 0;
4231}
4232
/*
 * Log the data checksums for the modified range of extent map @em.
 *
 * Ranges covered by still-attached ordered extents already carry their csums
 * in ordered->list, which are logged directly (once, guarded by the
 * BTRFS_ORDERED_LOGGED_CSUM bit) and trimmed out of [mod_start, mod_len).
 * Whatever remains is looked up in the csum tree and logged from there.
 *
 * Returns 0 on success or a negative errno.
 */
static int log_extent_csums(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode,
			    struct btrfs_root *log_root,
			    const struct extent_map *em,
			    struct btrfs_log_ctx *ctx)
{
	struct btrfs_ordered_extent *ordered;
	u64 csum_offset;
	u64 csum_len;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	/* No csums for nodatasum inodes, prealloc extents or holes. */
	if (inode->flags & BTRFS_INODE_NODATASUM ||
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
		const u64 ordered_end = ordered->file_offset + ordered->num_bytes;
		const u64 mod_end = mod_start + mod_len;
		struct btrfs_ordered_sum *sums;

		if (mod_len == 0)
			break;

		/* Skip ordered extents entirely before/after our range. */
		if (ordered_end <= mod_start)
			continue;
		if (mod_end <= ordered->file_offset)
			break;

		/*
		 * Trim the part of [mod_start, mod_end) that overlaps this
		 * ordered extent — its csums are taken from ordered->list
		 * below, not from the csum tree.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered_end >= mod_end)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * NOTE(review): when the ordered extent sits strictly
			 * inside the modified range, only the left part is
			 * kept here; the right part appears to be covered by
			 * later entries in the sorted list — confirm.
			 */
		} else {
			if (ordered_end < mod_end) {
				mod_len = mod_end - ordered_end;
				mod_start = ordered_end;
			} else {
				mod_len = 0;
			}
		}

		/*
		 * Log the csums attached to the ordered extent only once,
		 * even if several extent maps overlap it.
		 */
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
			continue;

		list_for_each_entry(sums, &ordered->list, list) {
			ret = log_csums(trans, inode, log_root, sums);
			if (ret)
				return ret;
		}
	}

	/* Everything was covered by ordered extents. */
	if (mod_len == 0)
		return 0;

	/* Compressed extents are checksummed over the whole on-disk range. */
	if (em->compress_type) {
		csum_offset = 0;
		csum_len = max(em->block_len, em->orig_block_len);
	} else {
		csum_offset = mod_start - em->start;
		csum_len = mod_len;
	}

	/* Fetch the remaining csums from the csum tree and log them. */
	ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
		if (!ret)
			ret = log_csums(trans, inode, log_root, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
4339
/*
 * Write one extent map @em of @inode into the log tree as a file extent
 * item, after logging its data checksums.  Any items previously logged for
 * the same file range are dropped first via btrfs_drop_extents().
 */
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_inode *inode, struct btrfs_root *root,
			  const struct extent_map *em,
			  struct btrfs_path *path,
			  struct btrfs_log_ctx *ctx)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;

	ret = log_extent_csums(trans, inode, log, em, ctx);
	if (ret)
		return ret;

	/*
	 * Drop anything logged before for this range; ask btrfs_drop_extents
	 * to leave room for our replacement item when possible so we can
	 * skip the separate insert below.
	 */
	drop_args.path = path;
	drop_args.start = em->start;
	drop_args.end = em->start + em->len;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*fi);
	ret = btrfs_drop_extents(trans, log, inode, &drop_args);
	if (ret)
		return ret;

	if (!drop_args.extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	btrfs_init_map_token(&token, leaf);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(&token, fi, trans->transid);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		btrfs_set_token_file_extent_type(&token, fi,
						 BTRFS_FILE_EXTENT_PREALLOC);
	else
		btrfs_set_token_file_extent_type(&token, fi,
						 BTRFS_FILE_EXTENT_REG);

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		/* Compressed: disk bytenr is the start of the on-disk data. */
		btrfs_set_token_file_extent_disk_bytenr(&token, fi,
							em->block_start);
		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		/* Regular: account for the offset into the original extent. */
		btrfs_set_token_file_extent_disk_bytenr(&token, fi,
							em->block_start -
							extent_offset);
		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
	} else {
		/* Hole / inline sentinel: no on-disk allocation. */
		btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0);
		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0);
	}

	btrfs_set_token_file_extent_offset(&token, fi, extent_offset);
	btrfs_set_token_file_extent_num_bytes(&token, fi, em->len);
	btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes);
	btrfs_set_token_file_extent_compression(&token, fi, em->compress_type);
	btrfs_set_token_file_extent_encryption(&token, fi, 0);
	btrfs_set_token_file_extent_other_encoding(&token, fi, 0);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	return ret;
}
4419
4420
4421
4422
4423
4424
4425
4426
4427
/*
 * Log all prealloc extents beyond the inode's i_size so that they are not
 * lost after a fast fsync followed by a log replay. Any stale extent items
 * at or beyond i_size already present in the log are dropped first.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
				      struct btrfs_inode *inode,
				      struct btrfs_path *path)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key;
	const u64 i_size = i_size_read(&inode->vfs_inode);
	const u64 ino = btrfs_ino(inode);
	struct btrfs_path *dst_path = NULL;
	bool dropped_extents = false;
	u64 truncate_offset = i_size;
	struct extent_buffer *leaf;
	int slot;
	int ins_nr = 0;
	int start_slot;
	int ret;

	if (!(inode->flags & BTRFS_INODE_PREALLOC))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = i_size;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/*
	 * We must check if there is a prealloc extent that starts before
	 * i_size and crosses the i_size boundary. If there is, later we
	 * truncate the log down to the end of that extent and not to i_size,
	 * otherwise, after log replay, we would lose the part of that extent
	 * located beyond i_size.
	 */
	ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		struct btrfs_file_extent_item *ei;

		leaf = path->nodes[0];
		slot = path->slots[0];
		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) ==
		    BTRFS_FILE_EXTENT_PREALLOC) {
			u64 extent_end;

			btrfs_item_key_to_cpu(leaf, &key, slot);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, ei);

			if (extent_end > i_size)
				truncate_offset = extent_end;
		}
	} else {
		/* No previous extent item, nothing crosses i_size. */
		ret = 0;
	}

	while (true) {
		leaf = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			/* Flush the pending batch before releasing the leaf. */
			if (ins_nr > 0) {
				ret = copy_items(trans, inode, dst_path, path,
						 start_slot, ins_nr, 1, 0);
				if (ret < 0)
					goto out;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY ||
		    key.offset < i_size) {
			path->slots[0]++;
			continue;
		}
		if (!dropped_extents) {
			/*
			 * Avoid logging extent items logged in past fsync calls
			 * and leading to duplicate keys in the log tree.
			 */
			do {
				ret = btrfs_truncate_inode_items(trans,
							 root->log_root,
							 inode, truncate_offset,
							 BTRFS_EXTENT_DATA_KEY,
							 NULL);
			} while (ret == -EAGAIN);
			if (ret)
				goto out;
			dropped_extents = true;
		}
		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		/* Allocate the destination path lazily, on first hit. */
		if (!dst_path) {
			dst_path = btrfs_alloc_path();
			if (!dst_path) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}
	if (ins_nr > 0)
		ret = copy_items(trans, inode, dst_path, path,
				 start_slot, ins_nr, 1, 0);
out:
	btrfs_release_path(path);
	btrfs_free_path(dst_path);
	return ret;
}
4556
4557static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4558 struct btrfs_root *root,
4559 struct btrfs_inode *inode,
4560 struct btrfs_path *path,
4561 struct btrfs_log_ctx *ctx)
4562{
4563 struct btrfs_ordered_extent *ordered;
4564 struct btrfs_ordered_extent *tmp;
4565 struct extent_map *em, *n;
4566 struct list_head extents;
4567 struct extent_map_tree *tree = &inode->extent_tree;
4568 int ret = 0;
4569 int num = 0;
4570
4571 INIT_LIST_HEAD(&extents);
4572
4573 write_lock(&tree->lock);
4574
4575 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4576 list_del_init(&em->list);
4577
4578
4579
4580
4581
4582
4583 if (++num > 32768) {
4584 list_del_init(&tree->modified_extents);
4585 ret = -EFBIG;
4586 goto process;
4587 }
4588
4589 if (em->generation < trans->transid)
4590 continue;
4591
4592
4593 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4594 em->start >= i_size_read(&inode->vfs_inode))
4595 continue;
4596
4597
4598 refcount_inc(&em->refs);
4599 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4600 list_add_tail(&em->list, &extents);
4601 num++;
4602 }
4603
4604 list_sort(NULL, &extents, extent_cmp);
4605process:
4606 while (!list_empty(&extents)) {
4607 em = list_entry(extents.next, struct extent_map, list);
4608
4609 list_del_init(&em->list);
4610
4611
4612
4613
4614
4615 if (ret) {
4616 clear_em_logging(tree, em);
4617 free_extent_map(em);
4618 continue;
4619 }
4620
4621 write_unlock(&tree->lock);
4622
4623 ret = log_one_extent(trans, inode, root, em, path, ctx);
4624 write_lock(&tree->lock);
4625 clear_em_logging(tree, em);
4626 free_extent_map(em);
4627 }
4628 WARN_ON(!list_empty(&extents));
4629 write_unlock(&tree->lock);
4630
4631 btrfs_release_path(path);
4632 if (!ret)
4633 ret = btrfs_log_prealloc_extents(trans, inode, path);
4634 if (ret)
4635 return ret;
4636
4637
4638
4639
4640
4641
4642
4643
4644 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
4645 list_del_init(&ordered->log_list);
4646 set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
4647
4648 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4649 spin_lock_irq(&inode->ordered_tree.lock);
4650 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4651 set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
4652 atomic_inc(&trans->transaction->pending_ordered);
4653 }
4654 spin_unlock_irq(&inode->ordered_tree.lock);
4655 }
4656 btrfs_put_ordered_extent(ordered);
4657 }
4658
4659 return 0;
4660}
4661
4662static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4663 struct btrfs_path *path, u64 *size_ret)
4664{
4665 struct btrfs_key key;
4666 int ret;
4667
4668 key.objectid = btrfs_ino(inode);
4669 key.type = BTRFS_INODE_ITEM_KEY;
4670 key.offset = 0;
4671
4672 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4673 if (ret < 0) {
4674 return ret;
4675 } else if (ret > 0) {
4676 *size_ret = 0;
4677 } else {
4678 struct btrfs_inode_item *item;
4679
4680 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4681 struct btrfs_inode_item);
4682 *size_ret = btrfs_inode_size(path->nodes[0], item);
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694 if (*size_ret > inode->vfs_inode.i_size)
4695 *size_ret = inode->vfs_inode.i_size;
4696 }
4697
4698 btrfs_release_path(path);
4699 return 0;
4700}
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4712 struct btrfs_root *root,
4713 struct btrfs_inode *inode,
4714 struct btrfs_path *path,
4715 struct btrfs_path *dst_path)
4716{
4717 int ret;
4718 struct btrfs_key key;
4719 const u64 ino = btrfs_ino(inode);
4720 int ins_nr = 0;
4721 int start_slot = 0;
4722 bool found_xattrs = false;
4723
4724 if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
4725 return 0;
4726
4727 key.objectid = ino;
4728 key.type = BTRFS_XATTR_ITEM_KEY;
4729 key.offset = 0;
4730
4731 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4732 if (ret < 0)
4733 return ret;
4734
4735 while (true) {
4736 int slot = path->slots[0];
4737 struct extent_buffer *leaf = path->nodes[0];
4738 int nritems = btrfs_header_nritems(leaf);
4739
4740 if (slot >= nritems) {
4741 if (ins_nr > 0) {
4742 ret = copy_items(trans, inode, dst_path, path,
4743 start_slot, ins_nr, 1, 0);
4744 if (ret < 0)
4745 return ret;
4746 ins_nr = 0;
4747 }
4748 ret = btrfs_next_leaf(root, path);
4749 if (ret < 0)
4750 return ret;
4751 else if (ret > 0)
4752 break;
4753 continue;
4754 }
4755
4756 btrfs_item_key_to_cpu(leaf, &key, slot);
4757 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4758 break;
4759
4760 if (ins_nr == 0)
4761 start_slot = slot;
4762 ins_nr++;
4763 path->slots[0]++;
4764 found_xattrs = true;
4765 cond_resched();
4766 }
4767 if (ins_nr > 0) {
4768 ret = copy_items(trans, inode, dst_path, path,
4769 start_slot, ins_nr, 1, 0);
4770 if (ret < 0)
4771 return ret;
4772 }
4773
4774 if (!found_xattrs)
4775 set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
4776
4777 return 0;
4778}
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
/*
 * When using the NO_HOLES feature, no explicit hole extent items exist in the
 * subvolume tree. Walk the inode's extent items and insert, into the log
 * tree, an explicit hole extent item for every gap between consecutive
 * extents (and between the last extent and i_size), so log replay produces a
 * correct file layout.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int btrfs_log_holes(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_inode *inode,
			   struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	const u64 ino = btrfs_ino(inode);
	const u64 i_size = i_size_read(&inode->vfs_inode);
	u64 prev_extent_end = 0;
	int ret;

	/* Without NO_HOLES the fs has explicit hole items; nothing to do. */
	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/* We have a hole, log it. */
		if (prev_extent_end < key.offset) {
			const u64 hole_len = key.offset - prev_extent_end;

			/*
			 * Release the path to avoid deadlocks with other code
			 * paths that search the root while holding locks on
			 * leaves from the log root.
			 */
			btrfs_release_path(path);
			ret = btrfs_insert_file_extent(trans, root->log_root,
						       ino, prev_extent_end, 0,
						       0, hole_len, 0, hole_len,
						       0, 0, 0);
			if (ret < 0)
				return ret;

			/*
			 * Search for the same key again in the root. Since
			 * it's an extent item and we are holding the inode
			 * lock, the key must still exist. If it doesn't just
			 * emit a warning and return an error to fall back to
			 * a transaction commit.
			 */
			ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
			if (ret < 0)
				return ret;
			if (WARN_ON(ret > 0))
				return -ENOENT;
			leaf = path->nodes[0];
		}

		prev_extent_end = btrfs_file_extent_end(path);
		path->slots[0]++;
		cond_resched();
	}

	if (prev_extent_end < i_size) {
		u64 hole_len;

		/* Trailing hole between the last extent and i_size. */
		btrfs_release_path(path);
		hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
		ret = btrfs_insert_file_extent(trans, root->log_root,
					       ino, prev_extent_end, 0, 0,
					       hole_len, 0, hole_len,
					       0, 0, 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}
4882
4883
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
4924
/*
 * Check whether any of the names in the inode ref/extref item at @slot of
 * @eb conflicts with a dir item, in the commit root, that points to a
 * different inode.
 *
 * Returns:
 *   1  - a conflicting dir item exists; *other_ino and *other_parent are set
 *        to the conflicting inode and its parent directory;
 *   0  - no conflict found;
 *   -EAGAIN - the dir item points to a root (subvolume), caller must fall
 *        back to a transaction commit;
 *   other negative errno on failure.
 */
static int btrfs_check_ref_name_override(struct extent_buffer *eb,
					 const int slot,
					 const struct btrfs_key *key,
					 struct btrfs_inode *inode,
					 u64 *other_ino, u64 *other_parent)
{
	int ret;
	struct btrfs_path *search_path;
	char *name = NULL;
	/* Capacity of the name buffer; grown lazily below. */
	u32 name_len = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	u32 cur_offset = 0;
	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);

	search_path = btrfs_alloc_path();
	if (!search_path)
		return -ENOMEM;
	/* Search the last committed state, without taking tree locks. */
	search_path->search_commit_root = 1;
	search_path->skip_locking = 1;

	/* An item can pack several refs; iterate over all of them. */
	while (cur_offset < item_size) {
		u64 parent;
		u32 this_name_len;
		u32 this_len;
		unsigned long name_ptr;
		struct btrfs_dir_item *di;

		if (key->type == BTRFS_INODE_REF_KEY) {
			struct btrfs_inode_ref *iref;

			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			/* For regular refs the parent is the key offset. */
			parent = key->offset;
			this_name_len = btrfs_inode_ref_name_len(eb, iref);
			name_ptr = (unsigned long)(iref + 1);
			this_len = sizeof(*iref) + this_name_len;
		} else {
			struct btrfs_inode_extref *extref;

			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			/* Extrefs embed the parent objectid in the item. */
			parent = btrfs_inode_extref_parent(eb, extref);
			this_name_len = btrfs_inode_extref_name_len(eb, extref);
			name_ptr = (unsigned long)&extref->name;
			this_len = sizeof(*extref) + this_name_len;
		}

		/* Grow the name buffer if this name is longer than before. */
		if (this_name_len > name_len) {
			char *new_name;

			new_name = krealloc(name, this_name_len, GFP_NOFS);
			if (!new_name) {
				ret = -ENOMEM;
				goto out;
			}
			name_len = this_name_len;
			name = new_name;
		}

		read_extent_buffer(eb, name, name_ptr, this_name_len);
		di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
				parent, name, this_name_len, 0);
		if (di && !IS_ERR(di)) {
			struct btrfs_key di_key;

			btrfs_dir_item_key_to_cpu(search_path->nodes[0],
						  di, &di_key);
			if (di_key.type == BTRFS_INODE_ITEM_KEY) {
				if (di_key.objectid != key->objectid) {
					/* Name points at a different inode. */
					ret = 1;
					*other_ino = di_key.objectid;
					*other_parent = parent;
				} else {
					ret = 0;
				}
			} else {
				/* Points at a root; needs a full commit. */
				ret = -EAGAIN;
			}
			goto out;
		} else if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		btrfs_release_path(search_path);

		cur_offset += this_len;
	}
	ret = 0;
out:
	btrfs_free_path(search_path);
	kfree(name);
	return ret;
}
5017
/*
 * Work-list element used by log_conflicting_inodes() to track inodes that
 * still need to be logged, together with the parent directory each conflict
 * was found under.
 */
struct btrfs_ino_list {
	u64 ino;		/* inode number to log */
	u64 parent;		/* objectid of its parent directory */
	struct list_head list;	/* linkage into the local inode_list */
};
5023
/*
 * Log the inode @ino (found under directory @parent) whose name conflicts
 * with a ref of the inode being fsynced, and, transitively, any further
 * inodes whose names conflict with refs of the inodes logged here. Conflicts
 * are discovered by scanning each logged inode's INODE_REF/EXTREF items and
 * are queued on a local list so the processing is iterative, not recursive.
 *
 * On error the remaining queue is still drained (to free its elements) while
 * preserving the first error, which is returned.
 */
static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_log_ctx *ctx,
				  u64 ino, u64 parent)
{
	struct btrfs_ino_list *ino_elem;
	LIST_HEAD(inode_list);
	int ret = 0;

	ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
	if (!ino_elem)
		return -ENOMEM;
	ino_elem->ino = ino;
	ino_elem->parent = parent;
	list_add_tail(&ino_elem->list, &inode_list);

	while (!list_empty(&inode_list)) {
		struct btrfs_fs_info *fs_info = root->fs_info;
		struct btrfs_key key;
		struct inode *inode;

		ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
					    list);
		ino = ino_elem->ino;
		parent = ino_elem->parent;
		list_del(&ino_elem->list);
		kfree(ino_elem);
		/* After an error, just drain and free the remaining queue. */
		if (ret)
			continue;

		btrfs_release_path(path);

		inode = btrfs_iget(fs_info->sb, ino, root);
		/*
		 * If the other inode that had a conflicting dir entry was
		 * deleted in the current transaction, we need to log its
		 * parent directory instead, so that on replay the deletion is
		 * noticed and the dentry removed.
		 */
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			if (ret == -ENOENT) {
				inode = btrfs_iget(fs_info->sb, parent, root);
				if (IS_ERR(inode)) {
					ret = PTR_ERR(inode);
				} else {
					ret = btrfs_log_inode(trans, root,
						      BTRFS_I(inode),
						      LOG_OTHER_INODE_ALL,
						      ctx);
					btrfs_add_delayed_iput(inode);
				}
			}
			continue;
		}

		/*
		 * If the inode was already logged in this transaction, there
		 * is nothing to do and we avoid logging it again, which could
		 * lead to an infinite loop of mutually conflicting inodes.
		 * The check is done under the inode's lock to synchronize
		 * with a concurrent task finishing to log this inode.
		 */
		spin_lock(&BTRFS_I(inode)->lock);
		/*
		 * Check logged_trans only, and not last_log_commit or
		 * last_sub_trans, because a previous log in this transaction
		 * is enough for our purposes here.
		 */
		if (BTRFS_I(inode)->logged_trans == trans->transid) {
			spin_unlock(&BTRFS_I(inode)->lock);
			btrfs_add_delayed_iput(inode);
			continue;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/*
		 * We are safe logging the other inode without acquiring its
		 * inode lock, because the caller holds the lock of the inode
		 * being fsynced, so any concurrent fsync of this inode will
		 * wait on that lock.
		 */
		ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
				      LOG_OTHER_INODE, ctx);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			continue;
		}

		/*
		 * Now scan this inode's refs to find further conflicts and
		 * queue them for logging as well.
		 */
		key.objectid = ino;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = 0;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			btrfs_add_delayed_iput(inode);
			continue;
		}

		while (true) {
			struct extent_buffer *leaf = path->nodes[0];
			int slot = path->slots[0];
			u64 other_ino = 0;
			u64 other_parent = 0;

			if (slot >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0) {
					break;
				} else if (ret > 0) {
					ret = 0;
					break;
				}
				continue;
			}

			btrfs_item_key_to_cpu(leaf, &key, slot);
			if (key.objectid != ino ||
			    (key.type != BTRFS_INODE_REF_KEY &&
			     key.type != BTRFS_INODE_EXTREF_KEY)) {
				/* Done with this inode's refs. */
				ret = 0;
				break;
			}

			ret = btrfs_check_ref_name_override(leaf, slot, &key,
					BTRFS_I(inode), &other_ino,
					&other_parent);
			if (ret < 0)
				break;
			if (ret > 0) {
				/* Found another conflict, queue it. */
				ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
				if (!ino_elem) {
					ret = -ENOMEM;
					break;
				}
				ino_elem->ino = other_ino;
				ino_elem->parent = other_parent;
				list_add_tail(&ino_elem->list, &inode_list);
				ret = 0;
			}
			path->slots[0]++;
		}
		btrfs_add_delayed_iput(inode);
	}

	return ret;
}
5194
/*
 * Copy all items of @inode in the key range [*min_key, *max_key] that were
 * modified in the current transaction from the subvolume tree into the log
 * tree, batching runs of contiguous leaf slots into single copy_items()
 * calls. While doing so, detect conflicting inode refs (a name that points
 * to a different inode in the current tree) and log those other inodes via
 * log_conflicting_inodes().
 *
 * Sets *need_log_inode_item to false if the inode item itself was copied.
 * Returns 0 on success or a negative errno on failure.
 */
static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *inode,
				   struct btrfs_key *min_key,
				   const struct btrfs_key *max_key,
				   struct btrfs_path *path,
				   struct btrfs_path *dst_path,
				   const u64 logged_isize,
				   const bool recursive_logging,
				   const int inode_only,
				   struct btrfs_log_ctx *ctx,
				   bool *need_log_inode_item)
{
	struct btrfs_root *root = inode->root;
	int ins_start_slot = 0;
	int ins_nr = 0;
	int ret;

	while (1) {
		/* Find the next item changed in the current transaction. */
		ret = btrfs_search_forward(root, min_key, path, trans->transid);
		if (ret < 0)
			return ret;
		if (ret > 0) {
			ret = 0;
			break;
		}
again:
		/* Note, ins_del_range always drops items in this key range. */
		if (min_key->objectid != max_key->objectid)
			break;
		if (min_key->type > max_key->type)
			break;

		if (min_key->type == BTRFS_INODE_ITEM_KEY)
			*need_log_inode_item = false;

		if ((min_key->type == BTRFS_INODE_REF_KEY ||
		     min_key->type == BTRFS_INODE_EXTREF_KEY) &&
		    inode->generation == trans->transid &&
		    !recursive_logging) {
			u64 other_ino = 0;
			u64 other_parent = 0;

			ret = btrfs_check_ref_name_override(path->nodes[0],
					path->slots[0], min_key, inode,
					&other_ino, &other_parent);
			if (ret < 0) {
				return ret;
			} else if (ret > 0 && ctx &&
				   other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
				/*
				 * Flush the pending batch (including this ref
				 * item) before recursing into the other inode,
				 * since that releases our path.
				 */
				if (ins_nr > 0) {
					ins_nr++;
				} else {
					ins_nr = 1;
					ins_start_slot = path->slots[0];
				}
				ret = copy_items(trans, inode, dst_path, path,
						 ins_start_slot, ins_nr,
						 inode_only, logged_isize);
				if (ret < 0)
					return ret;
				ins_nr = 0;

				ret = log_conflicting_inodes(trans, root, path,
						ctx, other_ino, other_parent);
				if (ret)
					return ret;
				btrfs_release_path(path);
				goto next_key;
			}
		}

		/* Skip xattrs, we log them later with btrfs_log_all_xattrs(). */
		if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
			if (ins_nr == 0)
				goto next_slot;
			ret = copy_items(trans, inode, dst_path, path,
					 ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0)
				return ret;
			ins_nr = 0;
			goto next_slot;
		}

		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			/* Contiguous with the current batch, extend it. */
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		/* Non-contiguous: flush the old batch and start a new one. */
		ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
				 ins_nr, inode_only, logged_isize);
		if (ret < 0)
			return ret;
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:
		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
			btrfs_item_key_to_cpu(path->nodes[0], min_key,
					      path->slots[0]);
			goto again;
		}
		/* End of leaf: flush the batch before releasing the path. */
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 ins_start_slot, ins_nr, inode_only,
					 logged_isize);
			if (ret < 0)
				return ret;
			ins_nr = 0;
		}
		btrfs_release_path(path);
next_key:
		/* Advance min_key past the last processed key. */
		if (min_key->offset < (u64)-1) {
			min_key->offset++;
		} else if (min_key->type < max_key->type) {
			min_key->type++;
			min_key->offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr)
		ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
				 ins_nr, inode_only, logged_isize);

	return ret;
}
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340
/*
 * Log a single inode in the tree log. Copies the inode's items (according to
 * @inode_only) from the subvolume tree into the log tree, including xattrs,
 * extent items (via the fast path if possible), explicit holes for NO_HOLES
 * filesystems, and directory items for directories.
 *
 * This handles both files and directories and will run delayed items before
 * logging, since logging reads directly from the subvolume's btree.
 *
 * Returns 0 on success, a negative errno on failure, or 1 when a full
 * transaction commit is required instead (the caller must fall back).
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	int err = 0;
	int ret = 0;
	bool fast_search = false;
	u64 ino = btrfs_ino(inode);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 logged_isize = 0;
	bool need_log_inode_item = true;
	bool xattrs_logged = false;
	bool recursive_logging = false;
	bool inode_item_dropped = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/*
	 * For directories, and for files not needing a full sync, extent
	 * data is logged separately, so cap the key range at the xattrs;
	 * otherwise copy everything.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &inode->runtime_flags) &&
	     inode_only >= LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/*
	 * Only run delayed items if we are a directory. We want to make sure
	 * all directory indexes hit the fs/subvolume tree so we can find them
	 * and figure out which index ranges have to be logged.
	 *
	 * Otherwise commit the delayed inode only if the full sync flag is
	 * set, as we want to make sure an up to date version is in the
	 * subvolume tree so copy_inode_items_to_log() / copy_items() can find
	 * it and copy it to the log tree.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode))
		ret = btrfs_commit_inode_delayed_items(trans, inode);
	else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
		ret = btrfs_commit_inode_delayed_inode(inode);

	if (ret) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		return ret;
	}

	if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
		recursive_logging = true;
		if (inode_only == LOG_OTHER_INODE)
			inode_only = LOG_INODE_EXISTS;
		else
			inode_only = LOG_INODE_ALL;
		/* Nested lock class: the fsynced inode's log_mutex is held. */
		mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&inode->log_mutex);
	}

	/*
	 * If the inode is a directory and there was an unlink or rename in it
	 * during the current transaction, we can not rely on the log tree to
	 * correctly replay the directory's state, so fall back to a full
	 * transaction commit (signalled by returning 1).
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode) &&
	    inode_only == LOG_INODE_ALL &&
	    inode->last_unlink_trans >= trans->transid) {
		btrfs_set_log_full_commit(trans);
		err = 1;
		goto out_unlock;
	}

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (inode_only == LOG_INODE_EXISTS) {
			/*
			 * Make sure the new inode item we write to the log has
			 * the same isize as the current one (if it exists).
			 * This is necessary to prevent data loss after log
			 * replay, and also to prevent doing a wrong expanding
			 * truncate - for e.g. create file, write 4K into
			 * offset 0, fsync, write 4K into offset 4096, add
			 * hard link, fsync some other file (to sync log),
			 * power fail - if we use the inode's current i_size,
			 * after a log replay we get a 8Kb file, with the last
			 * 4Kb extent as a hole.
			 */
			err = logged_inode_size(log, inode, path, &logged_isize);
			if (err)
				goto out_unlock;
		}
		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &inode->runtime_flags)) {
			if (inode_only == LOG_INODE_EXISTS) {
				max_key.type = BTRFS_XATTR_ITEM_KEY;
				ret = drop_objectid_items(trans, log, path, ino,
							  max_key.type);
			} else {
				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					  &inode->runtime_flags);
				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					  &inode->runtime_flags);
				/* Drop everything the log had for this inode. */
				while(1) {
					ret = btrfs_truncate_inode_items(trans,
						log, inode, 0, 0, NULL);
					if (ret != -EAGAIN)
						break;
				}
			}
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &inode->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			/* The log still has a valid inode item for us. */
			inode_item_dropped = false;
			goto log_extents;
		}

	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}

	err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
				      path, dst_path, logged_isize,
				      recursive_logging, inode_only, ctx,
				      &need_log_inode_item);
	if (err)
		goto out_unlock;

	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
	if (err)
		goto out_unlock;
	xattrs_logged = true;
	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		err = btrfs_log_holes(trans, root, inode, path);
		if (err)
			goto out_unlock;
	}
log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (need_log_inode_item) {
		err = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
		if (err)
			goto out_unlock;
		/*
		 * If we are doing a fast fsync and the inode was logged before
		 * in this transaction, we don't need to log the xattrs because
		 * they were logged before. If xattrs were added, changed or
		 * deleted since the last time we logged the inode, then this
		 * inode would have the BTRFS_INODE_COPY_EVERYTHING flag set,
		 * so we would not enter the fast path branch above (we would
		 * have gone through the branch that sets xattrs_logged).
		 */
		if (!xattrs_logged && inode->logged_trans < trans->transid) {
			err = btrfs_log_all_xattrs(trans, root, inode, path,
						   dst_path);
			if (err)
				goto out_unlock;
			btrfs_release_path(path);
		}
	}
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		/* Extents were copied item-wise; drop the modified list. */
		write_lock(&em_tree->lock);
		list_for_each_entry_safe(em, n, &em_tree->modified_extents, list)
			list_del_init(&em->list);
		write_unlock(&em_tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path,
					    ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	/*
	 * If we are logging that an ancestor inode exists as part of logging a
	 * new name from a link or rename operation, don't mark the inode as
	 * logged - otherwise if an explicit fsync is made against an ancestor,
	 * the fsync considers the inode in the log and doesn't sync the log,
	 * resulting in the ancestor missing after a power failure unless the
	 * log was synced as part of an fsync against any other unrelated inode.
	 */
	if (!ctx ||
	    !(S_ISDIR(inode->vfs_inode.i_mode) && ctx->logging_new_name &&
	      &inode->vfs_inode != ctx->inode)) {
		spin_lock(&inode->lock);
		inode->logged_trans = trans->transid;
		/*
		 * Don't update last_log_commit if we logged that an inode
		 * exists. We do this for three reasons:
		 *
		 * 1) We might have had buffered writes to this inode that were
		 *    flushed and had their ordered extents completed in this
		 *    transaction, but we did not previously log the inode with
		 *    LOG_INODE_ALL. Later the inode was evicted and after that
		 *    it was loaded again and this LOG_INODE_EXISTS log
		 *    operation happened. We must make sure that if an explicit
		 *    fsync against the inode is performed later, it logs the
		 *    new extents, an updated inode item, etc, and syncs the
		 *    log;
		 *
		 * 2) When we log the inode with LOG_INODE_ALL, via a fast
		 *    fsync, the inode's last_trans might not match the current
		 *    transid, and a later explicit fsync could otherwise be
		 *    skipped incorrectly;
		 *
		 * 3) If the inode is only logged as LOG_INODE_EXISTS, a later
		 *    fsync must still do real work.
		 */
		if (inode_only != LOG_INODE_EXISTS)
			inode->last_log_commit = inode->last_sub_trans;
		spin_unlock(&inode->lock);
	}
out_unlock:
	mutex_unlock(&inode->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
5622
5623
5624
5625
5626
5627
5628
5629
5630
5631
5632static bool need_log_inode(struct btrfs_trans_handle *trans,
5633 struct btrfs_inode *inode)
5634{
5635
5636
5637
5638
5639 if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
5640 return false;
5641
5642
5643
5644
5645
5646
5647
5648
5649
5650
5651 if (inode->logged_trans == trans->transid &&
5652 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
5653 return false;
5654
5655 return true;
5656}
5657
/*
 * Work-list element used by log_new_dir_dentries() to track directories
 * whose new dentries still need to be logged.
 */
struct btrfs_dir_list {
	u64 ino;		/* directory inode number */
	struct list_head list;	/* linkage into the local dir_list */
};
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
5686
5687
5688
5689
5690
5691
5692
5693
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
/*
 * Starting from @start_inode, walk the already-logged dir items in the log
 * tree and log every inode they point at that was changed in the current
 * transaction. Directories found along the way are queued on a local list
 * and processed iteratively (breadth-first), so that after a replay all new
 * dentries created inside a fsynced directory exist and point at logged
 * inodes.
 *
 * On error the remaining queue is still drained so its elements are freed;
 * the first error is returned.
 */
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);
	struct btrfs_dir_list *dir_elem;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
	if (!dir_elem) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);

	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;
		int nritems;
		int i;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
					    list);
		/* After an error, just drain and free the remaining queue. */
		if (ret)
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;
		min_key.offset = 0;
again:
		btrfs_release_path(path);
		/* Search the LOG tree: only already-logged dentries matter. */
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
		if (ret < 0) {
			goto next_dir_inode;
		} else if (ret > 0) {
			ret = 0;
			goto next_dir_inode;
		}

process_leaf:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;
			int type;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			/* Old (non-dir) dentries need no further logging. */
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
				continue;
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			/* Subvolume entries are not logged. */
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
				continue;

			/* Release before btrfs_iget() to avoid deadlocks. */
			btrfs_release_path(path);
			di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;
			}

			if (!need_log_inode(trans, BTRFS_I(di_inode))) {
				btrfs_add_delayed_iput(di_inode);
				break;
			}

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
				log_mode = LOG_INODE_ALL;
			ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
					      log_mode, ctx);
			btrfs_add_delayed_iput(di_inode);
			if (ret)
				goto next_dir_inode;
			if (ctx->log_new_dentries) {
				/* Queue the subdirectory for a later pass. */
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
						       GFP_NOFS);
				if (!new_dir_elem) {
					ret = -ENOMEM;
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
			/* The path was released; restart from the next key. */
			break;
		}
		if (i == nritems) {
			ret = btrfs_next_leaf(log, path);
			if (ret < 0) {
				goto next_dir_inode;
			} else if (ret > 0) {
				ret = 0;
				goto next_dir_inode;
			}
			goto process_leaf;
		}
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
			goto again;
		}
next_dir_inode:
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}
5832
/*
 * Log every parent directory of @inode, i.e. every inode that holds a ref
 * (INODE_REF or INODE_EXTREF item) against it, so that after an unlink or
 * rename in the current transaction a log replay ends up with all the
 * remaining links of the inode.
 *
 * Searches the commit root (no fs tree locks taken).  Returns 0 on success
 * or a negative errno on failure.
 */
static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* Search the commit root, so no tree locking is needed. */
	path->skip_locking = 1;
	path->search_commit_root = 1;

	/* Start at the inode's first ref item. */
	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* Stop once past the inode's INODE_REF/INODE_EXTREF items. */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		/* One item may pack several refs; iterate them all. */
		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				/* Advance past the extref and its inline name. */
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
					extref);
			} else {
				/* For INODE_REF the parent is the key offset. */
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
					       root);
			/*
			 * If the parent directory can't be looked up, return
			 * the error; the caller then falls back to a full
			 * transaction commit (see btrfs_log_inode_parent()).
			 */
			if (IS_ERR(dir_inode)) {
				ret = PTR_ERR(dir_inode);
				goto out;
			}

			if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
				btrfs_add_delayed_iput(dir_inode);
				continue;
			}

			/* Log the parent and any new dentries it gained. */
			if (ctx)
				ctx->log_new_dentries = false;
			ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
					      LOG_INODE_ALL, ctx);
			if (!ret && ctx && ctx->log_new_dentries)
				ret = log_new_dir_dentries(trans, root,
						   BTRFS_I(dir_inode), ctx);
			btrfs_add_delayed_iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
5955
/*
 * Walk up the directory hierarchy starting from the INODE_REF item @path
 * currently points at, logging (LOG_INODE_EXISTS) every ancestor whose
 * generation is from the current transaction, until the subvolume root
 * (BTRFS_FIRST_FREE_OBJECTID) is reached.
 *
 * The path is released before each iget/log step and re-searched after;
 * returns -ENOENT if an expected INODE_REF item is gone by then, 0 on
 * success, or another negative errno on failure.
 */
static int log_new_ancestors(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_log_ctx *ctx)
{
	struct btrfs_key found_key;

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	while (true) {
		struct btrfs_fs_info *fs_info = root->fs_info;
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_key search_key;
		struct inode *inode;
		u64 ino;
		int ret = 0;

		btrfs_release_path(path);

		/* An INODE_REF's key offset is the parent directory's ino. */
		ino = found_key.offset;

		search_key.objectid = found_key.offset;
		search_key.type = BTRFS_INODE_ITEM_KEY;
		search_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, ino, root);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		/* Only log ancestors that are new in this transaction. */
		if (BTRFS_I(inode)->generation >= trans->transid &&
		    need_log_inode(trans, BTRFS_I(inode)))
			ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
					      LOG_INODE_EXISTS, ctx);
		btrfs_add_delayed_iput(inode);
		if (ret)
			return ret;

		/* Reached the subvolume's root directory: done. */
		if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
			break;

		/* Find the first ref of this ancestor to keep walking up. */
		search_key.type = BTRFS_INODE_REF_KEY;
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
		if (ret < 0)
			return ret;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				return -ENOENT;
			leaf = path->nodes[0];
			slot = path->slots[0];
		}

		/* The ref must still exist, otherwise bail out. */
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != search_key.objectid ||
		    found_key.type != BTRFS_INODE_REF_KEY)
			return -ENOENT;
	}
	return 0;
}
6020
6021static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
6022 struct btrfs_inode *inode,
6023 struct dentry *parent,
6024 struct btrfs_log_ctx *ctx)
6025{
6026 struct btrfs_root *root = inode->root;
6027 struct dentry *old_parent = NULL;
6028 struct super_block *sb = inode->vfs_inode.i_sb;
6029 int ret = 0;
6030
6031 while (true) {
6032 if (!parent || d_really_is_negative(parent) ||
6033 sb != parent->d_sb)
6034 break;
6035
6036 inode = BTRFS_I(d_inode(parent));
6037 if (root != inode->root)
6038 break;
6039
6040 if (inode->generation >= trans->transid &&
6041 need_log_inode(trans, inode)) {
6042 ret = btrfs_log_inode(trans, root, inode,
6043 LOG_INODE_EXISTS, ctx);
6044 if (ret)
6045 break;
6046 }
6047 if (IS_ROOT(parent))
6048 break;
6049
6050 parent = dget_parent(parent);
6051 dput(old_parent);
6052 old_parent = parent;
6053 }
6054 dput(old_parent);
6055
6056 return ret;
6057}
6058
/*
 * Log all ancestor directories of @inode (recursively up to the subvolume
 * root) that are new in the current transaction, one chain per INODE_REF.
 *
 * Returns 0 on success, -EMLINK if the inode has extended references (the
 * caller then falls back to a transaction commit), or another negative
 * errno on failure.
 */
static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct dentry *parent,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret;

	/*
	 * With a single hard link we can use the fast path based on the VFS
	 * dentry hierarchy instead of searching the fs tree for all refs.
	 */
	if (inode->vfs_inode.i_nlink < 2)
		return log_new_ancestors_fast(trans, inode, parent, ctx);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0)
		path->slots[0]++;	/* Exact key already handled, skip it. */

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_key found_key;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != ino ||
		    found_key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		/*
		 * Don't deal with extended references; they are rare and
		 * complex to process here, so return -EMLINK and let the
		 * caller fall back to a transaction commit.
		 */
		if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = -EMLINK;
			goto out;
		}

		/*
		 * log_new_ancestors() releases the path while walking up the
		 * hierarchy, so remember the current ref key and restart the
		 * search from it afterwards (the slot bump after "again"
		 * steps past this key on the next pass).
		 */
		memcpy(&search_key, &found_key, sizeof(search_key));

		ret = log_new_ancestors(trans, root, path, ctx);
		if (ret)
			goto out;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
6141
6142
6143
6144
6145
6146
6147
/*
 * Log @inode and, depending on its type and its recent unlink/rename
 * history, also its parent directories and any new dentries.
 *
 * Returns 0 if the inode was logged and the caller can do a log commit,
 * 1 if a full transaction commit is required instead, or
 * BTRFS_NO_LOG_SYNC if no log sync is needed at all.
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  int inode_only,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	bool log_dentries = false;

	/* Tree logging disabled by mount option: force a full commit. */
	if (btrfs_test_opt(fs_info, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/* The root has no refs left (being deleted): full commit. */
	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * Skip the sync entirely if the inode was already logged in this
	 * transaction and has no pending ordered extents in the context, or
	 * if it is unlinked (nlink == 0 — nothing useful to replay).
	 */
	if ((btrfs_inode_in_log(inode, trans->transid) &&
	     list_empty(&ctx->ordered_extents)) ||
	    inode->vfs_inode.i_nlink == 0) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, ctx);
	if (ret)
		goto end_trans;

	/*
	 * Fast path: a regular file that existed before this transaction and
	 * was not involved in an unlink/rename during it needs no parent
	 * directories logged.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation < trans->transid &&
	    inode->last_unlink_trans < trans->transid) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * If the inode was involved in an unlink or rename in the current
	 * transaction, log all inodes that hold a ref against it so that
	 * log replay reconstructs a consistent set of links.
	 */
	if (inode->last_unlink_trans >= trans->transid) {
		ret = btrfs_log_all_parents(trans, inode, ctx);
		if (ret)
			goto end_trans;
	}

	ret = log_all_new_ancestors(trans, inode, parent, ctx);
	if (ret)
		goto end_trans;

	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, inode, ctx);
	else
		ret = 0;
end_trans:
	if (ret < 0) {
		/* Any hard error makes the log unusable: force full commit. */
		btrfs_set_log_full_commit(trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
6272
6273
6274
6275
6276
6277
6278
6279int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6280 struct dentry *dentry,
6281 struct btrfs_log_ctx *ctx)
6282{
6283 struct dentry *parent = dget_parent(dentry);
6284 int ret;
6285
6286 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6287 LOG_INODE_ALL, ctx);
6288 dput(parent);
6289
6290 return ret;
6291}
6292
6293
6294
6295
6296
/*
 * Called at mount time: read the log root tree and replay every per
 * subvolume log tree found in it.  The logs are walked in stages (pin all
 * extents, replay inodes, replay dir index items, replay everything else)
 * and the function finishes with a transaction commit.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	/* Stage 1: pin all extents referenced by the log root tree. */
	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	/* Iterate all log roots, from the highest key offset downwards. */
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_tree_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		/* The key offset is the objectid of the subvolume to replay into. */
		wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
						   true);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);

			/*
			 * The target subvolume no longer exists (-ENOENT),
			 * presumably it was deleted.  We can't replay this
			 * log, but still pin its root node so its space is
			 * not reused while the log tree remains on disk.
			 */
			if (ret == -ENOENT)
				ret = btrfs_pin_extent_for_log_replay(trans,
							log->node->start,
							log->node->len);
			btrfs_put_root(log);

			if (!ret)
				goto next;
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
		if (ret)
			/* The loop must continue to drop the root refs below. */
			btrfs_handle_fs_error(fs_info, ret,
				"failed to record the log root in transaction");
		else
			ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * Replay may have inserted new inode items, so the
			 * subvolume's highest used objectid may have changed;
			 * re-derive the free-objectid hint from the tree.
			 */
			ret = btrfs_init_root_free_objectid(root);
		}

		wc.replay_dest->log_root = NULL;
		btrfs_put_root(wc.replay_dest);
		btrfs_put_root(log);

		if (ret)
			goto error;
next:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	btrfs_release_path(path);

	/* Stage 2: replay inode items. */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}

	/* Stages 3 and 4: replay dir index items, then everything else. */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* Commit everything that was replayed. */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	btrfs_put_root(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	btrfs_free_path(path);
	return ret;
}
6469
6470
6471
6472
6473
6474
6475
6476
6477
6478
6479
6480
6481void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6482 struct btrfs_inode *dir, struct btrfs_inode *inode,
6483 int for_rename)
6484{
6485
6486
6487
6488
6489
6490
6491
6492
6493
6494
6495 mutex_lock(&inode->log_mutex);
6496 inode->last_unlink_trans = trans->transid;
6497 mutex_unlock(&inode->log_mutex);
6498
6499
6500
6501
6502
6503 if (dir->logged_trans == trans->transid)
6504 return;
6505
6506
6507
6508
6509
6510 if (inode->logged_trans == trans->transid)
6511 return;
6512
6513
6514
6515
6516
6517
6518
6519
6520 if (for_rename)
6521 goto record;
6522
6523
6524 return;
6525
6526record:
6527 mutex_lock(&dir->log_mutex);
6528 dir->last_unlink_trans = trans->transid;
6529 mutex_unlock(&dir->log_mutex);
6530}
6531
6532
6533
6534
6535
6536
6537
6538
6539
6540
6541
6542
6543
/*
 * Record, on the parent directory of a destroyed snapshot, that an unlink
 * happened in the current transaction.  A later fsync of @dir in this
 * transaction then takes the unlink-aware logging paths rather than a
 * plain inode log (see the last_unlink_trans checks in
 * btrfs_log_inode_parent()).
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
6551
6552
6553
6554
6555
6556void btrfs_log_new_name(struct btrfs_trans_handle *trans,
6557 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6558 struct dentry *parent)
6559{
6560 struct btrfs_log_ctx ctx;
6561
6562
6563
6564
6565
6566 if (!S_ISDIR(inode->vfs_inode.i_mode))
6567 inode->last_unlink_trans = trans->transid;
6568
6569
6570
6571
6572
6573 if (!inode_logged(trans, inode) &&
6574 (!old_dir || !inode_logged(trans, old_dir)))
6575 return;
6576
6577
6578
6579
6580
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
6591
6592 if (old_dir)
6593 old_dir->logged_trans = 0;
6594
6595 btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
6596 ctx.logging_new_name = true;
6597
6598
6599
6600
6601
6602
6603
6604 btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
6605}
6606
6607