#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

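/*
 * Private status code: the extent in question is reachable from a root or
 * inode other than the one the caller asked about, i.e. it is shared.
 */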
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

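/*
 * Check whether the given file extent item references the extent offset we
 * are resolving. On a match, prepend a new entry to *eie. Returns 0 on a
 * match, 1 if the position lies outside this item, < 0 on allocation failure.
 */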
static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
			      struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
			     u64 extent_item_pos,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

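	/*
	 * From a shared data backref we only know the leaf, not the key.
	 * Walk every item in the leaf and collect the file extent items
	 * that reference our extent.
	 */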
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

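/*
 * This structure records a preliminary backref encountered on the way up
 * towards the root: either a direct ref (parent bytenr known) or an
 * indirect one (root/key/level known) that still needs to be resolved.
 */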
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct __prelim_ref),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_prelim_ref_exit(void)
{
	if (btrfs_prelim_ref_cache)
		kmem_cache_destroy(btrfs_prelim_ref_cache);
}
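/*
 * Queue a preliminary backref for later resolution. For an indirect ref,
 * @key is the key to search for in the tree rooted at @root_id; for a
 * direct ref, @parent is the bytenr of the referencing tree block.
 * Backrefs from the data reloc tree are deliberately skipped.
 */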
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count,
			    gfp_t gfp_mask)
{
	struct __prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct __prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

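	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. If we are beyond the last item of the
	 * leaf, step to the next (old) leaf before scanning.
	 */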
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		if (time_seq == (u64)-1)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == (u64)-1)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
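/*
 * Resolve an indirect backref (root_id, key, level) to the bytenr of the
 * tree block(s) that actually hold the reference; every parent found is
 * added to @parents.
 */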
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path, u64 time_seq,
				  struct __prelim_ref *ref,
				  struct ulist *parents,
				  const u64 *extent_item_pos, u64 total_refs)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == (u64)-1)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	path->lowest_level = level;
	if (time_seq == (u64)-1)
		ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
					0, 0);
	else
		ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
					    time_seq);

	srcu_read_unlock(&fs_info->subvol_srcu, index);

	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, ref, level, time_seq,
			      extent_item_pos, total_refs);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}
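/*
 * Resolve all indirect backrefs on the list to their parent tree blocks.
 * A ref with multiple parents is split: the first parent is stored in the
 * ref itself, every further parent gets a copy inserted into the list.
 */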
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct btrfs_path *path, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos, u64 total_refs,
				   u64 root_objectid)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

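	/*
	 * The _safe iteration lets us insert directly after the current item
	 * without disturbing the walk, and we are also free to re-assign ref
	 * during the iteration.
	 */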
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)
			continue;
		if (ref->count == 0)
			continue;
		if (root_objectid && ref->root_id != root_objectid) {
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
					     parents, extent_item_pos,
					     total_refs);

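		/*
		 * We can only tolerate ENOENT; any other error is reported
		 * back to the caller immediately.
		 */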
		if (err == -ENOENT) {
			continue;
		} else if (err) {
			ret = err;
			goto out;
		}

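		/* we put the first parent into the ref at hand */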
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = node ?
			(struct extent_inode_elem *)(uintptr_t)node->aux : NULL;

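		/* additional parents require new refs being added here */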
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = (struct extent_inode_elem *)
							(uintptr_t)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}

static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

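/*
 * Read tree blocks and add keys where required: refs created from shared
 * block backrefs arrive without a search key, so fetch the first key of
 * the referenced block to make them mergeable.
 */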
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     0);
		if (IS_ERR(eb)) {
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

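/*
 * Merge backrefs for the same block and adjust the counts accordingly.
 * mode = 1: merge identical keys, if a key is set (run before indirect
 *           refs are resolved);
 * mode = 2: merge refs with identical parents (run afterwards).
 */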
static void __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;
			struct extent_inode_elem *eie;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (!ref_for_same_block(ref1, ref2))
				continue;
			if (mode == 1) {
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
			} else {
				if (ref1->parent != ref2->parent)
					continue;
			}

			eie = ref1->inode_list;
			while (eie && eie->next)
				eie = eie->next;
			if (eie)
				eie->next = ref2->inode_list;
			else
				ref1->inode_list = ref2->inode_list;
			ref1->count += ref2->count;

			list_del(&ref2->list);
			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
		}
	}
}

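/*
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the preliminary ref list.
 */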
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs, u64 *total_refs,
			      u64 inum)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	spin_lock(&head->lock);
	list_for_each_entry(node, &head->ref_list, list) {
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		*total_refs += (node->ref_mod * sgn);
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

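			/*
			 * Found an inum that doesn't match our known inum,
			 * so we know the extent is shared.
			 */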
			if (inum && ref->objectid != inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			break;
	}
	spin_unlock(&head->lock);
	return ret;
}

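/*
 * Add all inline backrefs of the extent item at path->slots[0] to the
 * preliminary ref list.
 */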
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs,
			     u64 *total_refs, u64 inum)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

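	/*
	 * enumerate all inline refs
	 */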
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	*total_refs += btrfs_extent_refs(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
					       *info_level + 1, offset,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (inum && key.objectid != inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

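/*
 * Add all non-inline (keyed) backrefs of @bytenr to the preliminary ref
 * list; the path is expected to point at the extent item.
 */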
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs, u64 inum)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
					       info_level + 1, key.offset,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
					       bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (inum && key.objectid != inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

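/*
 * Add all backrefs (inline, keyed and delayed) for the given bytenr to the
 * refs ulist, merge duplicates and resolve indirect refs to their parent
 * bytenr. Roots found along the way are added to @roots.
 *
 * NOTE: this can return values > 0.
 *
 * If time_seq is (u64)-1, delayed refs are not searched and the walk
 * behaves much like the trans == NULL case, except that the current root
 * is used instead of the commit root (this special case exists for qgroups
 * searching roots during a transaction commit).
 */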
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     u64 root_objectid, u64 inum)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;
	struct extent_inode_elem *eie = NULL;
	u64 total_refs = 0;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == (u64)-1)
		path->skip_locking = 1;

again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != (u64)-1) {
#else
	if (trans && time_seq != (u64)-1) {
#endif
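		/*
		 * Look if there are updates for this ref queued and lock
		 * the head.
		 */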
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

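				/*
				 * Mutex was contended; block until it's
				 * released and try again.
				 */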
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed, &total_refs,
						 inum);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs,
						&total_refs, inum);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs, inum);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos, total_refs,
				      root_objectid);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (root_objectid && ref->root_id != root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}
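			/* no parent == root of tree */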
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, 0);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				btrfs_tree_read_lock(eb);
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
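				/*
				 * We've recorded that parent, so we must
				 * extend its inode list here.
				 */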
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

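/*
 * Finds all leafs with a reference to the specified combination of bytenr
 * and offset. The resulting ulist must be freed with free_leaf_list();
 * the aux field of each node may carry an inode list.
 *
 * Returns 0 on success, < 0 on error.
 */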
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, NULL, extent_item_pos, 0, 0);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

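/*
 * Walk all backrefs for a given extent to find all roots that reference
 * this extent. Walking a backref means finding all extents that reference
 * this extent and in turn walking their backrefs, until only root nodes
 * are left. Found roots are added to the roots ulist, which the caller
 * must free with ulist_free.
 *
 * Returns 0 on success, < 0 on error.
 */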
static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info, u64 bytenr,
				  u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr,
					time_seq, tmp, *roots, NULL, 0, 0);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

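/*
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * Uses the backref walking code but short-circuits as soon as it finds a
 * root or inode that doesn't match the one passed in. This is a significant
 * performance win for callers (such as fiemap) that only want to know
 * whether an extent is shared and don't need the full ref count.
 *
 * Returns 1 if the extent is shared, 0 if it is not, < 0 on error.
 */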
int btrfs_check_shared(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 root_objectid,
		       u64 inum, u64 bytenr)
{
	struct ulist *tmp = NULL;
	struct ulist *roots = NULL;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;

	tmp = ulist_alloc(GFP_NOFS);
	roots = ulist_alloc(GFP_NOFS);
	if (!tmp || !roots) {
		ulist_free(tmp);
		ulist_free(roots);
		return -ENOMEM;
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &elem);
	else
		down_read(&fs_info->commit_root_sem);
	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, root_objectid, inum);
		if (ret == BACKREF_FOUND_SHARED) {
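			/* this is the only condition under which we return 1 */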
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}
	if (trans)
		btrfs_put_tree_mod_seq(fs_info, &elem);
	else
		up_read(&fs_info->commit_root_sem);
	ulist_free(tmp);
	ulist_free(roots);
	return ret;
}

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
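			/*
			 * If the item at the given offset is not found,
			 * btrfs_search_slot points us at the slot where it
			 * should be inserted, which may be past the last
			 * item of the leaf. Move on to the next leaf; if
			 * there is none, the extref does not exist.
			 */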
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

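		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If not, there are no more
		 * extrefs for this inode in the tree and we can exit.
		 */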
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

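/*
 * this iterates to turn a name (from iref/extref) into a full filesystem
 * path. elements of the path are separated by '/' and the path is
 * guaranteed to be 0-terminated. the path is only given within the current
 * file system, so it never starts with a '/'. the dest buffer is filled
 * backwards, from the end towards the front; on a short buffer, the
 * returned pointer is still decremented as if the names had been written,
 * so callers can detect truncation by checking for a result smaller than
 * dest and can tell how much space the full path would have needed.
 */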
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

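		/* regular exit ahead: an inode referencing itself is the
		 * subvolume's root directory */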
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];

		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

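/*
 * this makes the path point to (logical EXTENT_ITEM *); it returns
 * BTRFS_EXTENT_FLAG_DATA for data and BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks via *flags_ret, and < 0 on error.
 */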
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->extent_root->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		pr_debug("logical %llu is not within any extent\n", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}

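/*
 * helper to iterate the inline refs of an extent item. *ptr must point to
 * a 0 value for the first call and is advanced to the next inline ref on
 * each call. returns 0 when a ref was returned and more remain, 1 when the
 * last ref of the item was returned and -ENOENT if the item holds no
 * inline refs at all.
 */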
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_key *key,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
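		/* first call */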
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
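				/* a skinny metadata extent */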
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1;

	return 0;
}

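/*
 * reads the tree block backref for an extent. tree level and root are
 * returned through out_level and out_root. ptr must point to a 0 value for
 * the first call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide
 * or < 0 on error.
 */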
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

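	/* we can treat both ref types equally here */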
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}
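/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters. when the iterator function returns a non-zero
 * value, iteration stops.
 */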
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
		 extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	} else {
		down_read(&fs_info->commit_root_sem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
					     tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#llx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs((struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);

			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur, found_key.objectid,
				 fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		/*
		 * The path was released above, so the item must be read from
		 * the cloned extent buffer, not from path->nodes[0].
		 */
		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

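/*
 * returns 0 if the path could be dumped (which might have been truncated)
 * or < 0 in case of an error
 */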
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

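/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided in ipath->fspath->val[]. when it returns, there are
 * ipath->fspath->elem_cnt paths available; paths that did not fit are
 * accounted in ipath->fspath->elem_missed. the ipath must have been
 * initialized with init_ipath() and is freed with free_ipath().
 */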
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = vmalloc(alloc_bytes);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

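/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */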
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		/* fspath was vmalloc'ed by init_data_container, use vfree */
		vfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	vfree(ipath->fspath);
	kfree(ipath);
}