// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

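/*
 * Return the end byte offset of an ordered extent, saturating at (u64)-1 on
 * overflow.
 */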
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
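/*
 * returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */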
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}
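/*
 * look for a given offset in the tree, and if it can't be found return the
 * closest container
 */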
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
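/*
 * helper to check if a given offset is inside a given entry
 */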
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

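/*
 * Check whether [file_offset, file_offset + len) overlaps the byte range
 * covered by this ordered extent.
 */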
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
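/*
 * Look up the first ordered extent that contains this offset, otherwise the
 * closest one before it.  The result is cached in tree->last.
 */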
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
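/*
 * allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the real block number for the extent in the disk.
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */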
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
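/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */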
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}
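/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */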
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size,
					 int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
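/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */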
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
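/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */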
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
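/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */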
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;
	bool dec_pending_ordered = false;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
		dec_pending_ordered = true;
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (dec_pending_ordered) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}
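/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */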
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 total_done = 0;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}
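/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */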
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
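/*
 * Used to wait on ordered extents across a large range of bytes.
 */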
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/*
	 * start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
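/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */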
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
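/*
 * Since the DIO code tries to lock a wide area we need to look for any
 * ordered extents that exist in the range, rather than just the start of
 * the range.
 */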
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
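/*
 * lookup and return any extent before 'file_offset'.  NULL is returned if
 * none is found
 */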
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
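/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */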
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct cover this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this undealt
		 * i_size. Or we will not know the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
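/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */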
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}