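/*
 * Ordered extents track ranges of a file that have been handed to the data
 * writeback code.  Each entry lives in a per-inode rb-tree (keyed by file
 * offset) from the moment the write is started until the data and checksums
 * are on disk and the metadata has been updated.
 */
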
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

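/*
 * Return the first byte past the end of @entry, clamped to (u64)-1 when
 * file_offset + len would overflow.
 */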
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}

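/*
 * Insert @node into the tree of ordered extents, keyed by file offset.
 * Returns the conflicting node if an ordered extent already covers
 * @file_offset, or NULL after a successful insertion.
 */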
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
                                    u64 offset)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

        btrfs_panic(fs_info, errno,
                    "Inconsistency in ordered tree at offset %llu", offset);
}

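/*
 * Find the ordered extent that contains @file_offset.  If there is no
 * exact match, return NULL and point *prev_ret at the closest node found
 * during the search so callers can keep walking from there.
 */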
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

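/* Return 1 if @file_offset falls inside @entry, 0 otherwise. */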
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

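/*
 * Look up the ordered extent covering @file_offset in the per-inode tree.
 * The last hit is cached in tree->last to speed up repeated lookups; when
 * there is no exact match the closest node found by the search is used.
 */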
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

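/*
 * Allocate and insert a new ordered extent into the per-inode tree and the
 * per-root list.  @file_offset is the logical offset in the file, @start
 * is the disk location the data will be written to, @len is the length in
 * the file and @disk_len the length on disk (they differ for compressed
 * extents).  The tree holds one reference on the new entry.
 */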
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int dio, int compress_type)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
            !(type == BTRFS_ORDERED_NOCOW))
                entry->csum_bytes_left = disk_len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = igrab(inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        if (dio)
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

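        /* one ref for the tree */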
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);
        INIT_LIST_HEAD(&entry->log_list);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                ordered_data_tree_panic(inode, -EEXIST, file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&root->fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root,
                              &root->fs_info->ordered_roots);
                spin_unlock(&root->fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
                                 u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int compress_type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          compress_type);
}

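/*
 * Attach a struct btrfs_ordered_sum to an ordered extent.  The checksums
 * are cached here until the ordered extent finishes and they can be
 * inserted into the csum tree.  Anyone waiting for all csums to be
 * accounted is woken once csum_bytes_left reaches zero.
 */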
void btrfs_add_ordered_sum(struct inode *inode,
                           struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        WARN_ON(entry->csum_bytes_left < sum->len);
        entry->csum_bytes_left -= sum->len;
        if (entry->csum_bytes_left == 0)
                wake_up(&entry->wait);
        spin_unlock_irq(&tree->lock);
}

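/*
 * Account for finished IO across a given range of the file.  The IO may
 * span several ordered extents.  Returns 1 the first time an ordered
 * extent becomes fully complete, 0 otherwise; test_and_set_bit on
 * BTRFS_ORDERED_IO_DONE guarantees the 1 is only returned once per extent.
 *
 * *file_offset is advanced to one byte past the range accounted here so
 * callers can walk forward through the file.  On full completion, *cached
 * (if provided) is set to the ordered extent with an extra reference held.
 */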
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                         struct btrfs_ordered_extent **cached,
                                         u64 *file_offset, u64 io_size,
                                         int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
        unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, *file_offset)) {
                ret = 1;
                goto out;
        }

        dec_start = max(*file_offset, entry->file_offset);
        dec_end = min(*file_offset + io_size, entry->file_offset +
                      entry->len);
        *file_offset = dec_end;
        if (dec_start > dec_end) {
                btrfs_crit(BTRFS_I(inode)->root->fs_info,
                           "bad ordering dec_start %llu end %llu",
                           dec_start, dec_end);
        }
        to_dec = dec_end - dec_start;
        if (to_dec > entry->bytes_left) {
                btrfs_crit(BTRFS_I(inode)->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, to_dec);
        }
        entry->bytes_left -= to_dec;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                if (waitqueue_active(&entry->wait))
                        wake_up(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

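/*
 * Same accounting as above, but for IO that does not span ordered extents.
 * @io_size bytes are subtracted from the entry covering @file_offset (or
 * from *cached if the caller already holds it).  Returns 1 only for the
 * first caller that completes the ordered extent, 0 otherwise.
 */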
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                btrfs_crit(BTRFS_I(inode)->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);
        }
        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                if (waitqueue_active(&entry->wait))
                        wake_up(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

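/*
 * Take a reference on every ordered extent of the inode and collect them
 * on @logged_list so the tree-log code can wait for them during fsync.
 */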
void btrfs_get_logged_extents(struct inode *inode,
                              struct list_head *logged_list)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_ordered_extent *ordered;
        struct rb_node *n;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                if (!list_empty(&ordered->log_list))
                        continue;
                list_add_tail(&ordered->log_list, logged_list);
                atomic_inc(&ordered->refs);
        }
        spin_unlock_irq(&tree->lock);
}

void btrfs_put_logged_extents(struct list_head *logged_list)
{
        struct btrfs_ordered_extent *ordered;

        while (!list_empty(logged_list)) {
                ordered = list_first_entry(logged_list,
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                btrfs_put_ordered_extent(ordered);
        }
}

void btrfs_submit_logged_extents(struct list_head *logged_list,
                                 struct btrfs_root *log)
{
        int index = log->log_transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        list_splice_tail(logged_list, &log->logged_list[index]);
        spin_unlock_irq(&log->log_extents_lock[index]);
}

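/*
 * Wait for every ordered extent attached to the log transaction @transid.
 * Writeback is started for any extent that is neither done nor a direct
 * IO write, then we wait for BTRFS_ORDERED_IO_DONE and drop our reference.
 */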
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;
        int index = transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        while (!list_empty(&log->logged_list[index])) {
                ordered = list_first_entry(&log->logged_list[index],
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                spin_unlock_irq(&log->log_extents_lock[index]);

                if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
                        struct inode *inode = ordered->inode;
                        u64 start = ordered->file_offset;
                        u64 end = ordered->file_offset + ordered->len - 1;

                        WARN_ON(!inode);
                        filemap_fdatawrite_range(inode->i_mapping, start, end);
                }
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));

                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;
        int index = transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        while (!list_empty(&log->logged_list[index])) {
                ordered = list_first_entry(&log->logged_list[index],
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                spin_unlock_irq(&log->log_extents_lock[index]);
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
}

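/*
 * Drop a reference on an ordered extent.  When the last reference goes
 * away the inode's delayed iput is queued, any cached checksum records
 * are freed and the entry itself is returned to the slab cache.
 */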
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(entry->inode, entry);

        if (atomic_dec_and_test(&entry->refs)) {
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

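/*
 * Remove an ordered extent from the per-inode tree and the per-root list.
 * No references are dropped here; BTRFS_ORDERED_COMPLETE is set and any
 * waiters are woken up.
 */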
void btrfs_remove_ordered_extent(struct inode *inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&root->fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&root->fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered->inode, ordered, 1);
        complete(&ordered->completion);
}

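/*
 * Wait on the ordered extents of a root.  Up to @nr of them are flushed
 * and waited for, or all of them when @nr is -1.  Returns the number of
 * ordered extents that were processed.
 */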
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
        struct list_head splice, works;
        struct btrfs_ordered_extent *ordered, *next;
        int count = 0;

        INIT_LIST_HEAD(&splice);
        INIT_LIST_HEAD(&works);

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);
                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                atomic_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_flush_delalloc_helper,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(root->fs_info->flush_workers,
                                 &ordered->flush_work);

                cond_resched();
                spin_lock(&root->ordered_extent_lock);
                if (nr != -1)
                        nr--;
                count++;
        }
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}

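/*
 * Walk all roots that currently have ordered extents pending and wait on
 * up to @nr ordered extents across them (all of them when @nr is -1).
 */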
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_root *root;
        struct list_head splice;
        int done;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_fs_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr);
                btrfs_put_fs_root(root);

                spin_lock(&fs_info->ordered_root_lock);
                if (nr != -1) {
                        nr -= done;
                        WARN_ON(nr < 0);
                }
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
}

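/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the
 * pages in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent.
 */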
void btrfs_start_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry,
                                int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        trace_btrfs_ordered_extent_start(inode, entry);

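        /*
         * Pages in the range can be dirty, clean or under writeback.  We
         * start IO on any dirty ones so the wait below doesn't stall
         * waiting for the flusher thread to find them.
         */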
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

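/*
 * Used to wait on ordered extents across a large range of bytes.  Returns
 * 0 on success, or an error such as -EIO when writeback of the range
 * failed.
 */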
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        int ret = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

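        /*
         * Start IO across the range first to instantiate any delalloc
         * extents.
         */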
        ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
        if (ret)
                return ret;

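        /*
         * If this inode may have async (compressed) extents in flight, the
         * flush above may not have covered everything, so issue writeback
         * on the range once more before waiting on it.
         */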
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                     &BTRFS_I(inode)->runtime_flags)) {
                ret = filemap_fdatawrite_range(inode->i_mapping, start,
                                               orig_end);
                if (ret)
                        return ret;
        }
        ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
        if (ret)
                return ret;

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (ret || end == 0 || end == start)
                        break;
                end--;
        }
        return ret;
}

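/*
 * Find an ordered extent corresponding to @file_offset.  Returns NULL if
 * nothing is found, otherwise a reference is taken on the extent and it
 * is returned.
 */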
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                atomic_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

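/*
 * Same as btrfs_lookup_ordered_extent() but it takes a length, so it can
 * return an ordered extent that overlaps any part of the range
 * [file_offset, file_offset + len).
 */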
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
                                                        u64 file_offset,
                                                        u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                atomic_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}

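/*
 * Return the ordered extent covering @file_offset, or the closest one the
 * tree search lands on when there is no exact match; NULL if nothing is
 * found.  A reference is taken on the returned extent.
 */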
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

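/*
 * After an ordered extent (or a truncate at @offset) has finished, decide
 * whether disk_i_size can be moved forward to cover the fully written part
 * of the file.  Returns 0 if disk_i_size was updated, 1 if it was left
 * alone (for example because earlier ordered extents are still pending).
 */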
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size = i_size_read(inode);
        struct rb_node *node;
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;

        spin_lock_irq(&tree->lock);
        if (ordered) {
                offset = entry_end(ordered);
                if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
                        offset = min(offset,
                                     ordered->file_offset +
                                     ordered->truncated_len);
        } else {
                offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
        }
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        if (disk_i_size > i_size) {
                BTRFS_I(inode)->disk_i_size = i_size;
                ret = 0;
                goto out;
        }

        if (disk_i_size == i_size)
                goto out;

        if (offset <= disk_i_size &&
            (!ordered || ordered->outstanding_isize <= disk_i_size))
                goto out;

        if (ordered) {
                node = rb_prev(&ordered->rb_node);
        } else {
                prev = tree_search(tree, offset);

                if (prev) {
                        test = rb_entry(prev, struct btrfs_ordered_extent,
                                        rb_node);
                        BUG_ON(offset_in_entry(test, offset));
                }
                node = prev;
        }
        for (; node; node = rb_prev(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
                        continue;
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= i_size)
                        break;
                if (entry_end(test) > disk_i_size) {
                        if (test->outstanding_isize < offset)
                                test->outstanding_isize = offset;
                        if (ordered &&
                            ordered->outstanding_isize >
                            test->outstanding_isize)
                                test->outstanding_isize =
                                        ordered->outstanding_isize;
                        goto out;
                }
        }
        new_i_size = min_t(u64, offset, i_size);

        if (ordered && ordered->outstanding_isize > new_i_size)
                new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
        BTRFS_I(inode)->disk_i_size = new_i_size;
        ret = 0;
out:
        if (ordered)
                set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
        spin_unlock_irq(&tree->lock);
        return ret;
}

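/*
 * Search the checksums cached on the ordered extent that covers @offset
 * for the csums of the disk range starting at @disk_bytenr and copy them
 * into @sum.  This is needed because pages can be reclaimed before their
 * checksums make it into the csum btree.  Returns the number of checksums
 * found, or 0 if no ordered extent covers the offset.
 */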
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u32 *sum, int len)
{
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
        int index = 0;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 0;

        spin_lock_irq(&tree->lock);
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr &&
                    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
                        i = (disk_bytenr - ordered_sum->bytenr) >>
                            inode->i_sb->s_blocksize_bits;
                        num_sectors = ordered_sum->len >>
                                      inode->i_sb->s_blocksize_bits;
                        num_sectors = min_t(int, len - index, num_sectors - i);
                        memcpy(sum + index, ordered_sum->sums + i,
                               num_sectors);

                        index += (int)num_sectors;
                        if (index == len)
                                goto out;
                        disk_bytenr += num_sectors * sectorsize;
                }
        }
out:
        spin_unlock_irq(&tree->lock);
        btrfs_put_ordered_extent(ordered);
        return index;
}

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                        sizeof(struct btrfs_ordered_extent), 0,
                                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}

void ordered_data_exit(void)
{
        if (btrfs_ordered_extent_cache)
                kmem_cache_destroy(btrfs_ordered_extent_cache);
}