/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc.
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * For inodes on the blockdev pseudo-fs, use the backing_dev_info
	 * of the underlying block device rather than the superblock's.
	 */
	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}
/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wake up the flusher thread; it will do the old-data writeback
	 * on its own.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode = WB_SYNC_NONE;
	work->nr_pages = nr_pages;
	work->range_cyclic = range_cyclic;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true);
}
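
/*
 * Example (illustrative only, not a call site in this file): a caller that
 * wants to opportunistically flush about 1024 pages from a device would do
 *
 *	bdi_start_writeback(bdi, 1024);
 *
 * The work item is processed asynchronously by the flusher thread, so no IO
 * is guaranteed to have completed, or even started, by the time this returns.
 */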

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}
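
/*
 * Illustrative caller (assumed; the real call site lives outside this file):
 * the dirty-page throttling path, e.g. balance_dirty_pages() in
 * mm/page-writeback.c, kicks background writeback along these lines once
 * the background dirty threshold has been exceeded:
 *
 *	bdi_start_background_writeback(bdi);
 *
 * which merely wakes the flusher; wb_check_background_flush() below then
 * does the actual work.
 */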

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from causing problems.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
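
/*
 * Worked example for the 32-bit guard above (hypothetical numbers): with
 * 32-bit jiffies and HZ=1000, the counter wraps roughly every 49.7 days
 * (2^32 milliseconds).  An inode whose dirtied_when was stamped shortly
 * before a wrap appears to be dirtied "in the future" afterwards, so
 * time_after() alone would report it as newer than any cutoff forever.
 * The extra time_before_eq(dirtied_when, jiffies) test rejects such
 * wrapped-around timestamps.
 */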

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                              |
 *                                              +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, &wb->b_io);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has an active reference on the inode, or the inode has I_WILL_FREE
 * set.
 *
 * If wbc->sync_mode is WB_SYNC_ALL then this is a "data integrity" write and
 * we wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on b_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode_lock);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 * Return 1 if the caller's writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		/*
		 * Don't bother with new inodes or inodes being freed: the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		/*
		 * Was this inode dirtied after writeback started?  Skipping
		 * such inodes keeps sync from doing extra work and livelocking.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

void writeback_inodes_wb(struct bdi_writeback *wb,
		struct writeback_control *wbc)
{
	int ret = 0;

	if (!wbc->wb_start)
		wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_lock);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode = work->sync_mode,
		.older_than_this = NULL,
		.for_kupdate = work->for_kupdate,
		.for_background = work->for_background,
		.range_cyclic = work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	long write_chunk;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	/*
	 * For WB_SYNC_NONE writeback, work in chunks of MAX_WRITEBACK_PAGES
	 * so that the dirty state gets reevaluated periodically and other
	 * work can be picked up.  WB_SYNC_ALL is a data-integrity sync and
	 * must cover everything in one pass, so it is not chunked; livelock
	 * is avoided instead by skipping inodes dirtied after wb_start (see
	 * writeback_sb_inodes()).
	 */
	if (wbc.sync_mode == WB_SYNC_NONE)
		write_chunk = MAX_WRITEBACK_PAGES;
	else
		write_chunk = LONG_MAX;

	wbc.wb_start = jiffies; /* livelock avoidance */
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		trace_wbc_writeback_start(&wbc, wb->bdi);
		if (work->sb)
			__writeback_inodes_sb(work->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		trace_wbc_writeback_written(&wbc, wb->bdi);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < write_chunk)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = wb_inode(wb->b_more_io.prev);
			trace_wbc_writeback_wait(&wbc, wb->bdi);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh()) {
		struct wb_writeback_work work = {
			.nr_pages = LONG_MAX,
			.sync_mode = WB_SYNC_NONE,
			.for_background = 1,
			.range_cyclic = 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages = nr_pages,
			.sync_mode = WB_SYNC_NONE,
			.for_kupdate = 1,
			.range_cyclic = 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false);
	}
	rcu_read_unlock();
}
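
/*
 * Illustrative caller (assumed, not defined in this file): the sync(2)
 * path wakes all flusher threads before doing the blocking per-superblock
 * sync, roughly:
 *
 *	wakeup_flusher_threads(0);
 *
 * Passing 0 means "write back the whole world", as described above.
 */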

/*
 * For the block_dump debugging facility: report which task dirtied
 * which inode, so that unexpected disk activity can be tracked down.
 */
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES memory
 * optimisation - we don't want to prevent common cases from being
 * optimised away.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;
	bool wakeup_bdi = false;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out;
		}
		if (inode->i_state & I_FREEING)
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);

	if (wakeup_bdi)
		bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);
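
/*
 * Example (illustrative): filesystems normally do not call
 * __mark_inode_dirty() directly.  After updating in-core inode state they
 * use the wrappers from include/linux/fs.h, e.g.:
 *
 *	inode->i_mtime = CURRENT_TIME;
 *	mark_inode_dirty(inode);
 *
 * where mark_inode_dirty() expands to __mark_inode_dirty(inode, I_DIRTY),
 * and mark_inode_dirty_sync() to __mark_inode_dirty(inode, I_DIRTY_SYNC).
 */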

/*
 * Wait for writeback on all the inodes of a superblock to complete.  This is
 * the data-integrity tail of sync_inodes_sb(): writeback has already been
 * issued, and here we wait on every inode that still has pages under
 * writeback.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb = sb,
		.sync_mode = WB_SYNC_NONE,
		.done = &done,
		.nr_pages = nr,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
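
/*
 * Example (illustrative; nr_dirty is a hypothetical caller-side estimate):
 * a filesystem that wants to push out about half of its dirty pages while
 * already holding s_umount for reading could do
 *
 *	writeback_inodes_sb_nr(sb, nr_dirty / 2);
 *
 * This returns once the queued work item has been processed, but since the
 * work is WB_SYNC_NONE it does not wait for the IO itself.
 */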

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * writeback_inodes_sb_nr_if_idle - start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb = sb,
		.sync_mode = WB_SYNC_ALL,
		.nr_pages = LONG_MAX,
		.range_cyclic = 0,
		.done = &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
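
/*
 * Illustrative caller (assumed; the locking shown is required by the
 * WARN_ON above and is taken by the real callers at a higher level):
 *
 *	down_read(&sb->s_umount);
 *	sync_inodes_sb(sb);
 *	up_read(&sb->s_umount);
 *
 * This is conceptually what the sync(2) and umount paths do for each
 * superblock that needs a data-integrity sync.
 */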

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
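
/*
 * Example (illustrative): code that must push a dirty inode out before
 * dropping it could do
 *
 *	err = write_inode_now(inode, 1);
 *
 * A non-zero @sync selects WB_SYNC_ALL and additionally waits for I_SYNC
 * to clear via inode_sync_wait(), so the inode and its pages have been
 * written out by the time this returns.
 */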

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
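
/*
 * Example (illustrative): an fsync implementation that has already written
 * and waited on the data pages can flush just the inode itself with
 *
 *	ret = sync_inode_metadata(inode, 1);
 *
 * Since nr_to_write is 0 above, only the inode is passed to ->write_inode();
 * its dirty pages are left untouched.
 */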