/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size (1024 pages with 4KB pages, i.e.
 * PAGE_CACHE_SHIFT == 12)
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
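
/*
 * Callers that wait for completion put a wb_writeback_work on the stack
 * and set ->done (see sync_inodes_sb() below); fire-and-forget callers
 * kzalloc() one and the flusher kfree()s it after execution (see
 * wb_do_writeback()).
 */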

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

/*
 * Block device inodes live on the "bdev" pseudo-filesystem and carry
 * their bdi in the mapping; all other inodes use the bdi their
 * superblock was set up with.
 */
static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}
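
/*
 * Illustration: wb_inode() is plain list_entry()/container_of().  Given
 * the list_head embedded at inode->i_wb_list,
 *
 *	struct inode *inode = wb_inode(wb->b_dirty.prev);
 *
 * recovers the inode enclosing the oldest entry on the b_dirty list
 * (recently dirtied inodes are moved to the head, so ->prev is oldest).
 */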

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	spin_unlock_bh(&bdi->wb_lock);

	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}
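
/*
 * Example (illustrative): the laptop-mode timer in the block layer kicks
 * opportunistic writeback roughly like
 *
 *	bdi_start_writeback(bdi, nr_dirty_pages, WB_REASON_LAPTOP_TIMER);
 *
 * where the wb_reason value only matters for tracing.
 */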

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
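
/*
 * Worked example (assuming HZ=1000 on 32-bit): jiffies wraps every ~49.7
 * days, and time_after() can only order values less than half that range
 * (~24.8 days) apart.  An inode whose dirtied_when got stuck more than
 * 2^31 ticks in the past starts to compare as "dirtied in the future",
 * which would exempt it from expiry forever; the extra
 * time_before_eq(dirtied_when, jiffies) check rejects such stuck values.
 */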

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed at callers not holding any inode reference,
 * so once i_lock is dropped the inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find the proper writeback list for the inode depending on its current state
 * and possibly also change of its state while we were doing writeback.  Here
 * we handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind the
 * flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting the I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has
	 * a separate, external IO completion path and ->sync_fs for
	 * guaranteeing inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state &= ~I_DIRTY_PAGES;
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back a single inode, e.g. on behalf
 * of a filesystem. The flusher thread uses __writeback_single_inode() instead
 * and does more profound writeback list handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * an inode reference or the inode has I_WILL_FREE set, it
		 * cannot go away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip the inode if it is clean. We don't want to mess with writeback
	 * lists in this function since the flusher thread may be doing e.g.
	 * sync in parallel and if we move the inode, it could get skipped. So
	 * here we make sure the inode is on some writeback list and leave it
	 * there unless we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If the inode is clean, remove it from writeback lists. Otherwise
	 * don't touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
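
/*
 * Worked example (assuming 4KB pages, so MIN_WRITEBACK_PAGES == 1024):
 * with avg_write_bandwidth == 25600 pages/s (~100 MB/s) and
 * global_dirty_limit == 100000 pages, the candidates are 12800 and
 * 100000 / 8 == 12500 pages.  round_down(12500 + 1024, 1024) then yields
 * a chunk of 13312 pages; the chunk is always at least
 * MIN_WRITEBACK_PAGES and a multiple of it.
 */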

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;		/* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on b_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are not doing WB_SYNC_ALL writeback. So this catches only
		 * the WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}
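
/*
 * Accounting example: if writeback_chunk_size() granted a 1024-page slice
 * and __writeback_single_inode() left wbc.nr_to_write at 200, then 824
 * pages were written, so work->nr_pages drops by 824 and "wrote" grows by
 * 824 - plus one more if the inode came out clean, so that progress is
 * reported even for inodes that had no dirty pages to write.
 */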

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
	    bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}
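
/*
 * Illustration (assumed defaults): with dirty_background_ratio == 10 and
 * roughly 8GB of dirtyable memory, background_thresh comes out around
 * 200k 4KB pages (~800MB).  The second check scales that down to this
 * bdi's share of recent writeout via bdi_dirty_limit(), so one slow
 * device can exceed its own threshold without the global one tripping.
 */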

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark
 * the dirtying-time in the inode's address_space.  So this periodic writeback
 * code just walks the superblock inode list, writing back any inodes which
 * are older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write
 * back all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   list_empty(&bdi->bdi_list))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list) ||
	    (wb_has_dirty_io(wb) && dirty_writeback_interval))
		queue_delayed_work(bdi_wq, &wb->dwork,
			msecs_to_jiffies(dirty_writeback_interval * 10));

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}
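
/*
 * Example (illustrative): sys_sync() starts things off with
 *
 *	wakeup_flusher_threads(0, WB_REASON_SYNC);
 *
 * and page reclaim passes WB_REASON_TRY_TO_FREE_PAGES when it wants
 * dirty pages cleaned.
 */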

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);
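
/*
 * Example (illustrative): filesystems rarely call this directly; the usual
 * entry points are the wrappers in include/linux/fs.h:
 *
 *	mark_inode_dirty(inode);	expands to __mark_inode_dirty(inode, I_DIRTY)
 *	mark_inode_dirty_sync(inode);	expands to __mark_inode_dirty(inode, I_DIRTY_SYNC)
 */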

static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);
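
/*
 * Example (illustrative): filesystems use this as a best-effort flush when
 * free space runs low, e.g.
 *
 *	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 *
 * which quietly does nothing if the flusher is already busy on this bdi
 * or the superblock is being unmounted.
 */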

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
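
/*
 * Example (illustrative): sync(2) ends up here for every writable
 * superblock via iterate_supers(), roughly
 *
 *	iterate_supers(sync_inodes_one_sb, NULL);
 *
 * where the iterator callback simply calls sync_inodes_sb(sb), after a
 * WB_SYNC_NONE pre-pass has already pushed most pages out.
 */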

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
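
/*
 * Example (illustrative): a caller holding a reference can force a dirty
 * inode out synchronously with
 *
 *	err = write_inode_now(inode, 1);
 *
 * where sync != 0 selects WB_SYNC_ALL, i.e. the data pages are waited on
 * too, not just submitted.
 */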

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty
 * inode lists and will update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
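
/*
 * Example (illustrative): generic_file_fsync() uses this to flush just the
 * inode after its data pages have been written:
 *
 *	return sync_inode_metadata(inode, 1);
 */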