/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (sb_is_blkdev_sb(sb))
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	spin_unlock_bh(&bdi->wb_lock);

	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}
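
/*
 * Illustrative usage sketch (editorial, not part of the original source):
 * a caller that wants some pages cleaned on a bdi without waiting might do
 *
 *	bdi_start_writeback(bdi, 1024, WB_REASON_FS_FREE_SPACE);
 *
 * The call only queues (or kicks) work for the flusher workqueue; no IO
 * has been submitted, let alone completed, by the time it returns.
 */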

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * Requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from causing problems.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
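
/*
 * Worked example of the 32-bit wraparound guard above (editorial sketch,
 * with made-up numbers): with HZ=1000, a 32-bit jiffies counter wraps
 * roughly every 49.7 days. If an inode was dirtied just before a wrap,
 * dirtied_when can be numerically larger than the current jiffies even
 * though it lies in the past, so time_after(dirtied_when, t) alone could
 * claim the inode was dirtied "after" an expiry point it actually
 * preceded. The extra time_before_eq(dirtied_when, jiffies) test rejects
 * such apparently in-the-future timestamps.
 */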

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}
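
/*
 * Editorial note: unlike __inode_wait_for_writeback(), this helper does
 * not re-take i_lock or re-check I_SYNC in a loop, so a spurious wakeup
 * simply returns to the caller, which must revalidate the inode. That is
 * why callers such as writeback_sb_inodes() "continue" and look the
 * inode up again after calling it.
 */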

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind the
 * flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bails out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting the I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has a
	 * separate, external IO completion path and ->sync_fs for guaranteeing
	 * inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state &= ~I_DIRTY_PAGES;
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed to be called for writing back one inode which
 * we go e.g. from filesystem syncs or other higher priority io. The flusher
 * thread uses __writeback_single_inode() and associated helpers to deal with
 * dirty inodes on bdi->b_io lists.
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since the flusher thread would requeue the inode on the
	 * b_dirty list and we would livelock there. For WB_SYNC_ALL writeback,
	 * however, we must not skip the inode while pages are still tagged
	 * under writeback, because the caller needs to wait for their
	 * completion.
	 */
	if (!(inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
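
/*
 * Worked example for the WB_SYNC_NONE branch above (editorial sketch,
 * with made-up numbers): with 4K pages, MIN_WRITEBACK_PAGES is
 * 4096UL >> 2 = 1024 pages (4MB). Suppose avg_write_bandwidth is 25600
 * pages/s (~100MB/s), global_dirty_limit / DIRTY_SCOPE works out to
 * 50000 pages, and work->nr_pages is large. Then:
 *
 *	pages = min(25600 / 2, 50000)            = 12800
 *	pages = round_down(12800 + 1024, 1024)   = 13312
 *
 * i.e. roughly half a second's worth of IO per chunk, rounded to whole
 * 4MB units.
 */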

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;		/* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
	    bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}
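
/*
 * Background (editorial note): both thresholds come from
 * global_dirty_limits(), which derives them from the
 * vm.dirty_background_{ratio,bytes} and vm.dirty_{ratio,bytes} sysctls,
 * so "over background threshold" here is the same condition under which
 * balance_dirty_pages() kicks the flusher via
 * bdi_start_background_writeback().
 */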

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;

		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}
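
/*
 * Processing order sketch (editorial): one pass of wb_do_writeback()
 * drains the explicit work items first (sync, per-sb writeback, ...),
 * then falls back to the kupdate-style periodic flush, then to
 * background flushing, so queued callers are never starved by the
 * periodic paths.
 */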

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   list_empty(&bdi->bdi_list))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list) ||
	    (wb_has_dirty_io(wb) && dirty_writeback_interval))
		queue_delayed_work(bdi_wq, &wb->dwork,
			msecs_to_jiffies(dirty_writeback_interval * 10));

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}
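
/*
 * Illustrative call (editorial sketch, consistent with how the sync(2)
 * path uses this in kernels of this era): passing nr_pages == 0 means
 * "everything":
 *
 *	wakeup_flusher_threads(0, WB_REASON_SYNC);
 *
 * which expands to one WB_SYNC_NONE work item per bdi that currently has
 * dirty IO.
 */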

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty -	internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time,
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
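
/*
 * Editorial note: callers normally reach __mark_inode_dirty() through the
 * static inline wrappers in <linux/fs.h> rather than directly, e.g.:
 *
 *	mark_inode_dirty(inode);	-> __mark_inode_dirty(inode, I_DIRTY)
 *	mark_inode_dirty_sync(inode);	-> __mark_inode_dirty(inode, I_DIRTY_SYNC)
 *
 * where I_DIRTY is the union of I_DIRTY_SYNC, I_DIRTY_DATASYNC and
 * I_DIRTY_PAGES.
 */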
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
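
/*
 * Usage sketch (hypothetical caller, editorial): a filesystem that wants
 * to push out up to 1024 dirty pages before reclaiming space could do,
 * with s_umount held:
 *
 *	writeback_inodes_sb_nr(sb, 1024, WB_REASON_FS_FREE_SPACE);
 *
 * Note this waits for the work item to be processed (the completion),
 * but not for the IO it submits to finish.
 */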

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
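
/*
 * Usage sketch (hypothetical caller, editorial): an fsync implementation
 * that has already written the data pages and only needs the inode itself
 * on disk might call:
 *
 *	err = sync_inode_metadata(inode, 1);	(wait == 1 => WB_SYNC_ALL)
 *
 * nr_to_write == 0 in the wbc above is what keeps data writeback out of
 * the picture.
 */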