/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"
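
/*
 * 4MB minimal write chunk size
 */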
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
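
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */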
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
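
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */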
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}
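
/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */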
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static void bdi_wakeup_thread(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi->wb_lock);
	if (test_bit(BDI_registered, &bdi->state))
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
	spin_unlock_bh(&bdi->wb_lock);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	if (!test_bit(BDI_registered, &bdi->state)) {
		if (work->done)
			complete(work->done);
		goto out_unlock;
	}
	list_add_tail(&work->list, &bdi->work_list);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
out_unlock:
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		bdi_wakeup_thread(bdi);
		return;
	}

	work->sync_mode = WB_SYNC_NONE;
	work->nr_pages = nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason = reason;

	bdi_queue_work(bdi, work);
}
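
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */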
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			 enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}
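
/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */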
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	bdi_wakeup_thread(bdi);
}
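
/*
 * Remove the inode from the writeback list it is on.
 */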
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}
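
/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */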
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}
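
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */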
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into the LRU now */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
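
/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */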
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}
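
/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */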
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}
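
/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */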
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}
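
/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */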
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}
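
/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */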
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}
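
/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind the
 * flusher thread's back can have unexpected consequences.
 */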
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}
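
/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting the I_SYNC flag and calling inode_sync_complete() to clear it.
 */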
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has
	 * a separate, external IO completion path and ->sync_fs for
	 * guaranteeing inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state &= ~I_DIRTY_PAGES;
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}
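
/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is meant for writing back one inode at a time, e.g. on behalf
 * of a filesystem. The flusher thread instead uses __writeback_single_inode()
 * and does more profound writeback list handling in writeback_sb_inodes().
 */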
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip the inode if it is clean and, in WB_SYNC_ALL mode, has no
	 * pages still under writeback that we would need to wait on. Clean
	 * inodes with pages under writeback are only waited on for
	 * data-integrity syncs.
	 */
	if (!(inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
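
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */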
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode = work->sync_mode,
		.tagged_writepages = work->tagged_writepages,
		.for_kupdate = work->for_kupdate,
		.for_background = work->for_background,
		.for_sync = work->for_sync,
		.range_cyclic = work->range_cyclic,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;		/* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages = nr_pages,
		.sync_mode = WB_SYNC_NONE,
		.range_cyclic = 1,
		.reason = reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
	    bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}
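
/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */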
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}
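
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */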
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}
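
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */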
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}
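
/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */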
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages = LONG_MAX,
			.sync_mode = WB_SYNC_NONE,
			.for_background = 1,
			.range_cyclic = 1,
			.reason = WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages = nr_pages,
			.sync_mode = WB_SYNC_NONE,
			.for_kupdate = 1,
			.range_cyclic = 1,
			.reason = WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
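
/*
 * Retrieve work items and do the writeback they describe
 */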
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}
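
/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */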
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(BDI_registered, &bdi->state))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		bdi_wakeup_thread_delayed(bdi);

	current->flags &= ~PF_SWAPWRITE;
}
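
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */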
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
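
/**
 * __mark_inode_dirty -	internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, the dirtying time is recorded in the
 * kernel-internal blockdev inode (page->mapping->host), so for
 * I_DIRTY_PAGES the page-dirtying time always lands on that inode.
 */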
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}

	/*
	 * Make sure that changes are seen by all cpus before we test
	 * i_state below.
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		/*
		 * We keep the error status of individual mapping so that
		 * applications can catch the writeback error using fsync(2).
		 * See filemap_fdatawait_keep_errors() for details.
		 */
		filemap_fdatawait_keep_errors(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}
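
/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */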
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb = sb,
		.sync_mode = WB_SYNC_NONE,
		.tagged_writepages = 1,
		.done = &done,
		.nr_pages = nr,
		.reason = reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
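
/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */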
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
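
/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr() if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */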
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
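
/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */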
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);
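
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */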
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb = sb,
		.sync_mode = WB_SYNC_ALL,
		.nr_pages = LONG_MAX,
		.range_cyclic = 0,
		.done = &done,
		.reason = WB_REASON_SYNC,
		.for_sync = 1,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
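
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */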
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
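
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */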
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);
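
/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */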
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);