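/*
 * mm/page-writeback.c
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 */
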
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <trace/events/writeback.h>
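
/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */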
#define MAX_PAUSE		max(HZ/5, 1)
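
/*
 * Estimate write bandwidth at 200ms intervals.
 */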
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)
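
/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */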
static long ratelimit_pages = 32;
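
/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than the number of newly dirtied pages to
 * ensure that reasonably large amounts of I/O are submitted.
 */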
static inline long sync_writeback_pages(unsigned long dirtied)
{
        if (dirtied < ratelimit_pages)
                dirtied = ratelimit_pages;

        return dirtied + dirtied / 2;
}
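
/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */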
int dirty_background_ratio = 10;
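
/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */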
unsigned long dirty_background_bytes;
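
/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */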
int vm_highmem_is_dirtyable;
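
/*
 * The generator of dirty data starts writeback at this percentage
 */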
int vm_dirty_ratio = 20;
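
/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */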
unsigned long vm_dirty_bytes;
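
/*
 * The interval between `kupdate'-style writebacks, in centiseconds
 * (5 * 100 centiseconds = 5 seconds).
 */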
unsigned int dirty_writeback_interval = 5 * 100;
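
/*
 * The longest time for which data is allowed to remain dirty, in centiseconds
 * (30 * 100 centiseconds = 30 seconds).
 */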
unsigned int dirty_expire_interval = 30 * 100;
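
/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */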
int block_dump;
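
/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in
 * jiffies: a full sync is triggered after this time elapses without any
 * disk activity.
 */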
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);
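
/* End of sysctl-exported parameters */

/*
 * The global dirty threshold as smoothed by update_dirty_limit(): it tracks
 * the instantaneous threshold upward immediately, but decays downward only
 * gradually, so a sudden drop in dirtyable memory does not throttle
 * everybody at once.
 */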
unsigned long global_dirty_limit;
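
/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a
 * smaller share.
 *
 * We use page writeout completions because we are interested in getting rid
 * of dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these
 * events, because demand can/will vary over time. The length of this period
 * itself is measured in page writeback completions.
 */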
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;
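
/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */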
static int calc_period_shift(void)
{
        unsigned long dirty_total;

        if (vm_dirty_bytes)
                dirty_total = vm_dirty_bytes / PAGE_SIZE;
        else
                dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
                                100;
        return 2 + ilog2(dirty_total - 1);
}
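
/*
 * update the period when the dirty threshold changes.
 */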
static void update_completion_period(void)
{
        int shift = calc_period_shift();
        prop_change_shift(&vm_completions, shift);
        prop_change_shift(&vm_dirties, shift);
}

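/*
 * The dirty limits are specified either as a percentage of dirtyable memory
 * or as an absolute byte count, never both at once: writing one member of
 * each pair through its sysctl handler below resets the other to 0.
 */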
int dirty_background_ratio_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                dirty_background_bytes = 0;
        return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                dirty_background_ratio = 0;
        return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int old_ratio = vm_dirty_ratio;
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
                update_completion_period();
                vm_dirty_bytes = 0;
        }
        return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        unsigned long old_bytes = vm_dirty_bytes;
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
                update_completion_period();
                vm_dirty_ratio = 0;
        }
        return ret;
}
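
/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */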
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
        __inc_bdi_stat(bdi, BDI_WRITTEN);
        __prop_inc_percpu_max(&vm_completions, &bdi->completions,
                              bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
        unsigned long flags;

        local_irq_save(flags);
        __bdi_writeout_inc(bdi);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

void task_dirty_inc(struct task_struct *tsk)
{
        prop_inc_single(&vm_dirties, &tsk->dirties);
}
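
/*
 * Obtain an accurate fraction of the BDI's portion.
 */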
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
                long *numerator, long *denominator)
{
        prop_fraction_percpu(&vm_completions, &bdi->completions,
                                numerator, denominator);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
                long *numerator, long *denominator)
{
        prop_fraction_single(&vm_dirties, &tsk->dirties,
                                numerator, denominator);
}
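
/*
 * task_dirty_limit - scale down dirty throttling threshold for one task
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 *
 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 * throttling individual tasks before reaching the bdi dirty limit.
 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 * amortize the pause time and avoid the livelock for light dirtiers.
 */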
#define TASK_LIMIT_FRACTION 8
static unsigned long task_dirty_limit(struct task_struct *tsk,
                                      unsigned long bdi_dirty)
{
        long numerator, denominator;
        unsigned long dirty = bdi_dirty;
        u64 inv = dirty / TASK_LIMIT_FRACTION;

        task_dirties_fraction(tsk, &numerator, &denominator);
        inv *= numerator;
        do_div(inv, denominator);

        dirty -= inv;

        return max(dirty, bdi_dirty/2);
}
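
/* Minimum limit for any task */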
static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
{
        return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
}
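
/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares claimed by all
 * registered BDIs; bdi_set_min_ratio() ensures it stays below 100%.
 */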
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
        int ret = 0;

        spin_lock_bh(&bdi_lock);
        if (min_ratio > bdi->max_ratio) {
                ret = -EINVAL;
        } else {
                min_ratio -= bdi->min_ratio;
                if (bdi_min_ratio + min_ratio < 100) {
                        bdi_min_ratio += min_ratio;
                        bdi->min_ratio += min_ratio;
                } else {
                        ret = -EINVAL;
                }
        }
        spin_unlock_bh(&bdi_lock);

        return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
        int ret = 0;

        if (max_ratio > 100)
                return -EINVAL;

        spin_lock_bh(&bdi_lock);
        if (bdi->min_ratio > max_ratio) {
                ret = -EINVAL;
        } else {
                bdi->max_ratio = max_ratio;
                bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
        }
        spin_unlock_bh(&bdi_lock);

        return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
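
/*
 * Work out how much high memory may be consumed by dirty pages, so that
 * determine_dirtyable_memory() can exclude it when vm_highmem_is_dirtyable
 * is not set.
 */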
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
        int node;
        unsigned long x = 0;

        for_each_node_state(node, N_HIGH_MEMORY) {
                struct zone *z =
                        &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

                x += zone_page_state(z, NR_FREE_PAGES) +
                     zone_reclaimable_pages(z);
        }
        /*
         * Make sure that the number of highmem pages is never larger
         * than the total dirtyable memory: the per-zone counters are
         * approximate, so the sum can briefly exceed the passed-in total.
         */
        return min(x, total);
#else
        return 0;
#endif
}
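
/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */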
unsigned long determine_dirtyable_memory(void)
{
        unsigned long x;

        x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();

        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);

        return x + 1;   /* Ensure that we never return 0 */
}

static unsigned long hard_dirty_limit(unsigned long thresh)
{
        return max(thresh, global_dirty_limit);
}
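
/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */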
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
        unsigned long background;
        unsigned long dirty;
        unsigned long uninitialized_var(available_memory);
        struct task_struct *tsk;

        if (!vm_dirty_bytes || !dirty_background_bytes)
                available_memory = determine_dirtyable_memory();

        if (vm_dirty_bytes)
                dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
        else
                dirty = (vm_dirty_ratio * available_memory) / 100;

        if (dirty_background_bytes)
                background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
        else
                background = (dirty_background_ratio * available_memory) / 100;

        if (background >= dirty)
                background = dirty / 2;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
        trace_global_dirty_state(background, dirty);
}
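
/**
 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 * @bdi: the backing_dev_info to query
 * @dirty: global dirty limit in pages
 *
 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 * And the "limit" in the name is not seriously taken as hard limit in
 * balance_dirty_pages().
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to
 * prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The bdi's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */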
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
        u64 bdi_dirty;
        long numerator, denominator;

        /*
         * Calculate this BDI's share of the dirty ratio.
         */
        bdi_writeout_fraction(bdi, &numerator, &denominator);

        bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
        bdi_dirty *= numerator;
        do_div(bdi_dirty, denominator);

        bdi_dirty += (dirty * bdi->min_ratio) / 100;
        if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
                bdi_dirty = dirty * bdi->max_ratio / 100;

        return bdi_dirty;
}

static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
                                       unsigned long elapsed,
                                       unsigned long written)
{
        const unsigned long period = roundup_pow_of_two(3 * HZ);
        unsigned long avg = bdi->avg_write_bandwidth;
        unsigned long old = bdi->write_bandwidth;
        u64 bw;

        /*
         * bw = written * HZ / elapsed
         *
         *                   bw * elapsed + write_bandwidth * (period - elapsed)
         * write_bandwidth = ---------------------------------------------------
         *                                          period
         */
        bw = written - bdi->written_stamp;
        bw *= HZ;
        if (unlikely(elapsed > period)) {
                do_div(bw, elapsed);
                avg = bw;
                goto out;
        }
        bw += (u64)bdi->write_bandwidth * (period - elapsed);
        bw >>= ilog2(period);

        /*
         * one more level of smoothing, for filtering out sudden spikes
         */
        if (avg > old && old >= (unsigned long)bw)
                avg -= (avg - old) >> 3;

        if (avg < old && old <= (unsigned long)bw)
                avg += (old - avg) >> 3;

out:
        bdi->write_bandwidth = bw;
        bdi->avg_write_bandwidth = avg;
}
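
/*
 * The global dirtyable memory and dirty threshold could be suddenly knocked
 * down by a large amount (eg. on the startup of KVM in a swapless system).
 * This may throw the system into deep dirty exceeded state and throttle
 * heavy/light dirtiers alike. To retain good responsiveness, maintain
 * global_dirty_limit for tracking slowly down to the knocked down dirty
 * threshold.
 */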
static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
{
        unsigned long limit = global_dirty_limit;

        /*
         * Follow up in one step.
         */
        if (limit < thresh) {
                limit = thresh;
                goto update;
        }

        /*
         * Follow down slowly. Use the higher one as the target, because
         * thresh may drop below dirty. This is exactly the reason to
         * introduce global_dirty_limit which is guaranteed to lie above
         * the dirty pages.
         */
        thresh = max(thresh, dirty);
        if (limit > thresh) {
                limit -= (limit - thresh) >> 5;
                goto update;
        }
        return;
update:
        global_dirty_limit = limit;
}

static void global_update_bandwidth(unsigned long thresh,
                                    unsigned long dirty,
                                    unsigned long now)
{
        static DEFINE_SPINLOCK(dirty_lock);
        static unsigned long update_time;

        /*
         * check locklessly first to optimize away locking for the most time
         */
        if (time_before(now, update_time + BANDWIDTH_INTERVAL))
                return;

        spin_lock(&dirty_lock);
        if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
                update_dirty_limit(thresh, dirty);
                update_time = now;
        }
        spin_unlock(&dirty_lock);
}

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
                            unsigned long thresh,
                            unsigned long dirty,
                            unsigned long bdi_thresh,
                            unsigned long bdi_dirty,
                            unsigned long start_time)
{
        unsigned long now = jiffies;
        unsigned long elapsed = now - bdi->bw_time_stamp;
        unsigned long written;

        /*
         * rate-limit, only update once every 200ms.
         */
        if (elapsed < BANDWIDTH_INTERVAL)
                return;

        written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

        /*
         * Skip quiet periods when disk bandwidth is under-utilized.
         * (at least 1s idle time between two flusher runs)
         */
        if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
                goto snapshot;

        if (thresh)
                global_update_bandwidth(thresh, dirty, now);

        bdi_update_write_bandwidth(bdi, elapsed, written);

snapshot:
        bdi->written_stamp = written;
        bdi->bw_time_stamp = now;
}

static void bdi_update_bandwidth(struct backing_dev_info *bdi,
                                 unsigned long thresh,
                                 unsigned long dirty,
                                 unsigned long bdi_thresh,
                                 unsigned long bdi_dirty,
                                 unsigned long start_time)
{
        if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
                return;
        spin_lock(&bdi->wb.list_lock);
        __bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
                               start_time);
        spin_unlock(&bdi->wb.list_lock);
}
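
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */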
static void balance_dirty_pages(struct address_space *mapping,
                                unsigned long write_chunk)
{
        unsigned long nr_reclaimable, bdi_nr_reclaimable;
        unsigned long nr_dirty;
        unsigned long bdi_dirty;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long task_bdi_thresh;
        unsigned long min_task_bdi_thresh;
        unsigned long pages_written = 0;
        unsigned long pause = 1;
        bool dirty_exceeded = false;
        bool clear_dirty_exceeded = true;
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned long start_time = jiffies;

        for (;;) {
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

                global_dirty_limits(&background_thresh, &dirty_thresh);

                /*
                 * Throttle it only when the background writeback cannot
                 * catch-up. This avoids (excessively) small writeouts
                 * when the bdi limits are ramping up.
                 */
                if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
                        break;

                bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
                min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
                task_bdi_thresh = task_dirty_limit(current, bdi_thresh);

                /*
                 * In order to avoid the stacked BDI deadlock we need
                 * to ensure we accurately count the 'dirty' pages when
                 * the threshold is low.
                 *
                 * Otherwise it would be possible to get thresh+n pages
                 * reported dirty, even though there are thresh-m pages
                 * actually dirty; with m+n sitting in the percpu
                 * deltas.
                 */
                if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
                        bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
                        bdi_dirty = bdi_nr_reclaimable +
                                    bdi_stat_sum(bdi, BDI_WRITEBACK);
                } else {
                        bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                        bdi_dirty = bdi_nr_reclaimable +
                                    bdi_stat(bdi, BDI_WRITEBACK);
                }

                /*
                 * The bdi thresh is somehow "soft" limit derived from the
                 * global "hard" limit. The former helps to prevent heavy IO
                 * bdi or process from holding back light ones; The latter is
                 * the last resort safeguard.
                 */
                dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
                                  (nr_dirty > dirty_thresh);
                clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
                                        (nr_dirty <= dirty_thresh);

                if (!dirty_exceeded)
                        break;

                if (!bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;

                bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
                                     bdi_thresh, bdi_dirty, start_time);

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (i.e. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 * Only move pages to writeback if this bdi is over its
                 * threshold otherwise wait until the disk writes catch
                 * up.
                 */
                trace_balance_dirty_start(bdi);
                if (bdi_nr_reclaimable > task_bdi_thresh) {
                        pages_written += writeback_inodes_wb(&bdi->wb,
                                                             write_chunk);
                        trace_balance_dirty_written(bdi, pages_written);
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                __set_current_state(TASK_UNINTERRUPTIBLE);
                io_schedule_timeout(pause);
                trace_balance_dirty_wait(bdi);

                dirty_thresh = hard_dirty_limit(dirty_thresh);
                /*
                 * max-pause area. If dirty exceeded but still within this
                 * area, no need to sleep for more than 200ms: (a) 8 pages per
                 * 200ms is typically more than enough to curb heavy dirtiers;
                 * (b) the pause time limit makes the dirtiers more responsive.
                 */
                if (nr_dirty < dirty_thresh &&
                    bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
                    time_after(jiffies, start_time + MAX_PAUSE))
                        break;

                /*
                 * Increase the delay for each loop, up to our previous
                 * default of taking a 100ms nap.
                 */
                pause <<= 1;
                if (pause > HZ / 10)
                        pause = HZ / 10;
        }

        /* Clear dirty_exceeded flag only when no task can exceed the limit */
        if (clear_dirty_exceeded && bdi->dirty_exceeded)
                bdi->dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk
         * activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
            (!laptop_mode && (nr_reclaimable > background_thresh)))
                bdi_start_background_writeback(bdi);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
        if (set_page_dirty(page) || page_mkwrite) {
                struct address_space *mapping = page_mapping(page);

                if (mapping)
                        balance_dirty_pages_ratelimited(mapping);
        }
}

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
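
/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */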
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned long ratelimit;
        unsigned long *p;

        if (!bdi_cap_account_dirty(bdi))
                return;

        ratelimit = ratelimit_pages;
        if (mapping->backing_dev_info->dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(bdp_ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                ratelimit = sync_writeback_pages(*p);
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping, ratelimit);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);

void throttle_vm_writeout(gfp_t gfp_mask)
{
        unsigned long background_thresh;
        unsigned long dirty_thresh;

        for ( ; ; ) {
                global_dirty_limits(&background_thresh, &dirty_thresh);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                        break;
                congestion_wait(BLK_RW_ASYNC, HZ/10);

                /*
                 * The caller might hold locks which can prevent IO completion
                 * or progress in the filesystem.  So we cannot just sit here
                 * waiting for IO to complete.
                 */
                if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
                        break;
        }
}
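
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */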
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, buffer, length, ppos);
        bdi_arm_supers_timer();
        return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
        struct request_queue *q = (struct request_queue *)data;
        int nr_pages = global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS);

        /*
         * We want to write everything out, not just down to the dirty
         * threshold
         */
        if (bdi_has_dirty_io(&q->backing_dev_info))
                bdi_start_writeback(&q->backing_dev_info, nr_pages);
}
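
/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */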
void laptop_io_completion(struct backing_dev_info *info)
{
        mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}
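
/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */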
void laptop_sync_completion(void)
{
        struct backing_dev_info *bdi;

        rcu_read_lock();

        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
                del_timer(&bdi->laptop_mode_wb_timer);

        rcu_read_unlock();
}
#endif
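
/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back
 * in one go; clamp it to at most four megabytes.
 */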
void writeback_set_ratelimit(void)
{
        ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        writeback_set_ratelimit();
        return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};
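
/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages.
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */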
void __init page_writeback_init(void)
{
        int shift;

        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);

        shift = calc_period_shift();
        prop_descriptor_init(&vm_completions, shift);
        prop_descriptor_init(&vm_dirties, shift);
}
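
/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */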
void tag_pages_for_writeback(struct address_space *mapping,
                             pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
        unsigned long tagged;

        do {
                spin_lock_irq(&mapping->tree_lock);
                tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
                                &start, end, WRITEBACK_TAG_BATCH,
                                PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
                spin_unlock_irq(&mapping->tree_lock);
                WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
                cond_resched();
                /* We check 'start' to handle wrapping when end == ~0UL */
        } while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
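
/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().
 * fsync() and msync() need to guarantee nothing is skipped.
 *
 * To avoid livelocks (when other process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 */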
int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        int tag;

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;
retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                int i;

                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /*
                         * At this point, the page may be truncated or
                         * invalidated (changing page->mapping to NULL), or
                         * even swizzled back from swapper_space to tmpfs file
                         * mapping. However, page->index will not change
                         * because we have a reference on the page.
                         */
                        if (page->index > end) {
                                /*
                                 * can't be range_cyclic (1st pass) because
                                 * end == -1 in that case.
                                 */
                                done = 1;
                                break;
                        }

                        done_index = page->index;

                        lock_page(page);

                        /*
                         * Page truncated or invalidated. We can freely skip it
                         * then, even for data integrity operations: the page
                         * has disappeared concurrently, so there could be no
                         * real expectation of this data integrity operation
                         * even if there is now a new, dirty page at the same
                         * pagecache address.
                         */
                        if (unlikely(page->mapping != mapping)) {
continue_unlock:
                                unlock_page(page);
                                continue;
                        }

                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }

                        if (PageWriteback(page)) {
                                if (wbc->sync_mode != WB_SYNC_NONE)
                                        wait_on_page_writeback(page);
                                else
                                        goto continue_unlock;
                        }

                        BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

                        trace_wbc_writepage(wbc, mapping->backing_dev_info);
                        ret = (*writepage)(page, wbc, data);
                        if (unlikely(ret)) {
                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                        unlock_page(page);
                                        ret = 0;
                                } else {
                                        /*
                                         * done_index is set past this page,
                                         * so media errors will not choke
                                         * background writeout for the entire
                                         * file. This has consequences for
                                         * range_cyclic semantics (ie. it may
                                         * not be suitable for data integrity
                                         * writeout).
                                         */
                                        done_index = page->index + 1;
                                        done = 1;
                                        break;
                                }
                        }

                        /*
                         * We stop writing back only if we are not doing
                         * integrity sync. In case of integrity sync we have to
                         * keep going until we have written all the pages
                         * we tagged for writeback prior to entering this loop.
                         */
                        if (--wbc->nr_to_write <= 0 &&
                            wbc->sync_mode == WB_SYNC_NONE) {
                                done = 1;
                                break;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}
EXPORT_SYMBOL(write_cache_pages);
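
/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */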
static int __writepage(struct page *page, struct writeback_control *wbc,
                       void *data)
{
        struct address_space *mapping = data;
        int ret = mapping->a_ops->writepage(page, wbc);
        mapping_set_error(mapping, ret);
        return ret;
}
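
/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */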
int generic_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
{
        struct blk_plug plug;
        int ret;

        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
                return 0;

        blk_start_plug(&plug);
        ret = write_cache_pages(mapping, wbc, __writepage, mapping);
        blk_finish_plug(&plug);
        return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        return ret;
}
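
/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */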
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);
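
/*
 * For address_spaces which do not use buffers nor write back.
 */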
int __set_page_dirty_no_writeback(struct page *page)
{
        if (!PageDirty(page))
                return !TestSetPageDirty(page);
        return 0;
}
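
/*
 * Helper function for set_page_dirty family.
 * NOTE: This relies on being atomic wrt interrupts.
 */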
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
        if (mapping_cap_account_dirty(mapping)) {
                __inc_zone_page_state(page, NR_FILE_DIRTY);
                __inc_zone_page_state(page, NR_DIRTIED);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
                task_dirty_inc(current);
                task_io_account_write(PAGE_CACHE_SIZE);
        }
}
EXPORT_SYMBOL(account_page_dirtied);
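
/*
 * Helper function for set_page_writeback family.
 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
 * wrt interrupts.
 */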
void account_page_writeback(struct page *page)
{
        inc_zone_page_state(page, NR_WRITEBACK);
}
EXPORT_SYMBOL(account_page_writeback);
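
/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */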
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (!mapping)
                        return 1;

                spin_lock_irq(&mapping->tree_lock);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
                        WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
                        account_page_dirtied(page, mapping);
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
                spin_unlock_irq(&mapping->tree_lock);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
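
/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */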
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);
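
/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors who prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but should be better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */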
int set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                /*
                 * readahead/lru_deactivate_page could remain
                 * PG_readahead/PG_reclaim due to a race with
                 * end_page_writeback().  As for readahead, if the page is
                 * written, the flags would be reset, so no problem.  As for
                 * lru_deactivate_page, if the page is redirtied, the flag
                 * will be reset as well; but if the page is used by
                 * readahead it will confuse readahead and make it restart
                 * the size rampup process, which is a trivial problem.
                 */
                ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
                if (!spd)
                        spd = __set_page_dirty_buffers;
#endif
                return (*spd)(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);
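
/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */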
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);
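
/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */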
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        BUG_ON(!PageLocked(page));

        if (mapping && mapping_cap_account_dirty(mapping)) {
                /*
                 * Yes, Virginia, this is indeed insane.
                 *
                 * We use this sequence to make sure that
                 *  (a) we account for dirty stats properly
                 *  (b) we tell the low-level filesystem to
                 *      mark the whole page dirty if it was
                 *      dirty in a pagetable. Only to then
                 *  (c) clean the page again and return 1 to
                 *      cause the writeback.
                 *
                 * This way we avoid all nasty races with the
                 * dirty bit in multiple places and clearing
                 * them concurrently from different threads.
                 *
                 * Note! Normally the "set_page_dirty(page)"
                 * has no effect on the actual dirty bit - since
                 * that will already usually be set. But we
                 * need the side effects, and it can help us
                 * avoid races.
                 *
                 * We basically use the page "master dirty bit"
                 * as a serialization point for all the different
                 * threads doing their things.
                 */
                if (page_mkclean(page))
                        set_page_dirty(page);
                /*
                 * We carefully synchronise fault handlers against
                 * installing a dirty pte and marking the page dirty
                 * at this point.  We do this by having them hold the
                 * page lock while dirtying the page, and pages are
                 * always locked coming in here, so we get the desired
                 * exclusion.
                 */
                if (TestClearPageDirty(page)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
                        dec_bdi_stat(mapping->backing_dev_info,
                                        BDI_RECLAIMABLE);
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi)) {
                                __dec_bdi_stat(bdi, BDI_WRITEBACK);
                                __bdi_writeout_inc(bdi);
                        }
                }
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        if (ret) {
                dec_zone_page_state(page, NR_WRITEBACK);
                inc_zone_page_state(page, NR_WRITTEN);
        }
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret) {
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi))
                                __inc_bdi_stat(bdi, BDI_WRITEBACK);
                }
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                radix_tree_tag_clear(&mapping->page_tree,
                                        page_index(page),
                                        PAGECACHE_TAG_TOWRITE);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        if (!ret)
                account_page_writeback(page);
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);
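
/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */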
int mapping_tagged(struct address_space *mapping, int tag)
{
        return radix_tree_tagged(&mapping->page_tree, tag);
}
EXPORT_SYMBOL(mapping_tagged);