/*
 * mm/page-writeback.c
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to write out in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  It also means the amount
 * of dirty memory is re-evaluated after each such chunk of writeback.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages() decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It is somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}
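
/*
 * Worked example (illustrative values, not from this file): with the
 * boot-time default ratelimit_pages = 32 this returns 32 + 16 = 48 pages.
 * After writeback_set_ratelimit() has clamped ratelimit_pages to 4MB worth
 * of pages (1024 pages, assuming 4KB pages), it returns 1536 pages, i.e.
 * each throttled task writes back up to 6MB per balance_dirty_pages()
 * episode.
 */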

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage of
 * dirtyable memory.
 */
int dirty_background_ratio = 5;

/*
 * The generator of dirty data starts writeback at this percentage of
 * dirtyable memory.
 */
int vm_dirty_ratio = 10;

/*
 * The interval between `kupdate'-style writebacks, in jiffies.
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * Dirty data may remain in memory this long (in jiffies) before
 * `kupdate'-style writeback considers it old enough to write out.
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in
 * jiffies: a zero value disables laptop mode.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

static void background_writeout(unsigned long _min_pages);

/*
 * Scale the writeback cache size proportionally to the relative writeout
 * speed of each backing device.
 *
 * We do this by keeping a floating proportion between the BDIs, based on
 * page writeback completions [__bdi_writeout_inc()]. Devices that complete
 * writeback fastest earn the larger share of the dirty limit, while slower
 * devices get a smaller share.
 *
 * We use page writeout completions because we are interested in getting
 * rid of dirty pages; having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these
 * events, because demand can/will vary over time. The length of this
 * period itself is measured in page writeback completions.
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

static unsigned long determine_dirtyable_memory(void);

/*
 * Couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 100;
	return 2 + ilog2(dirty_total - 1);
}
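
/*
 * Worked example (illustrative values): with vm_dirty_ratio = 10 and
 * 262144 dirtyable pages (1GB of 4KB pages), dirty_total = 26214 and
 * ilog2(26213) = 14, so the shift is 16.  period/2 = 2^16 / 2 = 32768,
 * which is indeed roundup_pow_of_two(26214).
 */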

/*
 * Update the period when the dirty ratio changes.
 */
int dirty_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		int shift = calc_period_shift();
		prop_change_shift(&vm_completions, shift);
		prop_change_shift(&vm_dirties, shift);
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu(&vm_completions, &bdi->completions);
}

static inline void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion of the total writeout
 * completions.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void
clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
{
	long avail_dirty;

	avail_dirty = dirty -
		(global_page_state(NR_FILE_DIRTY) +
		 global_page_state(NR_WRITEBACK) +
		 global_page_state(NR_UNSTABLE_NFS));

	if (avail_dirty < 0)
		avail_dirty = 0;

	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
		bdi_stat(bdi, BDI_WRITEBACK);

	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * Scale the dirty limit per task.
 *
 * Task-specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 *
 * where p_{t} is the task's share of the recently dirtied pages.  Tasks
 * that dirty a lot get throttled earlier; the limit never drops below
 * half of the global one.
 */
void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
	long numerator, denominator;
	long dirty = *pdirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;
	if (dirty < *pdirty/2)
		dirty = *pdirty/2;

	*pdirty = dirty;
}
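
/*
 * Worked example (illustrative values): with *pdirty = 1000 pages and a
 * task that accounts for 1/4 of recent dirtyings, inv = (1000 >> 3) * 1 / 4
 * = 31, so the task's limit becomes 1000 - 31 = 969 pages.  A task doing
 * all of the dirtying (p_t = 1) gets 1000 - 125 = 875.  Since inv is at
 * most dirty/8, the result cannot fall below 7/8 of the original; the
 * *pdirty/2 floor above is a defensive clamp.
 */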

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to keep the amount of dirty memory proportional to
 * the amount of memory that writeback can actually be performed against.
 * On machines with highmem, the dirty thresholds are computed against
 * lowmem only: highmem may hold clean page cache just fine, but letting
 * the dirty limits scale with it would allow dirty pages to outgrow the
 * lowmem resources needed to write them back.
 */
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES)
			+ zone_page_state(z, NR_INACTIVE)
			+ zone_page_state(z, NR_ACTIVE);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/*
 * Return the number of pages that can currently be dirtied: free pages
 * plus the active and inactive page cache, minus highmem.
 */
static unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES)
		+ global_page_state(NR_INACTIVE)
		+ global_page_state(NR_ACTIVE);
	x -= highmem_dirtyable_memory(x);
	return x + 1;	/* Ensure that we never return 0 */
}

static void
get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
		 struct backing_dev_info *bdi)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	long background;
	long dirty;
	unsigned long available_memory = determine_dirtyable_memory();
	struct task_struct *tsk;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;

	if (bdi) {
		u64 bdi_dirty = dirty;
		long numerator, denominator;

		/*
		 * Calculate this BDI's share of the dirty ratio.
		 */
		bdi_writeout_fraction(bdi, &numerator, &denominator);

		bdi_dirty *= numerator;
		do_div(bdi_dirty, denominator);

		*pbdi_dirty = bdi_dirty;
		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
		task_dirty_limit(current, pbdi_dirty);
	}
}
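
/*
 * Worked example (illustrative values): with 100000 dirtyable pages,
 * dirty_background_ratio = 5 and vm_dirty_ratio = 10, a normal task gets
 * background = 5000 and dirty = 10000 pages; a real-time task gets 6250
 * and 12500.  A BDI that has earned half of the recent writeout
 * completions then starts with a bdi_dirty of 5000 pages before the
 * clipping and per-task scaling above.
 */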

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	long background_thresh;
	long dirty_thresh;
	long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (bdi_nr_reclaimable) {
			writeback_inodes(&wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			get_dirty_limits(&background_thresh, &dirty_thresh,
				       &bdi_thresh, bdi);
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;
		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		congestion_wait(WRITE, HZ/10);
	}

	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	     (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
					+ global_page_state(NR_UNSTABLE_NFS)
					> background_thresh)))
		pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, checking the global dirty state is expensive, so
 * try to avoid doing it too often (ratelimiting).  But once we're over the
 * dirty memory limit we decrease the ratelimiting by a lot, to prevent
 * individual processes from overshooting the limit by (ratelimit_pages)
 * each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
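
/*
 * Usage sketch (illustrative, not part of this file): write paths dirty a
 * page and then call the single-page wrapper from <linux/writeback.h>,
 * which passes nr_pages_dirtied = 1:
 *
 *	set_page_dirty(page);
 *	unlock_page(page);
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * Only about one call in every ratelimit_pages calls (one in 8 once the
 * BDI has exceeded its dirty limit) actually enters balance_dirty_pages();
 * the rest merely bump a per-CPU counter.
 */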

void throttle_vm_writeout(gfp_t gfp_mask)
{
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'd by heavy writers.
		 */
		dirty_thresh += dirty_thresh / 10;

		if (global_page_state(NR_UNSTABLE_NFS) +
		    global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(WRITE, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * Write back at least _min_pages, and keep writing until the amount of
 * dirty memory is less than the background threshold, or until we're all
 * clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.range_cyclic	= 1,
	};

	for ( ; ; ) {
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
		if (global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) < background_thresh
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			congestion_wait(WRITE, HZ/10);
			if (!wbc.encountered_congestion)
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
		.range_cyclic	= 1,
	};

	sync_supers();

	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion)
				congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	else
		del_timer(&wb_timer);
	return 0;
}

static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already
 * scheduled then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Unschedule it if we've got some more laptop time to be idle.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will evaluate the (expensive) global
 * dirty state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs
 * are dirtying in parallel, we cannot go more than 3% (1/32) over the
 * dirty memory thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write
 * back.  If this is too large then the caller will block on the IO queue
 * all the time.  So limit it to four megabytes - the balance_dirty_pages()
 * caller should do the writeback and not get stuck on the queue.
 */
void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
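
/*
 * Worked example (illustrative values, assuming 4KB pages): with 1GB of
 * memory (vm_total_pages = 262144) and 4 online CPUs, the raw value is
 * 262144 / (4 * 32) = 2048 pages = 8MB, which the 4MB cap reduces to 1024
 * pages.  On a 64MB uniprocessor box it is 16384 / 32 = 512 pages = 2MB,
 * within both bounds.
 */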

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits and start the
 * periodic `kupdate' timer.
 *
 * The CPU notifier re-tunes ratelimit_pages whenever CPUs come or go, and
 * the two proportion descriptors are initialized with a period derived
 * from the current dirty limit (see calc_period_shift()).
 */
void __init page_writeback_init(void)
{
	int shift;

	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * write_cache_pages - walk the list of dirty pages of the given address
 * space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().
 * fsync() and msync() need to guarantee nothing is skipped and that I/O
 * errors are propagated.
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock
			 * nor lock on the page itself: the page may be
			 * truncated or invalidated (changing page->mapping
			 * to NULL), or even swizzled back from swapper_space
			 * to tmpfs file mapping
			 */
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done:
		 * wrap back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}
EXPORT_SYMBOL(write_cache_pages);
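
/*
 * Usage sketch (hypothetical, for illustration only): a filesystem can
 * build its ->writepages() on write_cache_pages() by supplying a
 * writepage_t callback.  "myfs_writepage" and "myfs_writepages" are
 * invented names, not kernel symbols:
 *
 *	static int myfs_writepage(struct page *page,
 *			struct writeback_control *wbc, void *data)
 *	{
 *		(write the locked page out, set it under writeback,
 *		 then unlock it)
 *		return 0;
 *	}
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return write_cache_pages(mapping, wbc, myfs_writepage, NULL);
 *	}
 *
 * __writepage() below is exactly such a callback, used to implement
 * generic_writepages().
 */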

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address
 * space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	return write_cache_pages(mapping, wbc, __writepage, mapping);
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
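
/*
 * Usage sketch (illustrative): a caller that wants to synchronously clean
 * one page locks it first and lets write_one_page() unlock it:
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *
 * With wait != 0 the call does not return until writeback of the page has
 * completed; err < 0 reports an I/O failure.
 */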

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		write_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			if (mapping_cap_account_dirty(mapping)) {
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				__inc_bdi_stat(mapping->backing_dev_info,
						BDI_RECLAIMABLE);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and
 * return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
static int __set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}

int fastcall set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty(page);
	if (ret)
		task_dirty_inc(current);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page_nosync(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	ClearPageReclaim(page);
	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion.  See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_writeback_dirty(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_writeback_dirty(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	int ret;
	rcu_read_lock();
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);