/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Linus.
 * Buffermem limits added 12.3.98, Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;

			/*
			 * THP can not break up slab pages, so avoid taking
			 * compound_lock().  Slab performs non-atomic bit ops
			 * on page->flags for better performance.  In
			 * particular, slab_unlock() in slub used to be a hot
			 * path.  It is still hot on arches that do not
			 * support this_cpu_cmpxchg_double().
			 */
			if (PageSlab(page_head)) {
				if (PageTail(page)) {
					if (put_page_testzero(page_head))
						VM_BUG_ON(1);

					atomic_dec(&page->_mapcount);
					goto skip_lock_tail;
				} else
					goto skip_lock;
			}

			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock.  That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
skip_lock:
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);

skip_lock_tail:
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
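
/*
 * Example (illustrative sketch, not part of the original file): a
 * typical caller pairs a reference grab with put_page() around any
 * window where it uses the page without holding another reference:
 *
 *	get_page(page);
 *	...use the page...
 *	put_page(page);		(may free the page on the last reference)
 */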

/*
 * This function is exported but must not be called by anything other
 * than get_page().  It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {

		/* Ref to put_compound_page() comment. */
		if (PageSlab(page_head)) {
			if (likely(PageTail(page))) {
				__get_page_tail_foll(page, false);
				return true;
			} else {
				put_page(page_head);
				return false;
			}
		}

		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock.  That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
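
/*
 * Example (illustrative; "my_pages" is a hypothetical local): callers
 * typically thread pages onto a private list through page->lru and
 * hand the whole list back in one call:
 *
 *	LIST_HEAD(my_pages);
 *
 *	list_add(&page->lru, &my_pages);	(for each finished page)
 *	put_pages_list(&my_pages);		(drops one ref per page)
 */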

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned.  This may be fewer than the number
 * requested.  If nr_segs is 0 or negative, returns 0.  Each page
 * returned must be released with a put_page() call when it is
 * finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must be at least nr_segs long.
 *
 * Returns 1 if page is pinned.  If the page was not pinned, returns
 * -errno.  The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
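
/*
 * Example (illustrative sketch; "addr" is a hypothetical page-aligned
 * kernel address): pin one kernel page, use it, then drop the extra
 * reference taken on its behalf:
 *
 *	struct page *page;
 *
 *	if (get_kernel_page(addr, 0, &page) == 1) {
 *		...use page...
 *		put_page(page);
 *	}
 */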

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
	    PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
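
/*
 * Example (illustrative sketch): read-side pagecache users typically
 * call mark_page_accessed() once per access to a found page:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		mark_page_accessed(page);
 *		...copy data out...
 *		page_cache_release(page);
 *	}
 */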

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}
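
/*
 * Example (illustrative sketch): a freshly allocated, not-yet-LRU page
 * is typically queued on the inactive file list like so, and sits in a
 * per-cpu pagevec until that pagevec fills or is drained:
 *
 *	lru_cache_add_lru(page, LRU_INACTIVE_FILE);
 */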

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list, under the zone's
 * lru_lock.  Used when the caller already knows the page cannot be
 * evicted, eg. because it is mlocked.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list in the
 * expectation that the flusher threads will write it out, as this is
 * much more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could race with end_page_writeback,
		 * which can confuse readahead.  But the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was in the
		 * pagevec; move the page to the tail of the
		 * inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			__pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as mprotect),
	 * deactivating unevictable pages to accelerate reclaim is
	 * pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
								flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
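
/*
 * Example (illustrative; "pages" and "nr" are hypothetical locals):
 * dropping the references on a whole array of pinned pages at once,
 * treating them as cache-hot:
 *
 *	release_pages(pages, nr, 0);
 */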

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec)
{
	int uninitialized_var(active);
	enum lru_list lru;
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
	} else {
		SetPageUnevictable(page_tail);
		lru = LRU_UNEVICTABLE;
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages
 * pages in the mapping.  The pages are placed in @pvec.  pagevec_lookup()
 * takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
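
/*
 * Example (illustrative sketch): a typical caller loops, advancing
 * "start" past the last page returned in the previous batch:
 *
 *	pgoff_t start = 0;
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, start, PAGEVEC_SIZE)) {
 *		start = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;
 *		...process pvec.pages...
 *		pagevec_release(&pvec);
 *	}
 */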

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);
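
/*
 * Example (illustrative sketch): walking every dirty page of a
 * mapping.  Unlike pagevec_lookup(), the index is passed by reference
 * and is advanced to one past the last page returned:
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup_tag(&pvec, mapping, &index,
 *				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
 *		...process pvec.pages...
 *		pagevec_release(&pvec);
 *	}
 */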

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}