// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protects only lru_rotate.pvec, which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following struct pagevec are grouped together because they are all
 * protected by disabling preemption (and interrupts remain enabled).
 */
struct lru_pvecs {
	local_lock_t lock;
	struct pagevec lru_add;
	struct pagevec lru_deactivate_file;
	struct pagevec lru_deactivate;
	struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
	struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * This path almost never happens for VM activity - pages are normally freed
 * via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = lock_page_lruvec_irqsave(page, &flags);
		del_page_from_lru_list(page, lruvec);
		__clear_page_lru_flags(page);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}
	__ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page);
	free_unref_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is supposed to be called for THP, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never put on any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/*
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = lru_to_page(pages);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
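
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * driver that collected pages on a private list threaded through
 * page->lru can drop all of its references in one call:
 *
 *	LIST_HEAD(my_pages);
 *	// ... add pages with list_add(&page->lru, &my_pages),
 *	// holding one reference on each ...
 *	put_pages_list(&my_pages);	// list is empty on return
 */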

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0.  Each page returned
 * must be released with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);
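
/*
 * Illustrative sketch (hypothetical caller): pin the pages backing two
 * page-sized kernel buffers, use them, then drop the references:
 *
 *	struct kvec kiov[2] = {
 *		{ .iov_base = buf0, .iov_len = PAGE_SIZE },
 *		{ .iov_base = buf1, .iov_len = PAGE_SIZE },
 *	};
 *	struct page *pages[2];
 *	int i, nr = get_kernel_pages(kiov, 2, 0, pages);
 *
 *	// ... operate on pages[0..nr-1] ...
 *	for (i = 0; i < nr; i++)
 *		put_page(pages[i]);
 */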

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec))
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		/* block memcg migration while the page moves between lru */
		if (!TestClearPageLRU(page))
			continue;

		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
		(*move_fn)(page, lruvec);

		SetPageLRU(page);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
	if (!PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec);
		__count_vm_events(PGROTATED, thp_nr_pages(page));
	}
}

/* return true if pagevec needs to drain */
static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
{
	bool ret = false;

	if (!pagevec_add(pvec, page) || PageCompound(page) ||
			lru_cache_disabled())
		ret = true;

	return ret;
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 *
 * rotate_reclaimable_page() must disable IRQs, to prevent nasty races.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_lock_irqsave(&lru_rotate.lock, flags);
		pvec = this_cpu_ptr(&lru_rotate.pvec);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/*
		 * Holding lruvec->lru_lock is safe here, since
		 * 1) The pinned lruvec in reclaim, or
		 * 2) From a pre-LRU page during refault (which also holds the
		 *    rcu lock, so would be safe even if the page was on the LRU
		 *    and could move simultaneously to a new lruvec).
		 */
		spin_lock_irq(&lruvec->lru_lock);
		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
		spin_unlock_irq(&lruvec->lru_lock);
	} while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_page(struct page *page)
{
	lru_note_cost(mem_cgroup_page_lruvec(page),
		      page_is_file_lru(page), thp_nr_pages(page));
}

static void __activate_page(struct page *page, struct lruvec *lruvec)
{
	if (!PageActive(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		SetPageActive(page);
		add_page_to_lru_list(page, lruvec);
		trace_mm_lru_activate(page);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page);
		local_unlock(&lru_pvecs.lock);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void activate_page(struct page *page)
{
	struct lruvec *lruvec;

	page = compound_head(page);
	if (TestClearPageLRU(page)) {
		lruvec = lock_page_lruvec_irq(page);
		__activate_page(page, lruvec);
		unlock_page_lruvec_irq(lruvec);
		SetPageLRU(page);
	}
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec;
	int i;

	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);

	if (!PageReferenced(page)) {
		SetPageReferenced(page);
	} else if (PageUnevictable(page)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!PageActive(page)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		workingset_activation(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
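
/*
 * Illustrative sketch (hypothetical read path, not from this file): a
 * caller that just found a page in the page cache and is about to copy
 * data out of it records the access so reclaim keeps the page around:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		mark_page_accessed(page);
 *		// ... copy data out of the page ...
 *		put_page(page);
 *	}
 */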

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec;

	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);

	get_page(page);
	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	if (pagevec_add_and_need_flush(pvec, page))
		__pagevec_lru_add(pvec);
	local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(lru_cache_add);
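
/*
 * Most callers reach this through add_to_page_cache_lru(), which inserts
 * a freshly allocated page into the page cache and then puts it on the
 * LRU; a simplified sketch of that pattern (details vary in mm/filemap.c):
 *
 *	ret = add_to_page_cache(page, mapping, index, gfp);
 *	if (!ret)
 *		lru_cache_add(page);
 */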

/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
					   struct vm_area_struct *vma)
{
	bool unevictable;

	VM_BUG_ON_PAGE(PageLRU(page), page);

	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}
	lru_cache_add(page);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. others -> none
 *
 * In 4, it moves to the head of the inactive list so the page is
 * not reclaimed in the middle of the invalidation.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
	bool active = PageActive(page);
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	del_page_from_lru_list(page, lruvec);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim could race with end_page_writeback,
		 * which can make readahead confusing.  But the race window
		 * is _really_ small, and it's a non-critical problem.
		 */
		add_page_to_lru_list(page, lruvec);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was in the pagevec,
		 * so move that page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
	if (PageActive(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
	if (PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have
		 * the PG_swapbacked flag cleared, to distinguish them
		 * from normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate.pvec, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(pagevec_count(pvec))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);

	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn);

	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages, such as mprotect,
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
		local_unlock(&lru_pvecs.lock);
	}
}

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if the page was
 * on the active list and was not being deactivated. This is done to
 * speed up the reclaim of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn);
		local_unlock(&lru_pvecs.lock);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
		local_unlock(&lru_pvecs.lock);
	}
}
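
/*
 * Simplified sketch of the caller side (from memory of mm/madvise.c, not
 * this file): madvise(MADV_FREE) walks the ptes and, for each clean
 * anonymous page it finds, ends up doing roughly
 *
 *	ClearPageDirty(page);
 *	mark_page_lazyfree(page);
 *
 * so the page can be discarded instead of swapped out if memory gets tight.
 */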

void lru_add_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
}

/*
 * It's called from per-cpu workqueue context in the SMP case, so
 * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
 * the same cpu. It shouldn't be a problem in the !SMP case since
 * there is only one core and the locks will disable preemption.
 */
static void lru_add_and_bh_lrus_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
	invalidate_bh_lrus_cpu();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&lru_pvecs.lock);
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_and_bh_lrus_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee pagevec counter stores visible by this CPU are visible to
	 * other CPUs before loading the current drain generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken, to avoid doing the above smp_mb() twice.
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the new global
	 * drain generation number is stored before loading pagevec counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (D) and could harm the concurrently
	 * running draining operation.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (force_all_cpus ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
		    need_activate_page_drain(cpu) ||
		    has_bh_in_lru(cpu, NULL)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif

atomic_t lru_disable_count = ATOMIC_INIT(0);

/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().
 * It drains pages on the LRU cache and then disables it on all cpus
 * until lru_cache_enable() is called.
 */
void lru_cache_disable(void)
{
	atomic_inc(&lru_disable_count);
#ifdef CONFIG_SMP
	/*
	 * lru_add_drain_all in the force mode will schedule draining on
	 * all online CPUs, so any calls of lru_cache_disabled wrapped by
	 * local_lock or preemption disabled would be ordered by that.
	 * The atomic operation doesn't need to have stronger ordering
	 * requirements because that is enforced by the scheduling
	 * guarantees.
	 */
	__lru_add_drain_all(true);
#else
	lru_add_and_bh_lrus_drain();
#endif
}
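
/*
 * Illustrative sketch (simplified from the migration paths): callers
 * bracket the collection and migration of LRU pages with the
 * disable/enable pair, where lru_cache_enable() (a static inline in
 * include/linux/swap.h) simply decrements lru_disable_count again:
 *
 *	lru_cache_disable();
 *	// ... isolate_lru_page() on each page to be moved ...
 *	// ... migrate the isolated pages ...
 *	lru_cache_enable();
 */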

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct lruvec *lruvec = NULL;
	unsigned long flags;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same lruvec. The lock is held only if lruvec != NULL.
		 */
		if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
			unlock_page_lruvec_irqrestore(lruvec, flags);
			lruvec = NULL;
		}

		page = compound_head(page);
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			/*
			 * ZONE_DEVICE pages that return 'false' from
			 * page_is_devmap_managed() do not require special
			 * processing, and instead, expect a call to
			 * put_page_testzero().
			 */
			if (page_is_devmap_managed(page)) {
				put_devmap_managed_page(page);
				continue;
			}
			if (put_page_testzero(page))
				put_dev_pagemap(page->pgmap);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct lruvec *prev_lruvec = lruvec;

			lruvec = relock_page_lruvec_irqsave(page, lruvec,
							    &flags);
			if (prev_lruvec != lruvec)
				lock_batch = 0;

			del_page_from_lru_list(page, lruvec);
			__clear_page_lru_flags(page);
		}

		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
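
/*
 * Illustrative sketch (hypothetical caller): dropping the references on a
 * batch of pages obtained from the page cache in one shot, rather than
 * calling put_page() in a loop:
 *
 *	struct page *pages[16];
 *	unsigned int nr = find_get_pages_range(mapping, &index, end,
 *					       ARRAY_SIZE(pages), pages);
 *	// ... use the pages ...
 *	release_pages(pages, nr);
 */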

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * calls release_pages() directly to avoid mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
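
/*
 * Callers normally reach this through the pagevec_release() wrapper in
 * include/linux/pagevec.h, which skips the call when the pagevec is
 * empty; a simplified sketch of the usual pattern:
 *
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
 *		// ... process pvec.pages[0..pagevec_count(&pvec)-1] ...
 *		pagevec_release(&pvec);
 *	}
 */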

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
	int was_unevictable = TestClearPageUnevictable(page);
	int nr_pages = thp_nr_pages(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	/*
	 * A page becomes evictable in two ways:
	 * 1) Within the LRU lock [munlock_vma_page() and __munlock_pagevec()].
	 * 2) Before acquiring the LRU lock to put the page on the correct LRU
	 *    and then
	 *    a) do a PageLRU check with lock [check_move_unevictable_pages]
	 *    b) do a PageLRU check before lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as the LRU lock will serialize them. For (2b), we
	 * need the following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 * If '#1' does not observe the setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier will make sure that the
	 * page_evictable check will put the page on the correct LRU. Without
	 * smp_mb(), SetPageLRU can be reordered after the PageMlocked check
	 * and can make '#1' fail the isolation of the page whose Mlocked bit
	 * is cleared (#0 is also looking at the same page), and the evictable
	 * page will be stranded on an unevictable LRU.
	 */
	SetPageLRU(page);
	smp_mb__after_atomic();

	if (page_evictable(page)) {
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	add_page_to_lru_list(page, lruvec);
	trace_mm_lru_insertion(page);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
		__pagevec_lru_add_fn(page, lruvec);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * find_get_entries() fills both pages and XArray value entries (aka
 * exceptional entries) into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!xa_is_value(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}
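
/*
 * Illustrative sketch (hypothetical caller; the find_get_entries()
 * signature varies across kernel versions): after a lookup that can
 * return shadow/swap value entries as well as pages, prune the value
 * entries before treating the pagevec as plain pages:
 *
 *	find_get_entries(mapping, start, end, &pvec, indices);
 *	pagevec_remove_exceptionals(&pvec);
 *	for (i = 0; i < pagevec_count(&pvec); i++)
 *		lock_page(pvec.pages[i]);	// now safe: all real pages
 */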

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and up to
 * index @end (inclusive).  The pages are placed in @pvec, and a reference
 * is taken against each of them.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If
 * this number is smaller than PAGEVEC_SIZE, the end of the specified range
 * has been reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);
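
/*
 * Illustrative sketch (hypothetical caller): walking every page cache
 * page in a range, one batch at a time. @start is advanced by the lookup,
 * so the loop simply repeats until the range is exhausted:
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			process_page(pvec.pages[i]);	// hypothetical helper
 *		pagevec_release(&pvec);
 *	}
 */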

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);
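
/*
 * Illustrative sketch (simplified from typical writeback code): the tagged
 * variant is how filesystems find just the dirty pages in a range:
 *
 *	while (pagevec_lookup_range_tag(&pvec, mapping, &index, end,
 *					PAGECACHE_TAG_DIRTY)) {
 *		// ... write each page in the batch back ...
 *		pagevec_release(&pvec);
 *	}
 */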

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;

	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}

#ifdef CONFIG_DEV_PAGEMAP_OPS
void put_devmap_managed_page(struct page *page)
{
	int count;

	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
		return;

	count = page_ref_dec_return(page);

	/*
	 * devmap page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (count == 1)
		free_devmap_managed_page(page);
	else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif