// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Bruno Haible.
 * Buffermem limits added 12.3.98, Andrea Arcangeli.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following pagevecs are grouped together because they are all
 * protected by disabling preemption (and interrupts remain enabled
 * while preemption is disabled).
 */
struct lru_pvecs {
	local_lock_t lock;
	struct pagevec lru_add;
	struct pagevec lru_deactivate_file;
	struct pagevec lru_deactivate;
	struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
	struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * This path almost never happens for VM activity - pages are normally freed
 * via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct folio *folio = page_folio(page);
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
		del_page_from_lru_list(page, lruvec);
		__clear_page_lru_flags(page);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}

	if (unlikely(PageMlocked(page))) {
		int nr_pages = thp_nr_pages(page);

		__ClearPageMlocked(page);
		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page_folio(page));
	free_unref_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because hugetlb pages never have PageLRU set
	 * (they are never put on any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}

void __put_page(struct page *page)
{
	if (unlikely(is_zone_device_page(page)))
		free_zone_device_page(page);
	else if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);
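
/*
 * Illustrative sketch (not part of this file): __put_page() is the slow
 * path that put_page() falls into once the final reference is dropped.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);	// refcount 1
 *	get_page(page);					// refcount 2
 *	put_page(page);					// refcount 1
 *	put_page(page);		// refcount 0 -> __put_page() frees it
 */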

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Decrement the reference count on each page in @pages.  Pages whose
 * refcount drops to zero are freed; the list is reinitialised empty
 * on return.
 */
void put_pages_list(struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		if (!put_page_testzero(page)) {
			list_del(&page->lru);
			continue;
		}
		if (PageHead(page)) {
			list_del(&page->lru);
			__put_compound_page(page);
			continue;
		}
		/* Cannot be PageLRU because it's passed to us using the lru */
	}

	free_unref_page_list(pages);
	INIT_LIST_HEAD(pages);
}
EXPORT_SYMBOL(put_pages_list);
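
/*
 * A minimal usage sketch, assuming a caller-built list of pages holding
 * one reference each, threaded on page->lru:
 *
 *	LIST_HEAD(pages);
 *	// ... add pages with list_add(&page->lru, &pages) ...
 *	put_pages_list(&pages);	// drops one ref on each, empties the list
 */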

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0.  Each page returned
 * must be released with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec))
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct folio *folio = page_folio(page);

		/* block memcg migration while the page moves between lru */
		if (!TestClearPageLRU(page))
			continue;

		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		(*move_fn)(page, lruvec);

		SetPageLRU(page);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
	struct folio *folio = page_folio(page);

	if (!folio_test_unevictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, folio_nr_pages(folio));
	}
}

/* return true if pagevec needs to drain */
static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
{
	bool ret = false;

	if (!pagevec_add(pvec, page) || PageCompound(page) ||
	    lru_cache_disabled())
		ret = true;

	return ret;
}

/*
 * Writeback is about to end against a folio which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 *
 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
 */
void folio_rotate_reclaimable(struct folio *folio)
{
	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
		struct pagevec *pvec;
		unsigned long flags;

		folio_get(folio);
		local_lock_irqsave(&lru_rotate.lock, flags);
		pvec = this_cpu_ptr(&lru_rotate.pvec);
		if (pagevec_add_and_need_flush(pvec, &folio->page))
			pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}
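
/*
 * Illustrative call path (not part of this file): folio_end_writeback()
 * invokes folio_rotate_reclaimable() when the reclaim flag was set, so a
 * folio that has just finished writeback is moved to the inactive tail,
 * where reclaim will find and free it first.
 */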

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/*
		 * Taking lruvec->lru_lock is safe here: the lruvec is
		 * pinned, either by reclaim itself or by the isolation in
		 * mem_cgroup_move_account(), so it cannot go away while
		 * the cost counters are updated.
		 */
		spin_lock_irq(&lruvec->lru_lock);

		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
		spin_unlock_irq(&lruvec->lru_lock);
	} while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_folio(struct folio *folio)
{
	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
			folio_nr_pages(folio));
}
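
/*
 * Worked example with hypothetical numbers: if the lruvec holds
 * lrusize = 4000 pages, then once file_cost + anon_cost exceeds
 * lrusize / 4 = 1000, both costs are halved.  Old I/O cost thus decays
 * geometrically, so the file/anon balance tracks recent activity.
 */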

static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
{
	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_set_active(folio);
		lruvec_add_folio(lruvec, folio);
		trace_mm_lru_activate(folio);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void __activate_page(struct page *page, struct lruvec *lruvec)
{
	return __folio_activate(page_folio(page), lruvec);
}

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void folio_activate(struct folio *folio)
{
	if (folio_test_lru(folio) && !folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct pagevec *pvec;

		folio_get(folio);
		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
		if (pagevec_add_and_need_flush(pvec, &folio->page))
			pagevec_lru_move_fn(pvec, __activate_page);
		local_unlock(&lru_pvecs.lock);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void folio_activate(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio_test_clear_lru(folio)) {
		lruvec = folio_lruvec_lock_irq(folio);
		__folio_activate(folio, lruvec);
		unlock_page_lruvec_irq(lruvec);
		folio_set_lru(folio);
	}
}
#endif

static void __lru_cache_activate_folio(struct folio *folio)
{
	struct pagevec *pvec;
	int i;

	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the folio being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !LRU folio could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's folio active potentially hides it from the
	 * reclaimer as the folio is not on the LRU.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == &folio->page) {
			folio_set_active(folio);
			break;
		}
	}

	local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a folio as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated folio is not yet visible, so safe for non-atomic
 * ops, __folio_set_referenced() may be substituted for
 * folio_mark_accessed().
 */
void folio_mark_accessed(struct folio *folio)
{
	if (!folio_test_referenced(folio)) {
		folio_set_referenced(folio);
	} else if (folio_test_unevictable(folio)) {
		/*
		 * Unevictable folios are on the "LRU_UNEVICTABLE" list.
		 * But this list is never rotated or maintained, so marking
		 * an unevictable folio accessed has no effect.
		 */
	} else if (!folio_test_active(folio)) {
		/*
		 * If the folio is on the LRU, queue it for activation via
		 * lru_pvecs.activate_page. Otherwise, assume the folio is
		 * in a pagevec, mark it active and it'll be moved to the
		 * active LRU on the next drain.
		 */
		if (folio_test_lru(folio))
			folio_activate(folio);
		else
			__lru_cache_activate_folio(folio);
		folio_clear_referenced(folio);
		workingset_activation(folio);
	}
	if (folio_test_idle(folio))
		folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);
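
/*
 * Illustrative sketch of the two-touch promotion above, assuming an
 * evictable folio already on the LRU:
 *
 *	folio_mark_accessed(folio);	// 1st touch: sets the referenced flag
 *	folio_mark_accessed(folio);	// 2nd touch: activates the folio and
 *					// clears the referenced flag again
 */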

/**
 * folio_add_lru - Add a folio to an LRU list.
 * @folio: The folio to be added to the LRU.
 *
 * Queue the folio for addition to the LRU. The decision on whether
 * to add the folio to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of folio_add_lru()
 * to have the folio added to the active list using folio_mark_accessed().
 */
void folio_add_lru(struct folio *folio)
{
	struct pagevec *pvec;

	VM_BUG_ON_FOLIO(folio_test_active(folio) &&
			folio_test_unevictable(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	folio_get(folio);
	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	if (pagevec_add_and_need_flush(pvec, &folio->page))
		__pagevec_lru_add(pvec);
	local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(folio_add_lru);
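
/*
 * Typical usage sketch with a hypothetical caller: folio_add_lru() takes
 * its own reference, so the caller keeps its reference across the call.
 *
 *	struct folio *folio = filemap_alloc_folio(GFP_KERNEL, 0);
 *	// ... charge the folio and insert it where it belongs ...
 *	folio_add_lru(folio);	// queued; reaches the LRU on the next drain
 */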

/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
		mlock_new_page(page);
	else
		lru_cache_add(page);
}

/*
 * If the page cannot be invalidated, it is moved to the inactive list to
 * speed up its reclaim.  It is moved to the head of the list, rather than
 * the tail, to give the flusher threads some time to write it out, as this
 * is much more effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and dirty/writeback, the page can be reclaimed
 * asap using the reclaim flag.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, reclaim flag
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, reclaim flag
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, the page moves to the head of the inactive list so it is not
 * reclaimed immediately and the flusher threads get a chance to write
 * it out first.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
	bool active = PageActive(page);
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	del_page_from_lru_list(page, lruvec);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting the reclaim flag could race with the end of
		 * writeback and confuse readahead.  But the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		add_page_to_lru_list(page, lruvec);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it sat in the pagevec:
		 * move it to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
	if (PageActive(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
	if (PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have
		 * the swapbacked flag cleared, to distinguish them from
		 * normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate.pvec, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(pagevec_count(pvec))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);

	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn);

	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_folio() - Deactivate a file folio.
 * @folio: Folio to deactivate.
 *
 * This function hints to the VM that @folio is a good reclaim candidate,
 * for example if its invalidation fails due to the folio being dirty
 * or under writeback.
 *
 * Context: Caller holds a reference on the folio.
 */
void deactivate_file_folio(struct folio *folio)
{
	struct pagevec *pvec;

	/*
	 * In a workload with many unevictable pages such as mprotect,
	 * deactivating an unevictable folio to accelerate reclaim is
	 * pointless.
	 */
	if (folio_test_unevictable(folio))
		return;

	folio_get(folio);
	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

	if (pagevec_add_and_need_flush(pvec, &folio->page))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
	local_unlock(&lru_pvecs.lock);
}

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if the page was on the
 * active list and was not unevictable.  This is done to accelerate the
 * reclaim of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn);
		local_unlock(&lru_pvecs.lock);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
		local_unlock(&lru_pvecs.lock);
	}
}
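
/*
 * Illustrative call path (not part of this file): madvise(MADV_FREE)
 * reaches this through the madvise page-table walk, after which a clean,
 * swap-backed anonymous page sits on the file LRU and can be freed
 * outright by reclaim instead of being swapped out.
 */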

void lru_add_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
	mlock_page_drain_local();
}

/*
 * It's called from per-cpu workqueue context in SMP case so
 * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
 * the same cpu. It shouldn't be a problem in !SMP case since
 * the core is only one and tasks run on it.
 */
static void lru_add_and_bh_lrus_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
	invalidate_bh_lrus_cpu();
	mlock_page_drain_local();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&lru_pvecs.lock);
	mlock_page_drain_local();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_and_bh_lrus_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
static inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee pagevec counter stores visible by this CPU
	 * are visible to other CPUs before loading the current drain
	 * generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the
	 * mutex is taken. It pairs with the full barrier following the
	 * generation increment at (D) inside the critical section.
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the new global
	 * drain generation number is stored before loading pagevec counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier were done at any later step, e.g. after the
	 * loop, CPU #x would just exit at (C) and miss flushing out all of
	 * its added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
		    need_activate_page_drain(cpu) ||
		    need_mlock_page_drain(cpu) ||
		    has_bh_in_lru(cpu, NULL)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif
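
/*
 * Usage note (illustrative): callers that are about to isolate pages,
 * such as migration or memory offlining, call lru_add_drain_all() first
 * so pages parked in remote per-CPU pagevecs reach the LRU lists and
 * become visible to isolate_lru_page().
 */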

atomic_t lru_disable_count = ATOMIC_INIT(0);

/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().  It drains
 * the pages on the LRU caches and then keeps the caches disabled on
 * all cpus until lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */
void lru_cache_disable(void)
{
	atomic_inc(&lru_disable_count);

	/*
	 * Readers of lru_disable_count are protected by either disabling
	 * preemption or rcu_read_lock:
	 *
	 * preempt_disable, local_irq_disable  [bh_lru_lock()]
	 * rcu_read_lock		       [rt_spin_lock CONFIG_PREEMPT_RT]
	 * preempt_disable		       [local_lock !CONFIG_PREEMPT_RT]
	 *
	 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
	 * preempt_disable() regions of code. So any CPU which sees
	 * lru_disable_count = 0 will have exited the critical section
	 * by the time this wait returns.
	 */
	synchronize_rcu_expedited();
#ifdef CONFIG_SMP
	__lru_add_drain_all(true);
#else
	lru_add_and_bh_lrus_drain();
#endif
}

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];
		struct folio *folio = page_folio(page);

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same lruvec. The lock is held only if lruvec != NULL.
		 */
		if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
			unlock_page_lruvec_irqrestore(lruvec, flags);
			lruvec = NULL;
		}

		page = &folio->page;
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			if (put_devmap_managed_page(page))
				continue;
			if (put_page_testzero(page))
				free_zone_device_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct lruvec *prev_lruvec = lruvec;

			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
									&flags);
			if (prev_lruvec != lruvec)
				lock_batch = 0;

			del_page_from_lru_list(page, lruvec);
			__clear_page_lru_flags(page);
		}

		/*
		 * In rare cases, when truncation or holepunching raced with
		 * munlock after VM_LOCKED was cleared, Mlocked may still be
		 * found set here.  This does not indicate a problem, unless
		 * "unevictable_pgs_cleared" appears unreasonably large.
		 */
		if (unlikely(PageMlocked(page))) {
			__ClearPageMlocked(page);
			dec_zone_page_state(page, NR_MLOCK);
			count_vm_event(UNEVICTABLE_PGCLEARED);
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
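
/*
 * Minimal usage sketch, assuming a caller-built array of pages each
 * holding one reference (collect_pages() is a hypothetical helper):
 *
 *	struct page *pages[SWAP_CLUSTER_MAX];
 *	int nr = collect_pages(pages);
 *	release_pages(pages, nr);	// batched put_page() on all of them
 */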

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here, but only once per
 * pagevec (tracked via percpu_pvec_drained), so repeated releases stay cheap.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
{
	int was_unevictable = folio_test_clear_unevictable(folio);
	long nr_pages = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	folio_set_lru(folio);

	/*
	 * No smp_mb__after_atomic() is needed here before folio_evictable()
	 * tests the mlocked flag: munlock only clears the mlocked flag
	 * while holding the LRU lock, so an evictable folio cannot be
	 * stranded on the unevictable LRU.  (__page_cache_release() and
	 * release_pages() clear the mlocked flag without the lock, but only
	 * after put_page_testzero() has excluded all other users.)
	 */
	if (folio_evictable(folio)) {
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		folio_clear_active(folio);
		folio_set_unevictable(folio);
		/*
		 * folio->mlock_count = !!folio_test_mlocked(folio)?
		 * But that leaves the mlock accounting in doubt whether
		 * another actor has already counted the mlock or not.  Err
		 * on the safe side, underestimate, and let page reclaim fix
		 * it, rather than leaving a folio on the unevictable LRU
		 * indefinitely.
		 */
		folio->mlock_count = 0;
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	lruvec_add_folio(lruvec, folio);
	trace_mm_lru_insertion(folio);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct folio *folio = page_folio(pvec->pages[i]);

		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		__pagevec_lru_add_fn(folio, lruvec);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

/**
 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
 * @fbatch: The batch to prune
 *
 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
 * entries.  This function prunes all the non-folio entries from @fbatch
 * without leaving holes, so that it can be passed on to folio-only batch
 * operations.
 */
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		if (!xa_is_value(folio))
			fbatch->folios[j++] = folio;
	}
	fbatch->nr = j;
}

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and upto
 * index @end (inclusive).  The pages are placed in @pvec.  The lookup
 * takes a reference against each page in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If
 * this number is smaller than PAGEVEC_SIZE, the end of the specified range
 * has been reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);
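
/*
 * Illustrative loop over a range of page cache pages, in the style of
 * existing callers (a sketch, not part of this file; process_page() is
 * hypothetical):
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	unsigned i;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			process_page(pvec.pages[i]);
 *		pagevec_release(&pvec);	// drop the lookup references
 *	}
 */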

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;

	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
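
/*
 * Worked example: page_cluster = 3 means 1 << 3 = 8 pages are read
 * around a swap fault, i.e. 32KB with 4KB pages; machines under 16MB
 * use 1 << 2 = 4 pages to limit readahead memory pressure.
 */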