1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/mm.h>
18#include <linux/sched/mm.h>
19#include <linux/module.h>
20#include <linux/gfp.h>
21#include <linux/kernel_stat.h>
22#include <linux/swap.h>
23#include <linux/pagemap.h>
24#include <linux/init.h>
25#include <linux/highmem.h>
26#include <linux/vmpressure.h>
27#include <linux/vmstat.h>
28#include <linux/file.h>
29#include <linux/writeback.h>
30#include <linux/blkdev.h>
31#include <linux/buffer_head.h>
32
33#include <linux/mm_inline.h>
34#include <linux/backing-dev.h>
35#include <linux/rmap.h>
36#include <linux/topology.h>
37#include <linux/cpu.h>
38#include <linux/cpuset.h>
39#include <linux/compaction.h>
40#include <linux/notifier.h>
41#include <linux/rwsem.h>
42#include <linux/delay.h>
43#include <linux/kthread.h>
44#include <linux/freezer.h>
45#include <linux/memcontrol.h>
46#include <linux/delayacct.h>
47#include <linux/sysctl.h>
48#include <linux/oom.h>
49#include <linux/prefetch.h>
50#include <linux/printk.h>
51#include <linux/dax.h>
52
53#include <asm/tlbflush.h>
54#include <asm/div64.h>
55
56#include <linux/swapops.h>
57#include <linux/balloon_compaction.h>
58
59#include "internal.h"
60
61#define CREATE_TRACE_POINTS
62#include <trace/events/vmscan.h>
63
/*
 * Per-invocation reclaim control: request parameters in, progress
 * counters accumulated out.
 */
struct scan_control {
	/* How many pages shrink_zones() should reclaim */
	unsigned long nr_to_reclaim;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Allocation order */
	int order;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/*
	 * The memory cgroup that hit its limit and is the primary
	 * target of this reclaim invocation (NULL for global reclaim).
	 */
	struct mem_cgroup *target_mem_cgroup;

	/* Scan (total_size >> priority) pages at once */
	int priority;

	/* The highest zone to isolate pages for reclaim from */
	enum zone_type reclaim_idx;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/*
	 * Cgroups are not reclaimed below their configured memory.low,
	 * unless we threaten to OOM. If any cgroups are skipped due to
	 * memory.low the whole reclaim is retried with memcg_low_reclaim
	 * set (memcg_low_skipped records that a skip happened).
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* Per-invocation page-state counters filled in by shrink_page_list() */
	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;
};
130
#ifdef ARCH_HAS_PREFETCH
/*
 * Prefetch the previous page in a reverse LRU walk so the next
 * iteration's ->_field access is warm in cache.  Skips the prefetch
 * when the previous entry is the list head itself.
 */
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
/* Same as prefetch_prev_lru_page() but prefetches with intent to write. */
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
158
159
160
161
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
/*
 * The total number of pages which are beyond the high watermark within all
 * zones.
 */
unsigned long vm_total_pages;

/* All registered shrinkers, protected by shrinker_rwsem */
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
171
172#ifdef CONFIG_MEMCG
173static bool global_reclaim(struct scan_control *sc)
174{
175 return !sc->target_mem_cgroup;
176}
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191static bool sane_reclaim(struct scan_control *sc)
192{
193 struct mem_cgroup *memcg = sc->target_mem_cgroup;
194
195 if (!memcg)
196 return true;
197#ifdef CONFIG_CGROUP_WRITEBACK
198 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
199 return true;
200#endif
201 return false;
202}
203
/*
 * Record (or clear) the congested state in @memcg's per-node info for
 * @pgdat.  A NULL @memcg (global reclaim) is ignored here.
 */
static void set_memcg_congestion(pg_data_t *pgdat,
				struct mem_cgroup *memcg,
				bool congested)
{
	struct mem_cgroup_per_node *mn;

	if (!memcg)
		return;

	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	WRITE_ONCE(mn->congested, congested);
}
216
/*
 * Read back the congested state set by set_memcg_congestion() for
 * @memcg on @pgdat.
 */
static bool memcg_congested(pg_data_t *pgdat,
			struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mn;

	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	return READ_ONCE(mn->congested);

}
226#else
/* !CONFIG_MEMCG: all reclaim is global and the memcg hooks are no-ops. */
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}

static bool sane_reclaim(struct scan_control *sc)
{
	return true;
}

static inline void set_memcg_congestion(struct pglist_data *pgdat,
				struct mem_cgroup *memcg, bool congested)
{
}

static inline bool memcg_congested(struct pglist_data *pgdat,
			struct mem_cgroup *memcg)
{
	return false;

}
248#endif
249
250
251
252
253
254
255unsigned long zone_reclaimable_pages(struct zone *zone)
256{
257 unsigned long nr;
258
259 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
260 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
261 if (get_nr_swap_pages() > 0)
262 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
263 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
264
265 return nr;
266}
267
268
269
270
271
272
273
274unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
275{
276 unsigned long lru_size;
277 int zid;
278
279 if (!mem_cgroup_disabled())
280 lru_size = mem_cgroup_get_lru_size(lruvec, lru);
281 else
282 lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
283
284 for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
285 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
286 unsigned long size;
287
288 if (!managed_zone(zone))
289 continue;
290
291 if (!mem_cgroup_disabled())
292 size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
293 else
294 size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
295 NR_ZONE_LRU_BASE + lru);
296 lru_size -= min(size, lru_size);
297 }
298
299 return lru_size;
300
301}
302
303
304
305
306int prealloc_shrinker(struct shrinker *shrinker)
307{
308 size_t size = sizeof(*shrinker->nr_deferred);
309
310 if (shrinker->flags & SHRINKER_NUMA_AWARE)
311 size *= nr_node_ids;
312
313 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
314 if (!shrinker->nr_deferred)
315 return -ENOMEM;
316 return 0;
317}
318
/* Undo prealloc_shrinker() for a shrinker that was never registered. */
void free_prealloced_shrinker(struct shrinker *shrinker)
{
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
324
/* Publish an already-preallocated shrinker on the global shrinker_list. */
void register_shrinker_prepared(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
331
/*
 * register_shrinker - allocate shrinker state and add it to shrinker_list
 *
 * Returns 0 on success or -ENOMEM if the deferred counters could not
 * be allocated.
 */
int register_shrinker(struct shrinker *shrinker)
{
	int ret;

	ret = prealloc_shrinker(shrinker);
	if (!ret)
		register_shrinker_prepared(shrinker);
	return ret;
}
EXPORT_SYMBOL(register_shrinker);
342
343
344
345
/*
 * Remove a shrinker from the global list and free its deferred counters.
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	/* Nothing to do if prealloc/registration never succeeded */
	if (!shrinker->nr_deferred)
		return;
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);
357
#define SHRINK_BATCH 128

/*
 * Run one shrinker: compute a scan target from the number of freeable
 * objects and the reclaim priority, add in work deferred by earlier
 * (e.g. GFP_NOFS) invocations, then call ->scan_objects in batches.
 * Returns the number of objects freed.
 */
static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	int nid = shrinkctl->nid;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0)
		return 0;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

	total_scan = nr;
	/* Scan (freeable >> priority) objects, weighted by seek cost. */
	delta = freeable >> priority;
	delta *= 4;
	do_div(delta, shrinker->seeks);
	total_scan += delta;
	if (total_scan < 0) {
		/* Overflowed the signed accumulator; fall back to freeable. */
		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
		       shrinker->scan_objects, total_scan);
		total_scan = freeable;
		next_deferred = nr;
	} else
		next_deferred = total_scan;

	/*
	 * Avoid excessive windup on filesystem shrinkers when many
	 * GFP_NOFS allocations defer their work: if the deferred count
	 * dwarfs this invocation's own delta, cap the scan at half the
	 * freeable objects so we don't freeze the cache in one pass.
	 */
	if (delta < freeable / 4)
		total_scan = min(total_scan, freeable / 2);

	/*
	 * Avoid risking looping forever due to too large nr value:
	 * never try to free more than twice the estimate number of
	 * freeable entries.
	 */
	if (total_scan > freeable * 2)
		total_scan = freeable * 2;

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Scan in batch_size chunks; also make a final pass when fewer
	 * than batch_size objects remain but the whole cache is
	 * scannable (total_scan >= freeable), so small caches are not
	 * starved of reclaim.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		/* The shrinker may report scanning fewer than requested. */
		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
		total_scan -= shrinkctl->nr_scanned;
		scanned += shrinkctl->nr_scanned;

		cond_resched();
	}

	if (next_deferred >= scanned)
		next_deferred -= scanned;
	else
		next_deferred = 0;
	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates.
	 */
	if (next_deferred > 0)
		new_nr = atomic_long_add_return(next_deferred,
						&shrinker->nr_deferred[nid]);
	else
		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);

	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
	return freed;
}
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * If @memcg is non-NULL, only shrinkers with SHRINKER_MEMCG_AWARE are
 * called for the cgroup's objects; otherwise only unaware shrinkers run.
 *
 * Returns the number of reclaimed slab objects.
 */
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
				 struct mem_cgroup *memcg,
				 int priority)
{
	struct shrinker *shrinker;
	unsigned long freed = 0;

	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
		return 0;

	/* Don't block registration/unregistration; just skip this pass. */
	if (!down_read_trylock(&shrinker_rwsem))
		goto out;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};

		/*
		 * If kernel memory accounting is disabled, we ignore
		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
		 * passing NULL for memcg.
		 */
		if (memcg_kmem_enabled() &&
		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
			continue;

		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
			sc.nid = 0;

		freed += do_shrink_slab(&sc, shrinker, priority);
		/*
		 * Bail out if someone wants to register a new shrinker,
		 * to prevent registration from being stalled for long
		 * periods by parallel ongoing shrinking.
		 */
		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}

	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}
548
549void drop_slab_node(int nid)
550{
551 unsigned long freed;
552
553 do {
554 struct mem_cgroup *memcg = NULL;
555
556 freed = 0;
557 do {
558 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
559 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
560 } while (freed > 10);
561}
562
/* Drop reclaimable slab objects on every online node. */
void drop_slab(void)
{
	int nid;

	for_each_online_node(nid)
		drop_slab_node(nid);
}
570
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.  A swap-cached THP
	 * holds one radix-tree pin per subpage.
	 */
	int radix_pins = PageTransHuge(page) && PageSwapCache(page) ?
		HPAGE_PMD_NR : 1;
	return page_count(page) - page_has_private(page) == 1 + radix_pins;
}
582
583static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
584{
585 if (current->flags & PF_SWAPWRITE)
586 return 1;
587 if (!inode_write_congested(inode))
588 return 1;
589 if (inode_to_bdi(inode) == current->backing_dev_info)
590 return 1;
591 return 0;
592}
593
594
595
596
597
598
599
600
601
602
603
604
605
/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}
614
615
/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;
626
627
628
629
630
/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  Callers that cannot block can get
	 * throttled (e.g. by wait_on_page_writeback) elsewhere instead
	 * of here.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				pr_info("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_inode(mapping->host, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page);
		inc_node_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
701
702
703
704
705
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
			    bool reclaimed)
{
	unsigned long flags;
	int refcount;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	xa_lock_irqsave(&mapping->i_pages, flags);
	/*
	 * The non racy check for a busy page.  Freeze the expected
	 * refcount (ours + the page cache's; a swap-cached THP holds
	 * one cache reference per subpage) to zero.  If anyone else
	 * holds or gains a speculative reference the freeze fails and
	 * we must leave the page alone.
	 */
	if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
		refcount = 1 + HPAGE_PMD_NR;
	else
		refcount = 2;
	if (!page_ref_freeze(page, refcount))
		goto cannot_free;

	/* The page may have been re-dirtied before the freeze; check again. */
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, refcount);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		mem_cgroup_swapout(page, swap);
		__delete_from_swap_cache(page);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	} else {
		void (*freepage)(struct page *);
		void *shadow = NULL;

		/* must read ->freepage before the page is deleted/freed */
		freepage = mapping->a_ops->freepage;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 * Skipped for truncation (mapping_exiting) and DAX.
		 */
		if (reclaimed && page_is_file_cache(page) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(mapping, page);
		__delete_from_page_cache(page, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}
796
797
798
799
800
801
802
/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page, false)) {
		/*
		 * Unfreezing the refcount with 1 effectively drops the
		 * pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_ref_unfreeze(page, 1);
		return 1;
	}
	return 0;
}
816
817
818
819
820
821
822
823
824
825
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list and drop the
 * isolation reference.
 */
void putback_lru_page(struct page *page)
{
	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
}
831
/* Possible verdicts of page_check_references() for a candidate page. */
enum page_references {
	PAGEREF_RECLAIM,	/* reclaim the page */
	PAGEREF_RECLAIM_CLEAN,	/* reclaim only if the page is clean */
	PAGEREF_KEEP,		/* keep on the inactive list for now */
	PAGEREF_ACTIVATE,	/* move back to the active list */
};
838
/* Decide what to do with an inactive page based on recent references. */
static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
893
894
/* Check if a page is dirty or under writeback */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous pages are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them
	 */
	if (!page_is_file_cache(page) ||
	    (PageAnon(page) && !PageSwapBacked(page))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}
923
924
925
926
/*
 * shrink_page_list() returns the number of reclaimed pages.
 *
 * Takes pages off @page_list one by one, tries to free each (unmapping,
 * swapping out or writing back as needed) and sorts failures back onto
 * @page_list.  Optional per-state counters are returned through @stat.
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct pglist_data *pgdat,
				      struct scan_control *sc,
				      enum ttu_flags ttu_flags,
				      struct reclaim_stat *stat,
				      bool force_reclaim)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned nr_unqueued_dirty = 0;
	unsigned nr_dirty = 0;
	unsigned nr_congested = 0;
	unsigned nr_reclaimed = 0;
	unsigned nr_writeback = 0;
	unsigned nr_immediate = 0;
	unsigned nr_ref_keep = 0;
	unsigned nr_unmap_fail = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		enum page_references references = PAGEREF_RECLAIM_CLEAN;
		bool dirty, writeback;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON_PAGE(PageActive(page), page);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page)))
			goto activate_locked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if ((page_mapped(page) || PageSwapCache(page)) &&
		    !(PageAnon(page) && !PageSwapBacked(page)))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a node is marked
		 * reclaim_congested which affects wait_iff_congested.
		 * Pages that are dirty but not under writeback signal that
		 * flushers are not doing their job.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			nr_dirty++;

		if (dirty && !writeback)
			nr_unqueued_dirty++;

		/*
		 * Treat this page as congested if the underlying BDI is or
		 * if pages are cycling through the LRU so quickly that the
		 * pages marked for immediate reclaim are making it to the
		 * end of the LRU a second time.
		 */
		mapping = page_mapping(page);
		if (((dirty || writeback) && mapping &&
		     inode_write_congested(mapping->host)) ||
		    (writeback && PageReclaim(page)))
			nr_congested++;

		/*
		 * A page under writeback is handled in one of three ways:
		 *
		 * 1) kswapd encountering a page that completed a full LRU
		 *    cycle while under writeback (PageReclaim set, node
		 *    flagged PGDAT_WRITEBACK): count as immediate so the
		 *    caller can throttle, but do not stall here.
		 *
		 * 2) Throttling is not possible or not safe (legacy memcg
		 *    without sane dirty throttling, first encounter, or
		 *    !__GFP_FS/__GFP_IO): mark PageReclaim and move on.
		 *
		 * 3) Otherwise (legacy memcg, second encounter, FS allowed):
		 *    block on the writeback and retry the page.
		 */
		if (PageWriteback(page)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    PageReclaim(page) &&
			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
				nr_immediate++;
				goto activate_locked;

			/* Case 2 above */
			} else if (sane_reclaim(sc) ||
			    !PageReclaim(page) || !may_enter_fs) {
				/*
				 * This is slightly racy - end_page_writeback()
				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here ends up interpreted
				 * as PageReadahead - but that does not matter
				 * enough to care.  What we do want is for this
				 * page to have PageReclaim set next time memcg
				 * reclaim reaches the tests above, so it will
				 * then wait_on_page_writeback() to avoid OOM.
				 */
				SetPageReclaim(page);
				nr_writeback++;
				goto activate_locked;

			/* Case 3 above */
			} else {
				unlock_page(page);
				wait_on_page_writeback(page);
				/* then go back and try same page again */
				list_add_tail(&page->lru, page_list);
				continue;
			}
		}

		if (!force_reclaim)
			references = page_check_references(page, sc);

		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			nr_ref_keep++;
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 * Lazyfree page could be freed directly
		 */
		if (PageAnon(page) && PageSwapBacked(page)) {
			if (!PageSwapCache(page)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (PageTransHuge(page)) {
					/* cannot split THP, skip it */
					if (!can_split_huge_page(page, NULL))
						goto activate_locked;
					/*
					 * Split pages without a PMD map right
					 * away. Chances are some or all of the
					 * tail pages can be freed without IO.
					 */
					if (!compound_mapcount(page) &&
					    split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
				}
				if (!add_to_swap(page)) {
					if (!PageTransHuge(page))
						goto activate_locked;
					/* Fallback to swap normal pages */
					if (split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					count_vm_event(THP_SWPOUT_FALLBACK);
#endif
					if (!add_to_swap(page))
						goto activate_locked;
				}

				may_enter_fs = 1;

				/* Adding to swap updated mapping */
				mapping = page_mapping(page);
			}
		} else if (unlikely(PageTransHuge(page))) {
			/* Split file THP */
			if (split_huge_page_to_list(page, page_list))
				goto keep_locked;
		}

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page)) {
			enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;

			if (unlikely(PageTransHuge(page)))
				flags |= TTU_SPLIT_HUGE_PMD;
			if (!try_to_unmap(page, flags)) {
				nr_unmap_fail++;
				goto activate_locked;
			}
		}

		if (PageDirty(page)) {
			/*
			 * Only kswapd can writeback filesystem pages
			 * to avoid risk of stack overflow, and only
			 * when the node has been flagged dirty and the
			 * page has cycled back marked PageReclaim --
			 * otherwise leave the IO to the flushers.
			 */
			if (page_is_file_cache(page) &&
			    (!current_is_kswapd() || !PageReclaim(page) ||
			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto activate_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/*
			 * Page is dirty. Flush the TLB if a writable entry
			 * potentially exists to avoid CPU writes after IO
			 * starts and then write it out here.
			 */
			try_to_unmap_flush_dirty();
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well; a buffer-only page whose count then
		 * drops to one can be freed directly here.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (PageAnon(page) && !PageSwapBacked(page)) {
			/* follow __remove_mapping for reference */
			if (!page_ref_freeze(page, 1))
				goto keep_locked;
			if (PageDirty(page)) {
				page_ref_unfreeze(page, 1);
				goto keep_locked;
			}

			count_vm_event(PGLAZYFREED);
			count_memcg_page_event(page, PGLAZYFREED);
		} else if (!mapping || !__remove_mapping(mapping, page, true))
			goto keep_locked;
		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__ClearPageLocked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		if (unlikely(PageTransHuge(page))) {
			mem_cgroup_uncharge(page);
			(*get_compound_page_dtor(page))(page);
		} else
			list_add(&page->lru, &free_pages);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
						PageMlocked(page)))
			try_to_free_swap(page);
		VM_BUG_ON_PAGE(PageActive(page), page);
		if (!PageMlocked(page)) {
			SetPageActive(page);
			pgactivate++;
			count_memcg_page_event(page, PGACTIVATE);
		}
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
	}

	mem_cgroup_uncharge_list(&free_pages);
	try_to_unmap_flush();
	free_unref_page_list(&free_pages);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);

	if (stat) {
		stat->nr_dirty = nr_dirty;
		stat->nr_congested = nr_congested;
		stat->nr_unqueued_dirty = nr_unqueued_dirty;
		stat->nr_writeback = nr_writeback;
		stat->nr_immediate = nr_immediate;
		stat->nr_activate = pgactivate;
		stat->nr_ref_keep = nr_ref_keep;
		stat->nr_unmap_fail = nr_unmap_fail;
	}
	return nr_reclaimed;
}
1339
/*
 * Attempt to reclaim only the clean, unmapped file pages on @page_list,
 * leaving everything else untouched.  Returns the number of pages freed
 * and adjusts NR_ISOLATED_FILE accordingly.
 */
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_unmap = 1,
	};
	unsigned long ret;
	struct page *page, *next;
	LIST_HEAD(clean_pages);

	list_for_each_entry_safe(page, next, page_list, lru) {
		if (page_is_file_cache(page) && !PageDirty(page) &&
		    !__PageMovable(page)) {
			ClearPageActive(page);
			list_move(&page->lru, &clean_pages);
		}
	}

	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
			TTU_IGNORE_ACCESS, NULL, true);
	/* Pages that could not be freed go back to the caller's list. */
	list_splice(&clean_pages, page_list);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
	return ret;
}
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/* Compaction should not handle unevictable pages but CMA can do so */
	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 */
	if (mode & ISOLATE_ASYNC_MIGRATE) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;
			bool migrate_dirty;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking. However, we can be racing with
			 * truncation so it's necessary to lock the page
			 * to stabilise the mapping as truncation holds
			 * the page lock until after the page is removed
			 * from the page cache.
			 */
			if (!trylock_page(page))
				return ret;

			mapping = page_mapping(page);
			migrate_dirty = !mapping || mapping->a_ops->migratepage;
			unlock_page(page);
			if (!migrate_dirty)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}
1443
1444
1445
1446
1447
1448
/*
 * Update LRU sizes after isolating pages. The LRU size updates must
 * be complete before mem_cgroup_update_lru_size due to a sanity check.
 */
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
			enum lru_list lru, unsigned long *nr_zone_taken)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		if (!nr_zone_taken[zid])
			continue;

		__update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
#ifdef CONFIG_MEMCG
		mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
#endif
	}

}
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
/**
 * isolate_lru_pages - isolate some pages onto @dst
 * @nr_to_scan:	The number of eligible pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned (out).
 * @sc:		The scan_control struct for this reclaim session
 * @mode:	One of the LRU isolation modes
 * @lru:	LRU list id for isolating
 *
 * The pgdat->lru_lock must be held on entry and is not dropped.
 * Pages in zones above sc->reclaim_idx are skipped and spliced back.
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		isolate_mode_t mode, enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
	unsigned long skipped = 0;
	unsigned long scan, total_scan, nr_pages;
	LIST_HEAD(pages_skipped);

	scan = 0;
	for (total_scan = 0;
	     scan < nr_to_scan && nr_taken < nr_to_scan && !list_empty(src);
	     total_scan++) {
		struct page *page;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON_PAGE(!PageLRU(page), page);

		/* Ineligible zone: set aside, do not count as scanned. */
		if (page_zonenum(page) > sc->reclaim_idx) {
			list_move(&page->lru, &pages_skipped);
			nr_skipped[page_zonenum(page)]++;
			continue;
		}

		/*
		 * Do not count skipped pages because that makes the function
		 * return with no isolated pages if the LRU mostly contains
		 * ineligible pages.  This causes the VM to not reclaim any
		 * pages, triggering a premature OOM.
		 */
		scan++;
		switch (__isolate_lru_page(page, mode)) {
		case 0:
			nr_pages = hpage_nr_pages(page);
			nr_taken += nr_pages;
			nr_zone_taken[page_zonenum(page)] += nr_pages;
			list_move(&page->lru, dst);
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}
	}

	/*
	 * Splice any skipped pages to the start of the LRU list. Note that
	 * this disrupts the LRU order when reclaiming for lower zones but
	 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
	 * scanning would soon rescan the same pages to skip and put the
	 * system at risk of premature OOM.
	 */
	if (!list_empty(&pages_skipped)) {
		int zid;

		list_splice(&pages_skipped, src);
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			if (!nr_skipped[zid])
				continue;

			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
			skipped += nr_skipped[zid];
		}
	}
	*nr_scanned = total_scan;
	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
				    total_scan, skipped, nr_taken, mode, lru);
	update_lru_sizes(lruvec, lru, nr_zone_taken);
	return nr_taken;
}
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU, takes a reference
 * and adjusts the vmstat statistics for whatever LRU the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The caller must have an elevated refcount on the page and must not
 * hold the lru_lock; interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON_PAGE(!page_count(page), page);
	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;

		spin_lock_irq(zone_lru_lock(zone));
		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		/* Recheck under the lock; we may have raced with reclaim. */
		if (PageLRU(page)) {
			int lru = page_lru(page);
			get_page(page);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, lru);
			ret = 0;
		}
		spin_unlock_irq(zone_lru_lock(zone));
	}
	return ret;
}
1617
1618
1619
1620
1621
1622
1623
1624
/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there are massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static int too_many_isolated(struct pglist_data *pgdat, int file,
		struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!sane_reclaim(sc))
		return 0;

	if (file) {
		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
	} else {
		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
	}

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
	 * won't get blocked by normal direct-reclaimers, forming a circular
	 * deadlock.
	 */
	if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		inactive >>= 3;

	return isolated > inactive;
}
1654
/*
 * Return isolated pages that could not be reclaimed to their LRU lists.
 * Called with pgdat->lru_lock held (dropped and reacquired around
 * operations that may sleep or free pages).
 */
static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	LIST_HEAD(pages_to_free);

	/*
	 * Put back any unfreeable pages.
	 */
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		int lru;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		list_del(&page->lru);
		if (unlikely(!page_evictable(page))) {
			spin_unlock_irq(&pgdat->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&pgdat->lru_lock);
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);

		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(page, lruvec, lru);

		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		/* Drop the isolation ref; free the page if it was the last. */
		if (put_page_testzero(page)) {
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&pgdat->lru_lock);
				mem_cgroup_uncharge(page);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&pgdat->lru_lock);
			} else
				list_add(&page->lru, &pages_to_free);
		}
	}

	/*
	 * To save our caller's stack, now use input list for pages to free.
	 */
	list_splice(&pages_to_free, page_list);
}
1709
1710
1711
1712
1713
1714
1715
/*
 * If a kernel thread (such as nfsd for loop-back mounts) services
 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
 * In that case we should only throttle if the backing device it is
 * writing to is congested.  In other cases it is safe to throttle.
 */
static int current_may_throttle(void)
{
	return !(current->flags & PF_LESS_THROTTLE) ||
		current->backing_dev_info == NULL ||
		bdi_write_congested(current->backing_dev_info);
}
1722
1723
1724
1725
1726
/*
 * shrink_inactive_list() is a helper for shrink_node().  It isolates up to
 * nr_to_scan pages from the given inactive LRU, runs them through
 * shrink_page_list(), puts back what could not be reclaimed, and returns
 * the number of pages reclaimed.
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
		     struct scan_control *sc, enum lru_list lru)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	struct reclaim_stat stat = {};
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	bool stalled = false;

	/*
	 * Throttle while too many pages are already isolated from this
	 * node; stall at most once per call (second trip returns 0).
	 */
	while (unlikely(too_many_isolated(pgdat, file, sc))) {
		if (stalled)
			return 0;

		/* wait a while for the isolated pages to drain */
		msleep(100);
		stalled = true;

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;

	spin_lock_irq(&pgdat->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
				     &nr_scanned, sc, isolate_mode, lru);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
	reclaim_stat->recent_scanned[file] += nr_taken;

	/* Attribute the scan to kswapd or direct reclaim, global and memcg */
	if (current_is_kswapd()) {
		if (global_reclaim(sc))
			__count_vm_events(PGSCAN_KSWAPD, nr_scanned);
		count_memcg_events(lruvec_memcg(lruvec), PGSCAN_KSWAPD,
				   nr_scanned);
	} else {
		if (global_reclaim(sc))
			__count_vm_events(PGSCAN_DIRECT, nr_scanned);
		count_memcg_events(lruvec_memcg(lruvec), PGSCAN_DIRECT,
				   nr_scanned);
	}
	spin_unlock_irq(&pgdat->lru_lock);

	if (nr_taken == 0)
		return 0;

	/* Reclaim the isolated pages without holding the lru_lock */
	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
				&stat, false);

	spin_lock_irq(&pgdat->lru_lock);

	/* Attribute the steals the same way as the scans above */
	if (current_is_kswapd()) {
		if (global_reclaim(sc))
			__count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed);
		count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_KSWAPD,
				   nr_reclaimed);
	} else {
		if (global_reclaim(sc))
			__count_vm_events(PGSTEAL_DIRECT, nr_reclaimed);
		count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_DIRECT,
				   nr_reclaimed);
	}

	putback_inactive_pages(lruvec, &page_list);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);

	spin_unlock_irq(&pgdat->lru_lock);

	/* page_list now holds only pages with zero refcount; free them */
	mem_cgroup_uncharge_list(&page_list);
	free_unref_page_list(&page_list);

	/*
	 * If dirty pages are scanned that are not queued for IO, it
	 * implies that flushers are not doing their job. This can
	 * happen when memory pressure pushes dirty pages to the end of
	 * the LRU before the dirty limits are breached and the dirty
	 * data has expired. It can also happen when the proportion of
	 * dirty pages grows not through writes but through memory
	 * pressure reclaiming all the clean cache. And in some cases,
	 * the flushers simply cannot keep up with the allocation
	 * rate. Nudge the flusher threads in case they are asleep.
	 */
	if (stat.nr_unqueued_dirty == nr_taken)
		wakeup_flusher_threads(WB_REASON_VMSCAN);

	/* Export per-batch stats so shrink_node() can detect stalls */
	sc->nr.dirty += stat.nr_dirty;
	sc->nr.congested += stat.nr_congested;
	sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
	sc->nr.writeback += stat.nr_writeback;
	sc->nr.immediate += stat.nr_immediate;
	sc->nr.taken += nr_taken;
	if (file)
		sc->nr.file_taken += nr_taken;

	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
			nr_scanned, nr_reclaimed, &stat, sc->priority, file);
	return nr_reclaimed;
}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
/*
 * Move pages from a private list back onto the given LRU list, dropping
 * the last isolation reference.  Pages whose refcount falls to zero are
 * collected on @pages_to_free (or, for compound pages, freed directly
 * with the lru_lock temporarily dropped).  Returns the number of pages
 * actually moved onto the LRU.  Must be called with pgdat->lru_lock held.
 */
static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
					 struct list_head *list,
					 struct list_head *pages_to_free,
					 enum lru_list lru)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	struct page *page;
	int nr_pages;
	int nr_moved = 0;

	while (!list_empty(list)) {
		page = lru_to_page(list);
		/* re-resolve the lruvec: pages may belong to different memcgs */
		lruvec = mem_cgroup_page_lruvec(page, pgdat);

		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);

		nr_pages = hpage_nr_pages(page);
		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
		list_move(&page->lru, &lruvec->lists[lru]);

		if (put_page_testzero(page)) {
			/* we held the last reference; undo the LRU linkage */
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				/* compound dtor may sleep/do work: drop the lock */
				spin_unlock_irq(&pgdat->lru_lock);
				mem_cgroup_uncharge(page);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&pgdat->lru_lock);
			} else
				list_add(&page->lru, pages_to_free);
		} else {
			nr_moved += nr_pages;
		}
	}

	/* moving pages to an inactive list counts as deactivation */
	if (!is_active_lru(lru)) {
		__count_vm_events(PGDEACTIVATE, nr_moved);
		count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				   nr_moved);
	}

	return nr_moved;
}
1904
/*
 * Scan the active LRU: isolate up to nr_to_scan pages, demote most of
 * them to the inactive list, and rotate referenced executable file pages
 * back onto the active list.  Nothing is reclaimed directly here.
 */
static void shrink_active_list(unsigned long nr_to_scan,
			       struct lruvec *lruvec,
			       struct scan_control *sc,
			       enum lru_list lru)
{
	unsigned long nr_taken;
	unsigned long nr_scanned;
	unsigned long vm_flags;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_active);
	LIST_HEAD(l_inactive);
	struct page *page;
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	unsigned nr_deactivate, nr_activate;
	unsigned nr_rotated = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;

	spin_lock_irq(&pgdat->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
				     &nr_scanned, sc, isolate_mode, lru);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
	reclaim_stat->recent_scanned[file] += nr_taken;

	__count_vm_events(PGREFILL, nr_scanned);
	count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);

	spin_unlock_irq(&pgdat->lru_lock);

	/* Sort the isolated pages into l_active / l_inactive, unlocked */
	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		if (unlikely(!page_evictable(page))) {
			putback_lru_page(page);
			continue;
		}

		if (unlikely(buffer_heads_over_limit)) {
			/* try to strip buffer heads to relieve bh pressure */
			if (page_has_private(page) && trylock_page(page)) {
				if (page_has_private(page))
					try_to_release_page(page, 0);
				unlock_page(page);
			}
		}

		if (page_referenced(page, 0, sc->target_mem_cgroup,
				    &vm_flags)) {
			nr_rotated += hpage_nr_pages(page);
			/*
			 * Identify referenced, file-backed active pages and
			 * give them one more trip around the active list. So
			 * that executable code get better chances to stay in
			 * memory under moderate memory pressure.  Anon pages
			 * are not likely to be evicted by use-once streaming
			 * IO, plus JVM can create lots of anon VM_EXEC pages,
			 * so we ignore them here.
			 */
			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}

		ClearPageActive(page);	/* we are de-activating */
		list_add(&page->lru, &l_inactive);
	}

	/*
	 * Move pages back to the lru list.
	 */
	spin_lock_irq(&pgdat->lru_lock);
	/*
	 * Count referenced pages from currently used mappings as rotated,
	 * even though only some of them are actually re-activated.  This
	 * helps balance scan pressure between file and anonymous pages in
	 * get_scan_count.
	 */
	reclaim_stat->recent_rotated[file] += nr_rotated;

	/* l_hold is reused here to collect pages that dropped to refcount 0 */
	nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
	nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
	spin_unlock_irq(&pgdat->lru_lock);

	mem_cgroup_uncharge_list(&l_hold);
	free_unref_page_list(&l_hold);
	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
			nr_deactivate, nr_rotated, sc->priority, file);
}
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
/*
 * Decide whether the inactive list (anon or file) is too small relative
 * to the active list and therefore the active list should be shrunk.
 * The target inactive:active ratio scales with the square root of the
 * list size in gigabytes (1:1 below 1GB).  During actual file reclaim,
 * observed refaults since the last snapshot force deactivation by
 * driving the ratio to 0.
 */
static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
				 struct mem_cgroup *memcg,
				 struct scan_control *sc, bool actual_reclaim)
{
	enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	enum lru_list inactive_lru = file * LRU_FILE;
	unsigned long inactive, active;
	unsigned long inactive_ratio;
	unsigned long refaults;
	unsigned long gb;

	/*
	 * If we don't have swap space, anonymous page deactivation
	 * is pointless.
	 */
	if (!file && !total_swap_pages)
		return false;

	inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
	active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);

	if (memcg)
		refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
	else
		refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);

	/*
	 * When refaults are being observed, it means a new workingset
	 * is being established. Disable active list protection to get
	 * rid of the stale workingset quickly.
	 */
	if (file && actual_reclaim && lruvec->refaults != refaults) {
		inactive_ratio = 0;
	} else {
		gb = (inactive + active) >> (30 - PAGE_SHIFT);
		if (gb)
			inactive_ratio = int_sqrt(10 * gb);
		else
			inactive_ratio = 1;
	}

	if (actual_reclaim)
		trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
			lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
			lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
			inactive_ratio, file);

	return inactive * inactive_ratio < active;
}
2083
2084static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2085 struct lruvec *lruvec, struct mem_cgroup *memcg,
2086 struct scan_control *sc)
2087{
2088 if (is_active_lru(lru)) {
2089 if (inactive_list_is_low(lruvec, is_file_lru(lru),
2090 memcg, sc, true))
2091 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2092 return 0;
2093 }
2094
2095 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2096}
2097
/* Outcome of the anon/file balancing decision in get_scan_count(). */
enum scan_balance {
	SCAN_EQUAL,	/* scan both lists relative to their size */
	SCAN_FRACT,	/* scan by fraction[]/denominator proportion */
	SCAN_ANON,	/* scan only the anonymous LRUs */
	SCAN_FILE,	/* scan only the file LRUs */
};
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned we did rotate back
 * onto the active list instead of evict.
 *
 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
 * *lru_pages is set to the total number of evictable pages considered.
 */
static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
			   struct scan_control *sc, unsigned long *nr,
			   unsigned long *lru_pages)
{
	int swappiness = mem_cgroup_swappiness(memcg);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	u64 fraction[2];
	u64 denominator = 0;	/* gcc */
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	unsigned long anon_prio, file_prio;
	enum scan_balance scan_balance;
	unsigned long anon, file;
	unsigned long ap, fp;
	enum lru_list lru;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Global reclaim will swap to prevent OOM even with no
	 * swappiness, but memcg users want to use this knob to
	 * disable swapping for individual groups completely when
	 * using the memory controller's swap limit feature would be
	 * too expensive.
	 */
	if (!global_reclaim(sc) && !swappiness) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Do not apply any pressure balancing cleverness when the
	 * system is close to OOM, scan both anon and file equally
	 * (unless the swappiness setting disagrees with swapping).
	 */
	if (!sc->priority && swappiness) {
		scan_balance = SCAN_EQUAL;
		goto out;
	}

	/*
	 * Prevent the reclaimer from falling into the cache trap: as
	 * cache pages start out inactive, every cache fault will tip
	 * the scan balance towards the file LRU.  And as the file LRU
	 * shrinks, so does the window for rotation from references.
	 * This means we have a runaway feedback loop where a tiny
	 * thrashing file LRU becomes infinitely more attractive than
	 * anon pages.  Try to detect this based on file LRU size.
	 */
	if (global_reclaim(sc)) {
		unsigned long pgdatfile;
		unsigned long pgdatfree;
		int z;
		unsigned long total_high_wmark = 0;

		pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
		pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
			   node_page_state(pgdat, NR_INACTIVE_FILE);

		for (z = 0; z < MAX_NR_ZONES; z++) {
			struct zone *zone = &pgdat->node_zones[z];
			if (!managed_zone(zone))
				continue;

			total_high_wmark += high_wmark_pages(zone);
		}

		if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
			/*
			 * Force SCAN_ANON if there are enough inactive
			 * anonymous pages on the LRU in eligible zones.
			 * Otherwise, the small LRU gets thrashed.
			 */
			if (!inactive_list_is_low(lruvec, false, memcg, sc, false) &&
			    lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
					>> sc->priority) {
				scan_balance = SCAN_ANON;
				goto out;
			}
		}
	}

	/*
	 * If there is enough inactive page cache, i.e. if the size of the
	 * inactive list is greater than that of the active list *and* the
	 * inactive list actually has some pages to scan on this priority, we
	 * do not reclaim anything from the anonymous working set right now.
	 * Without the second condition we could end up never scanning an
	 * lruvec even if it has plenty of old anonymous pages unless the
	 * system is under heavy pressure.
	 */
	if (!inactive_list_is_low(lruvec, true, memcg, sc, false) &&
	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	scan_balance = SCAN_FRACT;

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	anon_prio = swappiness;
	file_prio = 200 - anon_prio;

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages.  We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */
	anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);

	spin_lock_irq(&pgdat->lru_lock);
	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
		reclaim_stat->recent_scanned[0] /= 2;
		reclaim_stat->recent_rotated[0] /= 2;
	}

	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
		reclaim_stat->recent_scanned[1] /= 2;
		reclaim_stat->recent_rotated[1] /= 2;
	}

	/*
	 * The amount of pressure on anon vs file pages is inversely
	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
	 */
	ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
	fp /= reclaim_stat->recent_rotated[1] + 1;
	spin_unlock_irq(&pgdat->lru_lock);

	fraction[0] = ap;
	fraction[1] = fp;
	denominator = ap + fp + 1;
out:
	*lru_pages = 0;
	for_each_evictable_lru(lru) {
		int file = is_file_lru(lru);
		unsigned long size;
		unsigned long scan;

		size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
		scan = size >> sc->priority;
		/*
		 * If the cgroup's already been deleted, make sure to
		 * scrape out the remaining cache.
		 */
		if (!scan && !mem_cgroup_online(memcg))
			scan = min(size, SWAP_CLUSTER_MAX);

		switch (scan_balance) {
		case SCAN_EQUAL:
			/* Scan lists relative to size */
			break;
		case SCAN_FRACT:
			/*
			 * Scan types proportional to swappiness and
			 * their relative recent reclaim efficiency.
			 */
			scan = div64_u64(scan * fraction[file],
					 denominator);
			break;
		case SCAN_FILE:
		case SCAN_ANON:
			/* Scan one type exclusively */
			if ((scan_balance == SCAN_FILE) != file) {
				size = 0;
				scan = 0;
			}
			break;
		default:
			/* Look ma, no brain */
			BUG();
		}

		*lru_pages += size;
		nr[lru] = scan;
	}
}
2311
2312
2313
2314
/*
 * This is a basic per-node page freer.  Used by both kswapd and direct reclaim.
 * Shrinks the LRU lists of one memcg's lruvec on one node, rebalancing
 * remaining scan targets once the reclaim goal has been met.
 */
static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
			      struct scan_control *sc, unsigned long *lru_pages)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
	unsigned long nr[NR_LRU_LISTS];
	unsigned long targets[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	enum lru_list lru;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
	struct blk_plug plug;
	bool scan_adjusted;

	get_scan_count(lruvec, memcg, sc, nr, lru_pages);

	/* Record the original scan target for proportional adjustments later */
	memcpy(targets, nr, sizeof(nr));

	/*
	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
	 * event that can occur when there is little memory pressure e.g.
	 * multiple streaming readers/writers. In this case, we do not
	 * want to over reclaim as the benefit is marginal. By comparison,
	 * when kswapd is awake, the allocations needing multiple pages
	 * are in trouble and reclaim must not be proportionally reduced
	 * to ensure that kswapd does not need to be woken again.
	 */
	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
			 sc->priority == DEF_PRIORITY);

	blk_start_plug(&plug);
	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		unsigned long nr_anon, nr_file, percentage;
		unsigned long nr_scanned;

		/* Shrink each list in SWAP_CLUSTER_MAX-sized batches */
		for_each_evictable_lru(lru) {
			if (nr[lru]) {
				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
				nr[lru] -= nr_to_scan;

				nr_reclaimed += shrink_list(lru, nr_to_scan,
							    lruvec, memcg, sc);
			}
		}

		cond_resched();

		if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
			continue;

		/*
		 * For kswapd and memcg, reclaim at least the number of pages
		 * requested. Ensure that the anon and file LRUs are scanned
		 * proportionally what was requested by get_scan_count(). We
		 * stop reclaiming one LRU and reduce the amount scanning
		 * proportional to the original scan target.
		 */
		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];

		/*
		 * It's just vindictive to attack the larger once the smaller
		 * has gone to zero.  And given the way we stop scanning the
		 * smaller below, this makes sure that we only make one nudge
		 * towards proportionality once we've got nr_to_reclaim.
		 */
		if (!nr_file || !nr_anon)
			break;

		if (nr_file > nr_anon) {
			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
						targets[LRU_ACTIVE_ANON] + 1;
			lru = LRU_BASE;
			percentage = nr_anon * 100 / scan_target;
		} else {
			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
						targets[LRU_ACTIVE_FILE] + 1;
			lru = LRU_FILE;
			percentage = nr_file * 100 / scan_target;
		}

		/* Stop scanning the smaller of the LRU */
		nr[lru] = 0;
		nr[lru + LRU_ACTIVE] = 0;

		/*
		 * Recalculate the other LRU scan count based on its original
		 * scan target and the percentage scanning already complete
		 */
		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= min(nr[lru], nr_scanned);

		lru += LRU_ACTIVE;
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= min(nr[lru], nr_scanned);

		scan_adjusted = true;
	}
	blk_finish_plug(&plug);
	sc->nr_reclaimed += nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (inactive_list_is_low(lruvec, false, memcg, sc, true))
		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
				   sc, LRU_ACTIVE_ANON);
}
2430
2431
2432static bool in_reclaim_compaction(struct scan_control *sc)
2433{
2434 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2435 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2436 sc->priority < DEF_PRIORITY - 2))
2437 return true;
2438
2439 return false;
2440}
2441
2442
2443
2444
2445
2446
2447
2448
/*
 * Reclaim/compaction is used for high-order allocation requests. It reclaims
 * order-0 pages before compacting the zone. should_continue_reclaim() returns
 * true if more pages should be reclaimed such that when the page allocator
 * calls try_to_compact_pages() that it will have enough free pages to succeed.
 * It will give up earlier than that if there is difficulty reclaiming pages.
 */
static inline bool should_continue_reclaim(struct pglist_data *pgdat,
					unsigned long nr_reclaimed,
					unsigned long nr_scanned,
					struct scan_control *sc)
{
	unsigned long pages_for_compaction;
	unsigned long inactive_lru_pages;
	int z;

	/* If not in reclaim/compaction mode, stop */
	if (!in_reclaim_compaction(sc))
		return false;

	/* Consider stopping depending on scan and reclaim activity */
	if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) {
		/*
		 * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the
		 * full LRU list has been scanned and we are still failing
		 * to reclaim pages. This full LRU scan is potentially
		 * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed
		 */
		if (!nr_reclaimed && !nr_scanned)
			return false;
	} else {
		/*
		 * For non-__GFP_RETRY_MAYFAIL allocations which can presumably
		 * fail without consequence, stop if we failed to reclaim
		 * any pages from the last SWAP_CLUSTER_MAX number of
		 * pages that were scanned. This will return to the
		 * caller faster at the risk reclaim/compaction and
		 * the resulting allocation attempt fails
		 */
		if (!nr_reclaimed)
			return false;
	}

	/*
	 * If we have not reclaimed enough pages for compaction and the
	 * inactive lists are large enough, continue reclaiming
	 */
	pages_for_compaction = compact_gap(sc->order);
	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
	if (sc->nr_reclaimed < pages_for_compaction &&
			inactive_lru_pages > pages_for_compaction)
		return true;

	/* If compaction would go ahead or the allocation would succeed, stop */
	for (z = 0; z <= sc->reclaim_idx; z++) {
		struct zone *zone = &pgdat->node_zones[z];
		if (!managed_zone(zone))
			continue;

		switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
		case COMPACT_SUCCESS:
		case COMPACT_CONTINUE:
			return false;
		default:
			/* check next zone */
			;
		}
	}
	return true;
}
2514
2515static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
2516{
2517 return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
2518 (memcg && memcg_congested(pgdat, memcg));
2519}
2520
/*
 * Shrink one node: walk the memcg hierarchy below sc->target_mem_cgroup,
 * reclaiming LRU pages and slab for each, then decide whether to mark
 * the node congested/dirty/writeback-bound and whether to stall or loop
 * again for reclaim/compaction.  Returns true if any progress was made.
 */
static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long nr_reclaimed, nr_scanned;
	bool reclaimable = false;

	do {
		struct mem_cgroup *root = sc->target_mem_cgroup;
		struct mem_cgroup_reclaim_cookie reclaim = {
			.pgdat = pgdat,
			.priority = sc->priority,
		};
		unsigned long node_lru_pages = 0;
		struct mem_cgroup *memcg;

		memset(&sc->nr, 0, sizeof(sc->nr));

		nr_reclaimed = sc->nr_reclaimed;
		nr_scanned = sc->nr_scanned;

		memcg = mem_cgroup_iter(root, NULL, &reclaim);
		do {
			unsigned long lru_pages;
			unsigned long reclaimed;
			unsigned long scanned;

			/* Skip memory.low-protected groups unless forced */
			if (mem_cgroup_low(root, memcg)) {
				if (!sc->memcg_low_reclaim) {
					sc->memcg_low_skipped = 1;
					continue;
				}
				memcg_memory_event(memcg, MEMCG_LOW);
			}

			reclaimed = sc->nr_reclaimed;
			scanned = sc->nr_scanned;
			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
			node_lru_pages += lru_pages;

			if (memcg)
				shrink_slab(sc->gfp_mask, pgdat->node_id,
					    memcg, sc->priority);

			/* Record the group's reclaim efficiency */
			vmpressure(sc->gfp_mask, memcg, false,
				   sc->nr_scanned - scanned,
				   sc->nr_reclaimed - reclaimed);

			/*
			 * Direct reclaim and kswapd have to scan all memory
			 * cgroups to fulfill the overall scan target for the
			 * node.
			 *
			 * Limit reclaim, on the other hand, only cares about
			 * nr_to_reclaim pages to be reclaimed and it will
			 * retry with decreasing priority if one round over the
			 * whole hierarchy is not sufficient.
			 */
			if (!global_reclaim(sc) &&
					sc->nr_reclaimed >= sc->nr_to_reclaim) {
				mem_cgroup_iter_break(root, memcg);
				break;
			}
		} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));

		if (global_reclaim(sc))
			shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
				    sc->priority);

		if (reclaim_state) {
			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}

		/* Record the subtree's reclaim efficiency */
		vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
			   sc->nr_scanned - nr_scanned,
			   sc->nr_reclaimed - nr_reclaimed);

		if (sc->nr_reclaimed - nr_reclaimed)
			reclaimable = true;

		if (current_is_kswapd()) {
			/*
			 * If reclaim is isolating dirty pages under writeback,
			 * it implies that the long-lived page allocation rate
			 * is exceeding the page laundering rate. Either the
			 * global limits are not being effective at throttling
			 * processes due to the page distribution throughout
			 * zones or there is heavy usage of a slow backing
			 * device. The only option is to throttle from reclaim
			 * context which is not ideal as there is no guarantee
			 * the dirtying process is throttled in the same way
			 * balance_dirty_pages() manages.
			 *
			 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
			 * count the number of pages under pages flagged for
			 * immediate reclaim and stall if any are encountered
			 * in the nr_immediate check below.
			 */
			if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
				set_bit(PGDAT_WRITEBACK, &pgdat->flags);

			/*
			 * Tag a node as congested if all the dirty pages
			 * scanned were backed by a congested BDI and
			 * wait_iff_congested will stall.
			 */
			if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
				set_bit(PGDAT_CONGESTED, &pgdat->flags);

			/* Allow kswapd to start writing pages during reclaim.*/
			if (sc->nr.unqueued_dirty == sc->nr.file_taken)
				set_bit(PGDAT_DIRTY, &pgdat->flags);

			/*
			 * If kswapd scans pages marked marked for immediate
			 * reclaim and under writeback (nr_immediate), it
			 * implies that pages are cycling through the LRU
			 * faster than they are written so also forcibly stall.
			 */
			if (sc->nr.immediate)
				congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

		/*
		 * Legacy memcg will stall in page writeback so avoid forcibly
		 * stalling in wait_iff_congested().
		 */
		if (!global_reclaim(sc) && sane_reclaim(sc) &&
		    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
			set_memcg_congestion(pgdat, root, true);

		/*
		 * Stall direct reclaim for IO completions if underlying BDIs
		 * and node is congested. Allow kswapd to continue until it
		 * starts encountering unqueued dirty pages or cycling through
		 * the LRU too quickly.
		 */
		if (!sc->hibernation_mode && !current_is_kswapd() &&
		   current_may_throttle() && pgdat_memcg_congested(pgdat, root))
			wait_iff_congested(BLK_RW_ASYNC, HZ/10);

	} while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
					 sc->nr_scanned - nr_scanned, sc));

	/*
	 * Kswapd gives up on balancing particular nodes after too
	 * many failures to reclaim anything from them and goes to
	 * sleep. On reclaim progress, reset the failure counter. A
	 * successful direct reclaim run will revive a dormant kswapd.
	 */
	if (reclaimable)
		pgdat->kswapd_failures = 0;

	return reclaimable;
}
2678
2679
2680
2681
2682
2683
2684static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2685{
2686 unsigned long watermark;
2687 enum compact_result suitable;
2688
2689 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
2690 if (suitable == COMPACT_SUCCESS)
2691
2692 return true;
2693 if (suitable == COMPACT_SKIPPED)
2694
2695 return false;
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706 watermark = high_wmark_pages(zone) + compact_gap(sc->order);
2707
2708 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
2709}
2710
2711
2712
2713
2714
2715
2716
2717
2718
/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	gfp_t orig_mask;
	pg_data_t *last_pgdat = NULL;

	/*
	 * If the number of buffer_heads in the machine exceeds the maximum
	 * allowed level, force direct reclaim to scan the highmem zone as
	 * highmem pages could be pinning lowmem pages storing buffer_heads
	 */
	orig_mask = sc->gfp_mask;
	if (buffer_heads_over_limit) {
		sc->gfp_mask |= __GFP_HIGHMEM;
		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
	}

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					sc->reclaim_idx, sc->nodemask) {
		/*
		 * Take care memory controller reclaiming has small influence
		 * to global LRU.
		 */
		if (global_reclaim(sc)) {
			if (!cpuset_zone_allowed(zone,
						 GFP_KERNEL | __GFP_HARDWALL))
				continue;

			/*
			 * If we already have plenty of memory free for
			 * compaction in this zone, don't free any more.
			 * Even though compaction is invoked for any
			 * non-zero order, only frequent costly order
			 * reclamation is disruptive enough to become a
			 * noticeable problem, like transparent huge
			 * page allocations.
			 */
			if (IS_ENABLED(CONFIG_COMPACTION) &&
			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
			    compaction_ready(zone, sc)) {
				sc->compaction_ready = true;
				continue;
			}

			/*
			 * Shrink each node in the zonelist once. If the
			 * zonelist is ordered by zone (not the default) then a
			 * node may be shrunk multiple times but in that case
			 * the user prefers lower zones being preserved.
			 */
			if (zone->zone_pgdat == last_pgdat)
				continue;

			/*
			 * This steals pages from memory cgroups over softlimit
			 * and returns the number of reclaimed pages and
			 * scanned pages. This works for global memory pressure
			 * and balancing, not for a memcg's limit.
			 */
			nr_soft_scanned = 0;
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
						sc->order, sc->gfp_mask,
						&nr_soft_scanned);
			sc->nr_reclaimed += nr_soft_reclaimed;
			sc->nr_scanned += nr_soft_scanned;
			/* need some check for avoid more shrink_zone() */
		}

		/* See comment about same check for global reclaim above */
		if (zone->zone_pgdat == last_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		shrink_node(zone->zone_pgdat, sc);
	}

	/*
	 * Restore to original mask to avoid the impact on the caller if we
	 * promoted it to __GFP_HIGHMEM.
	 */
	sc->gfp_mask = orig_mask;
}
2803
/*
 * Snapshot the current WORKINGSET_ACTIVATE counter into each lruvec under
 * root_memcg on this node, so inactive_list_is_low() can later detect
 * new refault activity by comparing against lruvec->refaults.
 * Note the do/while runs once even when the iterator yields NULL (the
 * non-memcg case), snapshotting the node-wide counter.
 */
static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
{
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(root_memcg, NULL, NULL);
	do {
		unsigned long refaults;
		struct lruvec *lruvec;

		if (memcg)
			refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
		else
			refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);

		lruvec = mem_cgroup_lruvec(pgdat, memcg);
		lruvec->refaults = refaults;
	} while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
}
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick the writeback threads and take explicit
 * naps in the hope that some of these pages can be written.  But if the
 * allocating task holds filesystem locks which prevent writeout this might not
 * work, and the allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 * 		else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					  struct scan_control *sc)
{
	int initial_priority = sc->priority;
	pg_data_t *last_pgdat;
	struct zoneref *z;
	struct zone *zone;
retry:
	delayacct_freepages_start();

	if (global_reclaim(sc))
		__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);

	/* Ratchet the priority down until we reclaim enough or hit 0 */
	do {
		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
				sc->priority);
		sc->nr_scanned = 0;
		shrink_zones(zonelist, sc);

		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			break;

		if (sc->compaction_ready)
			break;

		/*
		 * If we're getting trouble reclaiming, start doing
		 * writepage even in laptop mode.
		 */
		if (sc->priority < DEF_PRIORITY - 2)
			sc->may_writepage = 1;
	} while (--sc->priority >= 0);

	/* Snapshot refault counters once per node before returning */
	last_pgdat = NULL;
	for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
					sc->nodemask) {
		if (zone->zone_pgdat == last_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
		set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false);
	}

	delayacct_freepages_end();

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/* Aborted reclaim to try compaction? don't OOM, then */
	if (sc->compaction_ready)
		return 1;

	/* Untapped cgroup reserves?  Don't OOM, retry. */
	if (sc->memcg_low_skipped) {
		sc->priority = initial_priority;
		sc->memcg_low_reclaim = 1;
		sc->memcg_low_skipped = 0;
		goto retry;
	}

	return 0;
}
2901
/*
 * Decide whether direct reclaim may proceed on this node without being
 * throttled: true when free pages across the lower (<= ZONE_NORMAL)
 * zones exceed half of their combined min watermarks, or when kswapd
 * has already given up on the node.  May also wake kswapd when the
 * watermark check fails.
 */
static bool allow_direct_reclaim(pg_data_t *pgdat)
{
	struct zone *zone;
	unsigned long pfmemalloc_reserve = 0;
	unsigned long free_pages = 0;
	int i;
	bool wmark_ok;

	/* kswapd has given up on this node; do not throttle anyone on it */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	for (i = 0; i <= ZONE_NORMAL; i++) {
		zone = &pgdat->node_zones[i];
		if (!managed_zone(zone))
			continue;

		if (!zone_reclaimable_pages(zone))
			continue;

		pfmemalloc_reserve += min_wmark_pages(zone);
		free_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/* If there are no reserves (unexpected config) then do not throttle */
	if (!pfmemalloc_reserve)
		return true;

	wmark_ok = free_pages > pfmemalloc_reserve / 2;

	/* kswapd must be awake if processes are being throttled */
	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
		pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
						(enum zone_type)ZONE_NORMAL);
		wake_up_interruptible(&pgdat->kswapd_wait);
	}

	return wmark_ok;
}
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
/*
 * Throttle direct reclaimers if backing storage is backed by the network
 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
 * depleted. kswapd will continue to make progress and wake the processes
 * when the low watermark is reached.
 *
 * Returns true if a fatal signal was delivered during throttling. If this
 * happens, the page allocator should not consider triggering the OOM killer.
 */
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
					nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *pgdat = NULL;

	/*
	 * Kernel threads should not be throttled as they may be indirectly
	 * responsible for cleaning pages necessary for reclaim to make forward
	 * progress. kjournald for example may enter direct reclaim while
	 * committing a transaction where throttling it could forcing other
	 * processes to block on log_wait_commit().
	 */
	if (current->flags & PF_KTHREAD)
		goto out;

	/*
	 * If a fatal signal is pending, this process should not throttle.
	 * It should return quickly so it can exit and free its memory
	 */
	if (fatal_signal_pending(current))
		goto out;

	/*
	 * Check if the pfmemalloc reserves are ok by finding the first node
	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
	 * GFP_KERNEL will be required for allocating network buffers when
	 * swapping over the network so ZONE_HIGHMEM is unusable.
	 *
	 * Throttling is based on the first usable node and throttled processes
	 * wait on a queue until kswapd makes progress and wakes them. There
	 * is an affinity then between processes waking up and where reclaim
	 * progress has been made assuming the process wakes on the same node.
	 * More importantly, processes running on remote nodes will not compete
	 * for remote pfmemalloc reserves and processes on different nodes
	 * should make reasonable progress.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		if (zone_idx(zone) > ZONE_NORMAL)
			continue;

		/* Throttle based on the first usable node */
		pgdat = zone->zone_pgdat;
		if (allow_direct_reclaim(pgdat))
			goto out;
		break;
	}

	/* If no zone was usable by the allocation flags then do not throttle */
	if (!pgdat)
		goto out;

	/* Account for the throttling */
	count_vm_event(PGSCAN_DIRECT_THROTTLE);

	/*
	 * If the caller cannot enter the filesystem, it's possible that it
	 * is due to the caller holding an FS lock or performing a journal
	 * transaction in the case of a filesystem like ext[3|4]. In this case,
	 * it is not safe to block on pfmemalloc_wait as kswapd could be
	 * blocked waiting on the same lock. Instead, throttle for up to a
	 * second before continuing.
	 */
	if (!(gfp_mask & __GFP_FS)) {
		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
			allow_direct_reclaim(pgdat), HZ);

		goto check_pending;
	}

	/* Throttle until kswapd wakes the process */
	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
		allow_direct_reclaim(pgdat));

check_pending:
	if (fatal_signal_pending(current))
		return true;

out:
	return false;
}
3033
/*
 * try_to_free_pages - entry point for direct page reclaim from the page
 * allocator.  Builds a scan_control for the request, applies pfmemalloc
 * throttling, and runs do_try_to_free_pages().  Returns the number of
 * pages reclaimed (1 when throttled with a fatal signal pending, so the
 * allocator does not trigger the OOM killer).
 */
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned long nr_reclaimed;
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.gfp_mask = current_gfp_context(gfp_mask),
		.reclaim_idx = gfp_zone(gfp_mask),
		.order = order,
		.nodemask = nodemask,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = 1,
	};

	/*
	 * Do not enter reclaim if fatal signal was delivered while throttled.
	 * 1 is returned so that the page allocator does not OOM kill at this
	 * point.
	 */
	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
		return 1;

	trace_mm_vmscan_direct_reclaim_begin(order,
				sc.may_writepage,
				sc.gfp_mask,
				sc.reclaim_idx);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);

	return nr_reclaimed;
}
3069
3070#ifdef CONFIG_MEMCG
3071
/*
 * Reclaim from a single memcg's lruvec on one node on behalf of soft
 * limit reclaim.  Only this memcg is shrunk (not the hierarchy), the
 * priority is left at 0 so get_scan_count() sees maximum pressure, and
 * the number of scanned pages is reported back through *nr_scanned.
 * Returns the number of pages reclaimed.
 */
unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned)
{
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.target_mem_cgroup = memcg,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.reclaim_idx = MAX_NR_ZONES - 1,
		.may_swap = !noswap,
	};
	unsigned long lru_pages;

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);

	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
						      sc.may_writepage,
						      sc.gfp_mask,
						      sc.reclaim_idx);

	/*
	 * NOTE: Although we can get the priority field, using it
	 * here is not a good idea, since it limits the pages we can scan.
	 * if we don't reclaim here, the shrink_node from balance_pgdat
	 * will pick up pages from other mem cgroup's as well. We hack
	 * the priority and make it zero.
	 */
	shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);

	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);

	*nr_scanned = sc.nr_scanned;
	return sc.nr_reclaimed;
}
3109
/*
 * Full hierarchical reclaim on behalf of a memcg hitting its limit:
 * pick a victim node, run do_try_to_free_pages() targeted at the memcg
 * with reclaim disabled for the reclaimer itself, and return the number
 * of pages reclaimed.
 */
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
					   unsigned long nr_pages,
					   gfp_t gfp_mask,
					   bool may_swap)
{
	struct zonelist *zonelist;
	unsigned long nr_reclaimed;
	int nid;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
		.reclaim_idx = MAX_NR_ZONES - 1,
		.target_mem_cgroup = memcg,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = may_swap,
	};

	/*
	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
	 * take care of from where we get pages. So the node where we start the
	 * scan does not need to be the current node.
	 */
	nid = mem_cgroup_select_victim_node(memcg);

	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];

	trace_mm_vmscan_memcg_reclaim_begin(0,
					    sc.may_writepage,
					    sc.gfp_mask,
					    sc.reclaim_idx);

	/* prevent recursive reclaim while we reclaim on the memcg's behalf */
	noreclaim_flag = memalloc_noreclaim_save();
	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
	memalloc_noreclaim_restore(noreclaim_flag);

	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);

	return nr_reclaimed;
}
3153#endif
3154
/*
 * Age the active anon list of every memcg on this node: if a lruvec's
 * inactive anon list has become too small, deactivate a batch from the
 * active list so anon pages keep flowing toward eviction.  No-op when
 * there is no swap configured.
 */
static void age_active_anon(struct pglist_data *pgdat,
				struct scan_control *sc)
{
	struct mem_cgroup *memcg;

	if (!total_swap_pages)
		return;

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);

		if (inactive_list_is_low(lruvec, false, memcg, sc, true))
			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
					   sc, LRU_ACTIVE_ANON);

		memcg = mem_cgroup_iter(NULL, memcg, NULL);
	} while (memcg);
}
3174
3175
3176
3177
3178
/*
 * Returns true if there is an eligible zone balanced for the request order
 * and classzone_idx.
 */
static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
	int i;
	unsigned long mark = -1;	/* stays -1 if no managed zone is found */
	struct zone *zone;

	for (i = 0; i <= classzone_idx; i++) {
		zone = pgdat->node_zones + i;

		if (!managed_zone(zone))
			continue;

		mark = high_wmark_pages(zone);
		if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
			return true;
	}

	/*
	 * If a node has no populated zone within classzone_idx, it does not
	 * need balancing by definition. This can happen in a zone-restricted
	 * allocation trying to wake a remote kswapd.
	 */
	if (mark == -1)
		return true;

	return false;
}
3206
3207
/* Clear pgdat state for congested, dirty or under writeback. */
static void clear_pgdat_congested(pg_data_t *pgdat)
{
	clear_bit(PGDAT_CONGESTED, &pgdat->flags);
	clear_bit(PGDAT_DIRTY, &pgdat->flags);
	clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
}
3214
3215
3216
3217
3218
3219
3220
/*
 * Prepare kswapd for sleeping. This verifies that there are no processes
 * waiting in throttle_direct_reclaim() and that watermarks have been met.
 *
 * Returns true if kswapd is ready to sleep
 */
static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
{
	/*
	 * The throttled processes are normally woken up in balance_pgdat() as
	 * soon as allow_direct_reclaim() is true. But there is a potential
	 * race between when kswapd checks the watermarks and a process gets
	 * throttled. There is also a potential race if processes get
	 * throttled, kswapd wakes, a large process exits thereby balancing the
	 * zones, which causes kswapd to exit balance_pgdat() before reaching
	 * the wake up checks. If kswapd is going to sleep, no process should
	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
	 * the wake up is premature, processes will wake kswapd and get
	 * throttled again. The difference from wake ups in balance_pgdat() is
	 * that here we are under prepare_to_wait().
	 */
	if (waitqueue_active(&pgdat->pfmemalloc_wait))
		wake_up_all(&pgdat->pfmemalloc_wait);

	/* Hopeless node, leave it to direct reclaim */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	if (pgdat_balanced(pgdat, order, classzone_idx)) {
		clear_pgdat_congested(pgdat);
		return true;
	}

	return false;
}
3250
3251
3252
3253
3254
3255
3256
3257
3258
/*
 * kswapd shrinks a node of pages that are at or below the highest usable
 * zone that is currently unbalanced.
 *
 * Returns true if kswapd scanned at least the requested number of pages to
 * reclaim or if the lack of progress was due to pages under writeback.
 * This is used to determine if the scanning priority needs to be raised.
 */
static bool kswapd_shrink_node(pg_data_t *pgdat,
			       struct scan_control *sc)
{
	struct zone *zone;
	int z;

	/* Reclaim a number of pages proportional to the number of zones */
	sc->nr_to_reclaim = 0;
	for (z = 0; z <= sc->reclaim_idx; z++) {
		zone = pgdat->node_zones + z;
		if (!managed_zone(zone))
			continue;

		sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
	}

	/*
	 * Historically care was taken to put equal pressure on all zones but
	 * now pressure is applied based on node LRU order.
	 */
	shrink_node(pgdat, sc);

	/*
	 * Fragmentation may mean that the system cannot be rebalanced for
	 * high-order allocations. If twice the allocation size has been
	 * reclaimed then recheck watermarks only at order-0 to prevent
	 * excessive reclaim. Assume that a process requested a high-order
	 * can direct reclaim/compact.
	 */
	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
		sc->order = 0;

	return sc->nr_scanned >= sc->nr_to_reclaim;
}
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
/*
 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
 * that are eligible for use by the caller until at least one zone is
 * balanced.
 *
 * Returns the order kswapd finished reclaiming at.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
 * or lower is eligible for reclaim until at least one usable zone is
 * balanced.
 */
static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
{
	int i;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	struct zone *zone;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.order = order,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = 1,
	};
	count_vm_event(PAGEOUTRUN);

	do {
		unsigned long nr_reclaimed = sc.nr_reclaimed;
		bool raise_priority = true;

		/* By default reclaim is limited to the requested classzone. */
		sc.reclaim_idx = classzone_idx;

		/*
		 * If the number of buffer_heads exceeds the maximum allowed
		 * then consider reclaiming from all zones. This has a dual
		 * purpose -- on 64-bit systems it is expected that
		 * buffer_heads are stripped during active rotation. On 32-bit
		 * systems, highmem pages can pin lowmem memory and shrinking
		 * buffers can relieve lowmem pressure. Reclaim may still not
		 * go ahead if all eligible zones for the original allocation
		 * request are balanced to avoid excessive reclaim from kswapd.
		 */
		if (buffer_heads_over_limit) {
			for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
				zone = pgdat->node_zones + i;
				if (!managed_zone(zone))
					continue;

				sc.reclaim_idx = i;
				break;
			}
		}

		/*
		 * Only reclaim if there are no eligible zones. Note that
		 * sc.reclaim_idx is not used as buffer_heads_over_limit may
		 * have adjusted it.
		 */
		if (pgdat_balanced(pgdat, sc.order, classzone_idx))
			goto out;

		/*
		 * Do some background aging of the anon list, to give
		 * pages a chance to be referenced before reclaiming. All
		 * pages are rotated regardless of classzone as this is
		 * about consistent aging.
		 */
		age_active_anon(pgdat, &sc);

		/*
		 * If we're getting trouble reclaiming, start doing writepage
		 * even in laptop mode.
		 */
		if (sc.priority < DEF_PRIORITY - 2)
			sc.may_writepage = 1;

		/* Call soft limit reclaim before calling shrink_node. */
		sc.nr_scanned = 0;
		nr_soft_scanned = 0;
		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
						sc.gfp_mask, &nr_soft_scanned);
		sc.nr_reclaimed += nr_soft_reclaimed;

		/*
		 * There should be no need to raise the scanning priority if
		 * enough pages are already being scanned that that high
		 * watermark would be met at 100% efficiency.
		 */
		if (kswapd_shrink_node(pgdat, &sc))
			raise_priority = false;

		/*
		 * If the low watermark is met there is no need for processes
		 * to be throttled on pfmemalloc_wait as they should not be
		 * able to safely make forward progress. Wake them
		 */
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
				allow_direct_reclaim(pgdat))
			wake_up_all(&pgdat->pfmemalloc_wait);

		/* Check if kswapd should be suspending */
		if (try_to_freeze() || kthread_should_stop())
			break;

		/*
		 * Raise priority if scanning rate is too low or there was no
		 * progress in reclaiming pages
		 */
		nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
		if (raise_priority || !nr_reclaimed)
			sc.priority--;
	} while (sc.priority >= 1);

	/* No progress at any priority: count a failure toward MAX_RECLAIM_RETRIES. */
	if (!sc.nr_reclaimed)
		pgdat->kswapd_failures++;

out:
	snapshot_refaults(NULL, pgdat);
	/*
	 * Return the order kswapd stopped reclaiming at as
	 * prepare_kswapd_sleep() takes it into account. If another caller
	 * entered the allocator slow path while kswapd was awake, order will
	 * remain at the higher level.
	 */
	return sc.order;
}
3423
3424
3425
3426
3427
3428
3429
3430
3431static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
3432 enum zone_type classzone_idx)
3433{
3434 if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
3435 return classzone_idx;
3436
3437 return max(pgdat->kswapd_classzone_idx, classzone_idx);
3438}
3439
/*
 * kswapd goes to sleep in two phases: a short timed nap first, and only if
 * that nap completes undisturbed and the node is still balanced does it
 * sleep indefinitely until the next wakeup_kswapd().
 */
static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
				unsigned int classzone_idx)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/*
	 * Try to sleep for a short interval. Note that kcompactd will only be
	 * woken if it is possible to sleep for a short interval. This is
	 * deliberate on the assumption that if reclaim cannot keep an
	 * eligible zone balanced that it's also unlikely that compaction will
	 * succeed.
	 */
	if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
		/*
		 * Compaction records what page blocks it recently failed to
		 * isolate pages from and skips them in the future scanning.
		 * When kswapd is going to sleep, it is reasonable to assume
		 * that pages and compaction may succeed so reset the cache.
		 */
		reset_isolation_suitable(pgdat);

		/*
		 * We have freed the memory, now we should compact it to make
		 * allocation of the requested order possible.
		 */
		wakeup_kcompactd(pgdat, alloc_order, classzone_idx);

		remaining = schedule_timeout(HZ/10);

		/*
		 * If woken prematurely then reset kswapd_classzone_idx and
		 * order. The values will either be from a wakeup request or
		 * the previous request that slept prematurely.
		 */
		if (remaining) {
			pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
			pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
		}

		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After a short sleep, check if it was a premature sleep. If not, then
	 * go fully to sleep until explicitly woken up.
	 */
	if (!remaining &&
	    prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * vmstat counters are not perfectly accurate and the
		 * estimated value for counters such as NR_FREE_PAGES can
		 * deviate from the true value by nr_online_cpus * threshold.
		 * To avoid the zone watermarks being breached while under
		 * pressure, we reduce the monitoring threshold when kswapd
		 * is active, and restore the normal (larger) threshold
		 * before it goes to sleep.
		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);

		if (!kthread_should_stop())
			schedule();

		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		/* Premature wakeup: record which watermark kicked us early. */
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
}
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned int alloc_order, reclaim_order;
	unsigned int classzone_idx = MAX_NR_ZONES - 1;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	/* Prefer running on the CPUs local to this node's memory. */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	pgdat->kswapd_order = 0;
	pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
	for ( ; ; ) {
		bool ret;

		alloc_order = reclaim_order = pgdat->kswapd_order;
		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);

kswapd_try_sleep:
		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
					classzone_idx);

		/* Read the new order and classzone_idx */
		alloc_order = reclaim_order = pgdat->kswapd_order;
		classzone_idx = kswapd_classzone_idx(pgdat, 0);
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = MAX_NR_ZONES;

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator
		 */
		if (ret)
			continue;

		/*
		 * Reclaim begins at the requested order but if a high-order
		 * reclaim fails then kswapd falls back to reclaiming for
		 * order-0. If that happens, kswapd will consider sleeping
		 * for the order it finished reclaiming at (reclaim_order)
		 * but kcompactd is woken to compact for the original
		 * request (alloc_order).
		 */
		trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
						alloc_order);
		fs_reclaim_acquire(GFP_KERNEL);
		reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
		fs_reclaim_release(GFP_KERNEL);
		if (reclaim_order < alloc_order)
			goto kswapd_try_sleep;
	}

	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
	current->reclaim_state = NULL;

	return 0;
}
3615
3616
3617
3618
3619
3620
3621
3622
/*
 * A zone is low on free memory or too fragmented for high-order memory.  If
 * kswapd should reclaim (direct reclaim is deferred), wake it up for the
 * zone's pgdat.  It will wake up kcompactd after reclaiming memory.  If
 * kswapd reclaim has failed or is not needed, still wake up kcompactd if
 * only compaction is needed.
 */
void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
		   enum zone_type classzone_idx)
{
	pg_data_t *pgdat;

	if (!managed_zone(zone))
		return;

	if (!cpuset_zone_allowed(zone, gfp_flags))
		return;
	pgdat = zone->zone_pgdat;
	/* Record the request so kswapd picks it up even if already awake. */
	pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat,
							   classzone_idx);
	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;

	/* Hopeless node, leave it to direct reclaim if possible */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
	    pgdat_balanced(pgdat, order, classzone_idx)) {
		/*
		 * There may be plenty of free memory available, but it's too
		 * fragmented for high-order allocations.  Wake up kcompactd
		 * and rely on compaction_suitable() to determine if it's
		 * needed.  If it fails, it will defer subsequent attempts to
		 * ratelimit its work.
		 */
		if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
			wakeup_kcompactd(pgdat, order, classzone_idx);
		return;
	}

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
				      gfp_flags);
	wake_up_interruptible(&pgdat->kswapd_wait);
}
3659
3660#ifdef CONFIG_HIBERNATION
3661
3662
3663
3664
3665
3666
3667
3668
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number
 * of freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.nr_to_reclaim = nr_to_reclaim,
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.reclaim_idx = MAX_NR_ZONES - 1,
		.priority = DEF_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
		.hibernation_mode = 1,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	struct task_struct *p = current;
	unsigned long nr_reclaimed;
	unsigned int noreclaim_flag;

	/* Forbid reclaim recursion while we reclaim on hibernation's behalf. */
	noreclaim_flag = memalloc_noreclaim_save();
	fs_reclaim_acquire(sc.gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	/* Tear down in exact reverse order of the setup above. */
	p->reclaim_state = NULL;
	fs_reclaim_release(sc.gfp_mask);
	memalloc_noreclaim_restore(noreclaim_flag);

	return nr_reclaimed;
}
3700#endif
3701
3702
3703
3704
3705
3706static int kswapd_cpu_online(unsigned int cpu)
3707{
3708 int nid;
3709
3710 for_each_node_state(nid, N_MEMORY) {
3711 pg_data_t *pgdat = NODE_DATA(nid);
3712 const struct cpumask *mask;
3713
3714 mask = cpumask_of_node(pgdat->node_id);
3715
3716 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3717
3718 set_cpus_allowed_ptr(pgdat->kswapd, mask);
3719 }
3720 return 0;
3721}
3722
3723
3724
3725
3726
/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	/* Already running: nothing to do. */
	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state < SYSTEM_RUNNING);
		pr_err("Failed to start kswapd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kswapd);
		pgdat->kswapd = NULL;
	}
	return ret;
}
3745
3746
3747
3748
3749
3750void kswapd_stop(int nid)
3751{
3752 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3753
3754 if (kswapd) {
3755 kthread_stop(kswapd);
3756 NODE_DATA(nid)->kswapd = NULL;
3757 }
3758}
3759
/*
 * Boot-time setup: start a kswapd thread for every node with memory and
 * register the CPU-online callback that keeps them bound to local CPUs.
 */
static int __init kswapd_init(void)
{
	int nid, ret;

	swap_setup();
	for_each_node_state(nid, N_MEMORY)
 		kswapd_run(nid);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"mm/vmscan:online", kswapd_cpu_online,
					NULL);
	WARN_ON(ret < 0);
	return 0;
}

module_init(kswapd_init)
3775
3776#ifdef CONFIG_NUMA
3777
3778
3779
3780
3781
3782
/*
 * Node reclaim mode
 *
 * If non-zero call node_reclaim when the number of free pages falls below
 * the watermarks.
 */
int node_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_UNMAP (1<<2)	/* Unmap pages during reclaim */

/*
 * Priority for NODE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define NODE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for node_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;
3808
/* Estimate the number of unmapped file-backed pages on @pgdat. */
static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
{
	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
		node_page_state(pgdat, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
3822
3823
/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
{
	unsigned long nr_pagecache_reclaimable;
	unsigned long delta = 0;

	/*
	 * If RECLAIM_UNMAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and node_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (node_reclaim_mode & RECLAIM_UNMAP)
		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(node_reclaim_mode & RECLAIM_WRITE))
		delta += node_page_state(pgdat, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
3850
3851
3852
3853
3854static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
3855{
3856
3857 const unsigned long nr_pages = 1 << order;
3858 struct task_struct *p = current;
3859 struct reclaim_state reclaim_state;
3860 unsigned int noreclaim_flag;
3861 struct scan_control sc = {
3862 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3863 .gfp_mask = current_gfp_context(gfp_mask),
3864 .order = order,
3865 .priority = NODE_RECLAIM_PRIORITY,
3866 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
3867 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
3868 .may_swap = 1,
3869 .reclaim_idx = gfp_zone(gfp_mask),
3870 };
3871
3872 cond_resched();
3873
3874
3875
3876
3877
3878 noreclaim_flag = memalloc_noreclaim_save();
3879 p->flags |= PF_SWAPWRITE;
3880 fs_reclaim_acquire(sc.gfp_mask);
3881 reclaim_state.reclaimed_slab = 0;
3882 p->reclaim_state = &reclaim_state;
3883
3884 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
3885
3886
3887
3888
3889 do {
3890 shrink_node(pgdat, &sc);
3891 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
3892 }
3893
3894 p->reclaim_state = NULL;
3895 fs_reclaim_release(gfp_mask);
3896 current->flags &= ~PF_SWAPWRITE;
3897 memalloc_noreclaim_restore(noreclaim_flag);
3898 return sc.nr_reclaimed >= nr_pages;
3899}
3900
/*
 * Entry point for opportunistic per-node reclaim from the allocator
 * fast path.  Returns one of NODE_RECLAIM_FULL, NODE_RECLAIM_NOSCAN or
 * the result of __node_reclaim().
 */
int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	int ret;

	/*
	 * Node reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the node is overallocated. So we do not reclaim
	 * if less than a specified percentage of the node is used by
	 * unmapped file backed pages.
	 */
	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
	    node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
		return NODE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
		return NODE_RECLAIM_NOSCAN;

	/*
	 * Only run node reclaim on the local node or on nodes that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
		return NODE_RECLAIM_NOSCAN;

	/* Allow only one reclaimer per node at a time. */
	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
		return NODE_RECLAIM_NOSCAN;

	ret = __node_reclaim(pgdat, gfp_mask, order);
	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
3945#endif
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 */
int page_evictable(struct page *page)
{
	int ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}
3969
3970#ifdef CONFIG_SHMEM
3971
3972
3973
3974
3975
3976
3977
3978
3979
/**
 * check_move_unevictable_pages - check pages for evictability and move to
 * appropriate zone lru list
 * @pages:	array of pages to check
 * @nr_pages:	number of pages to check
 *
 * Checks pages for evictability and moves them to the appropriate lru list.
 */
void check_move_unevictable_pages(struct page **pages, int nr_pages)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		pgscanned++;
		/*
		 * Batch the lru_lock: only drop and retake it when we cross
		 * to a page belonging to a different node.
		 */
		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irq(&pgdat->lru_lock);
			pgdat = pagepgdat;
			spin_lock_irq(&pgdat->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, pgdat);

		/* Only pages currently on the unevictable LRU are candidates. */
		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON_PAGE(PageActive(page), page);
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued++;
		}
	}

	/* Account what we did while still holding the last node's lock. */
	if (pgdat) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&pgdat->lru_lock);
	}
}
4021#endif
4022