1
2
3
4
5
6
7
8
9
10
11
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/mm.h>
16#include <linux/sched/mm.h>
17#include <linux/module.h>
18#include <linux/gfp.h>
19#include <linux/kernel_stat.h>
20#include <linux/swap.h>
21#include <linux/pagemap.h>
22#include <linux/init.h>
23#include <linux/highmem.h>
24#include <linux/vmpressure.h>
25#include <linux/vmstat.h>
26#include <linux/file.h>
27#include <linux/writeback.h>
28#include <linux/blkdev.h>
29#include <linux/buffer_head.h>
30
31#include <linux/mm_inline.h>
32#include <linux/backing-dev.h>
33#include <linux/rmap.h>
34#include <linux/topology.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
37#include <linux/compaction.h>
38#include <linux/notifier.h>
39#include <linux/rwsem.h>
40#include <linux/delay.h>
41#include <linux/kthread.h>
42#include <linux/freezer.h>
43#include <linux/memcontrol.h>
44#include <linux/delayacct.h>
45#include <linux/sysctl.h>
46#include <linux/oom.h>
47#include <linux/pagevec.h>
48#include <linux/prefetch.h>
49#include <linux/printk.h>
50#include <linux/dax.h>
51#include <linux/psi.h>
52
53#include <asm/tlbflush.h>
54#include <asm/div64.h>
55
56#include <linux/swapops.h>
57#include <linux/balloon_compaction.h>
58
59#include "internal.h"
60
61#define CREATE_TRACE_POINTS
62#include <trace/events/vmscan.h>
63
/*
 * Per-invocation reclaim parameters and accumulated results, passed down
 * through the whole reclaim call chain.
 */
struct scan_control {
	/* How many pages shrink_zones() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/*
	 * The memory cgroup that hit its limit and is the primary target
	 * of this reclaim invocation (NULL for global reclaim).
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Scan pressure balancing between anon and file LRUs
	 * (cost model feedback; see callers that update these).
	 */
	unsigned long anon_cost;
	unsigned long file_cost;

	/* Can active pages be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
	unsigned int may_deactivate:2;
	unsigned int force_deactivate:1;
	unsigned int skipped_deactivate:1;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/*
	 * Cgroup memory below memory.low is protected as long as we
	 * don't threaten to OOM. If any cgroup is reclaimed at reduced
	 * force or passed over entirely due to its memory.low setting
	 * (memcg_low_skipped), and nothing was reclaimed, go back for
	 * one more cycle that reclaims the protected memory.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* There is easily reclaimable cold cache in the current node */
	unsigned int cache_trim_mode:1;

	/* The file pages on the current node are dangerously low */
	unsigned int file_is_tiny:1;

	/* Allocation order */
	s8 order;

	/* Scan (total_size >> priority) pages at once */
	s8 priority;

	/* The highest zone to isolate pages for reclaim from */
	s8 reclaim_idx;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;

	/* for recording the reclaimed slab by now */
	struct reclaim_state reclaim_state;
};
155
#ifdef ARCH_HAS_PREFETCHW
/*
 * Prefetch (for write) the LRU-list predecessor of @_page, unless we are
 * already at the list head (@_base). Used to warm the cache while walking
 * an LRU list backwards.
 */
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
169
170
171
172
173int vm_swappiness = 60;
174
/*
 * Install (or clear, when @rs is NULL) the per-task reclaim_state used to
 * account slab pages reclaimed on behalf of this reclaim pass.
 */
static void set_task_reclaim_state(struct task_struct *task,
				   struct reclaim_state *rs)
{
	/* Check for an overwrite: setting while one is already set */
	WARN_ON_ONCE(rs && task->reclaim_state);

	/* Check for the double clearing of a state */
	WARN_ON_ONCE(!rs && !task->reclaim_state);

	task->reclaim_state = rs;
}
186
/* All registered shrinkers; protected by shrinker_rwsem. */
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
189
190#ifdef CONFIG_MEMCG
191static int shrinker_nr_max;
192
193
194static inline int shrinker_map_size(int nr_items)
195{
196 return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
197}
198
199static inline int shrinker_defer_size(int nr_items)
200{
201 return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
202}
203
/*
 * Dereference the per-memcg, per-node shrinker_info. The caller must hold
 * shrinker_rwsem (checked by lockdep), which makes a plain RCU-protected
 * dereference safe.
 */
static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
						     int nid)
{
	return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
					 lockdep_is_held(&shrinker_rwsem));
}
210
/*
 * Grow one memcg's per-node shrinker_info arrays to the new map/defer sizes.
 * The old contents are preserved, the new tail is zero-initialized, and the
 * new structure is published with rcu_assign_pointer() before the old one is
 * freed via kvfree_rcu(), so lockless readers always see a valid info.
 * Caller holds shrinker_rwsem for writing.
 */
static int expand_one_shrinker_info(struct mem_cgroup *memcg,
				    int map_size, int defer_size,
				    int old_map_size, int old_defer_size)
{
	struct shrinker_info *new, *old;
	struct mem_cgroup_per_node *pn;
	int nid;
	int size = map_size + defer_size;

	for_each_node(nid) {
		pn = memcg->nodeinfo[nid];
		old = shrinker_info_protected(memcg, nid);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* map and nr_deferred share one allocation after the header */
		new->nr_deferred = (atomic_long_t *)(new + 1);
		new->map = (void *)new->nr_deferred + defer_size;

		/* map: set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_map_size);
		memset((void *)new->map + old_map_size, 0, map_size - old_map_size);
		/* nr_deferred: copy old values, clear all new values */
		memcpy(new->nr_deferred, old->nr_deferred, old_defer_size);
		memset((void *)new->nr_deferred + old_defer_size, 0,
		       defer_size - old_defer_size);

		rcu_assign_pointer(pn->shrinker_info, new);
		kvfree_rcu(old, rcu);
	}

	return 0;
}
248
/*
 * Free the per-node shrinker_info of a memcg being destroyed. No RCU grace
 * period is needed here: the memcg is going away and has no concurrent
 * readers at this point.
 */
void free_shrinker_info(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct shrinker_info *info;
	int nid;

	for_each_node(nid) {
		pn = memcg->nodeinfo[nid];
		info = rcu_dereference_protected(pn->shrinker_info, true);
		kvfree(info);
		rcu_assign_pointer(pn->shrinker_info, NULL);
	}
}
262
/*
 * Allocate per-node shrinker_info (bitmap + deferred counters) for a new
 * memcg, sized for the current shrinker_nr_max. Holds shrinker_rwsem so the
 * sizes cannot change underneath us. On allocation failure everything
 * already allocated is torn down and -ENOMEM is returned.
 */
int alloc_shrinker_info(struct mem_cgroup *memcg)
{
	struct shrinker_info *info;
	int nid, size, ret = 0;
	int map_size, defer_size = 0;

	down_write(&shrinker_rwsem);
	map_size = shrinker_map_size(shrinker_nr_max);
	defer_size = shrinker_defer_size(shrinker_nr_max);
	size = map_size + defer_size;
	for_each_node(nid) {
		info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
		if (!info) {
			free_shrinker_info(memcg);
			ret = -ENOMEM;
			break;
		}
		info->nr_deferred = (atomic_long_t *)(info + 1);
		info->map = (void *)info->nr_deferred + defer_size;
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
	}
	up_write(&shrinker_rwsem);

	return ret;
}
288
289static inline bool need_expand(int nr_max)
290{
291 return round_up(nr_max, BITS_PER_LONG) >
292 round_up(shrinker_nr_max, BITS_PER_LONG);
293}
294
/*
 * Grow every memcg's shrinker_info so that shrinker id @new_id fits.
 * No-op if the rounded-up capacity already covers it, or before
 * root_mem_cgroup exists. On success shrinker_nr_max is raised.
 * Caller must hold shrinker_rwsem for writing.
 */
static int expand_shrinker_info(int new_id)
{
	int ret = 0;
	int new_nr_max = new_id + 1;
	int map_size, defer_size = 0;
	int old_map_size, old_defer_size = 0;
	struct mem_cgroup *memcg;

	if (!need_expand(new_nr_max))
		goto out;

	if (!root_mem_cgroup)
		goto out;

	lockdep_assert_held(&shrinker_rwsem);

	map_size = shrinker_map_size(new_nr_max);
	defer_size = shrinker_defer_size(new_nr_max);
	old_map_size = shrinker_map_size(shrinker_nr_max);
	old_defer_size = shrinker_defer_size(shrinker_nr_max);

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		ret = expand_one_shrinker_info(memcg, map_size, defer_size,
					       old_map_size, old_defer_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto out;
		}
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
out:
	if (!ret)
		shrinker_nr_max = new_nr_max;

	return ret;
}
331
/*
 * Mark shrinker @shrinker_id as having objects to scan for @memcg on @nid.
 * Called locklessly under RCU; the root memcg has no map (it is scanned
 * unconditionally), hence the !mem_cgroup_is_root() filter.
 */
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct shrinker_info *info;

		rcu_read_lock();
		info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
		/* Pairs with smp_mb__after_atomic() in shrink_slab_memcg() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, info->map);
		rcu_read_unlock();
	}
}
345
346static DEFINE_IDR(shrinker_idr);
347
/*
 * Assign a memcg-aware shrinker an id from shrinker_idr and make sure all
 * memcg shrinker_info arrays are large enough to track it. Returns -ENOSYS
 * when memcg is disabled so the caller can fall back to a non-memcg-aware
 * registration.
 */
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	int id, ret = -ENOMEM;

	if (mem_cgroup_disabled())
		return -ENOSYS;

	down_write(&shrinker_rwsem);
	/* This may call shrinker, so it must use down_read_trylock() */
	id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto unlock;

	if (id >= shrinker_nr_max) {
		if (expand_shrinker_info(id)) {
			idr_remove(&shrinker_idr, id);
			goto unlock;
		}
	}
	shrinker->id = id;
	ret = 0;
unlock:
	up_write(&shrinker_rwsem);
	return ret;
}
373
374static void unregister_memcg_shrinker(struct shrinker *shrinker)
375{
376 int id = shrinker->id;
377
378 BUG_ON(id < 0);
379
380 lockdep_assert_held(&shrinker_rwsem);
381
382 idr_remove(&shrinker_idr, id);
383}
384
/*
 * Atomically take (and reset) the deferred-work count for this shrinker in
 * the given memcg/node. Caller holds shrinker_rwsem for reading.
 */
static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
				   struct mem_cgroup *memcg)
{
	struct shrinker_info *info;

	info = shrinker_info_protected(memcg, nid);
	return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
}
393
/*
 * Add @nr to the deferred-work count for this shrinker in the given
 * memcg/node and return the new total. Caller holds shrinker_rwsem.
 */
static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
				  struct mem_cgroup *memcg)
{
	struct shrinker_info *info;

	info = shrinker_info_protected(memcg, nid);
	return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
}
402
/*
 * Fold a dying memcg's deferred shrinker work into its parent (or the root
 * memcg if there is no parent) so the pending work is not lost when the
 * child's shrinker_info is freed.
 */
void reparent_shrinker_deferred(struct mem_cgroup *memcg)
{
	int i, nid;
	long nr;
	struct mem_cgroup *parent;
	struct shrinker_info *child_info, *parent_info;

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	/* Prevent from concurrent shrinker_info expand */
	down_read(&shrinker_rwsem);
	for_each_node(nid) {
		child_info = shrinker_info_protected(memcg, nid);
		parent_info = shrinker_info_protected(parent, nid);
		for (i = 0; i < shrinker_nr_max; i++) {
			nr = atomic_long_read(&child_info->nr_deferred[i]);
			atomic_long_add(nr, &parent_info->nr_deferred[i]);
		}
	}
	up_read(&shrinker_rwsem);
}
426
427static bool cgroup_reclaim(struct scan_control *sc)
428{
429 return sc->target_mem_cgroup;
430}
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445static bool writeback_throttling_sane(struct scan_control *sc)
446{
447 if (!cgroup_reclaim(sc))
448 return true;
449#ifdef CONFIG_CGROUP_WRITEBACK
450 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
451 return true;
452#endif
453 return false;
454}
455#else
/* !CONFIG_MEMCG: memcg-aware shrinker machinery compiles away to no-ops. */
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	return -ENOSYS;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}

static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
				   struct mem_cgroup *memcg)
{
	return 0;
}

static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
				  struct mem_cgroup *memcg)
{
	return 0;
}

static bool cgroup_reclaim(struct scan_control *sc)
{
	return false;
}

static bool writeback_throttling_sane(struct scan_control *sc)
{
	return true;
}
486#endif
487
488static long xchg_nr_deferred(struct shrinker *shrinker,
489 struct shrink_control *sc)
490{
491 int nid = sc->nid;
492
493 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
494 nid = 0;
495
496 if (sc->memcg &&
497 (shrinker->flags & SHRINKER_MEMCG_AWARE))
498 return xchg_nr_deferred_memcg(nid, shrinker,
499 sc->memcg);
500
501 return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
502}
503
504
505static long add_nr_deferred(long nr, struct shrinker *shrinker,
506 struct shrink_control *sc)
507{
508 int nid = sc->nid;
509
510 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
511 nid = 0;
512
513 if (sc->memcg &&
514 (shrinker->flags & SHRINKER_MEMCG_AWARE))
515 return add_nr_deferred_memcg(nr, nid, shrinker,
516 sc->memcg);
517
518 return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
519}
520
521
522
523
524
525
526unsigned long zone_reclaimable_pages(struct zone *zone)
527{
528 unsigned long nr;
529
530 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
531 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
532 if (get_nr_swap_pages() > 0)
533 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
534 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
535
536 return nr;
537}
538
539
540
541
542
543
544
/*
 * lruvec_lru_size - Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
 *
 * Sums the per-zone counts up to and including @zone_idx, skipping zones
 * with no managed pages; uses memcg-specific counters when memcg is on.
 */
static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
				     int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}
	return size;
}
564
565
566
567
/*
 * Add a shrinker into the data structures without registering it.
 * Memcg-aware shrinkers get an idr id; if the memcg path is unavailable
 * (-ENOSYS) we silently degrade to a plain per-node nr_deferred array.
 */
int prealloc_shrinker(struct shrinker *shrinker)
{
	unsigned int size;
	int err;

	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
		err = prealloc_memcg_shrinker(shrinker);
		if (err != -ENOSYS)
			return err;

		/* memcg disabled: fall back to non-memcg-aware behavior */
		shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
	}

	size = sizeof(*shrinker->nr_deferred);
	if (shrinker->flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;

	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
	if (!shrinker->nr_deferred)
		return -ENOMEM;

	return 0;
}
591
/*
 * Undo prealloc_shrinker() for a shrinker that was never registered.
 * Memcg-aware shrinkers only own an idr id (no nr_deferred array), so the
 * two cases are mutually exclusive.
 */
void free_prealloced_shrinker(struct shrinker *shrinker)
{
	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
		down_write(&shrinker_rwsem);
		unregister_memcg_shrinker(shrinker);
		up_write(&shrinker_rwsem);
		return;
	}

	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
604
/* Publish a preallocated shrinker on the global list and mark it live. */
void register_shrinker_prepared(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	shrinker->flags |= SHRINKER_REGISTERED;
	up_write(&shrinker_rwsem);
}
612
/*
 * Allocate shrinker state and register it in one step. Returns 0 on
 * success or a negative errno from the preallocation step.
 */
int register_shrinker(struct shrinker *shrinker)
{
	int err = prealloc_shrinker(shrinker);

	if (err)
		return err;
	register_shrinker_prepared(shrinker);
	return 0;
}
EXPORT_SYMBOL(register_shrinker);
623
624
625
626
/*
 * Remove one registered shrinker from the list and free its state.
 * Safe to call on a shrinker that was never registered (no-op).
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	if (!(shrinker->flags & SHRINKER_REGISTERED))
		return;

	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	shrinker->flags &= ~SHRINKER_REGISTERED;
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);
	up_write(&shrinker_rwsem);

	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);
643
644#define SHRINK_BATCH 128
645
/*
 * Run one shrinker: compute how much to scan from reclaim priority plus
 * previously deferred work, scan in batches, and defer what could not be
 * done to the next pass. Returns the number of objects freed (or
 * SHRINK_EMPTY when the shrinker reports nothing to do).
 */
static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0 || freeable == SHRINK_EMPTY)
		return freeable;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = xchg_nr_deferred(shrinker, shrinkctl);

	if (shrinker->seeks) {
		delta = freeable >> priority;
		delta *= 4;
		do_div(delta, shrinker->seeks);
	} else {
		/*
		 * These objects don't require any IO to create. Trim
		 * them aggressively under memory pressure to keep
		 * them from causing refetches in the IO caches.
		 */
		delta = freeable / 2;
	}

	total_scan = nr >> priority;
	total_scan += delta;
	total_scan = min(total_scan, (2 * freeable));

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (freeable), we must be
	 * scanning at high prio and therefore should try to reclaim as much
	 * as possible.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
		total_scan -= shrinkctl->nr_scanned;
		scanned += shrinkctl->nr_scanned;

		cond_resched();
	}

	/*
	 * The deferred work is increased by any new work (delta) that wasn't
	 * done, decreased by old deferred work that was done now.
	 *
	 * And it is capped to two times of the freeable items.
	 */
	next_deferred = max_t(long, (nr + delta - scanned), 0);
	next_deferred = min(next_deferred, (2 * freeable));

	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates.
	 */
	new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);

	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
	return freed;
}
742
743#ifdef CONFIG_MEMCG
/*
 * Run all shrinkers that have pending objects for @memcg on node @nid,
 * driven by the per-memcg shrinker bitmap. Returns the number of objects
 * freed; bails out (pretending at least one object was freed) if the
 * shrinker_rwsem becomes contended, to avoid stalling register/unregister.
 */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
			struct mem_cgroup *memcg, int priority)
{
	struct shrinker_info *info;
	unsigned long ret, freed = 0;
	int i;

	if (!mem_cgroup_online(memcg))
		return 0;

	if (!down_read_trylock(&shrinker_rwsem))
		return 0;

	info = shrinker_info_protected(memcg, nid);
	if (unlikely(!info))
		goto unlock;

	for_each_set_bit(i, info->map, shrinker_nr_max) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};
		struct shrinker *shrinker;

		shrinker = idr_find(&shrinker_idr, i);
		if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) {
			/* stale bit for a fully gone shrinker: clear it */
			if (!shrinker)
				clear_bit(i, info->map);
			continue;
		}

		/* Call non-slab shrinkers even though kmem is disabled */
		if (!memcg_kmem_enabled() &&
		    !(shrinker->flags & SHRINKER_NONSLAB))
			continue;

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY) {
			clear_bit(i, info->map);
			/*
			 * After the shrinker reported that it had no objects to
			 * free, but before we cleared the corresponding bit in
			 * the memcg shrinker map, a new object might have been
			 * added. To make sure, we have the bit set in this
			 * case, we invoke the shrinker one more time and reset
			 * the bit if it reports that it is not empty anymore.
			 * The memory barrier here pairs with the barrier in
			 * set_shrinker_bit():
			 *
			 * list_lru_add()     shrink_slab_memcg()
			 *   list_add_tail()    clear_bit()
			 *   <MB>               <MB>
			 *   set_bit()          do_shrink_slab()
			 */
			smp_mb__after_atomic();
			ret = do_shrink_slab(&sc, shrinker, priority);
			if (ret == SHRINK_EMPTY)
				ret = 0;
			else
				set_shrinker_bit(memcg, nid, i);
		}
		freed += ret;

		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}
unlock:
	up_read(&shrinker_rwsem);
	return freed;
}
817#else
/* !CONFIG_MEMCG: no per-memcg shrinker maps, nothing to do. */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
			struct mem_cgroup *memcg, int priority)
{
	return 0;
}
823#endif
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * @memcg specifies the memory cgroup to target. Unaware shrinkers
 * are called only if it is the root cgroup.
 *
 * @priority is sc->priority, we take the number of objects and >> by priority
 * in order to get the scan target.
 *
 * Returns the number of reclaimed slab objects.
 */
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
				 struct mem_cgroup *memcg,
				 int priority)
{
	unsigned long ret, freed = 0;
	struct shrinker *shrinker;

	/*
	 * The root memcg might be allocated even though memcg is disabled
	 * via "cgroup_disable=memory" boot parameter.  This could make
	 * mem_cgroup_is_root() return false, then just run memcg slab
	 * shrink, but skip global shrink.  This may result in premature
	 * oom.
	 */
	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

	if (!down_read_trylock(&shrinker_rwsem))
		goto out;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY)
			ret = 0;
		freed += ret;
		/*
		 * Bail out if someone wants to register a new shrinker to
		 * prevent the registration from being stalled for long periods
		 * by parallel ongoing shrinking.
		 */
		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}

	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}
892
/*
 * Repeatedly shrink all slab caches on @nid (across all memcgs) until a
 * pass frees almost nothing, or a fatal signal is pending.
 */
void drop_slab_node(int nid)
{
	unsigned long freed;

	do {
		struct mem_cgroup *memcg = NULL;

		if (fatal_signal_pending(current))
			return;

		freed = 0;
		memcg = mem_cgroup_iter(NULL, NULL, NULL);
		do {
			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
	} while (freed > 10);
}
910
911void drop_slab(void)
912{
913 int nid;
914
915 for_each_online_node(nid)
916 drop_slab_node(nid);
917}
918
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache and optional buffer
	 * heads at page->private.
	 */
	int page_cache_pins = thp_nr_pages(page);
	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
}
929
930static int may_write_to_inode(struct inode *inode)
931{
932 if (current->flags & PF_SWAPWRITE)
933 return 1;
934 if (!inode_write_congested(inode))
935 return 1;
936 if (inode_to_bdi(inode) == current->backing_dev_info)
937 return 1;
938 return 0;
939}
940
941
942
943
944
945
946
947
948
949
950
951
952
/*
 * Record a ->writepage() error on the mapping. The page lock is taken to
 * re-check that the page still belongs to @mapping before tagging it.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}
961
962
963typedef enum {
964
965 PAGE_KEEP,
966
967 PAGE_ACTIVATE,
968
969 PAGE_SUCCESS,
970
971 PAGE_CLEAN,
972} pageout_t;
973
974
975
976
977
/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				pr_info("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_inode(mapping->host))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page);
		inc_node_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
1047
1048
1049
1050
1051
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
			    bool reclaimed, struct mem_cgroup *target_memcg)
{
	unsigned long flags;
	int refcount;
	void *shadow = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	xa_lock_irqsave(&mapping->i_pages, flags);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_refcount.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
	refcount = 1 + compound_nr(page);
	if (!page_ref_freeze(page, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, refcount);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		mem_cgroup_swapout(page, swap);
		if (reclaimed && !mapping_exiting(mapping))
			shadow = workingset_eviction(page, target_memcg);
		__delete_from_swap_cache(page, swap, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		put_swap_page(page, swap);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting.  This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost.  Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache pages found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && page_is_file_lru(page) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(page, target_memcg);
		__delete_from_page_cache(page, shadow);
		xa_unlock_irqrestore(&mapping->i_pages, flags);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}
1141
1142
1143
1144
1145
1146
1147
/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page, false, NULL)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_ref_unfreeze(page, 1);
		return 1;
	}
	return 0;
}
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
}
1176
/* Verdict of page_check_references(): what to do with a scanned page. */
enum page_references {
	PAGEREF_RECLAIM,	/* try to reclaim */
	PAGEREF_RECLAIM_CLEAN,	/* reclaim only if clean */
	PAGEREF_KEEP,		/* keep on inactive list */
	PAGEREF_ACTIVATE,	/* move to active list */
};
1183
/*
 * Decide the fate of a page on the inactive list based on its rmap and
 * PG_referenced history.
 */
static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
1236
1237
/* Check if a page is dirty or under writeback */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous pages are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them
	 */
	if (!page_is_file_lru(page) ||
	    (PageAnon(page) && !PageSwapBacked(page))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}
1266
1267
1268
1269
/*
 * shrink_page_list() returns the number of reclaimed pages.
 *
 * Takes a list of isolated pages, tries to reclaim each one, and sorts the
 * survivors back onto @page_list. Per-outcome counts are accumulated in
 * @stat. @ignore_references skips the reference checks (used when reclaiming
 * known-clean pages).
 */
static unsigned int shrink_page_list(struct list_head *page_list,
				     struct pglist_data *pgdat,
				     struct scan_control *sc,
				     struct reclaim_stat *stat,
				     bool ignore_references)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	unsigned int nr_reclaimed = 0;
	unsigned int pgactivate = 0;

	memset(stat, 0, sizeof(*stat));
	cond_resched();

	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		enum page_references references = PAGEREF_RECLAIM;
		bool dirty, writeback, may_enter_fs;
		unsigned int nr_pages;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON_PAGE(PageActive(page), page);

		nr_pages = compound_nr(page);

		/* Account the number of base pages even though THP */
		sc->nr_scanned += nr_pages;

		if (unlikely(!page_evictable(page)))
			goto activate_locked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a node is marked
		 * reclaim_congested. kswapd will stall and start writing
		 * pages if the tail of the LRU is all dirty unqueued pages.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			stat->nr_dirty++;

		if (dirty && !writeback)
			stat->nr_unqueued_dirty++;

		/*
		 * Treat this page as congested if the underlying BDI is or if
		 * pages are cycling through the LRU so quickly that the
		 * pages marked for immediate reclaim are making it to the
		 * end of the LRU a second time.
		 */
		mapping = page_mapping(page);
		if (((dirty || writeback) && mapping &&
		     inode_write_congested(mapping->host)) ||
		    (writeback && PageReclaim(page)))
			stat->nr_congested++;

		/*
		 * If a page at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number of pages
		 *    under writeback and this page is both under writeback and
		 *    PageReclaim then it indicates that pages are being queued
		 *    for IO but are being recycled through the LRU before the
		 *    IO can complete. In this case, count the page as the
		 *    "immediate" statistic and activate it (kswapd only).
		 *
		 * 2) Global or new memcg reclaim encounters a page that is
		 *    not marked for immediate reclaim, or the caller does not
		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
		 *    not to fs). In this case mark the page for immediate
		 *    reclaim and continue scanning.
		 *
		 * 3) Legacy memcg encounters a page that already has the
		 *    reclaim flag set: throttle by waiting on writeback, as
		 *    legacy cgroups lack dirty throttling of their own.
		 */
		if (PageWriteback(page)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    PageReclaim(page) &&
			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
				stat->nr_immediate++;
				goto activate_locked;

			/* Case 2 above */
			} else if (writeback_throttling_sane(sc) ||
			    !PageReclaim(page) || !may_enter_fs) {
				/*
				 * This is slightly racy - end_page_writeback()
				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here ends up interpreted
				 * as PageReadahead - but that does not matter
				 * enough to care.  What we do want is for this
				 * page to have PageReclaim set next time memcg
				 * reclaim reaches the tests above, so it will
				 * then wait_on_page_writeback() to avoid OOM;
				 * and it's also appropriate in global reclaim.
				 */
				SetPageReclaim(page);
				stat->nr_writeback++;
				goto activate_locked;

			/* Case 3 above */
			} else {
				unlock_page(page);
				wait_on_page_writeback(page);
				/* then go back and try same page again */
				list_add_tail(&page->lru, page_list);
				continue;
			}
		}

		if (!ignore_references)
			references = page_check_references(page, sc);

		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			stat->nr_ref_keep += nr_pages;
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 * Lazyfree page could be freed directly
		 */
		if (PageAnon(page) && PageSwapBacked(page)) {
			if (!PageSwapCache(page)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (page_maybe_dma_pinned(page))
					goto keep_locked;
				if (PageTransHuge(page)) {
					/* cannot split THP, skip it */
					if (!can_split_huge_page(page, NULL))
						goto activate_locked;
					/*
					 * Split pages without a PMD map right
					 * away. Chances are some or all of the
					 * tail pages can be freed without IO.
					 */
					if (!compound_mapcount(page) &&
					    split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
				}
				if (!add_to_swap(page)) {
					if (!PageTransHuge(page))
						goto activate_locked_split;
					/* Fallback to swap normal pages */
					if (split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					count_vm_event(THP_SWPOUT_FALLBACK);
#endif
					if (!add_to_swap(page))
						goto activate_locked_split;
				}

				may_enter_fs = true;

				/* Adding to swap updated mapping */
				mapping = page_mapping(page);
			}
		} else if (unlikely(PageTransHuge(page))) {
			/* Split file THP */
			if (split_huge_page_to_list(page, page_list))
				goto keep_locked;
		}

		/*
		 * THP may get split above, need minus tail pages and update
		 * nr_pages to avoid accounting tail pages twice.
		 *
		 * The tail pages that are added into swap cache successfully
		 * reach here.
		 */
		if ((nr_pages > 1) && !PageTransHuge(page)) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page)) {
			enum ttu_flags flags = TTU_BATCH_FLUSH;
			bool was_swapbacked = PageSwapBacked(page);

			if (unlikely(PageTransHuge(page)))
				flags |= TTU_SPLIT_HUGE_PMD;

			try_to_unmap(page, flags);
			if (page_mapped(page)) {
				stat->nr_unmap_fail += nr_pages;
				if (!was_swapbacked && PageSwapBacked(page))
					stat->nr_lazyfree_fail += nr_pages;
				goto activate_locked;
			}
		}

		if (PageDirty(page)) {
			/*
			 * Only kswapd can writeback filesystem pages
			 * to avoid risk of stack overflow. But avoid
			 * injecting inefficient single-page IO into
			 * flusher writeback as much as possible: only
			 * write pages when we've encountered many
			 * dirty pages, and when we've already scanned
			 * the rest of the LRU for clean pages and see
			 * the same dirty pages again (PageReclaim).
			 */
			if (page_is_file_lru(page) &&
			    (!current_is_kswapd() || !PageReclaim(page) ||
			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto activate_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/*
			 * Page is dirty. Flush the TLB if a writable entry
			 * potentially exists to avoid CPU writes after IO
			 * starts and then write it out here.
			 */
			try_to_unmap_flush_dirty();
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				stat->nr_pageout += thp_nr_pages(page);

				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
				fallthrough;
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_cleanup_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (PageAnon(page) && !PageSwapBacked(page)) {
			/* follow __remove_mapping for reference */
			if (!page_ref_freeze(page, 1))
				goto keep_locked;
			if (PageDirty(page)) {
				page_ref_unfreeze(page, 1);
				goto keep_locked;
			}

			count_vm_event(PGLAZYFREED);
			count_memcg_page_event(page, PGLAZYFREED);
		} else if (!mapping || !__remove_mapping(mapping, page, true,
							 sc->target_mem_cgroup))
			goto keep_locked;

		unlock_page(page);
free_it:
		/*
		 * THP may get swapped out in a whole, need account
		 * all base pages.
		 */
		nr_reclaimed += nr_pages;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		if (unlikely(PageTransHuge(page)))
			destroy_compound_page(page);
		else
			list_add(&page->lru, &free_pages);
		continue;

activate_locked_split:
		/*
		 * The tail pages that are failed to add into swap cache
		 * reach here.  Fixup nr_scanned and nr_pages.
		 */
		if (nr_pages > 1) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}
activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
						PageMlocked(page)))
			try_to_free_swap(page);
		VM_BUG_ON_PAGE(PageActive(page), page);
		if (!PageMlocked(page)) {
			int type = page_is_file_lru(page);
			SetPageActive(page);
			stat->nr_activate[type] += nr_pages;
			count_memcg_page_event(page, PGACTIVATE);
		}
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
	}

	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];

	mem_cgroup_uncharge_list(&free_pages);
	try_to_unmap_flush();
	free_unref_page_list(&free_pages);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);

	return nr_reclaimed;
}
1695
1696unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1697 struct list_head *page_list)
1698{
1699 struct scan_control sc = {
1700 .gfp_mask = GFP_KERNEL,
1701 .priority = DEF_PRIORITY,
1702 .may_unmap = 1,
1703 };
1704 struct reclaim_stat stat;
1705 unsigned int nr_reclaimed;
1706 struct page *page, *next;
1707 LIST_HEAD(clean_pages);
1708 unsigned int noreclaim_flag;
1709
1710 list_for_each_entry_safe(page, next, page_list, lru) {
1711 if (!PageHuge(page) && page_is_file_lru(page) &&
1712 !PageDirty(page) && !__PageMovable(page) &&
1713 !PageUnevictable(page)) {
1714 ClearPageActive(page);
1715 list_move(&page->lru, &clean_pages);
1716 }
1717 }
1718
1719
1720
1721
1722
1723
1724
1725 noreclaim_flag = memalloc_noreclaim_save();
1726 nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1727 &stat, true);
1728 memalloc_noreclaim_restore(noreclaim_flag);
1729
1730 list_splice(&clean_pages, page_list);
1731 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1732 -(long)nr_reclaimed);
1733
1734
1735
1736
1737
1738
1739 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
1740 stat.nr_lazyfree_fail);
1741 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1742 -(long)stat.nr_lazyfree_fail);
1743 return nr_reclaimed;
1744}
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
1757{
1758
1759 if (!PageLRU(page))
1760 return false;
1761
1762
1763 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1764 return false;
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774 if (mode & ISOLATE_ASYNC_MIGRATE) {
1775
1776 if (PageWriteback(page))
1777 return false;
1778
1779 if (PageDirty(page)) {
1780 struct address_space *mapping;
1781 bool migrate_dirty;
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792 if (!trylock_page(page))
1793 return false;
1794
1795 mapping = page_mapping(page);
1796 migrate_dirty = !mapping || mapping->a_ops->migratepage;
1797 unlock_page(page);
1798 if (!migrate_dirty)
1799 return false;
1800 }
1801 }
1802
1803 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1804 return false;
1805
1806 return true;
1807}
1808
1809
1810
1811
1812
1813static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1814 enum lru_list lru, unsigned long *nr_zone_taken)
1815{
1816 int zid;
1817
1818 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1819 if (!nr_zone_taken[zid])
1820 continue;
1821
1822 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1823 }
1824
1825}
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
/*
 * Isolate up to @nr_to_scan pages from @lruvec's list @lru onto @dst.
 *
 * Must be called with lruvec->lru_lock held.  Pages belonging to zones
 * above sc->reclaim_idx are sidelined (and spliced back at the end);
 * pages that fail the isolation checks are rotated back onto the list.
 *
 * Returns the number of pages moved to @dst.  *nr_scanned is set to the
 * total number of pages looked at, including skipped ones.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
	unsigned long skipped = 0;
	unsigned long scan, total_scan, nr_pages;
	LIST_HEAD(pages_skipped);
	isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);

	total_scan = 0;
	scan = 0;
	while (scan < nr_to_scan && !list_empty(src)) {
		struct page *page;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		/* A compound page counts as all of its subpages. */
		nr_pages = compound_nr(page);
		total_scan += nr_pages;

		/*
		 * Pages from zones above the reclaim target are only
		 * sidelined, not charged against the scan budget.
		 */
		if (page_zonenum(page) > sc->reclaim_idx) {
			list_move(&page->lru, &pages_skipped);
			nr_skipped[page_zonenum(page)] += nr_pages;
			continue;
		}

		/* Eligible page: charge it against the scan budget. */
		scan += nr_pages;
		if (!__isolate_lru_page_prepare(page, mode)) {
			/* Cannot isolate: rotate back onto the list. */
			list_move(&page->lru, src);
			continue;
		}

		/*
		 * Take a reference BEFORE clearing PageLRU: if the
		 * refcount already hit zero the page is being freed
		 * elsewhere and must be left alone.
		 */
		if (unlikely(!get_page_unless_zero(page))) {
			list_move(&page->lru, src);
			continue;
		}

		if (!TestClearPageLRU(page)) {
			/* Another context is isolating this page. */
			put_page(page);
			list_move(&page->lru, src);
			continue;
		}

		nr_taken += nr_pages;
		nr_zone_taken[page_zonenum(page)] += nr_pages;
		list_move(&page->lru, dst);
	}

	/*
	 * Splice the skipped pages back and account them so the skip
	 * rate is visible in vmstat (PGSCAN_SKIP per zone).
	 */
	if (!list_empty(&pages_skipped)) {
		int zid;

		list_splice(&pages_skipped, src);
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			if (!nr_skipped[zid])
				continue;

			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
			skipped += nr_skipped[zid];
		}
	}
	*nr_scanned = total_scan;
	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
				    total_scan, skipped, nr_taken, mode, lru);
	/* Fix up the per-zone LRU size accounting for what we took. */
	update_lru_sizes(lruvec, lru, nr_zone_taken);
	return nr_taken;
}
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969int isolate_lru_page(struct page *page)
1970{
1971 int ret = -EBUSY;
1972
1973 VM_BUG_ON_PAGE(!page_count(page), page);
1974 WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1975
1976 if (TestClearPageLRU(page)) {
1977 struct lruvec *lruvec;
1978
1979 get_page(page);
1980 lruvec = lock_page_lruvec_irq(page);
1981 del_page_from_lru_list(page, lruvec);
1982 unlock_page_lruvec_irq(lruvec);
1983 ret = 0;
1984 }
1985
1986 return ret;
1987}
1988
1989
1990
1991
1992
1993
1994
1995
1996static int too_many_isolated(struct pglist_data *pgdat, int file,
1997 struct scan_control *sc)
1998{
1999 unsigned long inactive, isolated;
2000
2001 if (current_is_kswapd())
2002 return 0;
2003
2004 if (!writeback_throttling_sane(sc))
2005 return 0;
2006
2007 if (file) {
2008 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
2009 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
2010 } else {
2011 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
2012 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
2013 }
2014
2015
2016
2017
2018
2019
2020 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
2021 inactive >>= 3;
2022
2023 return isolated > inactive;
2024}
2025
2026
2027
2028
2029
2030
2031
/*
 * Put the pages on @list back onto the appropriate LRU lists of
 * @lruvec.
 *
 * Must be called with lruvec->lru_lock held; the lock is temporarily
 * dropped for unevictable pages and for freeing compound pages.
 *
 * Returns the number of pages actually moved to an LRU list; pages
 * whose last reference was dropped here are collected back onto @list
 * for the caller to free.
 */
static unsigned int move_pages_to_lru(struct lruvec *lruvec,
				      struct list_head *list)
{
	int nr_pages, nr_moved = 0;
	LIST_HEAD(pages_to_free);
	struct page *page;

	while (!list_empty(list)) {
		page = lru_to_page(list);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		list_del(&page->lru);
		if (unlikely(!page_evictable(page))) {
			/* putback_lru_page() may sleep/relock; drop lock. */
			spin_unlock_irq(&lruvec->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&lruvec->lru_lock);
			continue;
		}

		/*
		 * Set PageLRU before dropping our reference: once the
		 * flag is visible another context may isolate the page,
		 * so the ordering against put_page_testzero() below
		 * matters.
		 */
		SetPageLRU(page);

		if (unlikely(put_page_testzero(page))) {
			/* We held the last reference: free the page. */
			__clear_page_lru_flags(page);

			if (unlikely(PageCompound(page))) {
				/* Compound teardown can't run under the lock. */
				spin_unlock_irq(&lruvec->lru_lock);
				destroy_compound_page(page);
				spin_lock_irq(&lruvec->lru_lock);
			} else
				list_add(&page->lru, &pages_to_free);

			continue;
		}

		/* Page survives: link it into its LRU list. */
		VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
		add_page_to_lru_list(page, lruvec);
		nr_pages = thp_nr_pages(page);
		nr_moved += nr_pages;
		if (PageActive(page))
			workingset_age_nonresident(lruvec, nr_pages);
	}

	/*
	 * Hand the to-be-freed pages back to the caller via @list.
	 */
	list_splice(&pages_to_free, list);

	return nr_moved;
}
2095
2096
2097
2098
2099
2100
2101
2102static int current_may_throttle(void)
2103{
2104 return !(current->flags & PF_LOCAL_THROTTLE) ||
2105 current->backing_dev_info == NULL ||
2106 bdi_write_congested(current->backing_dev_info);
2107}
2108
2109
2110
2111
2112
/*
 * shrink_inactive_list() is a helper for shrink_node().  It isolates a
 * batch of pages from the inactive list, reclaims what it can via
 * shrink_page_list(), and puts the rest back.  Returns the number of
 * pages reclaimed.
 */
static unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
		     struct scan_control *sc, enum lru_list lru)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned int nr_reclaimed = 0;
	unsigned long nr_taken;
	struct reclaim_stat stat;
	bool file = is_file_lru(lru);
	enum vm_event_item item;
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	bool stalled = false;

	/*
	 * Throttle direct reclaimers while too many pages are isolated;
	 * stall at most once, and bail for fatally-signalled tasks so
	 * they can reach the OOM killer / exit path.
	 */
	while (unlikely(too_many_isolated(pgdat, file, sc))) {
		if (stalled)
			return 0;

		msleep(100);
		stalled = true;

		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	lru_add_drain();

	spin_lock_irq(&lruvec->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
				     &nr_scanned, sc, lru);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
	item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
	/* Global counters only for global (non-cgroup) reclaim. */
	if (!cgroup_reclaim(sc))
		__count_vm_events(item, nr_scanned);
	__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
	__count_vm_events(PGSCAN_ANON + file, nr_scanned);

	spin_unlock_irq(&lruvec->lru_lock);

	if (nr_taken == 0)
		return 0;

	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);

	spin_lock_irq(&lruvec->lru_lock);
	move_pages_to_lru(lruvec, &page_list);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
	if (!cgroup_reclaim(sc))
		__count_vm_events(item, nr_reclaimed);
	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
	spin_unlock_irq(&lruvec->lru_lock);

	/* Record IO cost of the pages we had to write out. */
	lru_note_cost(lruvec, file, stat.nr_pageout);
	mem_cgroup_uncharge_list(&page_list);
	free_unref_page_list(&page_list);

	/*
	 * If every page taken was dirty but unqueued, flusher threads
	 * are not keeping up; kick them so dirty pages get queued for
	 * writeback instead of being rotated forever.
	 */
	if (stat.nr_unqueued_dirty == nr_taken)
		wakeup_flusher_threads(WB_REASON_VMSCAN);

	/* Propagate per-batch stats up to the scan_control. */
	sc->nr.dirty += stat.nr_dirty;
	sc->nr.congested += stat.nr_congested;
	sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
	sc->nr.writeback += stat.nr_writeback;
	sc->nr.immediate += stat.nr_immediate;
	sc->nr.taken += nr_taken;
	if (file)
		sc->nr.file_taken += nr_taken;

	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
			nr_scanned, nr_reclaimed, &stat, sc->priority, file);
	return nr_reclaimed;
}
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
/*
 * shrink_active_list() moves pages from the active list to the
 * inactive list (deactivation), rotating referenced executable file
 * pages back to the active list.  Nothing is reclaimed directly here.
 */
static void shrink_active_list(unsigned long nr_to_scan,
			       struct lruvec *lruvec,
			       struct scan_control *sc,
			       enum lru_list lru)
{
	unsigned long nr_taken;
	unsigned long nr_scanned;
	unsigned long vm_flags;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_active);
	LIST_HEAD(l_inactive);
	struct page *page;
	unsigned nr_deactivate, nr_activate;
	unsigned nr_rotated = 0;
	int file = is_file_lru(lru);
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lru_add_drain();

	spin_lock_irq(&lruvec->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
				     &nr_scanned, sc, lru);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);

	if (!cgroup_reclaim(sc))
		__count_vm_events(PGREFILL, nr_scanned);
	__count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);

	spin_unlock_irq(&lruvec->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		if (unlikely(!page_evictable(page))) {
			putback_lru_page(page);
			continue;
		}

		/*
		 * Under buffer_heads pressure, opportunistically strip
		 * buffers from file pages while we have them isolated.
		 * page_has_private() is re-checked under the page lock.
		 */
		if (unlikely(buffer_heads_over_limit)) {
			if (page_has_private(page) && trylock_page(page)) {
				if (page_has_private(page))
					try_to_release_page(page, 0);
				unlock_page(page);
			}
		}

		if (page_referenced(page, 0, sc->target_mem_cgroup,
				    &vm_flags)) {
			/*
			 * Referenced executable file pages get another
			 * round on the active list — protecting code
			 * pages of running binaries.  Everything else
			 * referenced is still deactivated; a later
			 * refault will re-activate it.
			 */
			if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
				nr_rotated += thp_nr_pages(page);
				list_add(&page->lru, &l_active);
				continue;
			}
		}

		ClearPageActive(page);	/* we are de-activating */
		SetPageWorkingset(page);
		list_add(&page->lru, &l_inactive);
	}

	/*
	 * Move pages back to the lru list.
	 */
	spin_lock_irq(&lruvec->lru_lock);

	nr_activate = move_pages_to_lru(lruvec, &l_active);
	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);

	/* Both lists now hold only pages to be freed; merge them. */
	list_splice(&l_inactive, &l_active);

	__count_vm_events(PGDEACTIVATE, nr_deactivate);
	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);

	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
	spin_unlock_irq(&lruvec->lru_lock);

	mem_cgroup_uncharge_list(&l_active);
	free_unref_page_list(&l_active);
	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
			nr_deactivate, nr_rotated, sc->priority, file);
}
2315
/*
 * Reclaim an arbitrary list of isolated pages, batching them per NUMA
 * node before handing each batch to shrink_page_list().  Pages that
 * could not be reclaimed are put back on their LRU lists.
 *
 * Returns the total number of pages reclaimed.
 */
unsigned long reclaim_pages(struct list_head *page_list)
{
	int nid = NUMA_NO_NODE;
	unsigned int nr_reclaimed = 0;
	LIST_HEAD(node_page_list);
	struct reclaim_stat dummy_stat;
	struct page *page;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
	};

	/* Avoid recursive reclaim while we reclaim on someone's behalf. */
	noreclaim_flag = memalloc_noreclaim_save();

	while (!list_empty(page_list)) {
		page = lru_to_page(page_list);
		if (nid == NUMA_NO_NODE) {
			/* Start a new per-node batch. */
			nid = page_to_nid(page);
			INIT_LIST_HEAD(&node_page_list);
		}

		if (nid == page_to_nid(page)) {
			/* Same node: keep accumulating the batch. */
			ClearPageActive(page);
			list_move(&page->lru, &node_page_list);
			continue;
		}

		/*
		 * Different node: flush the current batch, then putback
		 * whatever shrink_page_list() left behind.
		 */
		nr_reclaimed += shrink_page_list(&node_page_list,
						NODE_DATA(nid),
						&sc, &dummy_stat, false);
		while (!list_empty(&node_page_list)) {
			page = lru_to_page(&node_page_list);
			list_del(&page->lru);
			putback_lru_page(page);
		}

		nid = NUMA_NO_NODE;
	}

	/* Flush the final batch, if any. */
	if (!list_empty(&node_page_list)) {
		nr_reclaimed += shrink_page_list(&node_page_list,
						NODE_DATA(nid),
						&sc, &dummy_stat, false);
		while (!list_empty(&node_page_list)) {
			page = lru_to_page(&node_page_list);
			list_del(&page->lru);
			putback_lru_page(page);
		}
	}

	memalloc_noreclaim_restore(noreclaim_flag);

	return nr_reclaimed;
}
2374
2375static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2376 struct lruvec *lruvec, struct scan_control *sc)
2377{
2378 if (is_active_lru(lru)) {
2379 if (sc->may_deactivate & (1 << is_file_lru(lru)))
2380 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2381 else
2382 sc->skipped_deactivate = 1;
2383 return 0;
2384 }
2385
2386 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2387}
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2418{
2419 enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2420 unsigned long inactive, active;
2421 unsigned long inactive_ratio;
2422 unsigned long gb;
2423
2424 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2425 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2426
2427 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2428 if (gb)
2429 inactive_ratio = int_sqrt(10 * gb);
2430 else
2431 inactive_ratio = 1;
2432
2433 return inactive * inactive_ratio < active;
2434}
2435
2436enum scan_balance {
2437 SCAN_EQUAL,
2438 SCAN_FRACT,
2439 SCAN_ANON,
2440 SCAN_FILE,
2441};
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.
 *
 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
 */
static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
			   unsigned long *nr)
{
	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
	unsigned long anon_cost, file_cost, total_cost;
	int swappiness = mem_cgroup_swappiness(memcg);
	u64 fraction[ANON_AND_FILE];
	u64 denominator = 0;	/* gcc */
	enum scan_balance scan_balance;
	unsigned long ap, fp;
	enum lru_list lru;

	/* No swap space (or swapping disallowed): only scan file pages. */
	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Cgroup reclaim with swappiness 0 means the admin asked for no
	 * swapping at all for this group; honor that.  (Global reclaim
	 * may still swap under memory pressure even at swappiness 0.)
	 */
	if (cgroup_reclaim(sc) && !swappiness) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * At maximum priority (near-OOM), scan both lists equally as
	 * long as swapping is allowed at all.
	 */
	if (!sc->priority && swappiness) {
		scan_balance = SCAN_EQUAL;
		goto out;
	}

	/*
	 * The file list is tiny; go after anon pages.
	 */
	if (sc->file_is_tiny) {
		scan_balance = SCAN_ANON;
		goto out;
	}

	/*
	 * There is enough cold file cache to trim; leave anon alone.
	 */
	if (sc->cache_trim_mode) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	scan_balance = SCAN_FRACT;

	/*
	 * Split the scan between anon and file proportionally to the
	 * relative reclaim cost of each, biased by swappiness.  Adding
	 * total_cost to each side dampens the ratio so a burst of
	 * activity on one list does not starve the other; +1 avoids
	 * division by zero.
	 */
	total_cost = sc->anon_cost + sc->file_cost;
	anon_cost = total_cost + sc->anon_cost;
	file_cost = total_cost + sc->file_cost;
	total_cost = anon_cost + file_cost;

	ap = swappiness * (total_cost + 1);
	ap /= anon_cost + 1;

	fp = (200 - swappiness) * (total_cost + 1);
	fp /= file_cost + 1;

	fraction[0] = ap;
	fraction[1] = fp;
	denominator = ap + fp;
out:
	for_each_evictable_lru(lru) {
		int file = is_file_lru(lru);
		unsigned long lruvec_size;
		unsigned long low, min;
		unsigned long scan;

		lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
		mem_cgroup_protection(sc->target_mem_cgroup, memcg,
				      &min, &low);

		if (min || low) {
			/*
			 * The cgroup is protected (memory.min/low).
			 * Scale the scan target down in proportion to
			 * how much of the cgroup's usage is protected.
			 */
			unsigned long cgroup_size = mem_cgroup_size(memcg);
			unsigned long protection;

			/* memory.low scaling, make sure we retry before OOM */
			if (!sc->memcg_low_reclaim && low > min) {
				protection = low;
				sc->memcg_low_skipped = 1;
			} else {
				protection = min;
			}

			/* Avoid TOCTOU with earlier protection check */
			cgroup_size = max(cgroup_size, protection);

			scan = lruvec_size - lruvec_size * protection /
				cgroup_size;

			/*
			 * Minimally target SWAP_CLUSTER_MAX pages so a
			 * fully-protected cgroup still makes some
			 * progress under severe pressure.
			 */
			scan = max(scan, SWAP_CLUSTER_MAX);
		} else {
			scan = lruvec_size;
		}

		scan >>= sc->priority;

		/*
		 * If the cgroup's already been deleted, make sure to
		 * scrape out the remaining cache.
		 */
		if (!scan && !mem_cgroup_online(memcg))
			scan = min(lruvec_size, SWAP_CLUSTER_MAX);

		switch (scan_balance) {
		case SCAN_EQUAL:
			/* Scan lists relative to size */
			break;
		case SCAN_FRACT:
			/*
			 * Scan types proportional to swappiness and
			 * their relative recent reclaim efficiency.
			 * Offline memcgs round up so the leftover cache
			 * of a dead cgroup is eventually drained.
			 */
			scan = mem_cgroup_online(memcg) ?
			       div64_u64(scan * fraction[file], denominator) :
			       DIV64_U64_ROUND_UP(scan * fraction[file],
						  denominator);
			break;
		case SCAN_FILE:
		case SCAN_ANON:
			/* Scan one type exclusively */
			if ((scan_balance == SCAN_FILE) != file)
				scan = 0;
			break;
		default:
			/* Look ma, no brain */
			BUG();
		}

		nr[lru] = scan;
	}
}
2647
/*
 * Shrink all evictable LRU lists of one lruvec according to the scan
 * targets computed by get_scan_count(), stopping early and rebalancing
 * once enough has been reclaimed.
 */
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long targets[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	enum lru_list lru;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
	struct blk_plug plug;
	bool scan_adjusted;

	get_scan_count(lruvec, sc, nr);

	/* Record the original scan target for proportional adjustments */
	memcpy(targets, nr, sizeof(nr));

	/*
	 * Global direct reclaim at DEF_PRIORITY is a normal request
	 * that just wants SWAP_CLUSTER_MAX pages; don't bother keeping
	 * anon/file scanning proportional for it.  kswapd and memcg
	 * reclaim do stay proportional (skip the rebalancing below
	 * would be wrong for them).
	 */
	scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
			 sc->priority == DEF_PRIORITY);

	blk_start_plug(&plug);
	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		unsigned long nr_anon, nr_file, percentage;
		unsigned long nr_scanned;

		for_each_evictable_lru(lru) {
			if (nr[lru]) {
				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
				nr[lru] -= nr_to_scan;

				nr_reclaimed += shrink_list(lru, nr_to_scan,
							    lruvec, sc);
			}
		}

		cond_resched();

		if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
			continue;

		/*
		 * Enough was reclaimed.  Instead of stopping outright,
		 * finish scanning the smaller of the two types and
		 * scale the other down proportionally, so the relative
		 * pressure between anon and file is preserved.
		 */
		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];

		/*
		 * If one side is already exhausted, nothing to rebalance.
		 */
		if (!nr_file || !nr_anon)
			break;

		if (nr_file > nr_anon) {
			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
						targets[LRU_ACTIVE_ANON] + 1;
			lru = LRU_BASE;
			percentage = nr_anon * 100 / scan_target;
		} else {
			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
						targets[LRU_ACTIVE_FILE] + 1;
			lru = LRU_FILE;
			percentage = nr_file * 100 / scan_target;
		}

		/* Stop scanning the smaller of the LRU */
		nr[lru] = 0;
		nr[lru + LRU_ACTIVE] = 0;

		/*
		 * Recalculate the other LRU's remaining scan target,
		 * crediting what has already been scanned.
		 */
		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= min(nr[lru], nr_scanned);

		lru += LRU_ACTIVE;
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= min(nr[lru], nr_scanned);

		scan_adjusted = true;
	}
	blk_finish_plug(&plug);
	sc->nr_reclaimed += nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
				   sc, LRU_ACTIVE_ANON);
}
2761
2762
2763static bool in_reclaim_compaction(struct scan_control *sc)
2764{
2765 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2766 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2767 sc->priority < DEF_PRIORITY - 2))
2768 return true;
2769
2770 return false;
2771}
2772
2773
2774
2775
2776
2777
2778
2779
2780static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2781 unsigned long nr_reclaimed,
2782 struct scan_control *sc)
2783{
2784 unsigned long pages_for_compaction;
2785 unsigned long inactive_lru_pages;
2786 int z;
2787
2788
2789 if (!in_reclaim_compaction(sc))
2790 return false;
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802 if (!nr_reclaimed)
2803 return false;
2804
2805
2806 for (z = 0; z <= sc->reclaim_idx; z++) {
2807 struct zone *zone = &pgdat->node_zones[z];
2808 if (!managed_zone(zone))
2809 continue;
2810
2811 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2812 case COMPACT_SUCCESS:
2813 case COMPACT_CONTINUE:
2814 return false;
2815 default:
2816
2817 ;
2818 }
2819 }
2820
2821
2822
2823
2824
2825 pages_for_compaction = compact_gap(sc->order);
2826 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2827 if (get_nr_swap_pages() > 0)
2828 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2829
2830 return inactive_lru_pages > pages_for_compaction;
2831}
2832
/*
 * Walk every memcg in the reclaim hierarchy rooted at
 * sc->target_mem_cgroup and shrink its lruvec and slab caches on
 * @pgdat, honoring memory.min / memory.low protection.
 */
static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
{
	struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
	do {
		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
		unsigned long reclaimed;
		unsigned long scanned;

		/*
		 * This loop can become CPU-bound when there are
		 * thousands of cgroups that aren't eligible for
		 * reclaim; be nice to the scheduler.
		 */
		cond_resched();

		mem_cgroup_calculate_protection(target_memcg, memcg);

		if (mem_cgroup_below_min(memcg)) {
			/*
			 * Hard protection (memory.min): never reclaim
			 * from this group.
			 */
			continue;
		} else if (mem_cgroup_below_low(memcg)) {
			/*
			 * Soft protection (memory.low): skip unless the
			 * whole hierarchy was already scanned once and
			 * the pressure could not be relieved otherwise,
			 * in which case breach the low limit and record
			 * the event.
			 */
			if (!sc->memcg_low_reclaim) {
				sc->memcg_low_skipped = 1;
				continue;
			}
			memcg_memory_event(memcg, MEMCG_LOW);
		}

		reclaimed = sc->nr_reclaimed;
		scanned = sc->nr_scanned;

		shrink_lruvec(lruvec, sc);

		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
			    sc->priority);

		/* Record the group's reclaim efficiency */
		vmpressure(sc->gfp_mask, memcg, false,
			   sc->nr_scanned - scanned,
			   sc->nr_reclaimed - reclaimed);

	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
}
2889
/*
 * Reclaim pages from one NUMA node on behalf of scan_control @sc:
 * compute the reclaim mode (deactivation, cache-trim, file_is_tiny),
 * shrink all memcgs on the node, account the results and apply
 * writeback/congestion throttling, looping while compaction still
 * needs more free pages.
 */
static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long nr_reclaimed, nr_scanned;
	struct lruvec *target_lruvec;
	bool reclaimable = false;
	unsigned long file;

	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);

again:
	memset(&sc->nr, 0, sizeof(sc->nr));

	nr_reclaimed = sc->nr_reclaimed;
	nr_scanned = sc->nr_scanned;

	/*
	 * Snapshot the per-lruvec reclaim cost counters for the
	 * anon/file balancing in get_scan_count().
	 */
	spin_lock_irq(&target_lruvec->lru_lock);
	sc->anon_cost = target_lruvec->anon_cost;
	sc->file_cost = target_lruvec->file_cost;
	spin_unlock_irq(&target_lruvec->lru_lock);

	/*
	 * Only age (deactivate) a list when its inactive side shows
	 * refault activity or is undersized; force_deactivate overrides
	 * this on the OOM-avoidance retry.
	 */
	if (!sc->force_deactivate) {
		unsigned long refaults;

		refaults = lruvec_page_state(target_lruvec,
				WORKINGSET_ACTIVATE_ANON);
		if (refaults != target_lruvec->refaults[0] ||
			inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
			sc->may_deactivate |= DEACTIVATE_ANON;
		else
			sc->may_deactivate &= ~DEACTIVATE_ANON;

		/*
		 * When refaults are being observed, it means a new
		 * workingset is being established; deactivate to get
		 * rid of any stale active pages quickly.
		 */
		refaults = lruvec_page_state(target_lruvec,
				WORKINGSET_ACTIVATE_FILE);
		if (refaults != target_lruvec->refaults[1] ||
		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
			sc->may_deactivate |= DEACTIVATE_FILE;
		else
			sc->may_deactivate &= ~DEACTIVATE_FILE;
	} else
		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;

	/*
	 * If inactive cache is plentiful and no file deactivation is
	 * needed, reclaim cache only ("cache trim mode").
	 */
	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
		sc->cache_trim_mode = 1;
	else
		sc->cache_trim_mode = 0;

	/*
	 * For global reclaim, detect whether the file cache has shrunk
	 * to the point where even file + free pages cannot satisfy the
	 * high watermarks; then anon must be scanned despite low
	 * swappiness.  Cgroup limits are per-memcg, so this heuristic
	 * does not apply to cgroup reclaim.
	 */
	if (!cgroup_reclaim(sc)) {
		unsigned long total_high_wmark = 0;
		unsigned long free, anon;
		int z;

		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
			   node_page_state(pgdat, NR_INACTIVE_FILE);

		for (z = 0; z < MAX_NR_ZONES; z++) {
			struct zone *zone = &pgdat->node_zones[z];
			if (!managed_zone(zone))
				continue;

			total_high_wmark += high_wmark_pages(zone);
		}

		/*
		 * Consider anon: if that's low too, this isn't a
		 * runaway file reclaim problem, but rather just
		 * extreme pressure.
		 */
		anon = node_page_state(pgdat, NR_INACTIVE_ANON);

		sc->file_is_tiny =
			file + free <= total_high_wmark &&
			!(sc->may_deactivate & DEACTIVATE_ANON) &&
			anon >> sc->priority;
	}

	shrink_node_memcgs(pgdat, sc);

	/* Fold in slab pages freed by this task's shrinker calls. */
	if (reclaim_state) {
		sc->nr_reclaimed += reclaim_state->reclaimed_slab;
		reclaim_state->reclaimed_slab = 0;
	}

	/* Record the subtree's reclaim efficiency */
	vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
		   sc->nr_scanned - nr_scanned,
		   sc->nr_reclaimed - nr_reclaimed);

	if (sc->nr_reclaimed - nr_reclaimed)
		reclaimable = true;

	if (current_is_kswapd()) {
		/*
		 * If the whole batch was under writeback, tag the node
		 * so direct reclaimers get throttled rather than
		 * spinning on pages they cannot reclaim yet.
		 */
		if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
			set_bit(PGDAT_WRITEBACK, &pgdat->flags);

		/* Allow kswapd to start writing pages during reclaim.*/
		if (sc->nr.unqueued_dirty == sc->nr.file_taken)
			set_bit(PGDAT_DIRTY, &pgdat->flags);

		/*
		 * Pages flagged for immediate reclaim are cycling
		 * through the LRU faster than the flushers can clean
		 * them; stall kswapd briefly to let IO catch up.
		 */
		if (sc->nr.immediate)
			congestion_wait(BLK_RW_ASYNC, HZ/10);
	}

	/*
	 * Tag the lruvec congested when the full isolated batch was
	 * dirty and backed by congested devices.  Legacy-memcg direct
	 * reclaim also self-tags here (it cannot rely on kswapd doing
	 * it), provided its writeback throttling is sane.
	 */
	if ((current_is_kswapd() ||
	     (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
	    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
		set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);

	/*
	 * Stall throttleable direct reclaimers on a congested lruvec;
	 * kswapd must keep running to clear the congestion, and
	 * hibernation must not sleep here.
	 */
	if (!current_is_kswapd() && current_may_throttle() &&
	    !sc->hibernation_mode &&
	    test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
		wait_iff_congested(BLK_RW_ASYNC, HZ/10);

	if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
				    sc))
		goto again;

	/*
	 * Any progress resets the kswapd failure counter so direct
	 * reclaim keeps waking kswapd for this node.
	 */
	if (reclaimable)
		pgdat->kswapd_failures = 0;
}
3081
3082
3083
3084
3085
3086
3087static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
3088{
3089 unsigned long watermark;
3090 enum compact_result suitable;
3091
3092 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
3093 if (suitable == COMPACT_SUCCESS)
3094
3095 return true;
3096 if (suitable == COMPACT_SKIPPED)
3097
3098 return false;
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109 watermark = high_wmark_pages(zone) + compact_gap(sc->order);
3110
3111 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
3112}
3113
3114
3115
3116
3117
3118
3119
3120
3121
/*
 * This is the direct reclaim path, for page-allocating processes.  We
 * only try to reclaim pages from zones which will satisfy the caller's
 * allocation request.
 *
 * If a zone is deemed to be full of pinned pages then just give it a
 * light scan, then move on to the next node.
 */
static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	gfp_t orig_mask;
	pg_data_t *last_pgdat = NULL;

	/*
	 * If the number of buffer_heads in the machine exceeds the
	 * maximum allowed level, force direct reclaim to scan the
	 * highmem zone as highmem pages could be pinning lowmem pages
	 * storing buffer_heads.  (Restored before returning.)
	 */
	orig_mask = sc->gfp_mask;
	if (buffer_heads_over_limit) {
		sc->gfp_mask |= __GFP_HIGHMEM;
		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
	}

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					sc->reclaim_idx, sc->nodemask) {
		/*
		 * Everything below applies to global reclaim only;
		 * cgroup reclaim ignores cpusets, compaction readiness
		 * and soft limits.
		 */
		if (!cgroup_reclaim(sc)) {
			/* Respect the caller's cpuset. */
			if (!cpuset_zone_allowed(zone,
						 GFP_KERNEL | __GFP_HARDWALL))
				continue;

			/*
			 * For costly orders, once enough memory is free
			 * for compaction to proceed, stop reclaiming
			 * from this zone and let compaction take over.
			 */
			if (IS_ENABLED(CONFIG_COMPACTION) &&
			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
			    compaction_ready(zone, sc)) {
				sc->compaction_ready = true;
				continue;
			}

			/*
			 * Soft limit reclaim is per-node; shrink each
			 * node only once even though the zonelist may
			 * present multiple zones of it.
			 */
			if (zone->zone_pgdat == last_pgdat)
				continue;

			/*
			 * Try soft-limit reclaim first.  Its counts are
			 * folded into the scan_control totals; note
			 * that this is scanned in a higher zone context
			 * than it was originally intended for.
			 */
			nr_soft_scanned = 0;
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
						sc->order, sc->gfp_mask,
						&nr_soft_scanned);
			sc->nr_reclaimed += nr_soft_reclaimed;
			sc->nr_scanned += nr_soft_scanned;
			/* need some check for avoid more shrink_zone() */
		}

		/* See comment about same check for global reclaim above */
		if (zone->zone_pgdat == last_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;
		shrink_node(zone->zone_pgdat, sc);
	}

	/*
	 * Restore to original mask to avoid the impact on the caller if
	 * we promoted it to __GFP_HIGHMEM.
	 */
	sc->gfp_mask = orig_mask;
}
3206
3207static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
3208{
3209 struct lruvec *target_lruvec;
3210 unsigned long refaults;
3211
3212 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
3213 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
3214 target_lruvec->refaults[0] = refaults;
3215 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
3216 target_lruvec->refaults[1] = refaults;
3217}
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then
 * we are "out of memory" and something needs to be killed.
 *
 * Returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					  struct scan_control *sc)
{
	int initial_priority = sc->priority;
	pg_data_t *last_pgdat;
	struct zoneref *z;
	struct zone *zone;
retry:
	delayacct_freepages_start();

	if (!cgroup_reclaim(sc))
		__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);

	/* Escalating-priority reclaim loop. */
	do {
		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
				sc->priority);
		sc->nr_scanned = 0;
		shrink_zones(zonelist, sc);

		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			break;

		if (sc->compaction_ready)
			break;

		/*
		 * If we're getting trouble reclaiming, start doing
		 * writepage even in laptop mode.
		 */
		if (sc->priority < DEF_PRIORITY - 2)
			sc->may_writepage = 1;
	} while (--sc->priority >= 0);

	/*
	 * Post-reclaim bookkeeping, once per node in the zonelist:
	 * snapshot refault counters and clear cgroup congestion state.
	 */
	last_pgdat = NULL;
	for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
					sc->nodemask) {
		if (zone->zone_pgdat == last_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;

		snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);

		if (cgroup_reclaim(sc)) {
			struct lruvec *lruvec;

			lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
						   zone->zone_pgdat);
			clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
		}
	}

	delayacct_freepages_end();

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/* Aborted reclaim to try compaction? don't OOM, then */
	if (sc->compaction_ready)
		return 1;

	/*
	 * No progress, but deactivation was skipped somewhere: retry
	 * from the initial priority with forced deactivation before
	 * declaring failure.
	 */
	if (sc->skipped_deactivate) {
		sc->priority = initial_priority;
		sc->force_deactivate = 1;
		sc->skipped_deactivate = 0;
		goto retry;
	}

	/* Untapped cgroup reserves?  Don't OOM, retry. */
	if (sc->memcg_low_skipped) {
		sc->priority = initial_priority;
		sc->force_deactivate = 0;
		sc->memcg_low_reclaim = 1;
		sc->memcg_low_skipped = 0;
		goto retry;
	}

	return 0;
}
3322
/*
 * Returns true when direct reclaim on @pgdat may proceed without being
 * throttled: the free pages of the lowmem zones (<= ZONE_NORMAL) must be
 * above half the sum of their min watermarks (the pfmemalloc reserve).
 * As a side effect, kswapd is woken when the check fails so it can
 * replenish the reserve and later wake the throttled waiters.
 */
static bool allow_direct_reclaim(pg_data_t *pgdat)
{
	struct zone *zone;
	unsigned long pfmemalloc_reserve = 0;
	unsigned long free_pages = 0;
	int i;
	bool wmark_ok;

	/* kswapd has given up on this node; don't throttle anybody. */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	for (i = 0; i <= ZONE_NORMAL; i++) {
		zone = &pgdat->node_zones[i];
		if (!managed_zone(zone))
			continue;

		/* Zones with nothing reclaimable don't contribute. */
		if (!zone_reclaimable_pages(zone))
			continue;

		pfmemalloc_reserve += min_wmark_pages(zone);
		free_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/* If there are no reserves (unexpected config) then do not throttle. */
	if (!pfmemalloc_reserve)
		return true;

	wmark_ok = free_pages > pfmemalloc_reserve / 2;

	/* kswapd must be awake if processes are being throttled. */
	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
		/* Make sure kswapd considers the lowmem zones, too. */
		if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
			WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);

		wake_up_interruptible(&pgdat->kswapd_wait);
	}

	return wmark_ok;
}
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
3373 nodemask_t *nodemask)
3374{
3375 struct zoneref *z;
3376 struct zone *zone;
3377 pg_data_t *pgdat = NULL;
3378
3379
3380
3381
3382
3383
3384
3385
3386 if (current->flags & PF_KTHREAD)
3387 goto out;
3388
3389
3390
3391
3392
3393 if (fatal_signal_pending(current))
3394 goto out;
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410 for_each_zone_zonelist_nodemask(zone, z, zonelist,
3411 gfp_zone(gfp_mask), nodemask) {
3412 if (zone_idx(zone) > ZONE_NORMAL)
3413 continue;
3414
3415
3416 pgdat = zone->zone_pgdat;
3417 if (allow_direct_reclaim(pgdat))
3418 goto out;
3419 break;
3420 }
3421
3422
3423 if (!pgdat)
3424 goto out;
3425
3426
3427 count_vm_event(PGSCAN_DIRECT_THROTTLE);
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437 if (!(gfp_mask & __GFP_FS)) {
3438 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
3439 allow_direct_reclaim(pgdat), HZ);
3440
3441 goto check_pending;
3442 }
3443
3444
3445 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
3446 allow_direct_reclaim(pgdat));
3447
3448check_pending:
3449 if (fatal_signal_pending(current))
3450 return true;
3451
3452out:
3453 return false;
3454}
3455
/*
 * try_to_free_pages - direct reclaim entry point for the page allocator
 * @zonelist: zonelist the failed allocation targeted
 * @order: allocation order that triggered reclaim
 * @gfp_mask: allocation context/constraints
 * @nodemask: allowed nodes, or NULL for all
 *
 * Returns the number of pages reclaimed, or 1 if the task was throttled
 * and received a fatal signal (pretend progress so the allocator retries
 * instead of invoking the OOM killer).
 */
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned long nr_reclaimed;
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.gfp_mask = current_gfp_context(gfp_mask),
		.reclaim_idx = gfp_zone(gfp_mask),
		.order = order,
		.nodemask = nodemask,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = 1,
	};

	/*
	 * scan_control packs order, priority and reclaim_idx into
	 * s8-sized fields (hence the S8_MAX checks); make sure the
	 * possible maximum values still fit.
	 */
	BUILD_BUG_ON(MAX_ORDER > S8_MAX);
	BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
	BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);

	/*
	 * Do not enter reclaim if a fatal signal was delivered while
	 * throttled. 1 is returned so that the page allocator does not
	 * OOM kill at this point.
	 */
	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
		return 1;

	set_task_reclaim_state(current, &sc.reclaim_state);
	trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
	set_task_reclaim_state(current, NULL);

	return nr_reclaimed;
}
3498
3499#ifdef CONFIG_MEMCG
3500
3501
/*
 * Soft-limit reclaim helper: shrink a single memcg's lruvec on @pgdat
 * once, without the usual priority loop (the caller drives repetition).
 * The number of pages scanned is reported back through @nr_scanned;
 * returns the number of pages reclaimed.
 */
unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
				gfp_t gfp_mask, bool noswap,
				pg_data_t *pgdat,
				unsigned long *nr_scanned)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.target_mem_cgroup = memcg,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.reclaim_idx = MAX_NR_ZONES - 1,
		.may_swap = !noswap,
	};

	/* Caller is expected to have set up a reclaim_state already. */
	WARN_ON_ONCE(!current->reclaim_state);

	/* Keep only the caller's reclaim-relevant flags, add movable-highuser bits. */
	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);

	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
						      sc.gfp_mask);

	/*
	 * No priority loop here: sc.priority is left at its default so the
	 * scan is not artificially limited; one shrink_lruvec() pass per
	 * call, repetition is the caller's responsibility.
	 */
	shrink_lruvec(lruvec, &sc);

	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);

	*nr_scanned = sc.nr_scanned;

	return sc.nr_reclaimed;
}
3540
/*
 * Reclaim up to @nr_pages pages charged to @memcg (and its descendants).
 * @may_swap controls whether anon pages may be swapped out.
 * Returns the number of pages reclaimed.
 */
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
					   unsigned long nr_pages,
					   gfp_t gfp_mask,
					   bool may_swap)
{
	unsigned long nr_reclaimed;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
		.reclaim_idx = MAX_NR_ZONES - 1,
		.target_mem_cgroup = memcg,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = may_swap,
	};

	/*
	 * Use the current node's zonelist; cgroup reclaim is not bound to
	 * a particular node, the walk spreads pressure across nodes.
	 */
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);

	set_task_reclaim_state(current, &sc.reclaim_state);
	trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
	/* Prevent allocations made during reclaim from recursing into reclaim. */
	noreclaim_flag = memalloc_noreclaim_save();

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	memalloc_noreclaim_restore(noreclaim_flag);
	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
	set_task_reclaim_state(current, NULL);

	return nr_reclaimed;
}
3578#endif
3579
3580static void age_active_anon(struct pglist_data *pgdat,
3581 struct scan_control *sc)
3582{
3583 struct mem_cgroup *memcg;
3584 struct lruvec *lruvec;
3585
3586 if (!total_swap_pages)
3587 return;
3588
3589 lruvec = mem_cgroup_lruvec(NULL, pgdat);
3590 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
3591 return;
3592
3593 memcg = mem_cgroup_iter(NULL, NULL, NULL);
3594 do {
3595 lruvec = mem_cgroup_lruvec(memcg, pgdat);
3596 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
3597 sc, LRU_ACTIVE_ANON);
3598 memcg = mem_cgroup_iter(NULL, memcg, NULL);
3599 } while (memcg);
3600}
3601
3602static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
3603{
3604 int i;
3605 struct zone *zone;
3606
3607
3608
3609
3610
3611
3612
3613
3614 for (i = highest_zoneidx; i >= 0; i--) {
3615 zone = pgdat->node_zones + i;
3616 if (!managed_zone(zone))
3617 continue;
3618
3619 if (zone->watermark_boost)
3620 return true;
3621 }
3622
3623 return false;
3624}
3625
3626
3627
3628
3629
/*
 * Returns true if there is an eligible zone balanced for the request
 * @order and @highest_zoneidx, i.e. at least one managed zone meets its
 * high watermark.
 */
static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
{
	int i;
	unsigned long mark = -1;	/* ULONG_MAX sentinel: "no managed zone seen" */
	struct zone *zone;

	/*
	 * Check watermarks bottom-up, as lower zones are more likely to
	 * meet them.
	 */
	for (i = 0; i <= highest_zoneidx; i++) {
		zone = pgdat->node_zones + i;

		if (!managed_zone(zone))
			continue;

		mark = high_wmark_pages(zone);
		if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
			return true;
	}

	/*
	 * If the node has no managed zone within highest_zoneidx (mark was
	 * never assigned), it needs no balancing by definition.
	 */
	if (mark == -1)
		return true;

	return false;
}
3661
3662
3663static void clear_pgdat_congested(pg_data_t *pgdat)
3664{
3665 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
3666
3667 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
3668 clear_bit(PGDAT_DIRTY, &pgdat->flags);
3669 clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
3670}
3671
3672
3673
3674
3675
3676
3677
/*
 * Decide whether kswapd may go to sleep: true when the node is balanced
 * for @order/@highest_zoneidx, or when kswapd has failed so often that
 * further spinning is pointless.
 */
static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
				int highest_zoneidx)
{
	/*
	 * Throttled direct reclaimers are normally woken from
	 * balance_pgdat() as soon as allow_direct_reclaim() holds, but
	 * there are races: a process may get throttled after kswapd checked
	 * the watermarks, or memory may be freed (balancing the node) so
	 * that kswapd leaves balance_pgdat() before reaching its wakeup.
	 * If kswapd is about to sleep, nobody should remain sleeping on
	 * pfmemalloc_wait, so wake them here; a premature wakeup merely
	 * gets them throttled again.
	 */
	if (waitqueue_active(&pgdat->pfmemalloc_wait))
		wake_up_all(&pgdat->pfmemalloc_wait);

	/* Hopeless node: leave it to direct reclaim, let kswapd sleep. */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
		clear_pgdat_congested(pgdat);
		return true;
	}

	return false;
}
3708
3709
3710
3711
3712
3713
3714
3715
3716
/*
 * kswapd shrinks a node: one shrink_node() pass targeting the zones at or
 * below sc->reclaim_idx.
 *
 * Returns true if kswapd scanned at least as many pages as it was asked
 * to reclaim — i.e. progress is plausible and the caller need not raise
 * the scan priority.
 */
static bool kswapd_shrink_node(pg_data_t *pgdat,
			       struct scan_control *sc)
{
	struct zone *zone;
	int z;

	/* Reclaim target: sum of the eligible zones' high watermarks. */
	sc->nr_to_reclaim = 0;
	for (z = 0; z <= sc->reclaim_idx; z++) {
		zone = pgdat->node_zones + z;
		if (!managed_zone(zone))
			continue;

		sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
	}

	/* Pressure is applied at node granularity via the node LRUs. */
	shrink_node(pgdat, sc);

	/*
	 * Fragmentation may mean the node cannot be rebalanced for a
	 * high-order request. Once enough has been reclaimed to give
	 * compaction room to work (compact_gap), fall back to checking
	 * watermarks at order-0 to prevent excessive reclaim; a high-order
	 * requester can direct reclaim/compact itself.
	 */
	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
		sc->order = 0;

	return sc->nr_scanned >= sc->nr_to_reclaim;
}
3751
3752
3753static inline void
3754update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
3755{
3756 int i;
3757 struct zone *zone;
3758
3759 for (i = 0; i <= highest_zoneidx; i++) {
3760 zone = pgdat->node_zones + i;
3761
3762 if (!managed_zone(zone))
3763 continue;
3764
3765 if (active)
3766 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
3767 else
3768 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
3769 }
3770}
3771
/* Mark all managed zones up to @highest_zoneidx as under active reclaim. */
static inline void
set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
{
	update_reclaim_active(pgdat, highest_zoneidx, true);
}
3777
/* Clear the active-reclaim marker on all managed zones up to @highest_zoneidx. */
static inline void
clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
{
	update_reclaim_active(pgdat, highest_zoneidx, false);
}
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
/*
 * For kswapd, balance_pgdat() reclaims pages across the node until at
 * least one eligible zone is balanced (free pages above its high
 * watermark), or until reclaim stops making progress.
 *
 * Returns the order kswapd finished reclaiming at (sc.order may be
 * dropped to 0 by kswapd_shrink_node() once compaction has enough room).
 */
static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
{
	int i;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	unsigned long pflags;
	unsigned long nr_boost_reclaim;
	unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
	bool boosted;
	struct zone *zone;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.order = order,
		.may_unmap = 1,
	};

	set_task_reclaim_state(current, &sc.reclaim_state);
	psi_memstall_enter(&pflags);
	__fs_reclaim_acquire();

	count_vm_event(PAGEOUTRUN);

	/*
	 * Account for the reclaim boost. The zone boosts themselves are
	 * left in place so parallel allocations near the watermark keep
	 * stalling or direct-reclaiming until kswapd is finished.
	 */
	nr_boost_reclaim = 0;
	for (i = 0; i <= highest_zoneidx; i++) {
		zone = pgdat->node_zones + i;
		if (!managed_zone(zone))
			continue;

		nr_boost_reclaim += zone->watermark_boost;
		zone_boosts[i] = zone->watermark_boost;
	}
	boosted = nr_boost_reclaim;

restart:
	set_reclaim_active(pgdat, highest_zoneidx);
	sc.priority = DEF_PRIORITY;
	do {
		unsigned long nr_reclaimed = sc.nr_reclaimed;
		bool raise_priority = true;
		bool balanced;
		bool ret;

		sc.reclaim_idx = highest_zoneidx;

		/*
		 * If buffer_heads exceed their limit, consider reclaiming
		 * from all zones: widen reclaim_idx to the highest managed
		 * zone so buffer pages pinned there become eligible too.
		 */
		if (buffer_heads_over_limit) {
			for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
				zone = pgdat->node_zones + i;
				if (!managed_zone(zone))
					continue;

				sc.reclaim_idx = i;
				break;
			}
		}

		/*
		 * If the pgdat is imbalanced, abandon boost-driven reclaim
		 * (preserve the boosts for later) and restart as normal
		 * reclaim; note boosts are still reset in the out: path.
		 */
		balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
		if (!balanced && nr_boost_reclaim) {
			nr_boost_reclaim = 0;
			goto restart;
		}

		/*
		 * Without active boosting, stop as soon as an eligible zone
		 * is balanced.
		 */
		if (!nr_boost_reclaim && balanced)
			goto out;

		/* Cap the priority of boost reclaim to limit its aggression. */
		if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
			raise_priority = false;

		/*
		 * Boost reclaim relieves pressure only; don't issue
		 * sub-optimal writeback/swap IO from reclaim context for it.
		 */
		sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
		sc.may_swap = !nr_boost_reclaim;

		/*
		 * Background-age the anon lists so pages get a chance to be
		 * referenced before they are reclaimed.
		 */
		age_active_anon(pgdat, &sc);

		/*
		 * If we're getting trouble reclaiming, start doing
		 * writepage even in laptop mode.
		 */
		if (sc.priority < DEF_PRIORITY - 2)
			sc.may_writepage = 1;

		/* Soft-limit reclaim runs before the main shrink_node pass. */
		sc.nr_scanned = 0;
		nr_soft_scanned = 0;
		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
						sc.gfp_mask, &nr_soft_scanned);
		sc.nr_reclaimed += nr_soft_reclaimed;

		/*
		 * If enough pages are already being scanned to meet the
		 * watermark at 100% efficiency, there is no need to raise
		 * the scanning priority.
		 */
		if (kswapd_shrink_node(pgdat, &sc))
			raise_priority = false;

		/*
		 * If the pfmemalloc reserve recovered, wake the direct
		 * reclaimers throttled on it.
		 */
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
				allow_direct_reclaim(pgdat))
			wake_up_all(&pgdat->pfmemalloc_wait);

		/* Check if kswapd should be suspending/stopping. */
		__fs_reclaim_release();
		ret = try_to_freeze();
		__fs_reclaim_acquire();
		if (ret || kthread_should_stop())
			break;

		/* Progress made this iteration; credit it against the boost. */
		nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
		nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);

		/*
		 * Boost reclaim that makes no progress must stop: with IO
		 * disabled it could otherwise loop forever.
		 */
		if (nr_boost_reclaim && !nr_reclaimed)
			break;

		/* Raise priority if scanning rate is too low or nothing reclaimed. */
		if (raise_priority || !nr_reclaimed)
			sc.priority--;
	} while (sc.priority >= 1);

	/* Full run without reclaiming anything counts as a kswapd failure. */
	if (!sc.nr_reclaimed)
		pgdat->kswapd_failures++;

out:
	clear_reclaim_active(pgdat, highest_zoneidx);

	/* If reclaim was boosted, unwind the boosts accounted at entry. */
	if (boosted) {
		unsigned long flags;

		for (i = 0; i <= highest_zoneidx; i++) {
			if (!zone_boosts[i])
				continue;

			/* Boost increments happen under the zone lock. */
			zone = pgdat->node_zones + i;
			spin_lock_irqsave(&zone->lock, flags);
			zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
			spin_unlock_irqrestore(&zone->lock, flags);
		}

		/*
		 * There is now likely space: wake kcompactd to defragment
		 * pageblocks.
		 */
		wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
	}

	snapshot_refaults(NULL, pgdat);
	__fs_reclaim_release();
	psi_memstall_leave(&pflags);
	set_task_reclaim_state(current, NULL);

	/*
	 * Return the order reclaim stopped at; prepare_kswapd_sleep() takes
	 * it into account when deciding whether kswapd may sleep.
	 */
	return sc.order;
}
4007
4008
4009
4010
4011
4012
4013
4014
4015static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
4016 enum zone_type prev_highest_zoneidx)
4017{
4018 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
4019
4020 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
4021}
4022
/*
 * Put kswapd to sleep in two stages: a short (~HZ/10) nap first, and only
 * if nothing woke it prematurely and the node still looks balanced, a
 * full sleep until explicitly woken. The short nap gives late wakeup
 * requests a chance to update kswapd_order/kswapd_highest_zoneidx.
 */
static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
				unsigned int highest_zoneidx)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/*
	 * Try to sleep for a short interval. kcompactd is only woken when
	 * a short sleep is possible, on the assumption that if reclaim
	 * cannot balance an eligible zone, compaction is unlikely to
	 * succeed either.
	 */
	if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
		/*
		 * Compaction caches pageblocks it failed to isolate from.
		 * Since kswapd is about to sleep, assume conditions changed
		 * and reset that cache.
		 */
		reset_isolation_suitable(pgdat);

		/*
		 * Memory has been freed; compact it so the originally
		 * requested order can be allocated.
		 */
		wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);

		remaining = schedule_timeout(HZ/10);

		/*
		 * Woken prematurely: re-publish the widest pending request
		 * (zone index and order) so the main loop picks it up.
		 */
		if (remaining) {
			WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
					kswapd_highest_zoneidx(pgdat,
							highest_zoneidx));

			if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
				WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
		}

		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After the short sleep, if it was not cut short and the node still
	 * qualifies, sleep fully until explicitly woken up.
	 */
	if (!remaining &&
	    prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * Per-cpu vmstat thresholds trade accuracy for speed; while
		 * kswapd sleeps, restore the normal (larger) thresholds and
		 * switch back to the tighter pressure thresholds on wakeup.
		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);

		if (!kthread_should_stop())
			schedule();

		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		/* Sleep denied: account which watermark event cut it short. */
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
}
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
/*
 * The background pageout daemon, one kernel thread per node (@p is the
 * node's pg_data_t). It trickles out pages so some free memory is always
 * available even when nothing else frees memory, sleeping between
 * balancing runs via kswapd_try_to_sleep().
 */
static int kswapd(void *p)
{
	unsigned int alloc_order, reclaim_order;
	unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	/* Keep kswapd on the CPUs of its own node when possible. */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	/*
	 * Tell the mm that we are a "memory allocator": PF_MEMALLOC lets
	 * kswapd dip into reserves if it needs a little memory in order to
	 * free more (and protects it from recursing into reclaim),
	 * PF_SWAPWRITE allows writing to swap, and PF_KSWAPD identifies
	 * the thread to the rest of the mm.
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	/* Park the request fields: no pending wakeup request. */
	WRITE_ONCE(pgdat->kswapd_order, 0);
	WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
	for ( ; ; ) {
		bool ret;

		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
							highest_zoneidx);

kswapd_try_sleep:
		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
					highest_zoneidx);

		/* Read the new order and highest_zoneidx, then re-park them. */
		alloc_order = READ_ONCE(pgdat->kswapd_order);
		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
							highest_zoneidx);
		WRITE_ONCE(pgdat->kswapd_order, 0);
		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * Skip balance_pgdat() right after returning from the
		 * refrigerator; this speeds up thawing tasks.
		 */
		if (ret)
			continue;

		/*
		 * Reclaim begins at the requested order. If high-order
		 * reclaim gives up, balance_pgdat() returns a smaller
		 * reclaim_order; sleep is then attempted for that order
		 * while the next wakeup may reinstate the original request.
		 */
		trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
						alloc_order);
		reclaim_order = balance_pgdat(pgdat, alloc_order,
						highest_zoneidx);
		if (reclaim_order < alloc_order)
			goto kswapd_try_sleep;
	}

	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);

	return 0;
}
4197
4198
4199
4200
4201
4202
4203
4204
/*
 * A zone is low on free memory or too fragmented for a high-order
 * allocation: publish the reclaim request (order and highest usable zone
 * index) on the zone's node and wake its kswapd if reclaim is actually
 * warranted.
 */
void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
		   enum zone_type highest_zoneidx)
{
	pg_data_t *pgdat;
	enum zone_type curr_idx;

	if (!managed_zone(zone))
		return;

	if (!cpuset_zone_allowed(zone, gfp_flags))
		return;

	pgdat = zone->zone_pgdat;
	curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);

	/* Widen the pending request so it covers this wakeup too. */
	if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);

	if (READ_ONCE(pgdat->kswapd_order) < order)
		WRITE_ONCE(pgdat->kswapd_order, order);

	/* kswapd is already awake and will see the updated request. */
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;

	/* Hopeless node, or already balanced with no boost pending. */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
	    (pgdat_balanced(pgdat, order, highest_zoneidx) &&
	     !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
		/*
		 * There may be plenty of free memory that is merely too
		 * fragmented for a high-order allocation. For atomic
		 * (!__GFP_DIRECT_RECLAIM) requests, wake kcompactd instead
		 * and let compaction_suitable()/deferral logic rate-limit
		 * the work.
		 */
		if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
			wakeup_kcompactd(pgdat, order, highest_zoneidx);
		return;
	}

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
				      gfp_flags);
	wake_up_interruptible(&pgdat->kswapd_wait);
}
4249
4250#ifdef CONFIG_HIBERNATION
4251
4252
4253
4254
4255
4256
4257
4258
/*
 * Try to free @nr_to_reclaim pages of memory, system-wide, and return the
 * number of reclaimed pages. Used by hibernation to free memory for the
 * suspend image; writepage and swap are permitted unconditionally
 * (hibernation_mode is set).
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct scan_control sc = {
		.nr_to_reclaim = nr_to_reclaim,
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.reclaim_idx = MAX_NR_ZONES - 1,
		.priority = DEF_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
		.hibernation_mode = 1,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	unsigned long nr_reclaimed;
	unsigned int noreclaim_flag;

	/* Note the strict LIFO pairing with the teardown below. */
	fs_reclaim_acquire(sc.gfp_mask);
	noreclaim_flag = memalloc_noreclaim_save();
	set_task_reclaim_state(current, &sc.reclaim_state);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	set_task_reclaim_state(current, NULL);
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);

	return nr_reclaimed;
}
4287#endif
4288
4289
4290
4291
4292
4293int kswapd_run(int nid)
4294{
4295 pg_data_t *pgdat = NODE_DATA(nid);
4296 int ret = 0;
4297
4298 if (pgdat->kswapd)
4299 return 0;
4300
4301 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
4302 if (IS_ERR(pgdat->kswapd)) {
4303
4304 BUG_ON(system_state < SYSTEM_RUNNING);
4305 pr_err("Failed to start kswapd on node %d\n", nid);
4306 ret = PTR_ERR(pgdat->kswapd);
4307 pgdat->kswapd = NULL;
4308 }
4309 return ret;
4310}
4311
4312
4313
4314
4315
4316void kswapd_stop(int nid)
4317{
4318 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
4319
4320 if (kswapd) {
4321 kthread_stop(kswapd);
4322 NODE_DATA(nid)->kswapd = NULL;
4323 }
4324}
4325
4326static int __init kswapd_init(void)
4327{
4328 int nid;
4329
4330 swap_setup();
4331 for_each_node_state(nid, N_MEMORY)
4332 kswapd_run(nid);
4333 return 0;
4334}
4335
4336module_init(kswapd_init)
4337
4338#ifdef CONFIG_NUMA
4339
4340
4341
4342
4343
4344
/*
 * Node reclaim mode: bit mask controlled via sysctl. The RECLAIM_* bits
 * tested below (RECLAIM_UNMAP, RECLAIM_WRITE) are declared elsewhere.
 */
int node_reclaim_mode __read_mostly;

/*
 * Priority for NODE_RECLAIM: determines the fraction of a node's pages
 * considered per reclaim pass (4 corresponds to 1/16th).
 */
#define NODE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages that must be unmapped file pages for node reclaim
 * to occur (presumably the source of pgdat->min_unmapped_pages checked
 * below — confirm where the ratio is converted to pages).
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * Percentage of reclaimable slab pages beyond which slab reclaim is
 * deemed necessary (counterpart of pgdat->min_slab_pages used below).
 */
int sysctl_min_slab_ratio = 5;
4365
4366static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
4367{
4368 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
4369 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
4370 node_page_state(pgdat, NR_ACTIVE_FILE);
4371
4372
4373
4374
4375
4376
4377 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
4378}
4379
4380
4381static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
4382{
4383 unsigned long nr_pagecache_reclaimable;
4384 unsigned long delta = 0;
4385
4386
4387
4388
4389
4390
4391
4392 if (node_reclaim_mode & RECLAIM_UNMAP)
4393 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
4394 else
4395 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
4396
4397
4398 if (!(node_reclaim_mode & RECLAIM_WRITE))
4399 delta += node_page_state(pgdat, NR_FILE_DIRTY);
4400
4401
4402 if (unlikely(delta > nr_pagecache_reclaimable))
4403 delta = nr_pagecache_reclaimable;
4404
4405 return nr_pagecache_reclaimable - delta;
4406}
4407
4408
4409
4410
/*
 * Try to free pages from @pgdat for an order-@order allocation.
 * Returns 1 if at least (1 << order) pages were reclaimed, 0 otherwise.
 */
static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum number of pages that must be freed to count as success. */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = current_gfp_context(gfp_mask),
		.order = order,
		.priority = NODE_RECLAIM_PRIORITY,
		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
		.may_swap = 1,
		.reclaim_idx = gfp_zone(gfp_mask),
	};
	unsigned long pflags;

	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
					   sc.gfp_mask);

	cond_resched();
	psi_memstall_enter(&pflags);
	fs_reclaim_acquire(sc.gfp_mask);
	/*
	 * memalloc_noreclaim_save() lets us allocate from reserves (needed
	 * for RECLAIM_UNMAP) and prevents recursive reclaim; PF_SWAPWRITE
	 * permits writing pages out while we reclaim.
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	p->flags |= PF_SWAPWRITE;
	set_task_reclaim_state(p, &sc.reclaim_state);

	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
		/*
		 * Shrink the node with increasing priorities until enough
		 * pages have been freed or priority 0 is exhausted.
		 */
		do {
			shrink_node(pgdat, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	/* Tear down in reverse order of the setup above. */
	set_task_reclaim_state(p, NULL);
	current->flags &= ~PF_SWAPWRITE;
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);
	psi_memstall_leave(&pflags);

	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);

	return sc.nr_reclaimed >= nr_pages;
}
4464
/*
 * Entry point for NUMA node reclaim. Decides whether reclaiming on
 * @pgdat is worthwhile/permitted and, if so, runs __node_reclaim() under
 * the per-node PGDAT_RECLAIM_LOCKED serialization bit.
 * Returns one of NODE_RECLAIM_FULL / NODE_RECLAIM_NOSCAN or the
 * success/failure result of __node_reclaim().
 */
int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	int ret;

	/*
	 * Only reclaim when there is a reasonable amount of reclaimable
	 * page cache or slab. A portion of unmapped file pages is needed
	 * for file I/O to avoid immediately throwing out freshly read
	 * pages, hence the min_unmapped_pages threshold.
	 */
	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
	    pgdat->min_slab_pages)
		return NODE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation must not block, or if we are
	 * already in a reclaim context (PF_MEMALLOC).
	 */
	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
		return NODE_RECLAIM_NOSCAN;

	/*
	 * Only run node reclaim on the local node or on nodes without
	 * associated processors: this favors the local CPU and spreads
	 * off-node allocations widely instead of reclaiming remotely.
	 */
	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
		return NODE_RECLAIM_NOSCAN;

	/* Serialize: at most one node-reclaim pass per node at a time. */
	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
		return NODE_RECLAIM_NOSCAN;

	ret = __node_reclaim(pgdat, gfp_mask, order);
	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
4510#endif
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
/**
 * check_move_unevictable_pages - check pages for evictability and move to
 * appropriate lru list
 * @pvec: pagevec with lru pages to check
 *
 * If a page in @pvec has become evictable while sitting on the
 * unevictable list, move it to the appropriate evictable lru list.
 * Only for use on pages that are (or were) on an LRU list.
 */
void check_move_unevictable_pages(struct pagevec *pvec)
{
	struct lruvec *lruvec = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < pvec->nr; i++) {
		struct page *page = pvec->pages[i];
		int nr_pages;

		/* THP tail pages are handled via their head page. */
		if (PageTransTail(page))
			continue;

		nr_pages = thp_nr_pages(page);
		pgscanned += nr_pages;

		/*
		 * Clearing PG_lru both claims the page against concurrent
		 * isolation and blocks memcg migration while it is moved
		 * between lists; skip pages we could not claim.
		 */
		if (!TestClearPageLRU(page))
			continue;

		/* Re-lock only when the page belongs to a different lruvec. */
		lruvec = relock_page_lruvec_irq(page, lruvec);
		if (page_evictable(page) && PageUnevictable(page)) {
			del_page_from_lru_list(page, lruvec);
			ClearPageUnevictable(page);
			add_page_to_lru_list(page, lruvec);
			pgrescued += nr_pages;
		}
		SetPageLRU(page);
	}

	if (lruvec) {
		/* Counters updated under the still-held lruvec lock. */
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		unlock_page_lruvec_irq(lruvec);
	} else if (pgscanned) {
		count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
	}
}
4560EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
4561