// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8
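
/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs recorded for this mm
 * @pte_mapped_thp: addresses of the recorded pte-mapped THPs
 */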
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};
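
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */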
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
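
/*
 * max_ptes_none controls how many "none or zero" ptes khugepaged is
 * allowed to fill in while collapsing a range into a hugepage, so it
 * bounds how much extra memory a collapse may allocate. When it is 0,
 * khugepaged only collapses ranges that are already fully mapped.
 * The store handler below caps it at HPAGE_PMD_NR - 1.
 */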
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma became good for khugepaged to scan,
		 * register it now; registration is idempotent, so it is
		 * fine to repeat it for an already-registered mm.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular, read-only, executable file mappings are valid. */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    !inode_is_open_for_write(vma->vm_file->f_inode) &&
	    (vm_flags & VM_EXEC)) {
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only files for non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
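		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap read lock). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_lock.
		 */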
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}
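		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */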
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page, NULL)) {
			/*
			 * Page is in the swap cache and cannot be reused
			 * exclusively by this process, so it cannot be
			 * collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing an hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
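	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of it
	 * being recycled.
	 */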
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif
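
/*
 * If mmap_lock was temporarily dropped, revalidate the vma
 * before taking mmap_lock again.
 * Returns 0 if it succeeds, otherwise a non-zero scan_result code.
 */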
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}
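
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */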
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
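	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */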
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin() always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump out_nolock;
	 * continuing to collapse would be inconsistent.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);

	/*
	 * Prevent all access to the pagetables: no new page table can be
	 * set up while we hold mmap_lock in write mode; gup_fast is dealt
	 * with by pmdp_collapse_flush() below, and rmap by the anon_vma
	 * lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out_up_write;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_user_highpage() writes to become
	 * visible after the set_pmd_at() write.
	 */
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
}

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries.  Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that falls outside of
			 * the registered range.  So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate hugepage from the node has the max
		 * hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}
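		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The check is racy: total_mapcount() can transiently exceed
		 * the refcount (e.g. while another process sharing the page
		 * is concurrently unmapping it), so a false positive is
		 * possible. Such states are ephemeral and a later rescan
		 * will succeed, so the cheap check is good enough here.
		 */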
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
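/*
 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */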
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
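/**
 * collapse_pte_mapped_thp - try to collapse a pte-mapped THP for @mm
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 *
 * This function checks whether all the PTEs in the PMD are pointing to
 * the right THP. If so, retract the page table so the THP can refault in
 * with a huge pmd.
 */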
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));

drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
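		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * mmap_write_lock(mm) as PMD-mapping is likely to be split
		 * later.
		 *
		 * Note that vma->anon_vma check is racy: it can be set up after
		 * the check but before we took mmap_lock by the fault path.
		 * But page lock would prevent establishing any new ptes of the
		 * page, so we are safe.
		 */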
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_lock to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				mm_dec_nr_ptes(mm);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}
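/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
 *
 * @mm: process address space where collapse happens
 * @file: file that collapse on
 * @start: collapse start address
 * @hpage: new allocated huge page for collapse
 * @node: appointed node the new huge page allocate from
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */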
static void collapse_file(struct mm_struct *mm,
			  struct file *file, pgoff_t start,
			  struct page **hpage, int node)
{
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp;
	struct page *new_page;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */
	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOHUGE)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && PageDirty(page)) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			putback_lru_page(page);
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something go wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}
	nr = thp_nr_pages(new_page);

	if (is_shmem)
		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
	else {
		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
			goto xa_locked;
		}
	}

	if (nr_none) {
		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		if (is_shmem)
			set_page_dirty(new_page);
		lru_cache_add(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	/* TODO: tracepoints */
}

static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}
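		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */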
		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
}
#endif

static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times).  Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
			goto skip;

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_lock so break loop */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;

	lru_add_drain_all();

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;
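	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */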
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}