linux/mm/khugepaged.c
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/mm.h>
   5#include <linux/sched.h>
   6#include <linux/sched/mm.h>
   7#include <linux/sched/coredump.h>
   8#include <linux/mmu_notifier.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/mm_inline.h>
  12#include <linux/kthread.h>
  13#include <linux/khugepaged.h>
  14#include <linux/freezer.h>
  15#include <linux/mman.h>
  16#include <linux/hashtable.h>
  17#include <linux/userfaultfd_k.h>
  18#include <linux/page_idle.h>
  19#include <linux/swapops.h>
  20#include <linux/shmem_fs.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/pgalloc.h>
  24#include "internal.h"
  25
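     /*
      * Outcome of a single scan/collapse attempt.  Besides driving the
      * control flow below, the result is passed to the huge_memory
      * tracepoints (trace_mm_khugepaged_scan_pmd(),
      * trace_mm_collapse_huge_page(), ...) so the reason for a failed
      * collapse can be observed from userspace.
      */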
  26enum scan_result {
  27        SCAN_FAIL,
  28        SCAN_SUCCEED,
  29        SCAN_PMD_NULL,
  30        SCAN_EXCEED_NONE_PTE,
  31        SCAN_EXCEED_SWAP_PTE,
  32        SCAN_EXCEED_SHARED_PTE,
  33        SCAN_PTE_NON_PRESENT,
  34        SCAN_PTE_UFFD_WP,
  35        SCAN_PAGE_RO,
  36        SCAN_LACK_REFERENCED_PAGE,
  37        SCAN_PAGE_NULL,
  38        SCAN_SCAN_ABORT,
  39        SCAN_PAGE_COUNT,
  40        SCAN_PAGE_LRU,
  41        SCAN_PAGE_LOCK,
  42        SCAN_PAGE_ANON,
  43        SCAN_PAGE_COMPOUND,
  44        SCAN_ANY_PROCESS,
  45        SCAN_VMA_NULL,
  46        SCAN_VMA_CHECK,
  47        SCAN_ADDRESS_RANGE,
  48        SCAN_SWAP_CACHE_PAGE,
  49        SCAN_DEL_PAGE_LRU,
  50        SCAN_ALLOC_HUGE_PAGE_FAIL,
  51        SCAN_CGROUP_CHARGE_FAIL,
  52        SCAN_TRUNCATED,
  53        SCAN_PAGE_HAS_PRIVATE,
  54};
  55
  56#define CREATE_TRACE_POINTS
  57#include <trace/events/huge_memory.h>
  58
  59static struct task_struct *khugepaged_thread __read_mostly;
  60static DEFINE_MUTEX(khugepaged_mutex);
  61
   62/* default: scan 8*512 ptes (or vmas) every 10 seconds */
  63static unsigned int khugepaged_pages_to_scan __read_mostly;
  64static unsigned int khugepaged_pages_collapsed;
  65static unsigned int khugepaged_full_scans;
  66static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  67/* during fragmentation poll the hugepage allocator once every minute */
  68static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  69static unsigned long khugepaged_sleep_expire;
  70static DEFINE_SPINLOCK(khugepaged_mm_lock);
  71static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
   72/*
   73 * By default, collapse a hugepage if at least one pte is mapped,
   74 * just as would have happened had the vma been large enough at
   75 * page fault time.
   76 */
  77static unsigned int khugepaged_max_ptes_none __read_mostly;
  78static unsigned int khugepaged_max_ptes_swap __read_mostly;
  79static unsigned int khugepaged_max_ptes_shared __read_mostly;
  80
  81#define MM_SLOTS_HASH_BITS 10
  82static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  83
  84static struct kmem_cache *mm_slot_cache __read_mostly;
  85
  86#define MAX_PTE_MAPPED_THP 8
  87
  88/**
  89 * struct mm_slot - hash lookup from mm to mm_slot
  90 * @hash: hash collision list
  91 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  92 * @mm: the mm that this information is valid for
  93 * @nr_pte_mapped_thp: number of pte mapped THP
   94 * @pte_mapped_thp: array of addresses of pte-mapped THPs
  95 */
  96struct mm_slot {
  97        struct hlist_node hash;
  98        struct list_head mm_node;
  99        struct mm_struct *mm;
 100
 101        /* pte-mapped THP in this mm */
 102        int nr_pte_mapped_thp;
 103        unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
 104};
 105
 106/**
 107 * struct khugepaged_scan - cursor for scanning
 108 * @mm_head: the head of the mm list to scan
 109 * @mm_slot: the current mm_slot we are scanning
  110 * @address: the next address inside that mm to be scanned
  111 *
  112 * There is only one khugepaged_scan instance of this cursor structure.
 113 */
 114struct khugepaged_scan {
 115        struct list_head mm_head;
 116        struct mm_slot *mm_slot;
 117        unsigned long address;
 118};
 119
 120static struct khugepaged_scan khugepaged_scan = {
 121        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 122};
 123
 124#ifdef CONFIG_SYSFS
 125static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 126                                         struct kobj_attribute *attr,
 127                                         char *buf)
 128{
 129        return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 130}
 131
 132static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 133                                          struct kobj_attribute *attr,
 134                                          const char *buf, size_t count)
 135{
 136        unsigned int msecs;
 137        int err;
 138
 139        err = kstrtouint(buf, 10, &msecs);
 140        if (err)
 141                return -EINVAL;
 142
 143        khugepaged_scan_sleep_millisecs = msecs;
 144        khugepaged_sleep_expire = 0;
 145        wake_up_interruptible(&khugepaged_wait);
 146
 147        return count;
 148}
 149static struct kobj_attribute scan_sleep_millisecs_attr =
 150        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 151               scan_sleep_millisecs_store);
 152
 153static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 154                                          struct kobj_attribute *attr,
 155                                          char *buf)
 156{
 157        return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 158}
 159
 160static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 161                                           struct kobj_attribute *attr,
 162                                           const char *buf, size_t count)
 163{
 164        unsigned int msecs;
 165        int err;
 166
 167        err = kstrtouint(buf, 10, &msecs);
 168        if (err)
 169                return -EINVAL;
 170
 171        khugepaged_alloc_sleep_millisecs = msecs;
 172        khugepaged_sleep_expire = 0;
 173        wake_up_interruptible(&khugepaged_wait);
 174
 175        return count;
 176}
 177static struct kobj_attribute alloc_sleep_millisecs_attr =
 178        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 179               alloc_sleep_millisecs_store);
 180
 181static ssize_t pages_to_scan_show(struct kobject *kobj,
 182                                  struct kobj_attribute *attr,
 183                                  char *buf)
 184{
 185        return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
 186}
 187static ssize_t pages_to_scan_store(struct kobject *kobj,
 188                                   struct kobj_attribute *attr,
 189                                   const char *buf, size_t count)
 190{
 191        unsigned int pages;
 192        int err;
 193
 194        err = kstrtouint(buf, 10, &pages);
 195        if (err || !pages)
 196                return -EINVAL;
 197
 198        khugepaged_pages_to_scan = pages;
 199
 200        return count;
 201}
 202static struct kobj_attribute pages_to_scan_attr =
 203        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
 204               pages_to_scan_store);
 205
 206static ssize_t pages_collapsed_show(struct kobject *kobj,
 207                                    struct kobj_attribute *attr,
 208                                    char *buf)
 209{
 210        return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
 211}
 212static struct kobj_attribute pages_collapsed_attr =
 213        __ATTR_RO(pages_collapsed);
 214
 215static ssize_t full_scans_show(struct kobject *kobj,
 216                               struct kobj_attribute *attr,
 217                               char *buf)
 218{
 219        return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
 220}
 221static struct kobj_attribute full_scans_attr =
 222        __ATTR_RO(full_scans);
 223
 224static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 225                                      struct kobj_attribute *attr, char *buf)
 226{
 227        return single_hugepage_flag_show(kobj, attr, buf,
 228                                         TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 229}
 230static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 231                                       struct kobj_attribute *attr,
 232                                       const char *buf, size_t count)
 233{
 234        return single_hugepage_flag_store(kobj, attr, buf, count,
 235                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 236}
 237static struct kobj_attribute khugepaged_defrag_attr =
 238        __ATTR(defrag, 0644, khugepaged_defrag_show,
 239               khugepaged_defrag_store);
 240
  241/*
  242 * max_ptes_none controls whether khugepaged may collapse hugepages
  243 * over unmapped ptes, which can increase the memory footprint of the
  244 * vmas. When max_ptes_none is 0, khugepaged will not reduce the
  245 * available free memory in the system as it runs. Increasing
  246 * max_ptes_none instead potentially reduces the free memory in the
  247 * system during the khugepaged scan.
  248 */
 249static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 250                                             struct kobj_attribute *attr,
 251                                             char *buf)
 252{
 253        return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
 254}
 255static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 256                                              struct kobj_attribute *attr,
 257                                              const char *buf, size_t count)
 258{
 259        int err;
 260        unsigned long max_ptes_none;
 261
 262        err = kstrtoul(buf, 10, &max_ptes_none);
 263        if (err || max_ptes_none > HPAGE_PMD_NR-1)
 264                return -EINVAL;
 265
 266        khugepaged_max_ptes_none = max_ptes_none;
 267
 268        return count;
 269}
 270static struct kobj_attribute khugepaged_max_ptes_none_attr =
 271        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 272               khugepaged_max_ptes_none_store);
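     /*
      * Example (assuming HPAGE_PMD_NR == 512, i.e. x86-64 with 4K pages):
      *
      *   # only collapse ranges that are already fully populated
      *   echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
      *
      * The default of HPAGE_PMD_NR - 1 (511) allows a range with a single
      * populated pte to be collapsed.
      */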
 273
 274static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
 275                                             struct kobj_attribute *attr,
 276                                             char *buf)
 277{
 278        return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
 279}
 280
 281static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
 282                                              struct kobj_attribute *attr,
 283                                              const char *buf, size_t count)
 284{
 285        int err;
 286        unsigned long max_ptes_swap;
 287
 288        err  = kstrtoul(buf, 10, &max_ptes_swap);
 289        if (err || max_ptes_swap > HPAGE_PMD_NR-1)
 290                return -EINVAL;
 291
 292        khugepaged_max_ptes_swap = max_ptes_swap;
 293
 294        return count;
 295}
 296
 297static struct kobj_attribute khugepaged_max_ptes_swap_attr =
 298        __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
 299               khugepaged_max_ptes_swap_store);
 300
 301static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
 302                                               struct kobj_attribute *attr,
 303                                               char *buf)
 304{
 305        return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
 306}
 307
 308static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
 309                                              struct kobj_attribute *attr,
 310                                              const char *buf, size_t count)
 311{
 312        int err;
 313        unsigned long max_ptes_shared;
 314
 315        err  = kstrtoul(buf, 10, &max_ptes_shared);
 316        if (err || max_ptes_shared > HPAGE_PMD_NR-1)
 317                return -EINVAL;
 318
 319        khugepaged_max_ptes_shared = max_ptes_shared;
 320
 321        return count;
 322}
 323
 324static struct kobj_attribute khugepaged_max_ptes_shared_attr =
 325        __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
 326               khugepaged_max_ptes_shared_store);
 327
 328static struct attribute *khugepaged_attr[] = {
 329        &khugepaged_defrag_attr.attr,
 330        &khugepaged_max_ptes_none_attr.attr,
 331        &khugepaged_max_ptes_swap_attr.attr,
 332        &khugepaged_max_ptes_shared_attr.attr,
 333        &pages_to_scan_attr.attr,
 334        &pages_collapsed_attr.attr,
 335        &full_scans_attr.attr,
 336        &scan_sleep_millisecs_attr.attr,
 337        &alloc_sleep_millisecs_attr.attr,
 338        NULL,
 339};
 340
 341struct attribute_group khugepaged_attr_group = {
 342        .attrs = khugepaged_attr,
 343        .name = "khugepaged",
 344};
 345#endif /* CONFIG_SYSFS */
 346
 347int hugepage_madvise(struct vm_area_struct *vma,
 348                     unsigned long *vm_flags, int advice)
 349{
 350        switch (advice) {
 351        case MADV_HUGEPAGE:
 352#ifdef CONFIG_S390
 353                /*
 354                 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
 355                 * can't handle this properly after s390_enable_sie, so we simply
 356                 * ignore the madvise to prevent qemu from causing a SIGSEGV.
 357                 */
 358                if (mm_has_pgste(vma->vm_mm))
 359                        return 0;
 360#endif
 361                *vm_flags &= ~VM_NOHUGEPAGE;
 362                *vm_flags |= VM_HUGEPAGE;
  363                /*
  364                 * If the vma becomes good for khugepaged to scan,
  365                 * register it here without waiting for a page fault
  366                 * that may not happen any time soon.
  367                 */
 368                if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
 369                                khugepaged_enter_vma_merge(vma, *vm_flags))
 370                        return -ENOMEM;
 371                break;
 372        case MADV_NOHUGEPAGE:
 373                *vm_flags &= ~VM_HUGEPAGE;
 374                *vm_flags |= VM_NOHUGEPAGE;
 375                /*
 376                 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
 377                 * this vma even if we leave the mm registered in khugepaged if
 378                 * it got registered before VM_NOHUGEPAGE was set.
 379                 */
 380                break;
 381        }
 382
 383        return 0;
 384}
 385
 386int __init khugepaged_init(void)
 387{
 388        mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
 389                                          sizeof(struct mm_slot),
 390                                          __alignof__(struct mm_slot), 0, NULL);
 391        if (!mm_slot_cache)
 392                return -ENOMEM;
 393
 394        khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 395        khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
 396        khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
 397        khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
 398
 399        return 0;
 400}
 401
 402void __init khugepaged_destroy(void)
 403{
 404        kmem_cache_destroy(mm_slot_cache);
 405}
 406
 407static inline struct mm_slot *alloc_mm_slot(void)
 408{
 409        if (!mm_slot_cache)     /* initialization failed */
 410                return NULL;
 411        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
 412}
 413
 414static inline void free_mm_slot(struct mm_slot *mm_slot)
 415{
 416        kmem_cache_free(mm_slot_cache, mm_slot);
 417}
 418
 419static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 420{
 421        struct mm_slot *mm_slot;
 422
 423        hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
 424                if (mm == mm_slot->mm)
 425                        return mm_slot;
 426
 427        return NULL;
 428}
 429
 430static void insert_to_mm_slots_hash(struct mm_struct *mm,
 431                                    struct mm_slot *mm_slot)
 432{
 433        mm_slot->mm = mm;
 434        hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 435}
 436
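     /*
      * khugepaged only pins the mm via mmgrab() (mm_count), not
      * mmget() (mm_users), so the address space may be torn down while
      * the mm is still on the scan list.  An mm is treated as exited
      * once mm_users has dropped to zero.
      */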
 437static inline int khugepaged_test_exit(struct mm_struct *mm)
 438{
 439        return atomic_read(&mm->mm_users) == 0;
 440}
 441
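     /*
      * Decide whether khugepaged is allowed to operate on this vma at
      * all: THP must be enabled for it, shmem and read-only executable
      * file mappings must be suitably aligned, and anonymous vmas must
      * not be special or temporary-stack mappings.
      */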
 442static bool hugepage_vma_check(struct vm_area_struct *vma,
 443                               unsigned long vm_flags)
 444{
 445        if (!transhuge_vma_enabled(vma, vm_flags))
 446                return false;
 447
 448        /* Enabled via shmem mount options or sysfs settings. */
 449        if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
 450                return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 451                                HPAGE_PMD_NR);
 452        }
 453
 454        /* THP settings require madvise. */
 455        if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
 456                return false;
 457
 458        /* Read-only file mappings need to be aligned for THP to work. */
 459        if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
 460            !inode_is_open_for_write(vma->vm_file->f_inode) &&
 461            (vm_flags & VM_EXEC)) {
 462                return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 463                                HPAGE_PMD_NR);
 464        }
 465
 466        if (!vma->anon_vma || vma->vm_ops)
 467                return false;
 468        if (vma_is_temporary_stack(vma))
 469                return false;
 470        return !(vm_flags & VM_NO_KHUGEPAGED);
 471}
 472
 473int __khugepaged_enter(struct mm_struct *mm)
 474{
 475        struct mm_slot *mm_slot;
 476        int wakeup;
 477
 478        mm_slot = alloc_mm_slot();
 479        if (!mm_slot)
 480                return -ENOMEM;
 481
 482        /* __khugepaged_exit() must not run from under us */
 483        VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
 484        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 485                free_mm_slot(mm_slot);
 486                return 0;
 487        }
 488
 489        spin_lock(&khugepaged_mm_lock);
 490        insert_to_mm_slots_hash(mm, mm_slot);
 491        /*
 492         * Insert just behind the scanning cursor, to let the area settle
 493         * down a little.
 494         */
 495        wakeup = list_empty(&khugepaged_scan.mm_head);
 496        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 497        spin_unlock(&khugepaged_mm_lock);
 498
 499        mmgrab(mm);
 500        if (wakeup)
 501                wake_up_interruptible(&khugepaged_wait);
 502
 503        return 0;
 504}
 505
 506int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 507                               unsigned long vm_flags)
 508{
 509        unsigned long hstart, hend;
 510
  511        /*
  512         * For non-shmem files, khugepaged only supports read-only
  513         * mappings. It does not yet work on special mappings, and
  514         * file-private shmem THP is not supported.
  515         */
 516        if (!hugepage_vma_check(vma, vm_flags))
 517                return 0;
 518
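             /*
              * Round vm_start up and vm_end down to PMD boundaries; the
              * vma is only worth registering if at least one fully
              * aligned PMD-sized range fits inside it.
              */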
 519        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 520        hend = vma->vm_end & HPAGE_PMD_MASK;
 521        if (hstart < hend)
 522                return khugepaged_enter(vma, vm_flags);
 523        return 0;
 524}
 525
 526void __khugepaged_exit(struct mm_struct *mm)
 527{
 528        struct mm_slot *mm_slot;
 529        int free = 0;
 530
 531        spin_lock(&khugepaged_mm_lock);
 532        mm_slot = get_mm_slot(mm);
 533        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
 534                hash_del(&mm_slot->hash);
 535                list_del(&mm_slot->mm_node);
 536                free = 1;
 537        }
 538        spin_unlock(&khugepaged_mm_lock);
 539
 540        if (free) {
 541                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
 542                free_mm_slot(mm_slot);
 543                mmdrop(mm);
 544        } else if (mm_slot) {
  545                /*
  546                 * This is required to serialize against
  547                 * khugepaged_test_exit() (which is guaranteed to run
  548                 * with the mmap_lock held for read). Stop here (after
  549                 * we return, all pagetables will be destroyed) until
  550                 * khugepaged has finished working on the pagetables
  551                 * under the mmap_lock.
  552                 */
 553                mmap_write_lock(mm);
 554                mmap_write_unlock(mm);
 555        }
 556}
 557
 558static void release_pte_page(struct page *page)
 559{
 560        mod_node_page_state(page_pgdat(page),
 561                        NR_ISOLATED_ANON + page_is_file_lru(page),
 562                        -compound_nr(page));
 563        unlock_page(page);
 564        putback_lru_page(page);
 565}
 566
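     /*
      * Error path helper for __collapse_huge_page_isolate(): unlock and
      * put back every small page isolated so far in [pte, _pte), plus
      * any compound pages collected on compound_pagelist.
      */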
 567static void release_pte_pages(pte_t *pte, pte_t *_pte,
 568                struct list_head *compound_pagelist)
 569{
 570        struct page *page, *tmp;
 571
 572        while (--_pte >= pte) {
 573                pte_t pteval = *_pte;
 574
 575                page = pte_page(pteval);
 576                if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
 577                                !PageCompound(page))
 578                        release_pte_page(page);
 579        }
 580
 581        list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
 582                list_del(&page->lru);
 583                release_pte_page(page);
 584        }
 585}
 586
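     /*
      * A page is only safe to collapse if the sole references to it are
      * the mappings counted by total_mapcount() plus, for a page in the
      * swap cache, one reference per subpage held by the swap cache.
      * Any extra reference is assumed to be a GUP or other external pin.
      */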
 587static bool is_refcount_suitable(struct page *page)
 588{
 589        int expected_refcount;
 590
 591        expected_refcount = total_mapcount(page);
 592        if (PageSwapCache(page))
 593                expected_refcount += compound_nr(page);
 594
 595        return page_count(page) == expected_refcount;
 596}
 597
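     /*
      * Lock and isolate from the LRU every page mapped by the ptes that
      * are about to be collapsed, enforcing the max_ptes_none /
      * max_ptes_shared limits and rejecting pinned or unreferenced
      * pages.  Returns 1 with all pages locked and isolated on success,
      * 0 after releasing everything on failure.
      */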
 598static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 599                                        unsigned long address,
 600                                        pte_t *pte,
 601                                        struct list_head *compound_pagelist)
 602{
 603        struct page *page = NULL;
 604        pte_t *_pte;
 605        int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
 606        bool writable = false;
 607
 608        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
 609             _pte++, address += PAGE_SIZE) {
 610                pte_t pteval = *_pte;
 611                if (pte_none(pteval) || (pte_present(pteval) &&
 612                                is_zero_pfn(pte_pfn(pteval)))) {
 613                        if (!userfaultfd_armed(vma) &&
 614                            ++none_or_zero <= khugepaged_max_ptes_none) {
 615                                continue;
 616                        } else {
 617                                result = SCAN_EXCEED_NONE_PTE;
 618                                goto out;
 619                        }
 620                }
 621                if (!pte_present(pteval)) {
 622                        result = SCAN_PTE_NON_PRESENT;
 623                        goto out;
 624                }
 625                page = vm_normal_page(vma, address, pteval);
 626                if (unlikely(!page)) {
 627                        result = SCAN_PAGE_NULL;
 628                        goto out;
 629                }
 630
 631                VM_BUG_ON_PAGE(!PageAnon(page), page);
 632
 633                if (page_mapcount(page) > 1 &&
 634                                ++shared > khugepaged_max_ptes_shared) {
 635                        result = SCAN_EXCEED_SHARED_PTE;
 636                        goto out;
 637                }
 638
 639                if (PageCompound(page)) {
 640                        struct page *p;
 641                        page = compound_head(page);
 642
 643                        /*
 644                         * Check if we have dealt with the compound page
 645                         * already
 646                         */
 647                        list_for_each_entry(p, compound_pagelist, lru) {
 648                                if (page == p)
 649                                        goto next;
 650                        }
 651                }
 652
 653                /*
 654                 * We can do it before isolate_lru_page because the
 655                 * page can't be freed from under us. NOTE: PG_lock
 656                 * is needed to serialize against split_huge_page
 657                 * when invoked from the VM.
 658                 */
 659                if (!trylock_page(page)) {
 660                        result = SCAN_PAGE_LOCK;
 661                        goto out;
 662                }
 663
 664                /*
 665                 * Check if the page has any GUP (or other external) pins.
 666                 *
  667                 * The page table that maps the page has already been unlinked
 668                 * from the page table tree and this process cannot get
 669                 * an additional pin on the page.
 670                 *
 671                 * New pins can come later if the page is shared across fork,
 672                 * but not from this process. The other process cannot write to
 673                 * the page, only trigger CoW.
 674                 */
 675                if (!is_refcount_suitable(page)) {
 676                        unlock_page(page);
 677                        result = SCAN_PAGE_COUNT;
 678                        goto out;
 679                }
 680                if (!pte_write(pteval) && PageSwapCache(page) &&
 681                                !reuse_swap_page(page, NULL)) {
 682                        /*
 683                         * Page is in the swap cache and cannot be re-used.
 684                         * It cannot be collapsed into a THP.
 685                         */
 686                        unlock_page(page);
 687                        result = SCAN_SWAP_CACHE_PAGE;
 688                        goto out;
 689                }
 690
 691                /*
  692                 * Isolate the page to avoid collapsing a hugepage
 693                 * currently in use by the VM.
 694                 */
 695                if (isolate_lru_page(page)) {
 696                        unlock_page(page);
 697                        result = SCAN_DEL_PAGE_LRU;
 698                        goto out;
 699                }
 700                mod_node_page_state(page_pgdat(page),
 701                                NR_ISOLATED_ANON + page_is_file_lru(page),
 702                                compound_nr(page));
 703                VM_BUG_ON_PAGE(!PageLocked(page), page);
 704                VM_BUG_ON_PAGE(PageLRU(page), page);
 705
 706                if (PageCompound(page))
 707                        list_add_tail(&page->lru, compound_pagelist);
 708next:
  709                /* There should be enough young ptes to collapse the page */
 710                if (pte_young(pteval) ||
 711                    page_is_young(page) || PageReferenced(page) ||
 712                    mmu_notifier_test_young(vma->vm_mm, address))
 713                        referenced++;
 714
 715                if (pte_write(pteval))
 716                        writable = true;
 717        }
 718
 719        if (unlikely(!writable)) {
 720                result = SCAN_PAGE_RO;
 721        } else if (unlikely(!referenced)) {
 722                result = SCAN_LACK_REFERENCED_PAGE;
 723        } else {
 724                result = SCAN_SUCCEED;
 725                trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 726                                                    referenced, writable, result);
 727                return 1;
 728        }
 729out:
 730        release_pte_pages(pte, _pte, compound_pagelist);
 731        trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 732                                            referenced, writable, result);
 733        return 0;
 734}
 735
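     /*
      * Copy the contents of the isolated small pages into the new huge
      * page (clearing the destination for none/zero ptes), then drop the
      * old ptes, rmap entries and swap cache references.  The old page
      * table has already been detached from the pmd by the caller.
      */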
 736static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 737                                      struct vm_area_struct *vma,
 738                                      unsigned long address,
 739                                      spinlock_t *ptl,
 740                                      struct list_head *compound_pagelist)
 741{
 742        struct page *src_page, *tmp;
 743        pte_t *_pte;
 744        for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 745                                _pte++, page++, address += PAGE_SIZE) {
 746                pte_t pteval = *_pte;
 747
 748                if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 749                        clear_user_highpage(page, address);
 750                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
 751                        if (is_zero_pfn(pte_pfn(pteval))) {
 752                                /*
 753                                 * ptl mostly unnecessary.
 754                                 */
 755                                spin_lock(ptl);
 756                                /*
 757                                 * paravirt calls inside pte_clear here are
 758                                 * superfluous.
 759                                 */
 760                                pte_clear(vma->vm_mm, address, _pte);
 761                                spin_unlock(ptl);
 762                        }
 763                } else {
 764                        src_page = pte_page(pteval);
 765                        copy_user_highpage(page, src_page, address, vma);
 766                        if (!PageCompound(src_page))
 767                                release_pte_page(src_page);
 768                        /*
 769                         * ptl mostly unnecessary, but preempt has to
 770                         * be disabled to update the per-cpu stats
 771                         * inside page_remove_rmap().
 772                         */
 773                        spin_lock(ptl);
 774                        /*
 775                         * paravirt calls inside pte_clear here are
 776                         * superfluous.
 777                         */
 778                        pte_clear(vma->vm_mm, address, _pte);
 779                        page_remove_rmap(src_page, false);
 780                        spin_unlock(ptl);
 781                        free_page_and_swap_cache(src_page);
 782                }
 783        }
 784
 785        list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
 786                list_del(&src_page->lru);
 787                release_pte_page(src_page);
 788        }
 789}
 790
 791static void khugepaged_alloc_sleep(void)
 792{
 793        DEFINE_WAIT(wait);
 794
 795        add_wait_queue(&khugepaged_wait, &wait);
 796        freezable_schedule_timeout_interruptible(
 797                msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 798        remove_wait_queue(&khugepaged_wait, &wait);
 799}
 800
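     /*
      * Per-node count of the pages seen during the current PMD scan;
      * reset for each scan and used to pick the node the new huge page
      * is allocated from.
      */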
 801static int khugepaged_node_load[MAX_NUMNODES];
 802
 803static bool khugepaged_scan_abort(int nid)
 804{
 805        int i;
 806
 807        /*
 808         * If node_reclaim_mode is disabled, then no extra effort is made to
 809         * allocate memory locally.
 810         */
 811        if (!node_reclaim_enabled())
 812                return false;
 813
 814        /* If there is a count for this node already, it must be acceptable */
 815        if (khugepaged_node_load[nid])
 816                return false;
 817
 818        for (i = 0; i < MAX_NUMNODES; i++) {
 819                if (!khugepaged_node_load[i])
 820                        continue;
 821                if (node_distance(nid, i) > node_reclaim_distance)
 822                        return true;
 823        }
 824        return false;
 825}
 826
 827/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
 828static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 829{
 830        return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
 831}
 832
 833#ifdef CONFIG_NUMA
 834static int khugepaged_find_target_node(void)
 835{
 836        static int last_khugepaged_target_node = NUMA_NO_NODE;
 837        int nid, target_node = 0, max_value = 0;
 838
  839        /* find the first node with the max number of hits */
 840        for (nid = 0; nid < MAX_NUMNODES; nid++)
 841                if (khugepaged_node_load[nid] > max_value) {
 842                        max_value = khugepaged_node_load[nid];
 843                        target_node = nid;
 844                }
 845
  846        /* do some balancing if several nodes have the same hit count */
 847        if (target_node <= last_khugepaged_target_node)
 848                for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
 849                                nid++)
 850                        if (max_value == khugepaged_node_load[nid]) {
 851                                target_node = nid;
 852                                break;
 853                        }
 854
 855        last_khugepaged_target_node = target_node;
 856        return target_node;
 857}
 858
 859static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 860{
 861        if (IS_ERR(*hpage)) {
 862                if (!*wait)
 863                        return false;
 864
 865                *wait = false;
 866                *hpage = NULL;
 867                khugepaged_alloc_sleep();
 868        } else if (*hpage) {
 869                put_page(*hpage);
 870                *hpage = NULL;
 871        }
 872
 873        return true;
 874}
 875
 876static struct page *
 877khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 878{
 879        VM_BUG_ON_PAGE(*hpage, *hpage);
 880
 881        *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 882        if (unlikely(!*hpage)) {
 883                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 884                *hpage = ERR_PTR(-ENOMEM);
 885                return NULL;
 886        }
 887
 888        prep_transhuge_page(*hpage);
 889        count_vm_event(THP_COLLAPSE_ALLOC);
 890        return *hpage;
 891}
 892#else
 893static int khugepaged_find_target_node(void)
 894{
 895        return 0;
 896}
 897
 898static inline struct page *alloc_khugepaged_hugepage(void)
 899{
 900        struct page *page;
 901
 902        page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
 903                           HPAGE_PMD_ORDER);
 904        if (page)
 905                prep_transhuge_page(page);
 906        return page;
 907}
 908
 909static struct page *khugepaged_alloc_hugepage(bool *wait)
 910{
 911        struct page *hpage;
 912
 913        do {
 914                hpage = alloc_khugepaged_hugepage();
 915                if (!hpage) {
 916                        count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 917                        if (!*wait)
 918                                return NULL;
 919
 920                        *wait = false;
 921                        khugepaged_alloc_sleep();
 922                } else
 923                        count_vm_event(THP_COLLAPSE_ALLOC);
 924        } while (unlikely(!hpage) && likely(khugepaged_enabled()));
 925
 926        return hpage;
 927}
 928
 929static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 930{
 931        /*
 932         * If the hpage allocated earlier was briefly exposed in page cache
 933         * before collapse_file() failed, it is possible that racing lookups
 934         * have not yet completed, and would then be unpleasantly surprised by
 935         * finding the hpage reused for the same mapping at a different offset.
 936         * Just release the previous allocation if there is any danger of that.
 937         */
 938        if (*hpage && page_count(*hpage) > 1) {
 939                put_page(*hpage);
 940                *hpage = NULL;
 941        }
 942
 943        if (!*hpage)
 944                *hpage = khugepaged_alloc_hugepage(wait);
 945
 946        if (unlikely(!*hpage))
 947                return false;
 948
 949        return true;
 950}
 951
 952static struct page *
 953khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 954{
 955        VM_BUG_ON(!*hpage);
 956
 957        return  *hpage;
 958}
 959#endif
 960
  961/*
  962 * Revalidate the vma after the mmap_lock has been temporarily
  963 * dropped and re-taken.
  964 * Return 0 on success, otherwise return a non-zero scan_result
  965 * code.
  966 */
 967
 968static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 969                struct vm_area_struct **vmap)
 970{
 971        struct vm_area_struct *vma;
 972        unsigned long hstart, hend;
 973
 974        if (unlikely(khugepaged_test_exit(mm)))
 975                return SCAN_ANY_PROCESS;
 976
 977        *vmap = vma = find_vma(mm, address);
 978        if (!vma)
 979                return SCAN_VMA_NULL;
 980
 981        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 982        hend = vma->vm_end & HPAGE_PMD_MASK;
 983        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 984                return SCAN_ADDRESS_RANGE;
 985        if (!hugepage_vma_check(vma, vma->vm_flags))
 986                return SCAN_VMA_CHECK;
 987        /* Anon VMA expected */
 988        if (!vma->anon_vma || vma->vm_ops)
 989                return SCAN_VMA_CHECK;
 990        return 0;
 991}
 992
 993/*
 994 * Bring missing pages in from swap, to complete THP collapse.
 995 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 996 *
 997 * Called and returns without pte mapped or spinlocks held,
 998 * but with mmap_lock held to protect against vma changes.
 999 */
1000
1001static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1002                                        struct vm_area_struct *vma,
1003                                        unsigned long haddr, pmd_t *pmd,
1004                                        int referenced)
1005{
1006        int swapped_in = 0;
1007        vm_fault_t ret = 0;
1008        unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
1009
1010        for (address = haddr; address < end; address += PAGE_SIZE) {
1011                struct vm_fault vmf = {
1012                        .vma = vma,
1013                        .address = address,
1014                        .pgoff = linear_page_index(vma, haddr),
1015                        .flags = FAULT_FLAG_ALLOW_RETRY,
1016                        .pmd = pmd,
1017                };
1018
1019                vmf.pte = pte_offset_map(pmd, address);
1020                vmf.orig_pte = *vmf.pte;
1021                if (!is_swap_pte(vmf.orig_pte)) {
1022                        pte_unmap(vmf.pte);
1023                        continue;
1024                }
1025                swapped_in++;
1026                ret = do_swap_page(&vmf);
1027
1028                /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1029                if (ret & VM_FAULT_RETRY) {
1030                        mmap_read_lock(mm);
1031                        if (hugepage_vma_revalidate(mm, haddr, &vma)) {
1032                                /* vma is no longer available, don't continue to swapin */
1033                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1034                                return false;
1035                        }
1036                        /* check if the pmd is still valid */
1037                        if (mm_find_pmd(mm, haddr) != pmd) {
1038                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1039                                return false;
1040                        }
1041                }
1042                if (ret & VM_FAULT_ERROR) {
1043                        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1044                        return false;
1045                }
1046        }
1047
 1048        /* Drain the LRU add pagevec to remove the extra pin on the swapped-in pages */
1049        if (swapped_in)
1050                lru_add_drain();
1051
1052        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1053        return true;
1054}
1055
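     /*
      * Replace a PMD-sized, PMD-aligned range of small anonymous pages
      * with a single huge page.  Called with the mmap_lock held for
      * read; the lock is dropped for the allocation, re-taken for
      * revalidation and swapin, and then re-taken in write mode for the
      * actual collapse.  The mmap_lock is released on return.
      */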
1056static void collapse_huge_page(struct mm_struct *mm,
1057                                   unsigned long address,
1058                                   struct page **hpage,
1059                                   int node, int referenced, int unmapped)
1060{
1061        LIST_HEAD(compound_pagelist);
1062        pmd_t *pmd, _pmd;
1063        pte_t *pte;
1064        pgtable_t pgtable;
1065        struct page *new_page;
1066        spinlock_t *pmd_ptl, *pte_ptl;
1067        int isolated = 0, result = 0;
1068        struct vm_area_struct *vma;
1069        struct mmu_notifier_range range;
1070        gfp_t gfp;
1071
1072        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1073
1074        /* Only allocate from the target node */
1075        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1076
1077        /*
1078         * Before allocating the hugepage, release the mmap_lock read lock.
1079         * The allocation can take potentially a long time if it involves
1080         * sync compaction, and we do not need to hold the mmap_lock during
1081         * that. We will recheck the vma after taking it again in write mode.
1082         */
1083        mmap_read_unlock(mm);
1084        new_page = khugepaged_alloc_page(hpage, gfp, node);
1085        if (!new_page) {
1086                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1087                goto out_nolock;
1088        }
1089
1090        if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1091                result = SCAN_CGROUP_CHARGE_FAIL;
1092                goto out_nolock;
1093        }
1094        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1095
1096        mmap_read_lock(mm);
1097        result = hugepage_vma_revalidate(mm, address, &vma);
1098        if (result) {
1099                mmap_read_unlock(mm);
1100                goto out_nolock;
1101        }
1102
1103        pmd = mm_find_pmd(mm, address);
1104        if (!pmd) {
1105                result = SCAN_PMD_NULL;
1106                mmap_read_unlock(mm);
1107                goto out_nolock;
1108        }
1109
1110        /*
1111         * __collapse_huge_page_swapin always returns with mmap_lock locked.
1112         * If it fails, we release mmap_lock and jump out_nolock.
1113         * Continuing to collapse causes inconsistency.
1114         */
1115        if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1116                                                     pmd, referenced)) {
1117                mmap_read_unlock(mm);
1118                goto out_nolock;
1119        }
1120
1121        mmap_read_unlock(mm);
 1122        /*
 1123         * Prevent all access to the pagetables. The exceptions are
 1124         * gup_fast, which is handled later by the ptep_clear_flush,
 1125         * and the VM, which is handled by the anon_vma lock + PG_lock.
 1126         */
1127        mmap_write_lock(mm);
1128        result = hugepage_vma_revalidate(mm, address, &vma);
1129        if (result)
1130                goto out_up_write;
1131        /* check if the pmd is still valid */
1132        if (mm_find_pmd(mm, address) != pmd)
1133                goto out_up_write;
1134
1135        anon_vma_lock_write(vma->anon_vma);
1136
1137        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1138                                address, address + HPAGE_PMD_SIZE);
1139        mmu_notifier_invalidate_range_start(&range);
1140
1141        pte = pte_offset_map(pmd, address);
1142        pte_ptl = pte_lockptr(mm, pmd);
1143
1144        pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
 1145        /*
 1146         * After this, gup_fast can't run anymore. This also removes
 1147         * any huge TLB entry from the CPU, so we don't allow huge and
 1148         * small TLB entries for the same virtual address, avoiding
 1149         * the risk of CPU bugs in that area.
 1150         */
1151        _pmd = pmdp_collapse_flush(vma, address, pmd);
1152        spin_unlock(pmd_ptl);
1153        mmu_notifier_invalidate_range_end(&range);
1154
1155        spin_lock(pte_ptl);
1156        isolated = __collapse_huge_page_isolate(vma, address, pte,
1157                        &compound_pagelist);
1158        spin_unlock(pte_ptl);
1159
1160        if (unlikely(!isolated)) {
1161                pte_unmap(pte);
1162                spin_lock(pmd_ptl);
1163                BUG_ON(!pmd_none(*pmd));
 1164                /*
 1165                 * We can only use set_pmd_at when establishing
 1166                 * hugepmds and never for establishing regular pmds that
 1167                 * point to regular pagetables. Use pmd_populate for that.
 1168                 */
1169                pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1170                spin_unlock(pmd_ptl);
1171                anon_vma_unlock_write(vma->anon_vma);
1172                result = SCAN_FAIL;
1173                goto out_up_write;
1174        }
1175
1176        /*
1177         * All pages are isolated and locked so anon_vma rmap
1178         * can't run anymore.
1179         */
1180        anon_vma_unlock_write(vma->anon_vma);
1181
1182        __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1183                        &compound_pagelist);
1184        pte_unmap(pte);
 1185        /*
 1186         * spin_lock() below is not the equivalent of smp_wmb(), but
 1187         * the smp_wmb() inside __SetPageUptodate() can be reused to
 1188         * keep the copy_huge_page writes from becoming visible after
 1189         * the set_pmd_at() write.
 1190         */
1191        __SetPageUptodate(new_page);
1192        pgtable = pmd_pgtable(_pmd);
1193
1194        _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1195        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1196
1197        spin_lock(pmd_ptl);
1198        BUG_ON(!pmd_none(*pmd));
1199        page_add_new_anon_rmap(new_page, vma, address, true);
1200        lru_cache_add_inactive_or_unevictable(new_page, vma);
1201        pgtable_trans_huge_deposit(mm, pmd, pgtable);
1202        set_pmd_at(mm, address, pmd, _pmd);
1203        update_mmu_cache_pmd(vma, address, pmd);
1204        spin_unlock(pmd_ptl);
1205
1206        *hpage = NULL;
1207
1208        khugepaged_pages_collapsed++;
1209        result = SCAN_SUCCEED;
1210out_up_write:
1211        mmap_write_unlock(mm);
1212out_nolock:
1213        if (!IS_ERR_OR_NULL(*hpage))
1214                mem_cgroup_uncharge(*hpage);
1215        trace_mm_collapse_huge_page(mm, isolated, result);
1216        return;
1217}
1218
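     /*
      * Scan one PMD-sized, PMD-aligned range of an anonymous vma and
      * decide whether it is worth collapsing.  Returns 1 if
      * collapse_huge_page() was invoked (in which case the mmap_lock
      * has been released), 0 otherwise.
      */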
1219static int khugepaged_scan_pmd(struct mm_struct *mm,
1220                               struct vm_area_struct *vma,
1221                               unsigned long address,
1222                               struct page **hpage)
1223{
1224        pmd_t *pmd;
1225        pte_t *pte, *_pte;
1226        int ret = 0, result = 0, referenced = 0;
1227        int none_or_zero = 0, shared = 0;
1228        struct page *page = NULL;
1229        unsigned long _address;
1230        spinlock_t *ptl;
1231        int node = NUMA_NO_NODE, unmapped = 0;
1232        bool writable = false;
1233
1234        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1235
1236        pmd = mm_find_pmd(mm, address);
1237        if (!pmd) {
1238                result = SCAN_PMD_NULL;
1239                goto out;
1240        }
1241
1242        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1243        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1244        for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1245             _pte++, _address += PAGE_SIZE) {
1246                pte_t pteval = *_pte;
1247                if (is_swap_pte(pteval)) {
1248                        if (++unmapped <= khugepaged_max_ptes_swap) {
1249                                /*
1250                                 * Always be strict with uffd-wp
1251                                 * enabled swap entries.  Please see
1252                                 * comment below for pte_uffd_wp().
1253                                 */
1254                                if (pte_swp_uffd_wp(pteval)) {
1255                                        result = SCAN_PTE_UFFD_WP;
1256                                        goto out_unmap;
1257                                }
1258                                continue;
1259                        } else {
1260                                result = SCAN_EXCEED_SWAP_PTE;
1261                                goto out_unmap;
1262                        }
1263                }
1264                if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1265                        if (!userfaultfd_armed(vma) &&
1266                            ++none_or_zero <= khugepaged_max_ptes_none) {
1267                                continue;
1268                        } else {
1269                                result = SCAN_EXCEED_NONE_PTE;
1270                                goto out_unmap;
1271                        }
1272                }
1273                if (pte_uffd_wp(pteval)) {
 1274                        /*
 1275                         * Don't collapse the page if any of the small
 1276                         * PTEs are armed with uffd write protection.
 1277                         * We could also mark the new huge pmd as
 1278                         * write protected if any of the small ones is
 1279                         * marked, but that could bring unexpected
 1280                         * userfault messages that fall outside of
 1281                         * the registered range.  So, just keep it simple.
 1282                         */
1283                        result = SCAN_PTE_UFFD_WP;
1284                        goto out_unmap;
1285                }
1286                if (pte_write(pteval))
1287                        writable = true;
1288
1289                page = vm_normal_page(vma, _address, pteval);
1290                if (unlikely(!page)) {
1291                        result = SCAN_PAGE_NULL;
1292                        goto out_unmap;
1293                }
1294
1295                if (page_mapcount(page) > 1 &&
1296                                ++shared > khugepaged_max_ptes_shared) {
1297                        result = SCAN_EXCEED_SHARED_PTE;
1298                        goto out_unmap;
1299                }
1300
1301                page = compound_head(page);
1302
 1303                /*
 1304                 * Record which node the original page is from and save this
 1305                 * information to khugepaged_node_load[].
 1306                 * Khugepaged will allocate the hugepage from the node with
 1307                 * the max hit count.
 1308                 */
1309                node = page_to_nid(page);
1310                if (khugepaged_scan_abort(node)) {
1311                        result = SCAN_SCAN_ABORT;
1312                        goto out_unmap;
1313                }
1314                khugepaged_node_load[node]++;
1315                if (!PageLRU(page)) {
1316                        result = SCAN_PAGE_LRU;
1317                        goto out_unmap;
1318                }
1319                if (PageLocked(page)) {
1320                        result = SCAN_PAGE_LOCK;
1321                        goto out_unmap;
1322                }
1323                if (!PageAnon(page)) {
1324                        result = SCAN_PAGE_ANON;
1325                        goto out_unmap;
1326                }
1327
 1328                /*
 1329                 * Check if the page has any GUP (or other external) pins.
 1330                 *
 1331                 * The check is racy: it may see total_mapcount > refcount
 1332                 * in some cases.
 1333                 * For example, take one process with one forked child.
 1334                 * The parent has the PMD split due to MADV_DONTNEED, then
 1335                 * the child is trying to unmap the whole PMD, but khugepaged
 1336                 * may be scanning the parent between the child clearing the
 1337                 * PageDoubleMap flag and decrementing the mapcount.  So
 1338                 * khugepaged may see total_mapcount > refcount.
 1339                 *
 1340                 * But such a case is ephemeral; we can always retry the
 1341                 * collapse later.  However, it may report false positives
 1342                 * if the page has excessive GUP pins (e.g. 512).  Anyway,
 1343                 * the same check will be done again later; the risk seems low.
 1344                 */
1345                if (!is_refcount_suitable(page)) {
1346                        result = SCAN_PAGE_COUNT;
1347                        goto out_unmap;
1348                }
1349                if (pte_young(pteval) ||
1350                    page_is_young(page) || PageReferenced(page) ||
1351                    mmu_notifier_test_young(vma->vm_mm, address))
1352                        referenced++;
1353        }
1354        if (!writable) {
1355                result = SCAN_PAGE_RO;
1356        } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1357                result = SCAN_LACK_REFERENCED_PAGE;
1358        } else {
1359                result = SCAN_SUCCEED;
1360                ret = 1;
1361        }
1362out_unmap:
1363        pte_unmap_unlock(pte, ptl);
1364        if (ret) {
1365                node = khugepaged_find_target_node();
1366                /* collapse_huge_page will return with the mmap_lock released */
1367                collapse_huge_page(mm, address, hpage, node,
1368                                referenced, unmapped);
1369        }
1370out:
1371        trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1372                                     none_or_zero, result, unmapped);
1373        return ret;
1374}
1375
1376static void collect_mm_slot(struct mm_slot *mm_slot)
1377{
1378        struct mm_struct *mm = mm_slot->mm;
1379
1380        lockdep_assert_held(&khugepaged_mm_lock);
1381
1382        if (khugepaged_test_exit(mm)) {
1383                /* free mm_slot */
1384                hash_del(&mm_slot->hash);
1385                list_del(&mm_slot->mm_node);
1386
1387                /*
1388                 * Not strictly needed because the mm exited already.
1389                 *
1390                 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1391                 */
1392
1393                /* khugepaged_mm_lock actually not necessary for the below */
1394                free_mm_slot(mm_slot);
1395                mmdrop(mm);
1396        }
1397}
1398
1399#ifdef CONFIG_SHMEM
 1400/*
 1401 * Notify khugepaged that the given addr of the mm is a pte-mapped THP.
 1402 * Then khugepaged should try to collapse the page table.
 1403 */
1404static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1405                                         unsigned long addr)
1406{
1407        struct mm_slot *mm_slot;
1408
1409        VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1410
1411        spin_lock(&khugepaged_mm_lock);
1412        mm_slot = get_mm_slot(mm);
1413        if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1414                mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1415        spin_unlock(&khugepaged_mm_lock);
1416        return 0;
1417}
1418
1419/**
1420 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 1421 * address @addr.
1422 *
1423 * @mm: process address space where collapse happens
1424 * @addr: THP collapse address
1425 *
1426 * This function checks whether all the PTEs in the PMD are pointing to the
 1427 * right THP. If so, retract the page table so the THP can refault in
 1428 * as pmd-mapped.
1429 */
1430void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1431{
1432        unsigned long haddr = addr & HPAGE_PMD_MASK;
1433        struct vm_area_struct *vma = find_vma(mm, haddr);
1434        struct page *hpage;
1435        pte_t *start_pte, *pte;
1436        pmd_t *pmd, _pmd;
1437        spinlock_t *ptl;
1438        int count = 0;
1439        int i;
1440
1441        if (!vma || !vma->vm_file ||
1442            !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1443                return;
1444
 1445        /*
 1446         * This vm_flags may not have VM_HUGEPAGE if the page was not
 1447         * collapsed by this mm. But we can still collapse if the page is
 1448         * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
 1449         * will not fail the vma for missing VM_HUGEPAGE.
 1450         */
1451        if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1452                return;
1453
1454        hpage = find_lock_page(vma->vm_file->f_mapping,
1455                               linear_page_index(vma, haddr));
1456        if (!hpage)
1457                return;
1458
1459        if (!PageHead(hpage))
1460                goto drop_hpage;
1461
1462        pmd = mm_find_pmd(mm, haddr);
1463        if (!pmd)
1464                goto drop_hpage;
1465
1466        start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1467
1468        /* step 1: check that all mapped PTEs point to the right huge page */
1469        for (i = 0, addr = haddr, pte = start_pte;
1470             i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1471                struct page *page;
1472
1473                /* empty pte, skip */
1474                if (pte_none(*pte))
1475                        continue;
1476
1477                /* page swapped out, abort */
1478                if (!pte_present(*pte))
1479                        goto abort;
1480
1481                page = vm_normal_page(vma, addr, *pte);
1482
1483                /*
1484                 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1485                 * page table, but the new page will not be a subpage of hpage.
1486                 */
1487                if (hpage + i != page)
1488                        goto abort;
1489                count++;
1490        }
1491
1492        /* step 2: adjust rmap */
1493        for (i = 0, addr = haddr, pte = start_pte;
1494             i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1495                struct page *page;
1496
1497                if (pte_none(*pte))
1498                        continue;
1499                page = vm_normal_page(vma, addr, *pte);
1500                page_remove_rmap(page, false);
1501        }
1502
1503        pte_unmap_unlock(start_pte, ptl);
1504
1505        /* step 3: set proper refcount and mm_counters. */
1506        if (count) {
1507                page_ref_sub(hpage, count);
1508                add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1509        }
1510
1511        /* step 4: collapse pmd */
1512        ptl = pmd_lock(vma->vm_mm, pmd);
1513        _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1514        spin_unlock(ptl);
1515        mm_dec_nr_ptes(mm);
1516        pte_free(mm, pmd_pgtable(_pmd));
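        /*
         * A worked view of the accounting above (assuming all HPAGE_PMD_NR
         * PTEs mapped subpages of hpage): step 2 removed that many rmap
         * entries, step 3 dropped the page references those mappings held
         * and subtracted them from the file RSS counter, and step 4 freed
         * the now-unused pte page.  The next fault on the range can then
         * map hpage with a single PMD entry.
         */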
1517
1518drop_hpage:
1519        unlock_page(hpage);
1520        put_page(hpage);
1521        return;
1522
1523abort:
1524        pte_unmap_unlock(start_pte, ptl);
1525        goto drop_hpage;
1526}
1527
1528static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1529{
1530        struct mm_struct *mm = mm_slot->mm;
1531        int i;
1532
1533        if (likely(mm_slot->nr_pte_mapped_thp == 0))
1534                return;
1535
1536        if (!mmap_write_trylock(mm))
1537                return;
1538
1539        if (unlikely(khugepaged_test_exit(mm)))
1540                goto out;
1541
1542        for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1543                collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1544
1545out:
1546        mm_slot->nr_pte_mapped_thp = 0;
1547        mmap_write_unlock(mm);
1548}
1549
1550static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1551{
1552        struct vm_area_struct *vma;
1553        struct mm_struct *mm;
1554        unsigned long addr;
1555        pmd_t *pmd, _pmd;
1556
1557        i_mmap_lock_write(mapping);
1558        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1559                /*
1560                 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1561                 * got written to. These VMAs are likely not worth the cost of
1562                 * taking mmap_write_lock(mm), as the PMD mapping is likely to
1563                 * be split again later.
1564                 *
1565                 * Note that the vma->anon_vma check is racy: it can be set up
1566                 * by the fault path after the check but before we take mmap_lock.
1567                 * But the page lock prevents establishing any new ptes of the
1568                 * page, so we are safe.
1569                 *
1570                 * An alternative would be to drop the check, and instead check
1571                 * that the page table is clear before calling
1572                 * pmdp_collapse_flush() under ptl. That has a higher chance of
1573                 * recovering a THP for the VMA, but also a higher cost.
1574                 */
1575                if (vma->anon_vma)
1576                        continue;
1577                addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1578                if (addr & ~HPAGE_PMD_MASK)
1579                        continue;
1580                if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1581                        continue;
1582                mm = vma->vm_mm;
1583                pmd = mm_find_pmd(mm, addr);
1584                if (!pmd)
1585                        continue;
1586                /*
1587                 * We need the exclusive mmap_lock to retract a page table.
1588                 *
1589                 * We use trylock due to lock inversion: we need to acquire
1590                 * mmap_lock while holding the page lock. The fault path takes
1591                 * them in the reverse order. Trylock is a way to avoid deadlock.
1592                 */
1593                if (mmap_write_trylock(mm)) {
1594                        if (!khugepaged_test_exit(mm)) {
1595                                spinlock_t *ptl = pmd_lock(mm, pmd);
1596                                /* assume page table is clear */
1597                                _pmd = pmdp_collapse_flush(vma, addr, pmd);
1598                                spin_unlock(ptl);
1599                                mm_dec_nr_ptes(mm);
1600                                pte_free(mm, pmd_pgtable(_pmd));
1601                        }
1602                        mmap_write_unlock(mm);
1603                } else {
1604                        /* Try again later */
1605                        khugepaged_add_pte_mapped_thp(mm, addr);
1606                }
1607        }
1608        i_mmap_unlock_write(mapping);
1609}
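
/*
 * A generic userspace sketch of the trylock pattern used above to avoid the
 * ABBA deadlock (not part of this file; pthread mutexes stand in for
 * mmap_lock and the page lock, and the function name is only illustrative).
 */
#if 0
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* "mmap_lock" */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* "page lock" */

static int retract_like_path(void)
{
        pthread_mutex_lock(&lock_b);		/* this path already holds the page lock */
        if (pthread_mutex_trylock(&lock_a)) {
                /* blocking here would invert the order the other path uses: back off */
                pthread_mutex_unlock(&lock_b);
                return 0;			/* try again later */
        }
        /* ... work that needs both locks ... */
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
        return 1;
}
#endif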
1610
1611/**
1612 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge page.
1613 *
1614 * @mm: process address space where the collapse happens
1615 * @file: file that the collapse operates on
1616 * @start: page cache index at which the collapse starts
1617 * @hpage: newly allocated huge page for the collapse
1618 * @node: the NUMA node to allocate the new huge page from
1619 *
1620 * The basic scheme is simple, the details are more complex:
1621 *  - allocate and lock a new huge page;
1622 *  - scan page cache replacing old pages with the new one
1623 *    + swap/gup in pages if necessary;
1624 *    + fill in gaps;
1625 *    + keep old pages around in case rollback is required;
1626 *  - if replacing succeeds:
1627 *    + copy data over;
1628 *    + free old pages;
1629 *    + unlock huge page;
1630 *  - if replacing failed:
1631 *    + put all pages back and unfreeze them;
1632 *    + restore gaps in the page cache;
1633 *    + unlock and free huge page;
1634 */
1635static void collapse_file(struct mm_struct *mm,
1636                struct file *file, pgoff_t start,
1637                struct page **hpage, int node)
1638{
1639        struct address_space *mapping = file->f_mapping;
1640        gfp_t gfp;
1641        struct page *new_page;
1642        pgoff_t index, end = start + HPAGE_PMD_NR;
1643        LIST_HEAD(pagelist);
1644        XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1645        int nr_none = 0, result = SCAN_SUCCEED;
1646        bool is_shmem = shmem_file(file);
1647        int nr;
1648
1649        VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1650        VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1651
1652        /* Only allocate from the target node */
1653        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1654
1655        new_page = khugepaged_alloc_page(hpage, gfp, node);
1656        if (!new_page) {
1657                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1658                goto out;
1659        }
1660
1661        if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1662                result = SCAN_CGROUP_CHARGE_FAIL;
1663                goto out;
1664        }
1665        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1666
1667        /* This will be less messy when we use multi-index entries */
1668        do {
1669                xas_lock_irq(&xas);
1670                xas_create_range(&xas);
1671                if (!xas_error(&xas))
1672                        break;
1673                xas_unlock_irq(&xas);
1674                if (!xas_nomem(&xas, GFP_KERNEL)) {
1675                        result = SCAN_FAIL;
1676                        goto out;
1677                }
1678        } while (1);
1679
1680        __SetPageLocked(new_page);
1681        if (is_shmem)
1682                __SetPageSwapBacked(new_page);
1683        new_page->index = start;
1684        new_page->mapping = mapping;
1685
1686        /*
1687         * At this point the new_page is locked and not up-to-date.
1688         * It's safe to insert it into the page cache, because nobody would
1689         * be able to map it or use it in another way until we unlock it.
1690         */
1691
1692        xas_set(&xas, start);
1693        for (index = start; index < end; index++) {
1694                struct page *page = xas_next(&xas);
1695
1696                VM_BUG_ON(index != xas.xa_index);
1697                if (is_shmem) {
1698                        if (!page) {
1699                                /*
1700                                 * Stop if extent has been truncated or
1701                                 * hole-punched, and is now completely
1702                                 * empty.
1703                                 */
1704                                if (index == start) {
1705                                        if (!xas_next_entry(&xas, end - 1)) {
1706                                                result = SCAN_TRUNCATED;
1707                                                goto xa_locked;
1708                                        }
1709                                        xas_set(&xas, index);
1710                                }
1711                                if (!shmem_charge(mapping->host, 1)) {
1712                                        result = SCAN_FAIL;
1713                                        goto xa_locked;
1714                                }
1715                                xas_store(&xas, new_page);
1716                                nr_none++;
1717                                continue;
1718                        }
1719
1720                        if (xa_is_value(page) || !PageUptodate(page)) {
1721                                xas_unlock_irq(&xas);
1722                                /* swap in or instantiate fallocated page */
1723                                if (shmem_getpage(mapping->host, index, &page,
1724                                                  SGP_NOHUGE)) {
1725                                        result = SCAN_FAIL;
1726                                        goto xa_unlocked;
1727                                }
1728                        } else if (trylock_page(page)) {
1729                                get_page(page);
1730                                xas_unlock_irq(&xas);
1731                        } else {
1732                                result = SCAN_PAGE_LOCK;
1733                                goto xa_locked;
1734                        }
1735                } else {        /* !is_shmem */
1736                        if (!page || xa_is_value(page)) {
1737                                xas_unlock_irq(&xas);
1738                                page_cache_sync_readahead(mapping, &file->f_ra,
1739                                                          file, index,
1740                                                          end - index);
1741                                /* drain pagevecs to help isolate_lru_page() */
1742                                lru_add_drain();
1743                                page = find_lock_page(mapping, index);
1744                                if (unlikely(page == NULL)) {
1745                                        result = SCAN_FAIL;
1746                                        goto xa_unlocked;
1747                                }
1748                        } else if (PageDirty(page)) {
1749                                /*
1750                                 * khugepaged only works on read-only fd,
1751                                 * khugepaged only works on read-only fds,
1752                                 * so this page is dirty because it hasn't
1753                                 * been flushed since the first write. There
1754                                 * won't be new dirty pages.
1755                                 *
1756                                 * Trigger an async flush here and hope the
1757                                 * writeback is done when khugepaged
1758                                 * revisits this page.
1759                                 *
1760                                 * This is a one-off situation. We are not
1761                                 * forcing writeback in a loop.
1762                                xas_unlock_irq(&xas);
1763                                filemap_flush(mapping);
1764                                result = SCAN_FAIL;
1765                                goto xa_unlocked;
1766                        } else if (trylock_page(page)) {
1767                                get_page(page);
1768                                xas_unlock_irq(&xas);
1769                        } else {
1770                                result = SCAN_PAGE_LOCK;
1771                                goto xa_locked;
1772                        }
1773                }
1774
1775                /*
1776                 * The page must be locked, so we can drop the i_pages lock
1777                 * without racing with truncate.
1778                 */
1779                VM_BUG_ON_PAGE(!PageLocked(page), page);
1780
1781                /* make sure the page is up to date */
1782                if (unlikely(!PageUptodate(page))) {
1783                        result = SCAN_FAIL;
1784                        goto out_unlock;
1785                }
1786
1787                /*
1788                 * If file was truncated then extended, or hole-punched, before
1789                 * we locked the first page, then a THP might be there already.
1790                 */
1791                if (PageTransCompound(page)) {
1792                        result = SCAN_PAGE_COMPOUND;
1793                        goto out_unlock;
1794                }
1795
1796                if (page_mapping(page) != mapping) {
1797                        result = SCAN_TRUNCATED;
1798                        goto out_unlock;
1799                }
1800
1801                if (!is_shmem && PageDirty(page)) {
1802                        /*
1803                         * khugepaged only works on read-only fds, so this
1804                         * page is dirty because it hasn't been flushed
1805                         * since the first write.
1806                         */
1807                        result = SCAN_FAIL;
1808                        goto out_unlock;
1809                }
1810
1811                if (isolate_lru_page(page)) {
1812                        result = SCAN_DEL_PAGE_LRU;
1813                        goto out_unlock;
1814                }
1815
1816                if (page_has_private(page) &&
1817                    !try_to_release_page(page, GFP_KERNEL)) {
1818                        result = SCAN_PAGE_HAS_PRIVATE;
1819                        putback_lru_page(page);
1820                        goto out_unlock;
1821                }
1822
1823                if (page_mapped(page))
1824                        unmap_mapping_pages(mapping, index, 1, false);
1825
1826                xas_lock_irq(&xas);
1827                xas_set(&xas, index);
1828
1829                VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1830                VM_BUG_ON_PAGE(page_mapped(page), page);
1831
1832                /*
1833                 * The page is expected to have page_count() == 3:
1834                 *  - we hold a pin on it;
1835                 *  - one reference from page cache;
1836                 *  - one from isolate_lru_page;
1837                 */
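                /*
                 * Any extra transient reference (e.g. a concurrent page cache
                 * lookup or GUP) makes the freeze below fail; the page is then
                 * put back on the LRU and the whole collapse is rolled back.
                 */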
1838                if (!page_ref_freeze(page, 3)) {
1839                        result = SCAN_PAGE_COUNT;
1840                        xas_unlock_irq(&xas);
1841                        putback_lru_page(page);
1842                        goto out_unlock;
1843                }
1844
1845                /*
1846                 * Add the page to the list to be able to undo the collapse if
1847                 * something goes wrong.
1848                 */
1849                list_add_tail(&page->lru, &pagelist);
1850
1851                /* Finally, replace with the new page. */
1852                xas_store(&xas, new_page);
1853                continue;
1854out_unlock:
1855                unlock_page(page);
1856                put_page(page);
1857                goto xa_unlocked;
1858        }
1859        nr = thp_nr_pages(new_page);
1860
1861        if (is_shmem)
1862                __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
1863        else {
1864                __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
1865                filemap_nr_thps_inc(mapping);
1866                /*
1867                 * Paired with smp_mb() in do_dentry_open() to ensure
1868                 * i_writecount is up to date and the update to nr_thps is
1869                 * visible. Ensures the page cache will be truncated if the
1870                 * file is opened writable.
1871                 */
1872                smp_mb();
1873                if (inode_is_open_for_write(mapping->host)) {
1874                        result = SCAN_FAIL;
1875                        __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
1876                        filemap_nr_thps_dec(mapping);
1877                        goto xa_locked;
1878                }
1879        }
1880
1881        if (nr_none) {
1882                __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1883                if (is_shmem)
1884                        __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1885        }
1886
1887xa_locked:
1888        xas_unlock_irq(&xas);
1889xa_unlocked:
1890
1891        if (result == SCAN_SUCCEED) {
1892                struct page *page, *tmp;
1893
1894                /*
1895                 * Replacing the old pages with the new one has succeeded;
1896                 * now we need to copy the content and free the old pages.
1897                 */
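                /*
                 * pagelist is sorted by index; indices with no old page (the
                 * nr_none holes filled earlier) are cleared instead of copied.
                 * Since start is HPAGE_PMD_NR-aligned, index % HPAGE_PMD_NR is
                 * the subpage offset inside new_page: e.g. with HPAGE_PMD_NR
                 * of 512, start == 512 and index == 517 copy into subpage
                 * new_page + 5.
                 */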
1898                index = start;
1899                list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1900                        while (index < page->index) {
1901                                clear_highpage(new_page + (index % HPAGE_PMD_NR));
1902                                index++;
1903                        }
1904                        copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1905                                        page);
1906                        list_del(&page->lru);
1907                        page->mapping = NULL;
1908                        page_ref_unfreeze(page, 1);
1909                        ClearPageActive(page);
1910                        ClearPageUnevictable(page);
1911                        unlock_page(page);
1912                        put_page(page);
1913                        index++;
1914                }
1915                while (index < end) {
1916                        clear_highpage(new_page + (index % HPAGE_PMD_NR));
1917                        index++;
1918                }
1919
1920                SetPageUptodate(new_page);
1921                page_ref_add(new_page, HPAGE_PMD_NR - 1);
1922                if (is_shmem)
1923                        set_page_dirty(new_page);
1924                lru_cache_add(new_page);
1925
1926                /*
1927                 * Remove pte page tables, so we can re-fault the page as huge.
1928                 */
1929                retract_page_tables(mapping, start);
1930                *hpage = NULL;
1931
1932                khugepaged_pages_collapsed++;
1933        } else {
1934                struct page *page;
1935
1936                /* Something went wrong: roll back page cache changes */
1937                xas_lock_irq(&xas);
1938                mapping->nrpages -= nr_none;
1939
1940                if (is_shmem)
1941                        shmem_uncharge(mapping->host, nr_none);
1942
1943                xas_set(&xas, start);
1944                xas_for_each(&xas, page, end - 1) {
1945                        page = list_first_entry_or_null(&pagelist,
1946                                        struct page, lru);
1947                        if (!page || xas.xa_index < page->index) {
1948                                if (!nr_none)
1949                                        break;
1950                                nr_none--;
1951                                /* Put holes back where they were */
1952                                xas_store(&xas, NULL);
1953                                continue;
1954                        }
1955
1956                        VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1957
1958                        /* Unfreeze the page. */
1959                        list_del(&page->lru);
1960                        page_ref_unfreeze(page, 2);
1961                        xas_store(&xas, page);
1962                        xas_pause(&xas);
1963                        xas_unlock_irq(&xas);
1964                        unlock_page(page);
1965                        putback_lru_page(page);
1966                        xas_lock_irq(&xas);
1967                }
1968                VM_BUG_ON(nr_none);
1969                xas_unlock_irq(&xas);
1970
1971                new_page->mapping = NULL;
1972        }
1973
1974        unlock_page(new_page);
1975out:
1976        VM_BUG_ON(!list_empty(&pagelist));
1977        if (!IS_ERR_OR_NULL(*hpage))
1978                mem_cgroup_uncharge(*hpage);
1979        /* TODO: tracepoints */
1980}
1981
1982static void khugepaged_scan_file(struct mm_struct *mm,
1983                struct file *file, pgoff_t start, struct page **hpage)
1984{
1985        struct page *page = NULL;
1986        struct address_space *mapping = file->f_mapping;
1987        XA_STATE(xas, &mapping->i_pages, start);
1988        int present, swap;
1989        int node = NUMA_NO_NODE;
1990        int result = SCAN_SUCCEED;
1991
1992        present = 0;
1993        swap = 0;
1994        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1995        rcu_read_lock();
1996        xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1997                if (xas_retry(&xas, page))
1998                        continue;
1999
2000                if (xa_is_value(page)) {
2001                        if (++swap > khugepaged_max_ptes_swap) {
2002                                result = SCAN_EXCEED_SWAP_PTE;
2003                                break;
2004                        }
2005                        continue;
2006                }
2007
2008                if (PageTransCompound(page)) {
2009                        result = SCAN_PAGE_COMPOUND;
2010                        break;
2011                }
2012
2013                node = page_to_nid(page);
2014                if (khugepaged_scan_abort(node)) {
2015                        result = SCAN_SCAN_ABORT;
2016                        break;
2017                }
2018                khugepaged_node_load[node]++;
2019
2020                if (!PageLRU(page)) {
2021                        result = SCAN_PAGE_LRU;
2022                        break;
2023                }
2024
2025                if (page_count(page) !=
2026                    1 + page_mapcount(page) + page_has_private(page)) {
2027                        result = SCAN_PAGE_COUNT;
2028                        break;
2029                }
2030
2031                /*
2032                 * We probably should check if the page is referenced here, but
2033                 * nobody would transfer pte_young() to PageReferenced() for us.
2034                 * And an rmap walk here is just too costly...
2035                 */
2036
2037                present++;
2038
2039                if (need_resched()) {
2040                        xas_pause(&xas);
2041                        cond_resched_rcu();
2042                }
2043        }
2044        rcu_read_unlock();
2045
2046        if (result == SCAN_SUCCEED) {
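                /*
                 * The range qualifies only if it already has at least
                 * HPAGE_PMD_NR - max_ptes_none present pages.  With the
                 * default max_ptes_none of HPAGE_PMD_NR - 1 (511 for 2M huge
                 * pages built from 4K base pages) a single present page is
                 * enough; max_ptes_none == 0 would require every page to be
                 * present already.
                 */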
2047                if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2048                        result = SCAN_EXCEED_NONE_PTE;
2049                } else {
2050                        node = khugepaged_find_target_node();
2051                        collapse_file(mm, file, start, hpage, node);
2052                }
2053        }
2054
2055        /* TODO: tracepoints */
2056}
2057#else
2058static void khugepaged_scan_file(struct mm_struct *mm,
2059                struct file *file, pgoff_t start, struct page **hpage)
2060{
2061        BUILD_BUG();
2062}
2063
2064static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2065{
2066}
2067#endif
2068
2069static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2070                                            struct page **hpage)
2071        __releases(&khugepaged_mm_lock)
2072        __acquires(&khugepaged_mm_lock)
2073{
2074        struct mm_slot *mm_slot;
2075        struct mm_struct *mm;
2076        struct vm_area_struct *vma;
2077        int progress = 0;
2078
2079        VM_BUG_ON(!pages);
2080        lockdep_assert_held(&khugepaged_mm_lock);
2081
2082        if (khugepaged_scan.mm_slot)
2083                mm_slot = khugepaged_scan.mm_slot;
2084        else {
2085                mm_slot = list_entry(khugepaged_scan.mm_head.next,
2086                                     struct mm_slot, mm_node);
2087                khugepaged_scan.address = 0;
2088                khugepaged_scan.mm_slot = mm_slot;
2089        }
2090        spin_unlock(&khugepaged_mm_lock);
2091        khugepaged_collapse_pte_mapped_thps(mm_slot);
2092
2093        mm = mm_slot->mm;
2094        /*
2095         * Don't wait for the mmap_lock (to avoid long wait times).  Just move to
2096         * the next mm on the list.
2097         */
2098        vma = NULL;
2099        if (unlikely(!mmap_read_trylock(mm)))
2100                goto breakouterloop_mmap_lock;
2101        if (likely(!khugepaged_test_exit(mm)))
2102                vma = find_vma(mm, khugepaged_scan.address);
2103
2104        progress++;
2105        for (; vma; vma = vma->vm_next) {
2106                unsigned long hstart, hend;
2107
2108                cond_resched();
2109                if (unlikely(khugepaged_test_exit(mm))) {
2110                        progress++;
2111                        break;
2112                }
2113                if (!hugepage_vma_check(vma, vma->vm_flags)) {
2114skip:
2115                        progress++;
2116                        continue;
2117                }
2118                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2119                hend = vma->vm_end & HPAGE_PMD_MASK;
2120                if (hstart >= hend)
2121                        goto skip;
2122                if (khugepaged_scan.address > hend)
2123                        goto skip;
2124                if (khugepaged_scan.address < hstart)
2125                        khugepaged_scan.address = hstart;
2126                VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2127                if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2128                        goto skip;
2129
2130                while (khugepaged_scan.address < hend) {
2131                        int ret;
2132                        cond_resched();
2133                        if (unlikely(khugepaged_test_exit(mm)))
2134                                goto breakouterloop;
2135
2136                        VM_BUG_ON(khugepaged_scan.address < hstart ||
2137                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
2138                                  hend);
2139                        if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2140                                struct file *file = get_file(vma->vm_file);
2141                                pgoff_t pgoff = linear_page_index(vma,
2142                                                khugepaged_scan.address);
2143
2144                                mmap_read_unlock(mm);
2145                                ret = 1;
2146                                khugepaged_scan_file(mm, file, pgoff, hpage);
2147                                fput(file);
2148                        } else {
2149                                ret = khugepaged_scan_pmd(mm, vma,
2150                                                khugepaged_scan.address,
2151                                                hpage);
2152                        }
2153                        /* move to next address */
2154                        khugepaged_scan.address += HPAGE_PMD_SIZE;
2155                        progress += HPAGE_PMD_NR;
2156                        if (ret)
2157                                /* we released mmap_lock so break loop */
2158                                goto breakouterloop_mmap_lock;
2159                        if (progress >= pages)
2160                                goto breakouterloop;
2161                }
2162        }
2163breakouterloop:
2164        mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2165breakouterloop_mmap_lock:
2166
2167        spin_lock(&khugepaged_mm_lock);
2168        VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2169        /*
2170         * Release the current mm_slot if this mm is about to die, or
2171         * if we scanned all vmas of this mm.
2172         */
2173        if (khugepaged_test_exit(mm) || !vma) {
2174                /*
2175                 * Make sure that if mm_users reaches zero while
2176                 * khugepaged runs here, khugepaged_exit() will find
2177                 * mm_slot no longer pointing to the exiting mm.
2178                 */
2179                if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2180                        khugepaged_scan.mm_slot = list_entry(
2181                                mm_slot->mm_node.next,
2182                                struct mm_slot, mm_node);
2183                        khugepaged_scan.address = 0;
2184                } else {
2185                        khugepaged_scan.mm_slot = NULL;
2186                        khugepaged_full_scans++;
2187                }
2188
2189                collect_mm_slot(mm_slot);
2190        }
2191
2192        return progress;
2193}
2194
2195static int khugepaged_has_work(void)
2196{
2197        return !list_empty(&khugepaged_scan.mm_head) &&
2198                khugepaged_enabled();
2199}
2200
2201static int khugepaged_wait_event(void)
2202{
2203        return !list_empty(&khugepaged_scan.mm_head) ||
2204                kthread_should_stop();
2205}
2206
2207static void khugepaged_do_scan(void)
2208{
2209        struct page *hpage = NULL;
2210        unsigned int progress = 0, pass_through_head = 0;
2211        unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2212        bool wait = true;
2213
2214        lru_add_drain_all();
2215
2216        while (progress < pages) {
2217                if (!khugepaged_prealloc_page(&hpage, &wait))
2218                        break;
2219
2220                cond_resched();
2221
2222                if (unlikely(kthread_should_stop() || try_to_freeze()))
2223                        break;
2224
2225                spin_lock(&khugepaged_mm_lock);
2226                if (!khugepaged_scan.mm_slot)
2227                        pass_through_head++;
2228                if (khugepaged_has_work() &&
2229                    pass_through_head < 2)
2230                        progress += khugepaged_scan_mm_slot(pages - progress,
2231                                                            &hpage);
2232                else
2233                        progress = pages;
2234                spin_unlock(&khugepaged_mm_lock);
2235        }
2236
2237        if (!IS_ERR_OR_NULL(hpage))
2238                put_page(hpage);
2239}
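
/*
 * Illustrative userspace sketch (not part of this file): the scan rate used
 * above is tunable through sysfs.  pages_to_scan bounds one pass of
 * khugepaged_do_scan() and scan_sleep_millisecs sets the pause between
 * passes; the helper below is only an example of writing those files.
 */
#if 0
#include <stdio.h>

static int write_khugepaged_tunable(const char *name, unsigned long val)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/mm/transparent_hugepage/khugepaged/%s", name);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%lu\n", val);
        return fclose(f);
}

/* e.g. write_khugepaged_tunable("pages_to_scan", 4096); */
#endif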
2240
2241static bool khugepaged_should_wakeup(void)
2242{
2243        return kthread_should_stop() ||
2244               time_after_eq(jiffies, khugepaged_sleep_expire);
2245}
2246
2247static void khugepaged_wait_work(void)
2248{
2249        if (khugepaged_has_work()) {
2250                const unsigned long scan_sleep_jiffies =
2251                        msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2252
2253                if (!scan_sleep_jiffies)
2254                        return;
2255
2256                khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2257                wait_event_freezable_timeout(khugepaged_wait,
2258                                             khugepaged_should_wakeup(),
2259                                             scan_sleep_jiffies);
2260                return;
2261        }
2262
2263        if (khugepaged_enabled())
2264                wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2265}
2266
2267static int khugepaged(void *none)
2268{
2269        struct mm_slot *mm_slot;
2270
2271        set_freezable();
2272        set_user_nice(current, MAX_NICE);
2273
2274        while (!kthread_should_stop()) {
2275                khugepaged_do_scan();
2276                khugepaged_wait_work();
2277        }
2278
2279        spin_lock(&khugepaged_mm_lock);
2280        mm_slot = khugepaged_scan.mm_slot;
2281        khugepaged_scan.mm_slot = NULL;
2282        if (mm_slot)
2283                collect_mm_slot(mm_slot);
2284        spin_unlock(&khugepaged_mm_lock);
2285        return 0;
2286}
2287
2288static void set_recommended_min_free_kbytes(void)
2289{
2290        struct zone *zone;
2291        int nr_zones = 0;
2292        unsigned long recommended_min;
2293
2294        for_each_populated_zone(zone) {
2295                /*
2296                 * We don't need to worry about fragmentation of
2297                 * ZONE_MOVABLE since it only has movable pages.
2298                 */
2299                if (zone_idx(zone) > gfp_zone(GFP_USER))
2300                        continue;
2301
2302                nr_zones++;
2303        }
2304
2305        /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2306        recommended_min = pageblock_nr_pages * nr_zones * 2;
2307
2308        /*
2309         * Make sure that on average at least two pageblocks are almost free
2310         * of another type, one for a migratetype to fall back to and a
2311         * second to avoid subsequent fallbacks of other types. There are 3
2312         * MIGRATE_TYPES we care about.
2313         */
2314        recommended_min += pageblock_nr_pages * nr_zones *
2315                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2316
2317        /* never allow reserving more than 5% of the lowmem */
2318        recommended_min = min(recommended_min,
2319                              (unsigned long) nr_free_buffer_pages() / 20);
2320        recommended_min <<= (PAGE_SHIFT-10);
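        /*
         * A worked example, assuming common x86-64 defaults: with 2M
         * pageblocks (pageblock_nr_pages == 512), three populated zones
         * below ZONE_MOVABLE and MIGRATE_PCPTYPES == 3, the sum above is
         * 512 * 3 * 2 + 512 * 3 * 9 = 16896 pages, i.e. min_free_kbytes is
         * raised to about 66 MB unless that exceeds 5% of lowmem.
         */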
2321
2322        if (recommended_min > min_free_kbytes) {
2323                if (user_min_free_kbytes >= 0)
2324                        pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2325                                min_free_kbytes, recommended_min);
2326
2327                min_free_kbytes = recommended_min;
2328        }
2329        setup_per_zone_wmarks();
2330}
2331
2332int start_stop_khugepaged(void)
2333{
2334        int err = 0;
2335
2336        mutex_lock(&khugepaged_mutex);
2337        if (khugepaged_enabled()) {
2338                if (!khugepaged_thread)
2339                        khugepaged_thread = kthread_run(khugepaged, NULL,
2340                                                        "khugepaged");
2341                if (IS_ERR(khugepaged_thread)) {
2342                        pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2343                        err = PTR_ERR(khugepaged_thread);
2344                        khugepaged_thread = NULL;
2345                        goto fail;
2346                }
2347
2348                if (!list_empty(&khugepaged_scan.mm_head))
2349                        wake_up_interruptible(&khugepaged_wait);
2350
2351                set_recommended_min_free_kbytes();
2352        } else if (khugepaged_thread) {
2353                kthread_stop(khugepaged_thread);
2354                khugepaged_thread = NULL;
2355        }
2356fail:
2357        mutex_unlock(&khugepaged_mutex);
2358        return err;
2359}
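
/*
 * Illustrative userspace sketch (not part of this file):
 * start_stop_khugepaged() runs when the transparent hugepage "enabled"
 * policy changes, which from userspace is driven by a write like the one
 * below ("always", "madvise" or "never").  The helper name is only for
 * illustration.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_thp_enabled(const char *mode)
{
        int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_WRONLY);
        ssize_t ret;

        if (fd < 0)
                return -1;
        ret = write(fd, mode, strlen(mode));
        close(fd);
        return ret < 0 ? -1 : 0;
}
#endif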
2360
2361void khugepaged_min_free_kbytes_update(void)
2362{
2363        mutex_lock(&khugepaged_mutex);
2364        if (khugepaged_enabled() && khugepaged_thread)
2365                set_recommended_min_free_kbytes();
2366        mutex_unlock(&khugepaged_mutex);
2367}
2368