linux/mm/khugepaged.c
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/mm.h>
   5#include <linux/sched.h>
   6#include <linux/sched/mm.h>
   7#include <linux/sched/coredump.h>
   8#include <linux/mmu_notifier.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/mm_inline.h>
  12#include <linux/kthread.h>
  13#include <linux/khugepaged.h>
  14#include <linux/freezer.h>
  15#include <linux/mman.h>
  16#include <linux/hashtable.h>
  17#include <linux/userfaultfd_k.h>
  18#include <linux/page_idle.h>
  19#include <linux/swapops.h>
  20#include <linux/shmem_fs.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/pgalloc.h>
  24#include "internal.h"
  25
  26enum scan_result {
  27        SCAN_FAIL,
  28        SCAN_SUCCEED,
  29        SCAN_PMD_NULL,
  30        SCAN_EXCEED_NONE_PTE,
  31        SCAN_EXCEED_SWAP_PTE,
  32        SCAN_EXCEED_SHARED_PTE,
  33        SCAN_PTE_NON_PRESENT,
  34        SCAN_PTE_UFFD_WP,
  35        SCAN_PAGE_RO,
  36        SCAN_LACK_REFERENCED_PAGE,
  37        SCAN_PAGE_NULL,
  38        SCAN_SCAN_ABORT,
  39        SCAN_PAGE_COUNT,
  40        SCAN_PAGE_LRU,
  41        SCAN_PAGE_LOCK,
  42        SCAN_PAGE_ANON,
  43        SCAN_PAGE_COMPOUND,
  44        SCAN_ANY_PROCESS,
  45        SCAN_VMA_NULL,
  46        SCAN_VMA_CHECK,
  47        SCAN_ADDRESS_RANGE,
  48        SCAN_SWAP_CACHE_PAGE,
  49        SCAN_DEL_PAGE_LRU,
  50        SCAN_ALLOC_HUGE_PAGE_FAIL,
  51        SCAN_CGROUP_CHARGE_FAIL,
  52        SCAN_TRUNCATED,
  53        SCAN_PAGE_HAS_PRIVATE,
  54};
  55
  56#define CREATE_TRACE_POINTS
  57#include <trace/events/huge_memory.h>
  58
  59static struct task_struct *khugepaged_thread __read_mostly;
  60static DEFINE_MUTEX(khugepaged_mutex);
  61
  62/* default: scan 8*512 ptes (or vmas) every 10 seconds */
  63static unsigned int khugepaged_pages_to_scan __read_mostly;
  64static unsigned int khugepaged_pages_collapsed;
  65static unsigned int khugepaged_full_scans;
  66static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  67/* during fragmentation poll the hugepage allocator once every minute */
  68static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  69static unsigned long khugepaged_sleep_expire;
  70static DEFINE_SPINLOCK(khugepaged_mm_lock);
  71static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  72/*
  73 * By default, collapse hugepages if there is at least one pte mapped,
  74 * as would have happened if the vma had been large enough during a
  75 * page fault.
  76 */
  77static unsigned int khugepaged_max_ptes_none __read_mostly;
  78static unsigned int khugepaged_max_ptes_swap __read_mostly;
  79static unsigned int khugepaged_max_ptes_shared __read_mostly;
  80
  81#define MM_SLOTS_HASH_BITS 10
  82static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  83
  84static struct kmem_cache *mm_slot_cache __read_mostly;
  85
  86#define MAX_PTE_MAPPED_THP 8
  87
  88/**
  89 * struct mm_slot - hash lookup from mm to mm_slot
  90 * @hash: hash collision list
  91 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  92 * @mm: the mm that this information is valid for
  93 * @nr_pte_mapped_thp: number of pte-mapped THPs
  94 * @pte_mapped_thp: array of addresses of pte-mapped THPs
  95 */
  96struct mm_slot {
  97        struct hlist_node hash;
  98        struct list_head mm_node;
  99        struct mm_struct *mm;
 100
 101        /* pte-mapped THP in this mm */
 102        int nr_pte_mapped_thp;
 103        unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
 104};
 105
 106/**
 107 * struct khugepaged_scan - cursor for scanning
 108 * @mm_head: the head of the mm list to scan
 109 * @mm_slot: the current mm_slot we are scanning
 110 * @address: the next address inside that mm to be scanned
 111 *
 112 * There is only one khugepaged_scan instance of this cursor structure.
 113 */
 114struct khugepaged_scan {
 115        struct list_head mm_head;
 116        struct mm_slot *mm_slot;
 117        unsigned long address;
 118};
 119
 120static struct khugepaged_scan khugepaged_scan = {
 121        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 122};
 123
 124#ifdef CONFIG_SYSFS
 125static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 126                                         struct kobj_attribute *attr,
 127                                         char *buf)
 128{
 129        return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 130}
 131
 132static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 133                                          struct kobj_attribute *attr,
 134                                          const char *buf, size_t count)
 135{
 136        unsigned int msecs;
 137        int err;
 138
 139        err = kstrtouint(buf, 10, &msecs);
 140        if (err)
 141                return -EINVAL;
 142
 143        khugepaged_scan_sleep_millisecs = msecs;
 144        khugepaged_sleep_expire = 0;
 145        wake_up_interruptible(&khugepaged_wait);
 146
 147        return count;
 148}
 149static struct kobj_attribute scan_sleep_millisecs_attr =
 150        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 151               scan_sleep_millisecs_store);
 152
 153static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 154                                          struct kobj_attribute *attr,
 155                                          char *buf)
 156{
 157        return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 158}
 159
 160static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 161                                           struct kobj_attribute *attr,
 162                                           const char *buf, size_t count)
 163{
 164        unsigned int msecs;
 165        int err;
 166
 167        err = kstrtouint(buf, 10, &msecs);
 168        if (err)
 169                return -EINVAL;
 170
 171        khugepaged_alloc_sleep_millisecs = msecs;
 172        khugepaged_sleep_expire = 0;
 173        wake_up_interruptible(&khugepaged_wait);
 174
 175        return count;
 176}
 177static struct kobj_attribute alloc_sleep_millisecs_attr =
 178        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 179               alloc_sleep_millisecs_store);
 180
 181static ssize_t pages_to_scan_show(struct kobject *kobj,
 182                                  struct kobj_attribute *attr,
 183                                  char *buf)
 184{
 185        return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
 186}
 187static ssize_t pages_to_scan_store(struct kobject *kobj,
 188                                   struct kobj_attribute *attr,
 189                                   const char *buf, size_t count)
 190{
 191        unsigned int pages;
 192        int err;
 193
 194        err = kstrtouint(buf, 10, &pages);
 195        if (err || !pages)
 196                return -EINVAL;
 197
 198        khugepaged_pages_to_scan = pages;
 199
 200        return count;
 201}
 202static struct kobj_attribute pages_to_scan_attr =
 203        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
 204               pages_to_scan_store);
 205
 206static ssize_t pages_collapsed_show(struct kobject *kobj,
 207                                    struct kobj_attribute *attr,
 208                                    char *buf)
 209{
 210        return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
 211}
 212static struct kobj_attribute pages_collapsed_attr =
 213        __ATTR_RO(pages_collapsed);
 214
 215static ssize_t full_scans_show(struct kobject *kobj,
 216                               struct kobj_attribute *attr,
 217                               char *buf)
 218{
 219        return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
 220}
 221static struct kobj_attribute full_scans_attr =
 222        __ATTR_RO(full_scans);
 223
 224static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 225                                      struct kobj_attribute *attr, char *buf)
 226{
 227        return single_hugepage_flag_show(kobj, attr, buf,
 228                                         TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 229}
 230static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 231                                       struct kobj_attribute *attr,
 232                                       const char *buf, size_t count)
 233{
 234        return single_hugepage_flag_store(kobj, attr, buf, count,
 235                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 236}
 237static struct kobj_attribute khugepaged_defrag_attr =
 238        __ATTR(defrag, 0644, khugepaged_defrag_show,
 239               khugepaged_defrag_store);
 240
 241/*
 242 * max_ptes_none controls whether khugepaged should collapse hugepages
 243 * over unmapped ptes, which in turn can increase the memory footprint
 244 * of the vmas. When max_ptes_none is 0, khugepaged will not reduce
 245 * the available free memory in the system as it runs. Increasing
 246 * max_ptes_none will instead potentially reduce the free memory in
 247 * the system during the khugepaged scan.
 248 */
 249static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 250                                             struct kobj_attribute *attr,
 251                                             char *buf)
 252{
 253        return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
 254}
 255static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 256                                              struct kobj_attribute *attr,
 257                                              const char *buf, size_t count)
 258{
 259        int err;
 260        unsigned long max_ptes_none;
 261
 262        err = kstrtoul(buf, 10, &max_ptes_none);
 263        if (err || max_ptes_none > HPAGE_PMD_NR-1)
 264                return -EINVAL;
 265
 266        khugepaged_max_ptes_none = max_ptes_none;
 267
 268        return count;
 269}
 270static struct kobj_attribute khugepaged_max_ptes_none_attr =
 271        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 272               khugepaged_max_ptes_none_store);
 273
 274static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
 275                                             struct kobj_attribute *attr,
 276                                             char *buf)
 277{
 278        return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
 279}
 280
 281static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
 282                                              struct kobj_attribute *attr,
 283                                              const char *buf, size_t count)
 284{
 285        int err;
 286        unsigned long max_ptes_swap;
 287
 288        err  = kstrtoul(buf, 10, &max_ptes_swap);
 289        if (err || max_ptes_swap > HPAGE_PMD_NR-1)
 290                return -EINVAL;
 291
 292        khugepaged_max_ptes_swap = max_ptes_swap;
 293
 294        return count;
 295}
 296
 297static struct kobj_attribute khugepaged_max_ptes_swap_attr =
 298        __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
 299               khugepaged_max_ptes_swap_store);
 300
 301static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
 302                                               struct kobj_attribute *attr,
 303                                               char *buf)
 304{
 305        return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
 306}
 307
 308static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
 309                                              struct kobj_attribute *attr,
 310                                              const char *buf, size_t count)
 311{
 312        int err;
 313        unsigned long max_ptes_shared;
 314
 315        err  = kstrtoul(buf, 10, &max_ptes_shared);
 316        if (err || max_ptes_shared > HPAGE_PMD_NR-1)
 317                return -EINVAL;
 318
 319        khugepaged_max_ptes_shared = max_ptes_shared;
 320
 321        return count;
 322}
 323
 324static struct kobj_attribute khugepaged_max_ptes_shared_attr =
 325        __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
 326               khugepaged_max_ptes_shared_store);
 327
 328static struct attribute *khugepaged_attr[] = {
 329        &khugepaged_defrag_attr.attr,
 330        &khugepaged_max_ptes_none_attr.attr,
 331        &khugepaged_max_ptes_swap_attr.attr,
 332        &khugepaged_max_ptes_shared_attr.attr,
 333        &pages_to_scan_attr.attr,
 334        &pages_collapsed_attr.attr,
 335        &full_scans_attr.attr,
 336        &scan_sleep_millisecs_attr.attr,
 337        &alloc_sleep_millisecs_attr.attr,
 338        NULL,
 339};
 340
 341struct attribute_group khugepaged_attr_group = {
 342        .attrs = khugepaged_attr,
 343        .name = "khugepaged",
 344};
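
/*
 * Usage note (editor's addition, not part of the original source): with
 * CONFIG_SYSFS these attributes are exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, so the tunables above
 * can be inspected and changed from userspace, for example:
 *
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 30000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *
 * The numeric values shown here are only illustrative.
 */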
 345#endif /* CONFIG_SYSFS */
 346
 347int hugepage_madvise(struct vm_area_struct *vma,
 348                     unsigned long *vm_flags, int advice)
 349{
 350        switch (advice) {
 351        case MADV_HUGEPAGE:
 352#ifdef CONFIG_S390
 353                /*
 354                 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
 355                 * can't handle this properly after s390_enable_sie, so we simply
 356                 * ignore the madvise to prevent qemu from causing a SIGSEGV.
 357                 */
 358                if (mm_has_pgste(vma->vm_mm))
 359                        return 0;
 360#endif
 361                *vm_flags &= ~VM_NOHUGEPAGE;
 362                *vm_flags |= VM_HUGEPAGE;
 363                /*
 364                 * If the vma becomes suitable for khugepaged to scan,
 365                 * register it here without waiting for a page fault that
 366                 * may not happen any time soon.
 367                 */
 368                if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
 369                                khugepaged_enter_vma_merge(vma, *vm_flags))
 370                        return -ENOMEM;
 371                break;
 372        case MADV_NOHUGEPAGE:
 373                *vm_flags &= ~VM_HUGEPAGE;
 374                *vm_flags |= VM_NOHUGEPAGE;
 375                /*
 376                 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
 377                 * this vma, even if the mm remains registered in khugepaged
 378                 * because it was registered before VM_NOHUGEPAGE was set.
 379                 */
 380                break;
 381        }
 382
 383        return 0;
 384}
 385
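/*
 * Worked example (editor's note, assuming x86_64 with 4K base pages,
 * where HPAGE_PMD_NR == 512): the defaults set below come out as
 * pages_to_scan = 4096, max_ptes_none = 511, max_ptes_swap = 64 and
 * max_ptes_shared = 256.  Other base page sizes give different values.
 */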
 386int __init khugepaged_init(void)
 387{
 388        mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
 389                                          sizeof(struct mm_slot),
 390                                          __alignof__(struct mm_slot), 0, NULL);
 391        if (!mm_slot_cache)
 392                return -ENOMEM;
 393
 394        khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 395        khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
 396        khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
 397        khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
 398
 399        return 0;
 400}
 401
 402void __init khugepaged_destroy(void)
 403{
 404        kmem_cache_destroy(mm_slot_cache);
 405}
 406
 407static inline struct mm_slot *alloc_mm_slot(void)
 408{
 409        if (!mm_slot_cache)     /* initialization failed */
 410                return NULL;
 411        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
 412}
 413
 414static inline void free_mm_slot(struct mm_slot *mm_slot)
 415{
 416        kmem_cache_free(mm_slot_cache, mm_slot);
 417}
 418
 419static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 420{
 421        struct mm_slot *mm_slot;
 422
 423        hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
 424                if (mm == mm_slot->mm)
 425                        return mm_slot;
 426
 427        return NULL;
 428}
 429
 430static void insert_to_mm_slots_hash(struct mm_struct *mm,
 431                                    struct mm_slot *mm_slot)
 432{
 433        mm_slot->mm = mm;
 434        hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 435}
 436
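/*
 * Editor's note: khugepaged_test_exit() treats mm_users == 0 as "the
 * process is exiting".  khugepaged itself only holds an mmgrab()
 * reference (mm_count), so once the last user reference is gone the mm
 * must not be scanned any further.
 */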
 437static inline int khugepaged_test_exit(struct mm_struct *mm)
 438{
 439        return atomic_read(&mm->mm_users) == 0;
 440}
 441
 442static bool hugepage_vma_check(struct vm_area_struct *vma,
 443                               unsigned long vm_flags)
 444{
 445        if (!transhuge_vma_enabled(vma, vm_flags))
 446                return false;
 447
 448        if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
 449                                vma->vm_pgoff, HPAGE_PMD_NR))
 450                return false;
 451
 452        /* Enabled via shmem mount options or sysfs settings. */
 453        if (shmem_file(vma->vm_file))
 454                return shmem_huge_enabled(vma);
 455
 456        /* THP settings require madvise. */
 457        if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
 458                return false;
 459
 460        /* Only a regular file is valid */
 461        if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
 462            (vm_flags & VM_EXEC)) {
 463                struct inode *inode = vma->vm_file->f_inode;
 464
 465                return !inode_is_open_for_write(inode) &&
 466                        S_ISREG(inode->i_mode);
 467        }
 468
 469        if (!vma->anon_vma || vma->vm_ops)
 470                return false;
 471        if (vma_is_temporary_stack(vma))
 472                return false;
 473        return !(vm_flags & VM_NO_KHUGEPAGED);
 474}
 475
 476int __khugepaged_enter(struct mm_struct *mm)
 477{
 478        struct mm_slot *mm_slot;
 479        int wakeup;
 480
 481        mm_slot = alloc_mm_slot();
 482        if (!mm_slot)
 483                return -ENOMEM;
 484
 485        /* __khugepaged_exit() must not run from under us */
 486        VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
 487        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 488                free_mm_slot(mm_slot);
 489                return 0;
 490        }
 491
 492        spin_lock(&khugepaged_mm_lock);
 493        insert_to_mm_slots_hash(mm, mm_slot);
 494        /*
 495         * Insert just behind the scanning cursor, to let the area settle
 496         * down a little.
 497         */
 498        wakeup = list_empty(&khugepaged_scan.mm_head);
 499        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 500        spin_unlock(&khugepaged_mm_lock);
 501
 502        mmgrab(mm);
 503        if (wakeup)
 504                wake_up_interruptible(&khugepaged_wait);
 505
 506        return 0;
 507}
 508
 509int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 510                               unsigned long vm_flags)
 511{
 512        unsigned long hstart, hend;
 513
 514        /*
 515         * For non-shmem files, khugepaged only supports read-only files.
 516         * khugepaged does not yet work on special mappings. And
 517         * file-private shmem THP is not supported.
 518         */
 519        if (!hugepage_vma_check(vma, vm_flags))
 520                return 0;
 521
 522        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 523        hend = vma->vm_end & HPAGE_PMD_MASK;
 524        if (hstart < hend)
 525                return khugepaged_enter(vma, vm_flags);
 526        return 0;
 527}
 528
 529void __khugepaged_exit(struct mm_struct *mm)
 530{
 531        struct mm_slot *mm_slot;
 532        int free = 0;
 533
 534        spin_lock(&khugepaged_mm_lock);
 535        mm_slot = get_mm_slot(mm);
 536        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
 537                hash_del(&mm_slot->hash);
 538                list_del(&mm_slot->mm_node);
 539                free = 1;
 540        }
 541        spin_unlock(&khugepaged_mm_lock);
 542
 543        if (free) {
 544                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
 545                free_mm_slot(mm_slot);
 546                mmdrop(mm);
 547        } else if (mm_slot) {
 548                /*
 549                 * This is required to serialize against
 550                 * khugepaged_test_exit() (which is guaranteed to run
 551                 * under mmap_lock read mode). Stop here (after we
 552                 * return, all pagetables will be destroyed) until
 553                 * khugepaged has finished working on the pagetables
 554                 * under the mmap_lock.
 555                 */
 556                mmap_write_lock(mm);
 557                mmap_write_unlock(mm);
 558        }
 559}
 560
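/*
 * Editor's note: release_pte_page() undoes what a successful isolation
 * did: it drops the NR_ISOLATED_* accounting added in
 * __collapse_huge_page_isolate(), unlocks the page and puts it back on
 * the LRU.
 */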
 561static void release_pte_page(struct page *page)
 562{
 563        mod_node_page_state(page_pgdat(page),
 564                        NR_ISOLATED_ANON + page_is_file_lru(page),
 565                        -compound_nr(page));
 566        unlock_page(page);
 567        putback_lru_page(page);
 568}
 569
 570static void release_pte_pages(pte_t *pte, pte_t *_pte,
 571                struct list_head *compound_pagelist)
 572{
 573        struct page *page, *tmp;
 574
 575        while (--_pte >= pte) {
 576                pte_t pteval = *_pte;
 577
 578                page = pte_page(pteval);
 579                if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
 580                                !PageCompound(page))
 581                        release_pte_page(page);
 582        }
 583
 584        list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
 585                list_del(&page->lru);
 586                release_pte_page(page);
 587        }
 588}
 589
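/*
 * Editor's note: is_refcount_suitable() checks that nothing but the page
 * tables (and, if applicable, the swap cache) holds a reference.  As a
 * worked example: a non-compound anonymous page mapped into two
 * processes and sitting in the swap cache is expected to have
 * total_mapcount == 2 plus one swap-cache reference, i.e.
 * page_count() == 3; anything higher suggests a GUP or other external
 * pin.
 */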
 590static bool is_refcount_suitable(struct page *page)
 591{
 592        int expected_refcount;
 593
 594        expected_refcount = total_mapcount(page);
 595        if (PageSwapCache(page))
 596                expected_refcount += compound_nr(page);
 597
 598        return page_count(page) == expected_refcount;
 599}
 600
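/*
 * Editor's note: __collapse_huge_page_isolate() walks the HPAGE_PMD_NR
 * ptes under the pte lock, enforcing the max_ptes_none/max_ptes_shared
 * limits, and tries to lock and isolate every mapped page from the LRU
 * so that the subsequent copy cannot race with reclaim or
 * split_huge_page().  It returns 1 on success and 0 on failure, in
 * which case release_pte_pages() rolls back the partial isolation.
 */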
 601static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 602                                        unsigned long address,
 603                                        pte_t *pte,
 604                                        struct list_head *compound_pagelist)
 605{
 606        struct page *page = NULL;
 607        pte_t *_pte;
 608        int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
 609        bool writable = false;
 610
 611        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
 612             _pte++, address += PAGE_SIZE) {
 613                pte_t pteval = *_pte;
 614                if (pte_none(pteval) || (pte_present(pteval) &&
 615                                is_zero_pfn(pte_pfn(pteval)))) {
 616                        if (!userfaultfd_armed(vma) &&
 617                            ++none_or_zero <= khugepaged_max_ptes_none) {
 618                                continue;
 619                        } else {
 620                                result = SCAN_EXCEED_NONE_PTE;
 621                                goto out;
 622                        }
 623                }
 624                if (!pte_present(pteval)) {
 625                        result = SCAN_PTE_NON_PRESENT;
 626                        goto out;
 627                }
 628                page = vm_normal_page(vma, address, pteval);
 629                if (unlikely(!page)) {
 630                        result = SCAN_PAGE_NULL;
 631                        goto out;
 632                }
 633
 634                VM_BUG_ON_PAGE(!PageAnon(page), page);
 635
 636                if (page_mapcount(page) > 1 &&
 637                                ++shared > khugepaged_max_ptes_shared) {
 638                        result = SCAN_EXCEED_SHARED_PTE;
 639                        goto out;
 640                }
 641
 642                if (PageCompound(page)) {
 643                        struct page *p;
 644                        page = compound_head(page);
 645
 646                        /*
 647                         * Check if we have dealt with the compound page
 648                         * already
 649                         */
 650                        list_for_each_entry(p, compound_pagelist, lru) {
 651                                if (page == p)
 652                                        goto next;
 653                        }
 654                }
 655
 656                /*
 657                 * We can do it before isolate_lru_page because the
 658                 * page can't be freed from under us. NOTE: PG_lock
 659                 * is needed to serialize against split_huge_page
 660                 * when invoked from the VM.
 661                 */
 662                if (!trylock_page(page)) {
 663                        result = SCAN_PAGE_LOCK;
 664                        goto out;
 665                }
 666
 667                /*
 668                 * Check if the page has any GUP (or other external) pins.
 669                 *
 670                 * The page table that maps the page has been already unlinked
 671                 * from the page table tree and this process cannot get
 672                 * an additional pin on the page.
 673                 *
 674                 * New pins can come later if the page is shared across fork,
 675                 * but not from this process. The other process cannot write to
 676                 * the page, only trigger CoW.
 677                 */
 678                if (!is_refcount_suitable(page)) {
 679                        unlock_page(page);
 680                        result = SCAN_PAGE_COUNT;
 681                        goto out;
 682                }
 683                if (!pte_write(pteval) && PageSwapCache(page) &&
 684                                !reuse_swap_page(page, NULL)) {
 685                        /*
 686                         * Page is in the swap cache and cannot be re-used.
 687                         * It cannot be collapsed into a THP.
 688                         */
 689                        unlock_page(page);
 690                        result = SCAN_SWAP_CACHE_PAGE;
 691                        goto out;
 692                }
 693
 694                /*
 695                 * Isolate the page to avoid collapsing a hugepage
 696                 * currently in use by the VM.
 697                 */
 698                if (isolate_lru_page(page)) {
 699                        unlock_page(page);
 700                        result = SCAN_DEL_PAGE_LRU;
 701                        goto out;
 702                }
 703                mod_node_page_state(page_pgdat(page),
 704                                NR_ISOLATED_ANON + page_is_file_lru(page),
 705                                compound_nr(page));
 706                VM_BUG_ON_PAGE(!PageLocked(page), page);
 707                VM_BUG_ON_PAGE(PageLRU(page), page);
 708
 709                if (PageCompound(page))
 710                        list_add_tail(&page->lru, compound_pagelist);
 711next:
 712                /* There should be enough young ptes to collapse the page */
 713                if (pte_young(pteval) ||
 714                    page_is_young(page) || PageReferenced(page) ||
 715                    mmu_notifier_test_young(vma->vm_mm, address))
 716                        referenced++;
 717
 718                if (pte_write(pteval))
 719                        writable = true;
 720        }
 721
 722        if (unlikely(!writable)) {
 723                result = SCAN_PAGE_RO;
 724        } else if (unlikely(!referenced)) {
 725                result = SCAN_LACK_REFERENCED_PAGE;
 726        } else {
 727                result = SCAN_SUCCEED;
 728                trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 729                                                    referenced, writable, result);
 730                return 1;
 731        }
 732out:
 733        release_pte_pages(pte, _pte, compound_pagelist);
 734        trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 735                                            referenced, writable, result);
 736        return 0;
 737}
 738
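/*
 * Editor's note: __collapse_huge_page_copy() copies (or clears, for
 * none/zero ptes) each of the HPAGE_PMD_NR small pages into the new
 * huge page, clears the old ptes and drops the rmap and the references
 * on the source pages.  The compound sources collected on
 * compound_pagelist are released last.
 */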
 739static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 740                                      struct vm_area_struct *vma,
 741                                      unsigned long address,
 742                                      spinlock_t *ptl,
 743                                      struct list_head *compound_pagelist)
 744{
 745        struct page *src_page, *tmp;
 746        pte_t *_pte;
 747        for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 748                                _pte++, page++, address += PAGE_SIZE) {
 749                pte_t pteval = *_pte;
 750
 751                if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 752                        clear_user_highpage(page, address);
 753                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
 754                        if (is_zero_pfn(pte_pfn(pteval))) {
 755                                /*
 756                                 * ptl mostly unnecessary.
 757                                 */
 758                                spin_lock(ptl);
 759                                /*
 760                                 * paravirt calls inside pte_clear here are
 761                                 * superfluous.
 762                                 */
 763                                pte_clear(vma->vm_mm, address, _pte);
 764                                spin_unlock(ptl);
 765                        }
 766                } else {
 767                        src_page = pte_page(pteval);
 768                        copy_user_highpage(page, src_page, address, vma);
 769                        if (!PageCompound(src_page))
 770                                release_pte_page(src_page);
 771                        /*
 772                         * ptl mostly unnecessary, but preempt has to
 773                         * be disabled to update the per-cpu stats
 774                         * inside page_remove_rmap().
 775                         */
 776                        spin_lock(ptl);
 777                        /*
 778                         * paravirt calls inside pte_clear here are
 779                         * superfluous.
 780                         */
 781                        pte_clear(vma->vm_mm, address, _pte);
 782                        page_remove_rmap(src_page, false);
 783                        spin_unlock(ptl);
 784                        free_page_and_swap_cache(src_page);
 785                }
 786        }
 787
 788        list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
 789                list_del(&src_page->lru);
 790                release_pte_page(src_page);
 791        }
 792}
 793
 794static void khugepaged_alloc_sleep(void)
 795{
 796        DEFINE_WAIT(wait);
 797
 798        add_wait_queue(&khugepaged_wait, &wait);
 799        freezable_schedule_timeout_interruptible(
 800                msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 801        remove_wait_queue(&khugepaged_wait, &wait);
 802}
 803
 804static int khugepaged_node_load[MAX_NUMNODES];
 805
 806static bool khugepaged_scan_abort(int nid)
 807{
 808        int i;
 809
 810        /*
 811         * If node_reclaim_mode is disabled, then no extra effort is made to
 812         * allocate memory locally.
 813         */
 814        if (!node_reclaim_enabled())
 815                return false;
 816
 817        /* If there is a count for this node already, it must be acceptable */
 818        if (khugepaged_node_load[nid])
 819                return false;
 820
 821        for (i = 0; i < MAX_NUMNODES; i++) {
 822                if (!khugepaged_node_load[i])
 823                        continue;
 824                if (node_distance(nid, i) > node_reclaim_distance)
 825                        return true;
 826        }
 827        return false;
 828}
 829
 830/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
 831static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 832{
 833        return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
 834}
 835
 836#ifdef CONFIG_NUMA
 837static int khugepaged_find_target_node(void)
 838{
 839        static int last_khugepaged_target_node = NUMA_NO_NODE;
 840        int nid, target_node = 0, max_value = 0;
 841
 842        /* find the first node with the max number of hits */
 843        for (nid = 0; nid < MAX_NUMNODES; nid++)
 844                if (khugepaged_node_load[nid] > max_value) {
 845                        max_value = khugepaged_node_load[nid];
 846                        target_node = nid;
 847                }
 848
 849        /* do some balancing if several nodes have the same hit count */
 850        if (target_node <= last_khugepaged_target_node)
 851                for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
 852                                nid++)
 853                        if (max_value == khugepaged_node_load[nid]) {
 854                                target_node = nid;
 855                                break;
 856                        }
 857
 858        last_khugepaged_target_node = target_node;
 859        return target_node;
 860}
 861
 862static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 863{
 864        if (IS_ERR(*hpage)) {
 865                if (!*wait)
 866                        return false;
 867
 868                *wait = false;
 869                *hpage = NULL;
 870                khugepaged_alloc_sleep();
 871        } else if (*hpage) {
 872                put_page(*hpage);
 873                *hpage = NULL;
 874        }
 875
 876        return true;
 877}
 878
 879static struct page *
 880khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 881{
 882        VM_BUG_ON_PAGE(*hpage, *hpage);
 883
 884        *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 885        if (unlikely(!*hpage)) {
 886                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 887                *hpage = ERR_PTR(-ENOMEM);
 888                return NULL;
 889        }
 890
 891        prep_transhuge_page(*hpage);
 892        count_vm_event(THP_COLLAPSE_ALLOC);
 893        return *hpage;
 894}
 895#else
 896static int khugepaged_find_target_node(void)
 897{
 898        return 0;
 899}
 900
 901static inline struct page *alloc_khugepaged_hugepage(void)
 902{
 903        struct page *page;
 904
 905        page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
 906                           HPAGE_PMD_ORDER);
 907        if (page)
 908                prep_transhuge_page(page);
 909        return page;
 910}
 911
 912static struct page *khugepaged_alloc_hugepage(bool *wait)
 913{
 914        struct page *hpage;
 915
 916        do {
 917                hpage = alloc_khugepaged_hugepage();
 918                if (!hpage) {
 919                        count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 920                        if (!*wait)
 921                                return NULL;
 922
 923                        *wait = false;
 924                        khugepaged_alloc_sleep();
 925                } else
 926                        count_vm_event(THP_COLLAPSE_ALLOC);
 927        } while (unlikely(!hpage) && likely(khugepaged_enabled()));
 928
 929        return hpage;
 930}
 931
 932static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 933{
 934        /*
 935         * If the hpage allocated earlier was briefly exposed in page cache
 936         * before collapse_file() failed, it is possible that racing lookups
 937         * have not yet completed, and would then be unpleasantly surprised by
 938         * finding the hpage reused for the same mapping at a different offset.
 939         * Just release the previous allocation if there is any danger of that.
 940         */
 941        if (*hpage && page_count(*hpage) > 1) {
 942                put_page(*hpage);
 943                *hpage = NULL;
 944        }
 945
 946        if (!*hpage)
 947                *hpage = khugepaged_alloc_hugepage(wait);
 948
 949        if (unlikely(!*hpage))
 950                return false;
 951
 952        return true;
 953}
 954
 955static struct page *
 956khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 957{
 958        VM_BUG_ON(!*hpage);
 959
 960        return  *hpage;
 961}
 962#endif
 963
 964/*
 965 * If the mmap_lock was temporarily dropped, revalidate the vma
 966 * after it has been re-taken.
 967 * Return 0 on success, otherwise return a non-zero
 968 * value (scan code).
 969 */
 970
 971static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 972                struct vm_area_struct **vmap)
 973{
 974        struct vm_area_struct *vma;
 975        unsigned long hstart, hend;
 976
 977        if (unlikely(khugepaged_test_exit(mm)))
 978                return SCAN_ANY_PROCESS;
 979
 980        *vmap = vma = find_vma(mm, address);
 981        if (!vma)
 982                return SCAN_VMA_NULL;
 983
 984        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 985        hend = vma->vm_end & HPAGE_PMD_MASK;
 986        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 987                return SCAN_ADDRESS_RANGE;
 988        if (!hugepage_vma_check(vma, vma->vm_flags))
 989                return SCAN_VMA_CHECK;
 990        /* Anon VMA expected */
 991        if (!vma->anon_vma || vma->vm_ops)
 992                return SCAN_VMA_CHECK;
 993        return 0;
 994}
 995
 996/*
 997 * Bring missing pages in from swap, to complete THP collapse.
 998 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 999 *
1000 * Called and returns without pte mapped or spinlocks held,
1001 * but with mmap_lock held to protect against vma changes.
1002 */
1003
1004static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1005                                        struct vm_area_struct *vma,
1006                                        unsigned long haddr, pmd_t *pmd,
1007                                        int referenced)
1008{
1009        int swapped_in = 0;
1010        vm_fault_t ret = 0;
1011        unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
1012
1013        for (address = haddr; address < end; address += PAGE_SIZE) {
1014                struct vm_fault vmf = {
1015                        .vma = vma,
1016                        .address = address,
1017                        .pgoff = linear_page_index(vma, haddr),
1018                        .flags = FAULT_FLAG_ALLOW_RETRY,
1019                        .pmd = pmd,
1020                };
1021
1022                vmf.pte = pte_offset_map(pmd, address);
1023                vmf.orig_pte = *vmf.pte;
1024                if (!is_swap_pte(vmf.orig_pte)) {
1025                        pte_unmap(vmf.pte);
1026                        continue;
1027                }
1028                swapped_in++;
1029                ret = do_swap_page(&vmf);
1030
1031                /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1032                if (ret & VM_FAULT_RETRY) {
1033                        mmap_read_lock(mm);
1034                        if (hugepage_vma_revalidate(mm, haddr, &vma)) {
1035                                /* vma is no longer available, don't continue to swapin */
1036                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1037                                return false;
1038                        }
1039                        /* check if the pmd is still valid */
1040                        if (mm_find_pmd(mm, haddr) != pmd) {
1041                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1042                                return false;
1043                        }
1044                }
1045                if (ret & VM_FAULT_ERROR) {
1046                        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1047                        return false;
1048                }
1049        }
1050
1051        /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1052        if (swapped_in)
1053                lru_add_drain();
1054
1055        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1056        return true;
1057}
1058
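/*
 * Editor's summary of the collapse path below (descriptive only):
 * allocate the huge page and charge it to the memcg with the mmap_lock
 * dropped, re-take it to revalidate the vma and optionally swap in
 * missing ptes, then upgrade to the write lock, clear the pmd with
 * pmdp_collapse_flush(), isolate and copy the small pages, and finally
 * install the new huge pmd with set_pmd_at().
 */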
1059static void collapse_huge_page(struct mm_struct *mm,
1060                                   unsigned long address,
1061                                   struct page **hpage,
1062                                   int node, int referenced, int unmapped)
1063{
1064        LIST_HEAD(compound_pagelist);
1065        pmd_t *pmd, _pmd;
1066        pte_t *pte;
1067        pgtable_t pgtable;
1068        struct page *new_page;
1069        spinlock_t *pmd_ptl, *pte_ptl;
1070        int isolated = 0, result = 0;
1071        struct vm_area_struct *vma;
1072        struct mmu_notifier_range range;
1073        gfp_t gfp;
1074
1075        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1076
1077        /* Only allocate from the target node */
1078        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1079
1080        /*
1081         * Before allocating the hugepage, release the mmap_lock read lock.
1082         * The allocation can take potentially a long time if it involves
1083         * sync compaction, and we do not need to hold the mmap_lock during
1084         * that. We will recheck the vma after taking it again in write mode.
1085         */
1086        mmap_read_unlock(mm);
1087        new_page = khugepaged_alloc_page(hpage, gfp, node);
1088        if (!new_page) {
1089                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1090                goto out_nolock;
1091        }
1092
1093        if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1094                result = SCAN_CGROUP_CHARGE_FAIL;
1095                goto out_nolock;
1096        }
1097        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1098
1099        mmap_read_lock(mm);
1100        result = hugepage_vma_revalidate(mm, address, &vma);
1101        if (result) {
1102                mmap_read_unlock(mm);
1103                goto out_nolock;
1104        }
1105
1106        pmd = mm_find_pmd(mm, address);
1107        if (!pmd) {
1108                result = SCAN_PMD_NULL;
1109                mmap_read_unlock(mm);
1110                goto out_nolock;
1111        }
1112
1113        /*
1114         * __collapse_huge_page_swapin always returns with mmap_lock locked.
1115         * If it fails, we release mmap_lock and jump out_nolock.
1116         * Continuing to collapse causes inconsistency.
1117         */
1118        if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1119                                                     pmd, referenced)) {
1120                mmap_read_unlock(mm);
1121                goto out_nolock;
1122        }
1123
1124        mmap_read_unlock(mm);
1125        /*
1126         * Prevent all access to the pagetables, with the exception of
1127         * gup_fast (handled later by the ptep_clear_flush) and the VM
1128         * (handled by the anon_vma lock + PG_lock).
1129         */
1130        mmap_write_lock(mm);
1131        result = hugepage_vma_revalidate(mm, address, &vma);
1132        if (result)
1133                goto out_up_write;
1134        /* check if the pmd is still valid */
1135        if (mm_find_pmd(mm, address) != pmd)
1136                goto out_up_write;
1137
1138        anon_vma_lock_write(vma->anon_vma);
1139
1140        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1141                                address, address + HPAGE_PMD_SIZE);
1142        mmu_notifier_invalidate_range_start(&range);
1143
1144        pte = pte_offset_map(pmd, address);
1145        pte_ptl = pte_lockptr(mm, pmd);
1146
1147        pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1148        /*
1149         * After this gup_fast can't run anymore. This also removes
1150         * any huge TLB entry from the CPU so we won't allow
1151         * huge and small TLB entries for the same virtual address
1152         * to avoid the risk of CPU bugs in that area.
1153         */
1154        _pmd = pmdp_collapse_flush(vma, address, pmd);
1155        spin_unlock(pmd_ptl);
1156        mmu_notifier_invalidate_range_end(&range);
1157
1158        spin_lock(pte_ptl);
1159        isolated = __collapse_huge_page_isolate(vma, address, pte,
1160                        &compound_pagelist);
1161        spin_unlock(pte_ptl);
1162
1163        if (unlikely(!isolated)) {
1164                pte_unmap(pte);
1165                spin_lock(pmd_ptl);
1166                BUG_ON(!pmd_none(*pmd));
1167                /*
1168                 * We can only use set_pmd_at when establishing
1169                 * hugepmds and never for establishing regular pmds that
1170                 * point to regular pagetables. Use pmd_populate for that.
1171                 */
1172                pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1173                spin_unlock(pmd_ptl);
1174                anon_vma_unlock_write(vma->anon_vma);
1175                result = SCAN_FAIL;
1176                goto out_up_write;
1177        }
1178
1179        /*
1180         * All pages are isolated and locked so anon_vma rmap
1181         * can't run anymore.
1182         */
1183        anon_vma_unlock_write(vma->anon_vma);
1184
1185        __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1186                        &compound_pagelist);
1187        pte_unmap(pte);
1188        /*
1189         * spin_lock() below is not the equivalent of smp_wmb(), but
1190         * the smp_wmb() inside __SetPageUptodate() can be reused to
1191         * prevent the copy_huge_page writes from becoming visible after
1192         * the set_pmd_at() write.
1193         */
1194        __SetPageUptodate(new_page);
1195        pgtable = pmd_pgtable(_pmd);
1196
1197        _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1198        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1199
1200        spin_lock(pmd_ptl);
1201        BUG_ON(!pmd_none(*pmd));
1202        page_add_new_anon_rmap(new_page, vma, address, true);
1203        lru_cache_add_inactive_or_unevictable(new_page, vma);
1204        pgtable_trans_huge_deposit(mm, pmd, pgtable);
1205        set_pmd_at(mm, address, pmd, _pmd);
1206        update_mmu_cache_pmd(vma, address, pmd);
1207        spin_unlock(pmd_ptl);
1208
1209        *hpage = NULL;
1210
1211        khugepaged_pages_collapsed++;
1212        result = SCAN_SUCCEED;
1213out_up_write:
1214        mmap_write_unlock(mm);
1215out_nolock:
1216        if (!IS_ERR_OR_NULL(*hpage))
1217                mem_cgroup_uncharge(*hpage);
1218        trace_mm_collapse_huge_page(mm, isolated, result);
1219        return;
1220}
1221
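/*
 * Editor's note: khugepaged_scan_pmd() only scans; it walks the ptes
 * under the pte lock, counting none/zero, swap, shared and referenced
 * entries against the tunables above and recording the NUMA node of
 * every page in khugepaged_node_load[].  Only if the scan says the
 * range is worth collapsing does it call collapse_huge_page(), which
 * returns with the mmap_lock released.
 */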
1222static int khugepaged_scan_pmd(struct mm_struct *mm,
1223                               struct vm_area_struct *vma,
1224                               unsigned long address,
1225                               struct page **hpage)
1226{
1227        pmd_t *pmd;
1228        pte_t *pte, *_pte;
1229        int ret = 0, result = 0, referenced = 0;
1230        int none_or_zero = 0, shared = 0;
1231        struct page *page = NULL;
1232        unsigned long _address;
1233        spinlock_t *ptl;
1234        int node = NUMA_NO_NODE, unmapped = 0;
1235        bool writable = false;
1236
1237        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1238
1239        pmd = mm_find_pmd(mm, address);
1240        if (!pmd) {
1241                result = SCAN_PMD_NULL;
1242                goto out;
1243        }
1244
1245        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1246        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1247        for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1248             _pte++, _address += PAGE_SIZE) {
1249                pte_t pteval = *_pte;
1250                if (is_swap_pte(pteval)) {
1251                        if (++unmapped <= khugepaged_max_ptes_swap) {
1252                                /*
1253                                 * Always be strict with uffd-wp
1254                                 * enabled swap entries.  Please see
1255                                 * comment below for pte_uffd_wp().
1256                                 */
1257                                if (pte_swp_uffd_wp(pteval)) {
1258                                        result = SCAN_PTE_UFFD_WP;
1259                                        goto out_unmap;
1260                                }
1261                                continue;
1262                        } else {
1263                                result = SCAN_EXCEED_SWAP_PTE;
1264                                goto out_unmap;
1265                        }
1266                }
1267                if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1268                        if (!userfaultfd_armed(vma) &&
1269                            ++none_or_zero <= khugepaged_max_ptes_none) {
1270                                continue;
1271                        } else {
1272                                result = SCAN_EXCEED_NONE_PTE;
1273                                goto out_unmap;
1274                        }
1275                }
1276                if (pte_uffd_wp(pteval)) {
1277                        /*
1278                         * Don't collapse the page if any of the small
1279                         * PTEs are armed with uffd write protection.
1280                         * Here we can also mark the new huge pmd as
1281                         * write protected if any of the small ones is
1282                         * marked, but that could bring unknown
1283                         * userfault messages that fall outside of
1284                         * the registered range.  So, just keep it simple.
1285                         */
1286                        result = SCAN_PTE_UFFD_WP;
1287                        goto out_unmap;
1288                }
1289                if (pte_write(pteval))
1290                        writable = true;
1291
1292                page = vm_normal_page(vma, _address, pteval);
1293                if (unlikely(!page)) {
1294                        result = SCAN_PAGE_NULL;
1295                        goto out_unmap;
1296                }
1297
1298                if (page_mapcount(page) > 1 &&
1299                                ++shared > khugepaged_max_ptes_shared) {
1300                        result = SCAN_EXCEED_SHARED_PTE;
1301                        goto out_unmap;
1302                }
1303
1304                page = compound_head(page);
1305
1306                /*
1307                 * Record which node the original page is from and save this
1308                 * information to khugepaged_node_load[].
1309                 * Khugepaged will allocate the hugepage from the node that
1310                 * has the max hit count.
1311                 */
1312                node = page_to_nid(page);
1313                if (khugepaged_scan_abort(node)) {
1314                        result = SCAN_SCAN_ABORT;
1315                        goto out_unmap;
1316                }
1317                khugepaged_node_load[node]++;
1318                if (!PageLRU(page)) {
1319                        result = SCAN_PAGE_LRU;
1320                        goto out_unmap;
1321                }
1322                if (PageLocked(page)) {
1323                        result = SCAN_PAGE_LOCK;
1324                        goto out_unmap;
1325                }
1326                if (!PageAnon(page)) {
1327                        result = SCAN_PAGE_ANON;
1328                        goto out_unmap;
1329                }
1330
1331                /*
1332                 * Check if the page has any GUP (or other external) pins.
1333                 *
1334                 * Here the check is racy: it may see total_mapcount > refcount
1335                 * in some cases.
1336                 * For example, take one process with one forked child process.
1337                 * The parent has the PMD split due to MADV_DONTNEED, then
1338                 * the child is trying to unmap the whole PMD, but khugepaged
1339                 * may be scanning the parent between the child clearing the
1340                 * PageDoubleMap flag and decrementing the mapcount.  So
1341                 * khugepaged may see total_mapcount > refcount.
1342                 *
1343                 * But such a case is ephemeral and we can always retry the
1344                 * collapse later.  However it may report a false positive if the
1345                 * page has excessive GUP pins (i.e. 512).  Anyway the same check
1346                 * will be done again later, so the risk seems low.
1347                 */
1348                if (!is_refcount_suitable(page)) {
1349                        result = SCAN_PAGE_COUNT;
1350                        goto out_unmap;
1351                }
1352                if (pte_young(pteval) ||
1353                    page_is_young(page) || PageReferenced(page) ||
1354                    mmu_notifier_test_young(vma->vm_mm, address))
1355                        referenced++;
1356        }
1357        if (!writable) {
1358                result = SCAN_PAGE_RO;
1359        } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1360                result = SCAN_LACK_REFERENCED_PAGE;
1361        } else {
1362                result = SCAN_SUCCEED;
1363                ret = 1;
1364        }
1365out_unmap:
1366        pte_unmap_unlock(pte, ptl);
1367        if (ret) {
1368                node = khugepaged_find_target_node();
1369                /* collapse_huge_page will return with the mmap_lock released */
1370                collapse_huge_page(mm, address, hpage, node,
1371                                referenced, unmapped);
1372        }
1373out:
1374        trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1375                                     none_or_zero, result, unmapped);
1376        return ret;
1377}
1378
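/*
 * Editor's note: collect_mm_slot() is the garbage-collection step for
 * the scan list: if the mm has exited, the slot is unhashed, unlinked
 * and freed, and the mm_count reference taken in __khugepaged_enter()
 * is dropped with mmdrop().
 */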
1379static void collect_mm_slot(struct mm_slot *mm_slot)
1380{
1381        struct mm_struct *mm = mm_slot->mm;
1382
1383        lockdep_assert_held(&khugepaged_mm_lock);
1384
1385        if (khugepaged_test_exit(mm)) {
1386                /* free mm_slot */
1387                hash_del(&mm_slot->hash);
1388                list_del(&mm_slot->mm_node);
1389
1390                /*
1391                 * Not strictly needed because the mm exited already.
1392                 *
1393                 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1394                 */
1395
1396                /* khugepaged_mm_lock actually not necessary for the below */
1397                free_mm_slot(mm_slot);
1398                mmdrop(mm);
1399        }
1400}
1401
1402#ifdef CONFIG_SHMEM
1403/*
1404 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1405 * khugepaged should try to collapse the page table.
1406 */
1407static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1408                                         unsigned long addr)
1409{
1410        struct mm_slot *mm_slot;
1411
1412        VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1413
1414        spin_lock(&khugepaged_mm_lock);
1415        mm_slot = get_mm_slot(mm);
1416        if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1417                mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1418        spin_unlock(&khugepaged_mm_lock);
1419        return 0;
1420}
1421
1422/**
1423 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1424 * address addr.
1425 *
1426 * @mm: process address space where collapse happens
1427 * @addr: THP collapse address
1428 *
1429 * This function checks whether all the PTEs in the PMD are pointing to the
1430 * right THP. If so, retract the page table so the THP can later be
1431 * refaulted in as pmd-mapped.
1432 */
1433void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1434{
1435        unsigned long haddr = addr & HPAGE_PMD_MASK;
1436        struct vm_area_struct *vma = find_vma(mm, haddr);
1437        struct page *hpage;
1438        pte_t *start_pte, *pte;
1439        pmd_t *pmd, _pmd;
1440        spinlock_t *ptl;
1441        int count = 0;
1442        int i;
1443
1444        if (!vma || !vma->vm_file ||
1445            !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1446                return;
1447
1448        /*
1449         * This vm_flags may not have VM_HUGEPAGE if the page was not
1450         * collapsed by this mm. But we can still collapse if the page is
1451         * a valid THP. Add the extra VM_HUGEPAGE so hugepage_vma_check()
1452         * will not fail the vma for missing VM_HUGEPAGE.
1453         */
1454        if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1455                return;
1456
1457        hpage = find_lock_page(vma->vm_file->f_mapping,
1458                               linear_page_index(vma, haddr));
1459        if (!hpage)
1460                return;
1461
1462        if (!PageHead(hpage))
1463                goto drop_hpage;
1464
1465        pmd = mm_find_pmd(mm, haddr);
1466        if (!pmd)
1467                goto drop_hpage;
1468
1469        start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1470
1471        /* step 1: check all mapped PTEs are to the right huge page */
1472        for (i = 0, addr = haddr, pte = start_pte;
1473             i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1474                struct page *page;
1475
1476                /* empty pte, skip */
1477                if (pte_none(*pte))
1478                        continue;
1479
1480                /* page swapped out, abort */
1481                if (!pte_present(*pte))
1482                        goto abort;
1483
1484                page = vm_normal_page(vma, addr, *pte);
1485
1486                /*
1487                 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1488                 * page table, but the new page will not be a subpage of hpage.
1489                 */
1490                if (hpage + i != page)
1491                        goto abort;
1492                count++;
1493        }
1494
1495        /* step 2: adjust rmap */
1496        for (i = 0, addr = haddr, pte = start_pte;
1497             i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1498                struct page *page;
1499
1500                if (pte_none(*pte))
1501                        continue;
1502                page = vm_normal_page(vma, addr, *pte);
1503                page_remove_rmap(page, false);
1504        }
1505
1506        pte_unmap_unlock(start_pte, ptl);
1507
1508        /* step 3: set proper refcount and mm_counters. */
1509        if (count) {
1510                page_ref_sub(hpage, count);
1511                add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1512        }
1513
1514        /* step 4: collapse pmd */
1515        ptl = pmd_lock(vma->vm_mm, pmd);
1516        _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1517        spin_unlock(ptl);
1518        mm_dec_nr_ptes(mm);
1519        pte_free(mm, pmd_pgtable(_pmd));
1520
1521drop_hpage:
1522        unlock_page(hpage);
1523        put_page(hpage);
1524        return;
1525
1526abort:
1527        pte_unmap_unlock(start_pte, ptl);
1528        goto drop_hpage;
1529}
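/*
 * A worked example of the step 1 check above, assuming 4 KiB base pages
 * and a 2 MiB PMD range: with haddr == 0x200000, the pte covering
 * address 0x203000 (i == 3) must map exactly the subpage hpage + 3.
 * A present pte that points anywhere else (for instance a private copy
 * installed by a debugger or uprobes) aborts the collapse, while a
 * pte_none() slot is skipped and not counted.
 */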
1530
1531static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1532{
1533        struct mm_struct *mm = mm_slot->mm;
1534        int i;
1535
1536        if (likely(mm_slot->nr_pte_mapped_thp == 0))
1537                return;
1538
1539        if (!mmap_write_trylock(mm))
1540                return;
1541
1542        if (unlikely(khugepaged_test_exit(mm)))
1543                goto out;
1544
1545        for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1546                collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1547
1548out:
1549        mm_slot->nr_pte_mapped_thp = 0;
1550        mmap_write_unlock(mm);
1551}
1552
1553static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1554{
1555        struct vm_area_struct *vma;
1556        struct mm_struct *mm;
1557        unsigned long addr;
1558        pmd_t *pmd, _pmd;
1559
1560        i_mmap_lock_write(mapping);
1561        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1562                /*
1563                 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1564                 * got written to. These VMAs are likely not worth taking
1565                 * mmap_write_lock(mm) for, as the PMD mapping is likely to be
1566                 * split again later.
1567                 *
1568                 * Note that the vma->anon_vma check is racy: it can be set up
1569                 * by the fault path after the check but before we take the
1570                 * mmap_lock. But the page lock prevents establishing any new
1571                 * ptes of the page, so we are safe.
1572                 *
1573                 * An alternative would be to drop the check and instead check
1574                 * that the page table is clear before calling
1575                 * pmdp_collapse_flush() under ptl. That has a higher chance of
1576                 * recovering a THP for the VMA, but also a higher cost.
1577                 */
1578                if (vma->anon_vma)
1579                        continue;
1580                addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1581                if (addr & ~HPAGE_PMD_MASK)
1582                        continue;
1583                if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1584                        continue;
1585                mm = vma->vm_mm;
1586                pmd = mm_find_pmd(mm, addr);
1587                if (!pmd)
1588                        continue;
1589                /*
1590                 * We need exclusive mmap_lock to retract page table.
1591                 *
1592                 * We use trylock due to lock inversion: we need to acquire
1593                 * mmap_lock while holding page lock. Fault path does it in
1594                 * reverse order. Trylock is a way to avoid deadlock.
1595                 */
1596                if (mmap_write_trylock(mm)) {
1597                        if (!khugepaged_test_exit(mm)) {
1598                                spinlock_t *ptl = pmd_lock(mm, pmd);
1599                                /* assume page table is clear */
1600                                _pmd = pmdp_collapse_flush(vma, addr, pmd);
1601                                spin_unlock(ptl);
1602                                mm_dec_nr_ptes(mm);
1603                                pte_free(mm, pmd_pgtable(_pmd));
1604                        }
1605                        mmap_write_unlock(mm);
1606                } else {
1607                        /* Try again later */
1608                        khugepaged_add_pte_mapped_thp(mm, addr);
1609                }
1610        }
1611        i_mmap_unlock_write(mapping);
1612}
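/*
 * A worked example of the address calculation above, assuming 4 KiB base
 * pages and 2 MiB THPs: for a VMA with vm_start == 0x7f0000200000 and
 * vm_pgoff == 0, the huge page at pgoff 512 maps to
 *
 *	addr = vm_start + ((512 - 0) << PAGE_SHIFT) = 0x7f0000400000
 *
 * which passes the (addr & ~HPAGE_PMD_MASK) check because vm_start and
 * the file offset are both 2 MiB aligned; a VMA mapping the same range
 * at an unaligned vm_start is skipped, as is one whose vm_end cuts the
 * PMD-sized range short.
 */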
1613
1614/**
1615 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1616 *
1617 * @mm: process address space where the collapse happens
1618 * @file: file that the collapse operates on
1619 * @start: collapse start address
1620 * @hpage: newly allocated huge page for the collapse
1621 * @node: node the new huge page is allocated from
1622 *
1623 * Basic scheme is simple, details are more complex:
1624 *  - allocate and lock a new huge page;
1625 *  - scan page cache replacing old pages with the new one
1626 *    + swap/gup in pages if necessary;
1627 *    + fill in gaps;
1628 *    + keep old pages around in case rollback is required;
1629 *  - if replacing succeeds:
1630 *    + copy data over;
1631 *    + free old pages;
1632 *    + unlock huge page;
1633 *  - if replacing failed:
1634 *    + put all pages back and unfreeze them;
1635 *    + restore gaps in the page cache;
1636 *    + unlock and free huge page;
1637 */
1638static void collapse_file(struct mm_struct *mm,
1639                struct file *file, pgoff_t start,
1640                struct page **hpage, int node)
1641{
1642        struct address_space *mapping = file->f_mapping;
1643        gfp_t gfp;
1644        struct page *new_page;
1645        pgoff_t index, end = start + HPAGE_PMD_NR;
1646        LIST_HEAD(pagelist);
1647        XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1648        int nr_none = 0, result = SCAN_SUCCEED;
1649        bool is_shmem = shmem_file(file);
1650        int nr;
1651
1652        VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1653        VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1654
1655        /* Only allocate from the target node */
1656        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1657
1658        new_page = khugepaged_alloc_page(hpage, gfp, node);
1659        if (!new_page) {
1660                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1661                goto out;
1662        }
1663
1664        if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1665                result = SCAN_CGROUP_CHARGE_FAIL;
1666                goto out;
1667        }
1668        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1669
1670        /* This will be less messy when we use multi-index entries */
1671        do {
1672                xas_lock_irq(&xas);
1673                xas_create_range(&xas);
1674                if (!xas_error(&xas))
1675                        break;
1676                xas_unlock_irq(&xas);
1677                if (!xas_nomem(&xas, GFP_KERNEL)) {
1678                        result = SCAN_FAIL;
1679                        goto out;
1680                }
1681        } while (1);
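        /*
         * The loop above follows the usual xas_nomem() retry idiom: the
         * slots for the range are created under the xa_lock, and if that
         * fails for lack of memory the lock is dropped so xas_nomem() can
         * allocate with GFP_KERNEL before retrying; any other error, or a
         * failed allocation, aborts the collapse with SCAN_FAIL.
         */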
1682
1683        __SetPageLocked(new_page);
1684        if (is_shmem)
1685                __SetPageSwapBacked(new_page);
1686        new_page->index = start;
1687        new_page->mapping = mapping;
1688
1689        /*
1690         * At this point the new_page is locked and not up-to-date.
1691         * It's safe to insert it into the page cache, because nobody would
1692         * be able to map it or use it in another way until we unlock it.
1693         */
1694
1695        xas_set(&xas, start);
1696        for (index = start; index < end; index++) {
1697                struct page *page = xas_next(&xas);
1698
1699                VM_BUG_ON(index != xas.xa_index);
1700                if (is_shmem) {
1701                        if (!page) {
1702                                /*
1703                                 * Stop if extent has been truncated or
1704                                 * hole-punched, and is now completely
1705                                 * empty.
1706                                 */
1707                                if (index == start) {
1708                                        if (!xas_next_entry(&xas, end - 1)) {
1709                                                result = SCAN_TRUNCATED;
1710                                                goto xa_locked;
1711                                        }
1712                                        xas_set(&xas, index);
1713                                }
1714                                if (!shmem_charge(mapping->host, 1)) {
1715                                        result = SCAN_FAIL;
1716                                        goto xa_locked;
1717                                }
1718                                xas_store(&xas, new_page);
1719                                nr_none++;
1720                                continue;
1721                        }
1722
1723                        if (xa_is_value(page) || !PageUptodate(page)) {
1724                                xas_unlock_irq(&xas);
1725                                /* swap in or instantiate fallocated page */
1726                                if (shmem_getpage(mapping->host, index, &page,
1727                                                  SGP_NOALLOC)) {
1728                                        result = SCAN_FAIL;
1729                                        goto xa_unlocked;
1730                                }
1731                        } else if (trylock_page(page)) {
1732                                get_page(page);
1733                                xas_unlock_irq(&xas);
1734                        } else {
1735                                result = SCAN_PAGE_LOCK;
1736                                goto xa_locked;
1737                        }
1738                } else {        /* !is_shmem */
1739                        if (!page || xa_is_value(page)) {
1740                                xas_unlock_irq(&xas);
1741                                page_cache_sync_readahead(mapping, &file->f_ra,
1742                                                          file, index,
1743                                                          end - index);
1744                                /* drain pagevecs to help isolate_lru_page() */
1745                                lru_add_drain();
1746                                page = find_lock_page(mapping, index);
1747                                if (unlikely(page == NULL)) {
1748                                        result = SCAN_FAIL;
1749                                        goto xa_unlocked;
1750                                }
1751                        } else if (PageDirty(page)) {
1752                                /*
1753                                 * khugepaged only works on read-only fd,
1754                                 * so this page is dirty because it hasn't
1755                                 * been flushed since first write. There
1756                                 * won't be new dirty pages.
1757                                 *
1758                                 * Trigger async flush here and hope the
1759                                 * writeback is done when khugepaged
1760                                 * revisits this page.
1761                                 *
1762                                 * This is a one-off situation. We are not
1763                                 * forcing writeback in a loop.
1764                                 */
1765                                xas_unlock_irq(&xas);
1766                                filemap_flush(mapping);
1767                                result = SCAN_FAIL;
1768                                goto xa_unlocked;
1769                        } else if (PageWriteback(page)) {
1770                                xas_unlock_irq(&xas);
1771                                result = SCAN_FAIL;
1772                                goto xa_unlocked;
1773                        } else if (trylock_page(page)) {
1774                                get_page(page);
1775                                xas_unlock_irq(&xas);
1776                        } else {
1777                                result = SCAN_PAGE_LOCK;
1778                                goto xa_locked;
1779                        }
1780                }
1781
1782                /*
1783                 * The page must be locked, so we can drop the i_pages lock
1784                 * without racing with truncate.
1785                 */
1786                VM_BUG_ON_PAGE(!PageLocked(page), page);
1787
1788                /* make sure the page is up to date */
1789                if (unlikely(!PageUptodate(page))) {
1790                        result = SCAN_FAIL;
1791                        goto out_unlock;
1792                }
1793
1794                /*
1795                 * If file was truncated then extended, or hole-punched, before
1796                 * we locked the first page, then a THP might be there already.
1797                 */
1798                if (PageTransCompound(page)) {
1799                        result = SCAN_PAGE_COMPOUND;
1800                        goto out_unlock;
1801                }
1802
1803                if (page_mapping(page) != mapping) {
1804                        result = SCAN_TRUNCATED;
1805                        goto out_unlock;
1806                }
1807
1808                if (!is_shmem && (PageDirty(page) ||
1809                                  PageWriteback(page))) {
1810                        /*
1811                         * khugepaged only works on read-only fd, so this
1812                         * page is dirty because it hasn't been flushed
1813                         * since first write.
1814                         */
1815                        result = SCAN_FAIL;
1816                        goto out_unlock;
1817                }
1818
1819                if (isolate_lru_page(page)) {
1820                        result = SCAN_DEL_PAGE_LRU;
1821                        goto out_unlock;
1822                }
1823
1824                if (page_has_private(page) &&
1825                    !try_to_release_page(page, GFP_KERNEL)) {
1826                        result = SCAN_PAGE_HAS_PRIVATE;
1827                        putback_lru_page(page);
1828                        goto out_unlock;
1829                }
1830
1831                if (page_mapped(page))
1832                        unmap_mapping_pages(mapping, index, 1, false);
1833
1834                xas_lock_irq(&xas);
1835                xas_set(&xas, index);
1836
1837                VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1838                VM_BUG_ON_PAGE(page_mapped(page), page);
1839
1840                /*
1841                 * The page is expected to have page_count() == 3:
1842                 *  - we hold a pin on it;
1843                 *  - one reference from page cache;
1844                 *  - one from isolate_lru_page;
1845                 */
1846                if (!page_ref_freeze(page, 3)) {
1847                        result = SCAN_PAGE_COUNT;
1848                        xas_unlock_irq(&xas);
1849                        putback_lru_page(page);
1850                        goto out_unlock;
1851                }
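                /*
                 * page_ref_freeze(page, 3) succeeds only if the refcount is
                 * exactly the three references listed above, atomically
                 * replacing it with zero; any extra reference (for example a
                 * concurrent GUP pin) makes the freeze fail, so the page goes
                 * back on the LRU and the collapse is abandoned.
                 */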
1852
1853                /*
1854                 * Add the page to the list to be able to undo the collapse if
1855                 * something goes wrong.
1856                 */
1857                list_add_tail(&page->lru, &pagelist);
1858
1859                /* Finally, replace with the new page. */
1860                xas_store(&xas, new_page);
1861                continue;
1862out_unlock:
1863                unlock_page(page);
1864                put_page(page);
1865                goto xa_unlocked;
1866        }
1867        nr = thp_nr_pages(new_page);
1868
1869        if (is_shmem)
1870                __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
1871        else {
1872                __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
1873                filemap_nr_thps_inc(mapping);
1874                /*
1875                 * Paired with smp_mb() in do_dentry_open() to ensure
1876                 * i_writecount is up to date and the update to nr_thps is
1877                 * visible. This ensures the page cache will be truncated if
1878                 * the file is opened writable.
1879                 */
1880                smp_mb();
1881                if (inode_is_open_for_write(mapping->host)) {
1882                        result = SCAN_FAIL;
1883                        __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
1884                        filemap_nr_thps_dec(mapping);
1885                        goto xa_locked;
1886                }
1887        }
1888
1889        if (nr_none) {
1890                __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1891                if (is_shmem)
1892                        __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1893        }
1894
1895xa_locked:
1896        xas_unlock_irq(&xas);
1897xa_unlocked:
1898
1899        if (result == SCAN_SUCCEED) {
1900                struct page *page, *tmp;
1901
1902                /*
1903                 * Replacing old pages with the new one has succeeded, now we
1904                 * need to copy the content and free the old pages.
1905                 */
1906                index = start;
1907                list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1908                        while (index < page->index) {
1909                                clear_highpage(new_page + (index % HPAGE_PMD_NR));
1910                                index++;
1911                        }
1912                        copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1913                                        page);
1914                        list_del(&page->lru);
1915                        page->mapping = NULL;
1916                        page_ref_unfreeze(page, 1);
1917                        ClearPageActive(page);
1918                        ClearPageUnevictable(page);
1919                        unlock_page(page);
1920                        put_page(page);
1921                        index++;
1922                }
1923                while (index < end) {
1924                        clear_highpage(new_page + (index % HPAGE_PMD_NR));
1925                        index++;
1926                }
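                /*
                 * The two loops above initialize every subpage of new_page:
                 * index modulo HPAGE_PMD_NR selects the subpage, old pages
                 * are copied with copy_highpage() and the holes accounted in
                 * nr_none are zeroed with clear_highpage(), so the huge page
                 * is fully populated before it is marked uptodate.
                 */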
1927
1928                SetPageUptodate(new_page);
1929                page_ref_add(new_page, HPAGE_PMD_NR - 1);
1930                if (is_shmem)
1931                        set_page_dirty(new_page);
1932                lru_cache_add(new_page);
1933
1934                /*
1935                 * Remove pte page tables, so we can re-fault the page as huge.
1936                 */
1937                retract_page_tables(mapping, start);
1938                *hpage = NULL;
1939
1940                khugepaged_pages_collapsed++;
1941        } else {
1942                struct page *page;
1943
1944                /* Something went wrong: roll back page cache changes */
1945                xas_lock_irq(&xas);
1946                mapping->nrpages -= nr_none;
1947
1948                if (is_shmem)
1949                        shmem_uncharge(mapping->host, nr_none);
1950
1951                xas_set(&xas, start);
1952                xas_for_each(&xas, page, end - 1) {
1953                        page = list_first_entry_or_null(&pagelist,
1954                                        struct page, lru);
1955                        if (!page || xas.xa_index < page->index) {
1956                                if (!nr_none)
1957                                        break;
1958                                nr_none--;
1959                                /* Put holes back where they were */
1960                                xas_store(&xas, NULL);
1961                                continue;
1962                        }
1963
1964                        VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1965
1966                        /* Unfreeze the page. */
1967                        list_del(&page->lru);
1968                        page_ref_unfreeze(page, 2);
1969                        xas_store(&xas, page);
1970                        xas_pause(&xas);
1971                        xas_unlock_irq(&xas);
1972                        unlock_page(page);
1973                        putback_lru_page(page);
1974                        xas_lock_irq(&xas);
1975                }
1976                VM_BUG_ON(nr_none);
1977                xas_unlock_irq(&xas);
1978
1979                new_page->mapping = NULL;
1980        }
1981
1982        unlock_page(new_page);
1983out:
1984        VM_BUG_ON(!list_empty(&pagelist));
1985        if (!IS_ERR_OR_NULL(*hpage))
1986                mem_cgroup_uncharge(*hpage);
1987        /* TODO: tracepoints */
1988}
1989
1990static void khugepaged_scan_file(struct mm_struct *mm,
1991                struct file *file, pgoff_t start, struct page **hpage)
1992{
1993        struct page *page = NULL;
1994        struct address_space *mapping = file->f_mapping;
1995        XA_STATE(xas, &mapping->i_pages, start);
1996        int present, swap;
1997        int node = NUMA_NO_NODE;
1998        int result = SCAN_SUCCEED;
1999
2000        present = 0;
2001        swap = 0;
2002        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2003        rcu_read_lock();
2004        xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2005                if (xas_retry(&xas, page))
2006                        continue;
2007
2008                if (xa_is_value(page)) {
2009                        if (++swap > khugepaged_max_ptes_swap) {
2010                                result = SCAN_EXCEED_SWAP_PTE;
2011                                break;
2012                        }
2013                        continue;
2014                }
2015
2016                if (PageTransCompound(page)) {
2017                        result = SCAN_PAGE_COMPOUND;
2018                        break;
2019                }
2020
2021                node = page_to_nid(page);
2022                if (khugepaged_scan_abort(node)) {
2023                        result = SCAN_SCAN_ABORT;
2024                        break;
2025                }
2026                khugepaged_node_load[node]++;
2027
2028                if (!PageLRU(page)) {
2029                        result = SCAN_PAGE_LRU;
2030                        break;
2031                }
2032
2033                if (page_count(page) !=
2034                    1 + page_mapcount(page) + page_has_private(page)) {
2035                        result = SCAN_PAGE_COUNT;
2036                        break;
2037                }
2038
2039                /*
2040                 * We probably should check if the page is referenced here, but
2041                 * nobody would transfer pte_young() to PageReferenced() for us.
2042                 * And an rmap walk here is just too costly...
2043                 */
2044
2045                present++;
2046
2047                if (need_resched()) {
2048                        xas_pause(&xas);
2049                        cond_resched_rcu();
2050                }
2051        }
2052        rcu_read_unlock();
2053
2054        if (result == SCAN_SUCCEED) {
2055                if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2056                        result = SCAN_EXCEED_NONE_PTE;
2057                } else {
2058                        node = khugepaged_find_target_node();
2059                        collapse_file(mm, file, start, hpage, node);
2060                }
2061        }
2062
2063        /* TODO: tracepoints */
2064}
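/*
 * A worked example of the final check above, assuming 4 KiB base pages
 * (HPAGE_PMD_NR == 512): if khugepaged_max_ptes_none were tuned down to
 * 64, at least 512 - 64 = 448 of the scanned page cache slots would have
 * to hold present pages; otherwise the scan ends with
 * SCAN_EXCEED_NONE_PTE and collapse_file() is never called.
 */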
2065#else
2066static void khugepaged_scan_file(struct mm_struct *mm,
2067                struct file *file, pgoff_t start, struct page **hpage)
2068{
2069        BUILD_BUG();
2070}
2071
2072static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2073{
2074}
2075#endif
2076
2077static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2078                                            struct page **hpage)
2079        __releases(&khugepaged_mm_lock)
2080        __acquires(&khugepaged_mm_lock)
2081{
2082        struct mm_slot *mm_slot;
2083        struct mm_struct *mm;
2084        struct vm_area_struct *vma;
2085        int progress = 0;
2086
2087        VM_BUG_ON(!pages);
2088        lockdep_assert_held(&khugepaged_mm_lock);
2089
2090        if (khugepaged_scan.mm_slot)
2091                mm_slot = khugepaged_scan.mm_slot;
2092        else {
2093                mm_slot = list_entry(khugepaged_scan.mm_head.next,
2094                                     struct mm_slot, mm_node);
2095                khugepaged_scan.address = 0;
2096                khugepaged_scan.mm_slot = mm_slot;
2097        }
2098        spin_unlock(&khugepaged_mm_lock);
2099        khugepaged_collapse_pte_mapped_thps(mm_slot);
2100
2101        mm = mm_slot->mm;
2102        /*
2103         * Don't wait for the semaphore (to avoid long wait times).  Just move to
2104         * the next mm on the list.
2105         */
2106        vma = NULL;
2107        if (unlikely(!mmap_read_trylock(mm)))
2108                goto breakouterloop_mmap_lock;
2109        if (likely(!khugepaged_test_exit(mm)))
2110                vma = find_vma(mm, khugepaged_scan.address);
2111
2112        progress++;
2113        for (; vma; vma = vma->vm_next) {
2114                unsigned long hstart, hend;
2115
2116                cond_resched();
2117                if (unlikely(khugepaged_test_exit(mm))) {
2118                        progress++;
2119                        break;
2120                }
2121                if (!hugepage_vma_check(vma, vma->vm_flags)) {
2122skip:
2123                        progress++;
2124                        continue;
2125                }
2126                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2127                hend = vma->vm_end & HPAGE_PMD_MASK;
2128                if (hstart >= hend)
2129                        goto skip;
2130                if (khugepaged_scan.address > hend)
2131                        goto skip;
2132                if (khugepaged_scan.address < hstart)
2133                        khugepaged_scan.address = hstart;
2134                VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2135                if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2136                        goto skip;
2137
2138                while (khugepaged_scan.address < hend) {
2139                        int ret;
2140                        cond_resched();
2141                        if (unlikely(khugepaged_test_exit(mm)))
2142                                goto breakouterloop;
2143
2144                        VM_BUG_ON(khugepaged_scan.address < hstart ||
2145                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
2146                                  hend);
2147                        if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2148                                struct file *file = get_file(vma->vm_file);
2149                                pgoff_t pgoff = linear_page_index(vma,
2150                                                khugepaged_scan.address);
2151
2152                                mmap_read_unlock(mm);
2153                                ret = 1;
2154                                khugepaged_scan_file(mm, file, pgoff, hpage);
2155                                fput(file);
2156                        } else {
2157                                ret = khugepaged_scan_pmd(mm, vma,
2158                                                khugepaged_scan.address,
2159                                                hpage);
2160                        }
2161                        /* move to next address */
2162                        khugepaged_scan.address += HPAGE_PMD_SIZE;
2163                        progress += HPAGE_PMD_NR;
2164                        if (ret)
2165                                /* we released mmap_lock so break loop */
2166                                goto breakouterloop_mmap_lock;
2167                        if (progress >= pages)
2168                                goto breakouterloop;
2169                }
2170        }
2171breakouterloop:
2172        mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2173breakouterloop_mmap_lock:
2174
2175        spin_lock(&khugepaged_mm_lock);
2176        VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2177        /*
2178         * Release the current mm_slot if this mm is about to die, or
2179         * if we scanned all vmas of this mm.
2180         */
2181        if (khugepaged_test_exit(mm) || !vma) {
2182                /*
2183                 * Make sure that if mm_users is reaching zero while
2184                 * khugepaged runs here, khugepaged_exit will find
2185                 * mm_slot not pointing to the exiting mm.
2186                 */
2187                if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2188                        khugepaged_scan.mm_slot = list_entry(
2189                                mm_slot->mm_node.next,
2190                                struct mm_slot, mm_node);
2191                        khugepaged_scan.address = 0;
2192                } else {
2193                        khugepaged_scan.mm_slot = NULL;
2194                        khugepaged_full_scans++;
2195                }
2196
2197                collect_mm_slot(mm_slot);
2198        }
2199
2200        return progress;
2201}
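/*
 * A worked example of the hstart/hend rounding used above, assuming
 * 4 KiB base pages and 2 MiB THPs: for a VMA spanning 0x500000-0xa00000,
 *
 *	hstart = (0x500000 + 0x1fffff) & ~0x1fffff = 0x600000
 *	hend   =  0xa00000 & ~0x1fffff             = 0xa00000
 *
 * so exactly two PMD-sized ranges (at 0x600000 and 0x800000) are scan
 * candidates; a VMA that does not cover a whole aligned 2 MiB range ends
 * up with hstart >= hend and is skipped.
 */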
2202
2203static int khugepaged_has_work(void)
2204{
2205        return !list_empty(&khugepaged_scan.mm_head) &&
2206                khugepaged_enabled();
2207}
2208
2209static int khugepaged_wait_event(void)
2210{
2211        return !list_empty(&khugepaged_scan.mm_head) ||
2212                kthread_should_stop();
2213}
2214
2215static void khugepaged_do_scan(void)
2216{
2217        struct page *hpage = NULL;
2218        unsigned int progress = 0, pass_through_head = 0;
2219        unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2220        bool wait = true;
2221
2222        lru_add_drain_all();
2223
2224        while (progress < pages) {
2225                if (!khugepaged_prealloc_page(&hpage, &wait))
2226                        break;
2227
2228                cond_resched();
2229
2230                if (unlikely(kthread_should_stop() || try_to_freeze()))
2231                        break;
2232
2233                spin_lock(&khugepaged_mm_lock);
2234                if (!khugepaged_scan.mm_slot)
2235                        pass_through_head++;
2236                if (khugepaged_has_work() &&
2237                    pass_through_head < 2)
2238                        progress += khugepaged_scan_mm_slot(pages - progress,
2239                                                            &hpage);
2240                else
2241                        progress = pages;
2242                spin_unlock(&khugepaged_mm_lock);
2243        }
2244
2245        if (!IS_ERR_OR_NULL(hpage))
2246                put_page(hpage);
2247}
2248
2249static bool khugepaged_should_wakeup(void)
2250{
2251        return kthread_should_stop() ||
2252               time_after_eq(jiffies, khugepaged_sleep_expire);
2253}
2254
2255static void khugepaged_wait_work(void)
2256{
2257        if (khugepaged_has_work()) {
2258                const unsigned long scan_sleep_jiffies =
2259                        msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2260
2261                if (!scan_sleep_jiffies)
2262                        return;
2263
2264                khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2265                wait_event_freezable_timeout(khugepaged_wait,
2266                                             khugepaged_should_wakeup(),
2267                                             scan_sleep_jiffies);
2268                return;
2269        }
2270
2271        if (khugepaged_enabled())
2272                wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2273}
2274
2275static int khugepaged(void *none)
2276{
2277        struct mm_slot *mm_slot;
2278
2279        set_freezable();
2280        set_user_nice(current, MAX_NICE);
2281
2282        while (!kthread_should_stop()) {
2283                khugepaged_do_scan();
2284                khugepaged_wait_work();
2285        }
2286
2287        spin_lock(&khugepaged_mm_lock);
2288        mm_slot = khugepaged_scan.mm_slot;
2289        khugepaged_scan.mm_slot = NULL;
2290        if (mm_slot)
2291                collect_mm_slot(mm_slot);
2292        spin_unlock(&khugepaged_mm_lock);
2293        return 0;
2294}
2295
2296static void set_recommended_min_free_kbytes(void)
2297{
2298        struct zone *zone;
2299        int nr_zones = 0;
2300        unsigned long recommended_min;
2301
2302        for_each_populated_zone(zone) {
2303                /*
2304                 * We don't need to worry about fragmentation of
2305                 * ZONE_MOVABLE since it only has movable pages.
2306                 */
2307                if (zone_idx(zone) > gfp_zone(GFP_USER))
2308                        continue;
2309
2310                nr_zones++;
2311        }
2312
2313        /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2314        recommended_min = pageblock_nr_pages * nr_zones * 2;
2315
2316        /*
2317         * Make sure that on average at least two pageblocks are almost free
2318         * of another type, one for a migratetype to fall back to and a
2319         * second to avoid subsequent fallbacks of other types. There are 3
2320         * MIGRATE_TYPES we care about.
2321         */
2322        recommended_min += pageblock_nr_pages * nr_zones *
2323                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2324
2325        /* don't ever allow to reserve more than 5% of the lowmem */
2326        recommended_min = min(recommended_min,
2327                              (unsigned long) nr_free_buffer_pages() / 20);
2328        recommended_min <<= (PAGE_SHIFT-10);
2329
2330        if (recommended_min > min_free_kbytes) {
2331                if (user_min_free_kbytes >= 0)
2332                        pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2333                                min_free_kbytes, recommended_min);
2334
2335                min_free_kbytes = recommended_min;
2336        }
2337        setup_per_zone_wmarks();
2338}
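/*
 * A worked example of the sizing above, assuming 4 KiB base pages,
 * 2 MiB pageblocks (pageblock_nr_pages == 512), two populated zones
 * below ZONE_MOVABLE and MIGRATE_PCPTYPES == 3:
 *
 *	recommended_min = 512 * 2 * 2       =  2048 pages
 *	                + 512 * 2 * 3 * 3   =  9216 pages
 *	                                    = 11264 pages (44 MiB)
 *
 * which is then capped at 5% of nr_free_buffer_pages() and converted to
 * kilobytes by the << (PAGE_SHIFT - 10) before being compared with the
 * current min_free_kbytes.
 */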
2339
2340int start_stop_khugepaged(void)
2341{
2342        int err = 0;
2343
2344        mutex_lock(&khugepaged_mutex);
2345        if (khugepaged_enabled()) {
2346                if (!khugepaged_thread)
2347                        khugepaged_thread = kthread_run(khugepaged, NULL,
2348                                                        "khugepaged");
2349                if (IS_ERR(khugepaged_thread)) {
2350                        pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2351                        err = PTR_ERR(khugepaged_thread);
2352                        khugepaged_thread = NULL;
2353                        goto fail;
2354                }
2355
2356                if (!list_empty(&khugepaged_scan.mm_head))
2357                        wake_up_interruptible(&khugepaged_wait);
2358
2359                set_recommended_min_free_kbytes();
2360        } else if (khugepaged_thread) {
2361                kthread_stop(khugepaged_thread);
2362                khugepaged_thread = NULL;
2363        }
2364fail:
2365        mutex_unlock(&khugepaged_mutex);
2366        return err;
2367}
2368
2369void khugepaged_min_free_kbytes_update(void)
2370{
2371        mutex_lock(&khugepaged_mutex);
2372        if (khugepaged_enabled() && khugepaged_thread)
2373                set_recommended_min_free_kbytes();
2374        mutex_unlock(&khugepaged_mutex);
2375}
2376