linux/mm/huge_memory.c
   1/*
   2 *  Copyright (C) 2009  Red Hat, Inc.
   3 *
   4 *  This work is licensed under the terms of the GNU GPL, version 2. See
   5 *  the COPYING file in the top-level directory.
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/mm.h>
  11#include <linux/sched.h>
  12#include <linux/highmem.h>
  13#include <linux/hugetlb.h>
  14#include <linux/mmu_notifier.h>
  15#include <linux/rmap.h>
  16#include <linux/swap.h>
  17#include <linux/shrinker.h>
  18#include <linux/mm_inline.h>
  19#include <linux/kthread.h>
  20#include <linux/khugepaged.h>
  21#include <linux/freezer.h>
  22#include <linux/mman.h>
  23#include <linux/pagemap.h>
  24#include <linux/migrate.h>
  25#include <linux/hashtable.h>
  26
  27#include <asm/tlb.h>
  28#include <asm/pgalloc.h>
  29#include "internal.h"
  30
  31/*
  32 * By default transparent hugepage support is disabled in order to avoid
  33 * risking an increased memory footprint for applications without a
  34 * guaranteed benefit. When transparent hugepage support is enabled it
  35 * applies to all mappings, and khugepaged scans all mappings.
  36 * Defrag is invoked by khugepaged hugepage allocations and by page faults
  37 * for all hugepage allocations.
  38 */
  39unsigned long transparent_hugepage_flags __read_mostly =
  40#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
  41        (1<<TRANSPARENT_HUGEPAGE_FLAG)|
  42#endif
  43#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
  44        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
  45#endif
  46        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
  47        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
  48        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
  49
  50/* default: scan 8*512 ptes (or vmas) every 10 seconds (scan_sleep_millisecs) */
  51static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
  52static unsigned int khugepaged_pages_collapsed;
  53static unsigned int khugepaged_full_scans;
  54static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  55/* during fragmentation poll the hugepage allocator once every minute */
  56static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  57static struct task_struct *khugepaged_thread __read_mostly;
  58static DEFINE_MUTEX(khugepaged_mutex);
  59static DEFINE_SPINLOCK(khugepaged_mm_lock);
  60static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  61/*
  62 * default: collapse into a hugepage if at least one pte is mapped the
  63 * way it would have been mapped had the vma been large enough at page
  64 * fault time.
  65 */
  66static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
  67
  68static int khugepaged(void *none);
  69static int khugepaged_slab_init(void);
  70
  71#define MM_SLOTS_HASH_BITS 10
  72static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  73
  74static struct kmem_cache *mm_slot_cache __read_mostly;
  75
  76/**
  77 * struct mm_slot - hash lookup from mm to mm_slot
  78 * @hash: hash collision list
  79 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  80 * @mm: the mm that this information is valid for
  81 */
  82struct mm_slot {
  83        struct hlist_node hash;
  84        struct list_head mm_node;
  85        struct mm_struct *mm;
  86};
  87
  88/**
  89 * struct khugepaged_scan - cursor for scanning
  90 * @mm_head: the head of the mm list to scan
  91 * @mm_slot: the current mm_slot we are scanning
  92 * @address: the next address inside that to be scanned
  93 *
  94 * There is only the one khugepaged_scan instance of this cursor structure.
  95 */
  96struct khugepaged_scan {
  97        struct list_head mm_head;
  98        struct mm_slot *mm_slot;
  99        unsigned long address;
 100};
 101static struct khugepaged_scan khugepaged_scan = {
 102        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 103};
 104
 105
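/*
 * Raise min_free_kbytes when khugepaged is enabled so that enough whole
 * pageblocks stay free for hugepage allocations and for the
 * anti-fragmentation fallbacks computed below. The value is only ever
 * raised, and never beyond 5% of lowmem.
 */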
 106static int set_recommended_min_free_kbytes(void)
 107{
 108        struct zone *zone;
 109        int nr_zones = 0;
 110        unsigned long recommended_min;
 111
 112        if (!khugepaged_enabled())
 113                return 0;
 114
 115        for_each_populated_zone(zone)
 116                nr_zones++;
 117
 118        /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
 119        recommended_min = pageblock_nr_pages * nr_zones * 2;
 120
 121        /*
 122         * Make sure that on average at least two pageblocks are almost free
 123         * of another type, one for a migratetype to fall back to and a
 124         * second to avoid subsequent fallbacks of other types. There are 3
 125         * MIGRATE_TYPES we care about.
 126         */
 127        recommended_min += pageblock_nr_pages * nr_zones *
 128                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
 129
 130        /* never allow more than 5% of lowmem to be reserved */
 131        recommended_min = min(recommended_min,
 132                              (unsigned long) nr_free_buffer_pages() / 20);
 133        recommended_min <<= (PAGE_SHIFT-10);
 134
 135        if (recommended_min > min_free_kbytes) {
 136                if (user_min_free_kbytes >= 0)
 137                        pr_info("raising min_free_kbytes from %d to %lu "
 138                                "to help transparent hugepage allocations\n",
 139                                min_free_kbytes, recommended_min);
 140
 141                min_free_kbytes = recommended_min;
 142        }
 143        setup_per_zone_wmarks();
 144        return 0;
 145}
 146late_initcall(set_recommended_min_free_kbytes);
 147
 148static int start_khugepaged(void)
 149{
 150        int err = 0;
 151        if (khugepaged_enabled()) {
 152                if (!khugepaged_thread)
 153                        khugepaged_thread = kthread_run(khugepaged, NULL,
 154                                                        "khugepaged");
 155                if (unlikely(IS_ERR(khugepaged_thread))) {
 156                        pr_err("khugepaged: kthread_run(khugepaged) failed\n");
 157                        err = PTR_ERR(khugepaged_thread);
 158                        khugepaged_thread = NULL;
 159                }
 160
 161                if (!list_empty(&khugepaged_scan.mm_head))
 162                        wake_up_interruptible(&khugepaged_wait);
 163
 164                set_recommended_min_free_kbytes();
 165        } else if (khugepaged_thread) {
 166                kthread_stop(khugepaged_thread);
 167                khugepaged_thread = NULL;
 168        }
 169
 170        return err;
 171}
 172
 173static atomic_t huge_zero_refcount;
 174static struct page *huge_zero_page __read_mostly;
 175
 176static inline bool is_huge_zero_page(struct page *page)
 177{
 178        return ACCESS_ONCE(huge_zero_page) == page;
 179}
 180
 181static inline bool is_huge_zero_pmd(pmd_t pmd)
 182{
 183        return is_huge_zero_page(pmd_page(pmd));
 184}
 185
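/*
 * Lazily allocate the huge zero page on first use. The refcount is set
 * to 2: one reference for the caller and one held on behalf of the
 * cached huge_zero_page pointer, which is only dropped by the shrinker
 * below once all other users are gone.
 */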
 186static struct page *get_huge_zero_page(void)
 187{
 188        struct page *zero_page;
 189retry:
 190        if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
 191                return ACCESS_ONCE(huge_zero_page);
 192
 193        zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 194                        HPAGE_PMD_ORDER);
 195        if (!zero_page) {
 196                count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
 197                return NULL;
 198        }
 199        count_vm_event(THP_ZERO_PAGE_ALLOC);
 200        preempt_disable();
 201        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
 202                preempt_enable();
 203                __free_page(zero_page);
 204                goto retry;
 205        }
 206
 207        /* We take an additional reference here; it will be dropped by the shrinker */
 208        atomic_set(&huge_zero_refcount, 2);
 209        preempt_enable();
 210        return ACCESS_ONCE(huge_zero_page);
 211}
 212
 213static void put_huge_zero_page(void)
 214{
 215        /*
 216         * The counter should never reach zero here; only the shrinker
 217         * can drop the last reference.
 218         */
 219        BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 220}
 221
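/*
 * Shrinker interface for the huge zero page: count_objects reports
 * HPAGE_PMD_NR reclaimable pages while only the cached reference
 * remains, and scan_objects then drops that last reference and frees
 * the page.
 */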
 222static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
 223                                        struct shrink_control *sc)
 224{
 225        /* we can free zero page only if last reference remains */
 226        return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
 227}
 228
 229static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 230                                       struct shrink_control *sc)
 231{
 232        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 233                struct page *zero_page = xchg(&huge_zero_page, NULL);
 234                BUG_ON(zero_page == NULL);
 235                __free_page(zero_page);
 236                return HPAGE_PMD_NR;
 237        }
 238
 239        return 0;
 240}
 241
 242static struct shrinker huge_zero_page_shrinker = {
 243        .count_objects = shrink_huge_zero_page_count,
 244        .scan_objects = shrink_huge_zero_page_scan,
 245        .seeks = DEFAULT_SEEKS,
 246};
 247
 248#ifdef CONFIG_SYSFS
 249
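/*
 * Helpers for the three-state sysfs knobs ("always", "madvise", "never")
 * that are backed by a pair of flag bits; the currently selected value
 * is printed in brackets.
 */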
 250static ssize_t double_flag_show(struct kobject *kobj,
 251                                struct kobj_attribute *attr, char *buf,
 252                                enum transparent_hugepage_flag enabled,
 253                                enum transparent_hugepage_flag req_madv)
 254{
 255        if (test_bit(enabled, &transparent_hugepage_flags)) {
 256                VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
 257                return sprintf(buf, "[always] madvise never\n");
 258        } else if (test_bit(req_madv, &transparent_hugepage_flags))
 259                return sprintf(buf, "always [madvise] never\n");
 260        else
 261                return sprintf(buf, "always madvise [never]\n");
 262}
 263static ssize_t double_flag_store(struct kobject *kobj,
 264                                 struct kobj_attribute *attr,
 265                                 const char *buf, size_t count,
 266                                 enum transparent_hugepage_flag enabled,
 267                                 enum transparent_hugepage_flag req_madv)
 268{
 269        if (!memcmp("always", buf,
 270                    min(sizeof("always")-1, count))) {
 271                set_bit(enabled, &transparent_hugepage_flags);
 272                clear_bit(req_madv, &transparent_hugepage_flags);
 273        } else if (!memcmp("madvise", buf,
 274                           min(sizeof("madvise")-1, count))) {
 275                clear_bit(enabled, &transparent_hugepage_flags);
 276                set_bit(req_madv, &transparent_hugepage_flags);
 277        } else if (!memcmp("never", buf,
 278                           min(sizeof("never")-1, count))) {
 279                clear_bit(enabled, &transparent_hugepage_flags);
 280                clear_bit(req_madv, &transparent_hugepage_flags);
 281        } else
 282                return -EINVAL;
 283
 284        return count;
 285}
 286
 287static ssize_t enabled_show(struct kobject *kobj,
 288                            struct kobj_attribute *attr, char *buf)
 289{
 290        return double_flag_show(kobj, attr, buf,
 291                                TRANSPARENT_HUGEPAGE_FLAG,
 292                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 293}
 294static ssize_t enabled_store(struct kobject *kobj,
 295                             struct kobj_attribute *attr,
 296                             const char *buf, size_t count)
 297{
 298        ssize_t ret;
 299
 300        ret = double_flag_store(kobj, attr, buf, count,
 301                                TRANSPARENT_HUGEPAGE_FLAG,
 302                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 303
 304        if (ret > 0) {
 305                int err;
 306
 307                mutex_lock(&khugepaged_mutex);
 308                err = start_khugepaged();
 309                mutex_unlock(&khugepaged_mutex);
 310
 311                if (err)
 312                        ret = err;
 313        }
 314
 315        return ret;
 316}
 317static struct kobj_attribute enabled_attr =
 318        __ATTR(enabled, 0644, enabled_show, enabled_store);
 319
 320static ssize_t single_flag_show(struct kobject *kobj,
 321                                struct kobj_attribute *attr, char *buf,
 322                                enum transparent_hugepage_flag flag)
 323{
 324        return sprintf(buf, "%d\n",
 325                       !!test_bit(flag, &transparent_hugepage_flags));
 326}
 327
 328static ssize_t single_flag_store(struct kobject *kobj,
 329                                 struct kobj_attribute *attr,
 330                                 const char *buf, size_t count,
 331                                 enum transparent_hugepage_flag flag)
 332{
 333        unsigned long value;
 334        int ret;
 335
 336        ret = kstrtoul(buf, 10, &value);
 337        if (ret < 0)
 338                return ret;
 339        if (value > 1)
 340                return -EINVAL;
 341
 342        if (value)
 343                set_bit(flag, &transparent_hugepage_flags);
 344        else
 345                clear_bit(flag, &transparent_hugepage_flags);
 346
 347        return count;
 348}
 349
 350/*
 351 * Currently "defrag" only controls whether __GFP_WAIT is used for the
 352 * allocation. A blind __GFP_REPEAT would be too aggressive: it's never
 353 * worth swapping tons of memory just to allocate one more hugepage.
 354 */
 355static ssize_t defrag_show(struct kobject *kobj,
 356                           struct kobj_attribute *attr, char *buf)
 357{
 358        return double_flag_show(kobj, attr, buf,
 359                                TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 360                                TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
 361}
 362static ssize_t defrag_store(struct kobject *kobj,
 363                            struct kobj_attribute *attr,
 364                            const char *buf, size_t count)
 365{
 366        return double_flag_store(kobj, attr, buf, count,
 367                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 368                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
 369}
 370static struct kobj_attribute defrag_attr =
 371        __ATTR(defrag, 0644, defrag_show, defrag_store);
 372
 373static ssize_t use_zero_page_show(struct kobject *kobj,
 374                struct kobj_attribute *attr, char *buf)
 375{
 376        return single_flag_show(kobj, attr, buf,
 377                                TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 378}
 379static ssize_t use_zero_page_store(struct kobject *kobj,
 380                struct kobj_attribute *attr, const char *buf, size_t count)
 381{
 382        return single_flag_store(kobj, attr, buf, count,
 383                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 384}
 385static struct kobj_attribute use_zero_page_attr =
 386        __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
 387#ifdef CONFIG_DEBUG_VM
 388static ssize_t debug_cow_show(struct kobject *kobj,
 389                                struct kobj_attribute *attr, char *buf)
 390{
 391        return single_flag_show(kobj, attr, buf,
 392                                TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
 393}
 394static ssize_t debug_cow_store(struct kobject *kobj,
 395                               struct kobj_attribute *attr,
 396                               const char *buf, size_t count)
 397{
 398        return single_flag_store(kobj, attr, buf, count,
 399                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
 400}
 401static struct kobj_attribute debug_cow_attr =
 402        __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
 403#endif /* CONFIG_DEBUG_VM */
 404
 405static struct attribute *hugepage_attr[] = {
 406        &enabled_attr.attr,
 407        &defrag_attr.attr,
 408        &use_zero_page_attr.attr,
 409#ifdef CONFIG_DEBUG_VM
 410        &debug_cow_attr.attr,
 411#endif
 412        NULL,
 413};
 414
 415static struct attribute_group hugepage_attr_group = {
 416        .attrs = hugepage_attr,
 417};
 418
 419static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 420                                         struct kobj_attribute *attr,
 421                                         char *buf)
 422{
 423        return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 424}
 425
 426static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 427                                          struct kobj_attribute *attr,
 428                                          const char *buf, size_t count)
 429{
 430        unsigned long msecs;
 431        int err;
 432
 433        err = kstrtoul(buf, 10, &msecs);
 434        if (err || msecs > UINT_MAX)
 435                return -EINVAL;
 436
 437        khugepaged_scan_sleep_millisecs = msecs;
 438        wake_up_interruptible(&khugepaged_wait);
 439
 440        return count;
 441}
 442static struct kobj_attribute scan_sleep_millisecs_attr =
 443        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 444               scan_sleep_millisecs_store);
 445
 446static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 447                                          struct kobj_attribute *attr,
 448                                          char *buf)
 449{
 450        return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 451}
 452
 453static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 454                                           struct kobj_attribute *attr,
 455                                           const char *buf, size_t count)
 456{
 457        unsigned long msecs;
 458        int err;
 459
 460        err = kstrtoul(buf, 10, &msecs);
 461        if (err || msecs > UINT_MAX)
 462                return -EINVAL;
 463
 464        khugepaged_alloc_sleep_millisecs = msecs;
 465        wake_up_interruptible(&khugepaged_wait);
 466
 467        return count;
 468}
 469static struct kobj_attribute alloc_sleep_millisecs_attr =
 470        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 471               alloc_sleep_millisecs_store);
 472
 473static ssize_t pages_to_scan_show(struct kobject *kobj,
 474                                  struct kobj_attribute *attr,
 475                                  char *buf)
 476{
 477        return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
 478}
 479static ssize_t pages_to_scan_store(struct kobject *kobj,
 480                                   struct kobj_attribute *attr,
 481                                   const char *buf, size_t count)
 482{
 483        int err;
 484        unsigned long pages;
 485
 486        err = kstrtoul(buf, 10, &pages);
 487        if (err || !pages || pages > UINT_MAX)
 488                return -EINVAL;
 489
 490        khugepaged_pages_to_scan = pages;
 491
 492        return count;
 493}
 494static struct kobj_attribute pages_to_scan_attr =
 495        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
 496               pages_to_scan_store);
 497
 498static ssize_t pages_collapsed_show(struct kobject *kobj,
 499                                    struct kobj_attribute *attr,
 500                                    char *buf)
 501{
 502        return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
 503}
 504static struct kobj_attribute pages_collapsed_attr =
 505        __ATTR_RO(pages_collapsed);
 506
 507static ssize_t full_scans_show(struct kobject *kobj,
 508                               struct kobj_attribute *attr,
 509                               char *buf)
 510{
 511        return sprintf(buf, "%u\n", khugepaged_full_scans);
 512}
 513static struct kobj_attribute full_scans_attr =
 514        __ATTR_RO(full_scans);
 515
 516static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 517                                      struct kobj_attribute *attr, char *buf)
 518{
 519        return single_flag_show(kobj, attr, buf,
 520                                TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 521}
 522static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 523                                       struct kobj_attribute *attr,
 524                                       const char *buf, size_t count)
 525{
 526        return single_flag_store(kobj, attr, buf, count,
 527                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 528}
 529static struct kobj_attribute khugepaged_defrag_attr =
 530        __ATTR(defrag, 0644, khugepaged_defrag_show,
 531               khugepaged_defrag_store);
 532
 533/*
 534 * max_ptes_none controls whether khugepaged may collapse hugepages over
 535 * ranges that contain unmapped ptes, which potentially increases the
 536 * memory footprint of the vmas. When max_ptes_none is 0, khugepaged
 537 * will not reduce the available free memory in the system as it runs.
 538 * Increasing max_ptes_none instead potentially reduces the free memory
 539 * in the system during the khugepaged scan.
 540 */
 541static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 542                                             struct kobj_attribute *attr,
 543                                             char *buf)
 544{
 545        return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
 546}
 547static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 548                                              struct kobj_attribute *attr,
 549                                              const char *buf, size_t count)
 550{
 551        int err;
 552        unsigned long max_ptes_none;
 553
 554        err = kstrtoul(buf, 10, &max_ptes_none);
 555        if (err || max_ptes_none > HPAGE_PMD_NR-1)
 556                return -EINVAL;
 557
 558        khugepaged_max_ptes_none = max_ptes_none;
 559
 560        return count;
 561}
 562static struct kobj_attribute khugepaged_max_ptes_none_attr =
 563        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 564               khugepaged_max_ptes_none_store);
 565
 566static struct attribute *khugepaged_attr[] = {
 567        &khugepaged_defrag_attr.attr,
 568        &khugepaged_max_ptes_none_attr.attr,
 569        &pages_to_scan_attr.attr,
 570        &pages_collapsed_attr.attr,
 571        &full_scans_attr.attr,
 572        &scan_sleep_millisecs_attr.attr,
 573        &alloc_sleep_millisecs_attr.attr,
 574        NULL,
 575};
 576
 577static struct attribute_group khugepaged_attr_group = {
 578        .attrs = khugepaged_attr,
 579        .name = "khugepaged",
 580};
 581
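/*
 * The attribute groups above are published under
 * /sys/kernel/mm/transparent_hugepage/ (enabled, defrag, use_zero_page
 * and, with CONFIG_DEBUG_VM, debug_cow), with the khugepaged tunables in
 * the "khugepaged" subdirectory. For example:
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 */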
 582static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 583{
 584        int err;
 585
 586        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
 587        if (unlikely(!*hugepage_kobj)) {
 588                pr_err("failed to create transparent hugepage kobject\n");
 589                return -ENOMEM;
 590        }
 591
 592        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
 593        if (err) {
 594                pr_err("failed to register transparent hugepage group\n");
 595                goto delete_obj;
 596        }
 597
 598        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
 599        if (err) {
 600                pr_err("failed to register transparent hugepage group\n");
 601                goto remove_hp_group;
 602        }
 603
 604        return 0;
 605
 606remove_hp_group:
 607        sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
 608delete_obj:
 609        kobject_put(*hugepage_kobj);
 610        return err;
 611}
 612
 613static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 614{
 615        sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
 616        sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
 617        kobject_put(hugepage_kobj);
 618}
 619#else
 620static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
 621{
 622        return 0;
 623}
 624
 625static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 626{
 627}
 628#endif /* CONFIG_SYSFS */
 629
 630static int __init hugepage_init(void)
 631{
 632        int err;
 633        struct kobject *hugepage_kobj;
 634
 635        if (!has_transparent_hugepage()) {
 636                transparent_hugepage_flags = 0;
 637                return -EINVAL;
 638        }
 639
 640        err = hugepage_init_sysfs(&hugepage_kobj);
 641        if (err)
 642                return err;
 643
 644        err = khugepaged_slab_init();
 645        if (err)
 646                goto out;
 647
 648        register_shrinker(&huge_zero_page_shrinker);
 649
 650        /*
 651         * By default disable transparent hugepages on smaller systems,
 652         * where the extra memory used could hurt more than TLB overhead
 653         * is likely to save.  The admin can still enable it through /sys.
 654         */
 655        if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
 656                transparent_hugepage_flags = 0;
 657
 658        start_khugepaged();
 659
 660        return 0;
 661out:
 662        hugepage_exit_sysfs(hugepage_kobj);
 663        return err;
 664}
 665subsys_initcall(hugepage_init);
 666
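/*
 * "transparent_hugepage=" kernel command line parameter: accepts the
 * same "always", "madvise" and "never" values as the sysfs "enabled"
 * knob, e.g. booting with transparent_hugepage=madvise.
 */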
 667static int __init setup_transparent_hugepage(char *str)
 668{
 669        int ret = 0;
 670        if (!str)
 671                goto out;
 672        if (!strcmp(str, "always")) {
 673                set_bit(TRANSPARENT_HUGEPAGE_FLAG,
 674                        &transparent_hugepage_flags);
 675                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 676                          &transparent_hugepage_flags);
 677                ret = 1;
 678        } else if (!strcmp(str, "madvise")) {
 679                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 680                          &transparent_hugepage_flags);
 681                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 682                        &transparent_hugepage_flags);
 683                ret = 1;
 684        } else if (!strcmp(str, "never")) {
 685                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 686                          &transparent_hugepage_flags);
 687                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 688                          &transparent_hugepage_flags);
 689                ret = 1;
 690        }
 691out:
 692        if (!ret)
 693                pr_warn("transparent_hugepage= cannot parse, ignored\n");
 694        return ret;
 695}
 696__setup("transparent_hugepage=", setup_transparent_hugepage);
 697
 698pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 699{
 700        if (likely(vma->vm_flags & VM_WRITE))
 701                pmd = pmd_mkwrite(pmd);
 702        return pmd;
 703}
 704
 705static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
 706{
 707        pmd_t entry;
 708        entry = mk_pmd(page, prot);
 709        entry = pmd_mkhuge(entry);
 710        return entry;
 711}
 712
 713static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 714                                        struct vm_area_struct *vma,
 715                                        unsigned long haddr, pmd_t *pmd,
 716                                        struct page *page)
 717{
 718        pgtable_t pgtable;
 719        spinlock_t *ptl;
 720
 721        VM_BUG_ON_PAGE(!PageCompound(page), page);
 722        pgtable = pte_alloc_one(mm, haddr);
 723        if (unlikely(!pgtable))
 724                return VM_FAULT_OOM;
 725
 726        clear_huge_page(page, haddr, HPAGE_PMD_NR);
 727        /*
 728         * The memory barrier inside __SetPageUptodate makes sure that
 729         * clear_huge_page writes become visible before the set_pmd_at()
 730         * write.
 731         */
 732        __SetPageUptodate(page);
 733
 734        ptl = pmd_lock(mm, pmd);
 735        if (unlikely(!pmd_none(*pmd))) {
 736                spin_unlock(ptl);
 737                mem_cgroup_uncharge_page(page);
 738                put_page(page);
 739                pte_free(mm, pgtable);
 740        } else {
 741                pmd_t entry;
 742                entry = mk_huge_pmd(page, vma->vm_page_prot);
 743                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 744                page_add_new_anon_rmap(page, vma, haddr);
 745                pgtable_trans_huge_deposit(mm, pmd, pgtable);
 746                set_pmd_at(mm, haddr, pmd, entry);
 747                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 748                atomic_long_inc(&mm->nr_ptes);
 749                spin_unlock(ptl);
 750        }
 751
 752        return 0;
 753}
 754
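/*
 * Build the gfp mask for a hugepage allocation: start from GFP_TRANSHUGE
 * and clear __GFP_WAIT when defrag is disabled, so the allocation cannot
 * sleep in direct reclaim/compaction just to produce a hugepage.
 */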
 755static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 756{
 757        return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 758}
 759
 760static inline struct page *alloc_hugepage_vma(int defrag,
 761                                              struct vm_area_struct *vma,
 762                                              unsigned long haddr, int nd,
 763                                              gfp_t extra_gfp)
 764{
 765        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
 766                               HPAGE_PMD_ORDER, vma, haddr, nd);
 767}
 768
 769/* Caller must hold page table lock. */
 770static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 771                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
 772                struct page *zero_page)
 773{
 774        pmd_t entry;
 775        if (!pmd_none(*pmd))
 776                return false;
 777        entry = mk_pmd(zero_page, vma->vm_page_prot);
 778        entry = pmd_wrprotect(entry);
 779        entry = pmd_mkhuge(entry);
 780        pgtable_trans_huge_deposit(mm, pmd, pgtable);
 781        set_pmd_at(mm, haddr, pmd, entry);
 782        atomic_long_inc(&mm->nr_ptes);
 783        return true;
 784}
 785
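/*
 * Anonymous huge page fault handler. For a pmd-aligned range that fits
 * entirely inside the vma this either maps the huge zero page (read
 * fault with use_zero_page enabled) or allocates, charges and maps a
 * freshly cleared huge page. Allocation and charge failures return
 * VM_FAULT_FALLBACK so the caller can retry with ordinary pages.
 */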
 786int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 787                               unsigned long address, pmd_t *pmd,
 788                               unsigned int flags)
 789{
 790        struct page *page;
 791        unsigned long haddr = address & HPAGE_PMD_MASK;
 792
 793        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
 794                return VM_FAULT_FALLBACK;
 795        if (unlikely(anon_vma_prepare(vma)))
 796                return VM_FAULT_OOM;
 797        if (unlikely(khugepaged_enter(vma)))
 798                return VM_FAULT_OOM;
 799        if (!(flags & FAULT_FLAG_WRITE) &&
 800                        transparent_hugepage_use_zero_page()) {
 801                spinlock_t *ptl;
 802                pgtable_t pgtable;
 803                struct page *zero_page;
 804                bool set;
 805                pgtable = pte_alloc_one(mm, haddr);
 806                if (unlikely(!pgtable))
 807                        return VM_FAULT_OOM;
 808                zero_page = get_huge_zero_page();
 809                if (unlikely(!zero_page)) {
 810                        pte_free(mm, pgtable);
 811                        count_vm_event(THP_FAULT_FALLBACK);
 812                        return VM_FAULT_FALLBACK;
 813                }
 814                ptl = pmd_lock(mm, pmd);
 815                set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
 816                                zero_page);
 817                spin_unlock(ptl);
 818                if (!set) {
 819                        pte_free(mm, pgtable);
 820                        put_huge_zero_page();
 821                }
 822                return 0;
 823        }
 824        page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 825                        vma, haddr, numa_node_id(), 0);
 826        if (unlikely(!page)) {
 827                count_vm_event(THP_FAULT_FALLBACK);
 828                return VM_FAULT_FALLBACK;
 829        }
 830        if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
 831                put_page(page);
 832                count_vm_event(THP_FAULT_FALLBACK);
 833                return VM_FAULT_FALLBACK;
 834        }
 835        if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
 836                mem_cgroup_uncharge_page(page);
 837                put_page(page);
 838                count_vm_event(THP_FAULT_FALLBACK);
 839                return VM_FAULT_FALLBACK;
 840        }
 841
 842        count_vm_event(THP_FAULT_ALLOC);
 843        return 0;
 844}
 845
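/*
 * Copy a huge pmd from src_mm to dst_mm at fork time. The huge zero
 * page only needs an extra reference; a real THP is write protected in
 * both mms so that either side takes the COW path on its next write.
 */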
 846int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 847                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 848                  struct vm_area_struct *vma)
 849{
 850        spinlock_t *dst_ptl, *src_ptl;
 851        struct page *src_page;
 852        pmd_t pmd;
 853        pgtable_t pgtable;
 854        int ret;
 855
 856        ret = -ENOMEM;
 857        pgtable = pte_alloc_one(dst_mm, addr);
 858        if (unlikely(!pgtable))
 859                goto out;
 860
 861        dst_ptl = pmd_lock(dst_mm, dst_pmd);
 862        src_ptl = pmd_lockptr(src_mm, src_pmd);
 863        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 864
 865        ret = -EAGAIN;
 866        pmd = *src_pmd;
 867        if (unlikely(!pmd_trans_huge(pmd))) {
 868                pte_free(dst_mm, pgtable);
 869                goto out_unlock;
 870        }
 871        /*
 872         * While the page table lock is held, the huge zero pmd cannot be
 873         * under splitting, since we never split the zero page itself, only
 874         * the pmd into a page table.
 875         */
 876        if (is_huge_zero_pmd(pmd)) {
 877                struct page *zero_page;
 878                bool set;
 879                /*
 880                 * get_huge_zero_page() will never allocate a new page here,
 881                 * since we already have a zero page to copy. It just takes a
 882                 * reference.
 883                 */
 884                zero_page = get_huge_zero_page();
 885                set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
 886                                zero_page);
 887                BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
 888                ret = 0;
 889                goto out_unlock;
 890        }
 891
 892        if (unlikely(pmd_trans_splitting(pmd))) {
 893                /* split huge page running from under us */
 894                spin_unlock(src_ptl);
 895                spin_unlock(dst_ptl);
 896                pte_free(dst_mm, pgtable);
 897
 898                wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
 899                goto out;
 900        }
 901        src_page = pmd_page(pmd);
 902        VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
 903        get_page(src_page);
 904        page_dup_rmap(src_page);
 905        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 906
 907        pmdp_set_wrprotect(src_mm, addr, src_pmd);
 908        pmd = pmd_mkold(pmd_wrprotect(pmd));
 909        pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
 910        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 911        atomic_long_inc(&dst_mm->nr_ptes);
 912
 913        ret = 0;
 914out_unlock:
 915        spin_unlock(src_ptl);
 916        spin_unlock(dst_ptl);
 917out:
 918        return ret;
 919}
 920
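/*
 * Access fault on a huge pmd: set the young (and possibly dirty) bit,
 * after revalidating the pmd against orig_pmd under the pmd lock.
 */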
 921void huge_pmd_set_accessed(struct mm_struct *mm,
 922                           struct vm_area_struct *vma,
 923                           unsigned long address,
 924                           pmd_t *pmd, pmd_t orig_pmd,
 925                           int dirty)
 926{
 927        spinlock_t *ptl;
 928        pmd_t entry;
 929        unsigned long haddr;
 930
 931        ptl = pmd_lock(mm, pmd);
 932        if (unlikely(!pmd_same(*pmd, orig_pmd)))
 933                goto unlock;
 934
 935        entry = pmd_mkyoung(orig_pmd);
 936        haddr = address & HPAGE_PMD_MASK;
 937        if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
 938                update_mmu_cache_pmd(vma, address, pmd);
 939
 940unlock:
 941        spin_unlock(ptl);
 942}
 943
 944/*
 945 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
 946 * during copy_user_huge_page()'s copy_page_rep(), in case the source
 947 * page gets split and a tail page is freed before the copy completes.
 948 * Called under the pmd_lock of the checked pmd, so safe from splitting itself.
 949 */
 950static void get_user_huge_page(struct page *page)
 951{
 952        if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
 953                struct page *endpage = page + HPAGE_PMD_NR;
 954
 955                atomic_add(HPAGE_PMD_NR, &page->_count);
 956                while (++page < endpage)
 957                        get_huge_page_tail(page);
 958        } else {
 959                get_page(page);
 960        }
 961}
 962
 963static void put_user_huge_page(struct page *page)
 964{
 965        if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
 966                struct page *endpage = page + HPAGE_PMD_NR;
 967
 968                while (page < endpage)
 969                        put_page(page++);
 970        } else {
 971                put_page(page);
 972        }
 973}
 974
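/*
 * COW fallback used when a replacement huge page cannot be allocated:
 * copy the data into HPAGE_PMD_NR small pages and remap the range with
 * ordinary ptes under the same pmd.
 */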
 975static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 976                                        struct vm_area_struct *vma,
 977                                        unsigned long address,
 978                                        pmd_t *pmd, pmd_t orig_pmd,
 979                                        struct page *page,
 980                                        unsigned long haddr)
 981{
 982        spinlock_t *ptl;
 983        pgtable_t pgtable;
 984        pmd_t _pmd;
 985        int ret = 0, i;
 986        struct page **pages;
 987        unsigned long mmun_start;       /* For mmu_notifiers */
 988        unsigned long mmun_end;         /* For mmu_notifiers */
 989
 990        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
 991                        GFP_KERNEL);
 992        if (unlikely(!pages)) {
 993                ret |= VM_FAULT_OOM;
 994                goto out;
 995        }
 996
 997        for (i = 0; i < HPAGE_PMD_NR; i++) {
 998                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
 999                                               __GFP_OTHER_NODE,
1000                                               vma, address, page_to_nid(page));
1001                if (unlikely(!pages[i] ||
1002                             mem_cgroup_charge_anon(pages[i], mm,
1003                                                       GFP_KERNEL))) {
1004                        if (pages[i])
1005                                put_page(pages[i]);
1006                        mem_cgroup_uncharge_start();
1007                        while (--i >= 0) {
1008                                mem_cgroup_uncharge_page(pages[i]);
1009                                put_page(pages[i]);
1010                        }
1011                        mem_cgroup_uncharge_end();
1012                        kfree(pages);
1013                        ret |= VM_FAULT_OOM;
1014                        goto out;
1015                }
1016        }
1017
1018        for (i = 0; i < HPAGE_PMD_NR; i++) {
1019                copy_user_highpage(pages[i], page + i,
1020                                   haddr + PAGE_SIZE * i, vma);
1021                __SetPageUptodate(pages[i]);
1022                cond_resched();
1023        }
1024
1025        mmun_start = haddr;
1026        mmun_end   = haddr + HPAGE_PMD_SIZE;
1027        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1028
1029        ptl = pmd_lock(mm, pmd);
1030        if (unlikely(!pmd_same(*pmd, orig_pmd)))
1031                goto out_free_pages;
1032        VM_BUG_ON_PAGE(!PageHead(page), page);
1033
1034        pmdp_clear_flush(vma, haddr, pmd);
1035        /* leave pmd empty until pte is filled */
1036
1037        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1038        pmd_populate(mm, &_pmd, pgtable);
1039
1040        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1041                pte_t *pte, entry;
1042                entry = mk_pte(pages[i], vma->vm_page_prot);
1043                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1044                page_add_new_anon_rmap(pages[i], vma, haddr);
1045                pte = pte_offset_map(&_pmd, haddr);
1046                VM_BUG_ON(!pte_none(*pte));
1047                set_pte_at(mm, haddr, pte, entry);
1048                pte_unmap(pte);
1049        }
1050        kfree(pages);
1051
1052        smp_wmb(); /* make pte visible before pmd */
1053        pmd_populate(mm, pmd, pgtable);
1054        page_remove_rmap(page);
1055        spin_unlock(ptl);
1056
1057        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1058
1059        ret |= VM_FAULT_WRITE;
1060        put_page(page);
1061
1062out:
1063        return ret;
1064
1065out_free_pages:
1066        spin_unlock(ptl);
1067        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1068        mem_cgroup_uncharge_start();
1069        for (i = 0; i < HPAGE_PMD_NR; i++) {
1070                mem_cgroup_uncharge_page(pages[i]);
1071                put_page(pages[i]);
1072        }
1073        mem_cgroup_uncharge_end();
1074        kfree(pages);
1075        goto out;
1076}
1077
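/*
 * Write-protection fault on a huge pmd: reuse the page in place when we
 * hold the only mapping, otherwise allocate a new huge page, copy into
 * it (or clear it, for the huge zero page) and switch the mapping over.
 * If no huge page can be allocated, fall back to small pages.
 */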
1078int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1079                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
1080{
1081        spinlock_t *ptl;
1082        int ret = 0;
1083        struct page *page = NULL, *new_page;
1084        unsigned long haddr;
1085        unsigned long mmun_start;       /* For mmu_notifiers */
1086        unsigned long mmun_end;         /* For mmu_notifiers */
1087
1088        ptl = pmd_lockptr(mm, pmd);
1089        VM_BUG_ON(!vma->anon_vma);
1090        haddr = address & HPAGE_PMD_MASK;
1091        if (is_huge_zero_pmd(orig_pmd))
1092                goto alloc;
1093        spin_lock(ptl);
1094        if (unlikely(!pmd_same(*pmd, orig_pmd)))
1095                goto out_unlock;
1096
1097        page = pmd_page(orig_pmd);
1098        VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
1099        if (page_mapcount(page) == 1) {
1100                pmd_t entry;
1101                entry = pmd_mkyoung(orig_pmd);
1102                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1103                if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
1104                        update_mmu_cache_pmd(vma, address, pmd);
1105                ret |= VM_FAULT_WRITE;
1106                goto out_unlock;
1107        }
1108        get_user_huge_page(page);
1109        spin_unlock(ptl);
1110alloc:
1111        if (transparent_hugepage_enabled(vma) &&
1112            !transparent_hugepage_debug_cow())
1113                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
1114                                              vma, haddr, numa_node_id(), 0);
1115        else
1116                new_page = NULL;
1117
1118        if (unlikely(!new_page)) {
1119                if (!page) {
1120                        split_huge_page_pmd(vma, address, pmd);
1121                        ret |= VM_FAULT_FALLBACK;
1122                } else {
1123                        ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1124                                        pmd, orig_pmd, page, haddr);
1125                        if (ret & VM_FAULT_OOM) {
1126                                split_huge_page(page);
1127                                ret |= VM_FAULT_FALLBACK;
1128                        }
1129                        put_user_huge_page(page);
1130                }
1131                count_vm_event(THP_FAULT_FALLBACK);
1132                goto out;
1133        }
1134
1135        if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
1136                put_page(new_page);
1137                if (page) {
1138                        split_huge_page(page);
1139                        put_user_huge_page(page);
1140                } else
1141                        split_huge_page_pmd(vma, address, pmd);
1142                ret |= VM_FAULT_FALLBACK;
1143                count_vm_event(THP_FAULT_FALLBACK);
1144                goto out;
1145        }
1146
1147        count_vm_event(THP_FAULT_ALLOC);
1148
1149        if (!page)
1150                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1151        else
1152                copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
1153        __SetPageUptodate(new_page);
1154
1155        mmun_start = haddr;
1156        mmun_end   = haddr + HPAGE_PMD_SIZE;
1157        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1158
1159        spin_lock(ptl);
1160        if (page)
1161                put_user_huge_page(page);
1162        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1163                spin_unlock(ptl);
1164                mem_cgroup_uncharge_page(new_page);
1165                put_page(new_page);
1166                goto out_mn;
1167        } else {
1168                pmd_t entry;
1169                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1170                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1171                pmdp_clear_flush(vma, haddr, pmd);
1172                page_add_new_anon_rmap(new_page, vma, haddr);
1173                set_pmd_at(mm, haddr, pmd, entry);
1174                update_mmu_cache_pmd(vma, address, pmd);
1175                if (!page) {
1176                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
1177                        put_huge_zero_page();
1178                } else {
1179                        VM_BUG_ON_PAGE(!PageHead(page), page);
1180                        page_remove_rmap(page);
1181                        put_page(page);
1182                }
1183                ret |= VM_FAULT_WRITE;
1184        }
1185        spin_unlock(ptl);
1186out_mn:
1187        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1188out:
1189        return ret;
1190out_unlock:
1191        spin_unlock(ptl);
1192        return ret;
1193}
1194
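/*
 * follow_page() helper for a huge pmd; called with the pmd lock held.
 * Honours FOLL_WRITE, FOLL_DUMP, FOLL_NUMA, FOLL_TOUCH, FOLL_MLOCK and
 * FOLL_GET, and returns the subpage of the THP that covers @addr.
 */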
1195struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1196                                   unsigned long addr,
1197                                   pmd_t *pmd,
1198                                   unsigned int flags)
1199{
1200        struct mm_struct *mm = vma->vm_mm;
1201        struct page *page = NULL;
1202
1203        assert_spin_locked(pmd_lockptr(mm, pmd));
1204
1205        if (flags & FOLL_WRITE && !pmd_write(*pmd))
1206                goto out;
1207
1208        /* Avoid dumping huge zero page */
1209        if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1210                return ERR_PTR(-EFAULT);
1211
1212        /* Full NUMA hinting faults to serialise migration in fault paths */
1213        if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
1214                goto out;
1215
1216        page = pmd_page(*pmd);
1217        VM_BUG_ON_PAGE(!PageHead(page), page);
1218        if (flags & FOLL_TOUCH) {
1219                pmd_t _pmd;
1220                /*
1221                 * We should set the dirty bit only for FOLL_WRITE, but
1222                 * for now the dirty bit in the pmd is meaningless.
1223                 * If the dirty bit ever becomes meaningful and we only
1224                 * set it for FOLL_WRITE, an atomic set_bit will be
1225                 * required on the pmd to set the young bit, instead of
1226                 * the current set_pmd_at.
1227                 */
1228                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1229                if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1230                                          pmd, _pmd,  1))
1231                        update_mmu_cache_pmd(vma, addr, pmd);
1232        }
1233        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1234                if (page->mapping && trylock_page(page)) {
1235                        lru_add_drain();
1236                        if (page->mapping)
1237                                mlock_vma_page(page);
1238                        unlock_page(page);
1239                }
1240        }
1241        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1242        VM_BUG_ON_PAGE(!PageCompound(page), page);
1243        if (flags & FOLL_GET)
1244                get_page_foll(page);
1245
1246out:
1247        return page;
1248}
1249
1250/* NUMA hinting page fault entry point for trans huge pmds */
1251int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1252                                unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1253{
1254        spinlock_t *ptl;
1255        struct anon_vma *anon_vma = NULL;
1256        struct page *page;
1257        unsigned long haddr = addr & HPAGE_PMD_MASK;
1258        int page_nid = -1, this_nid = numa_node_id();
1259        int target_nid, last_cpupid = -1;
1260        bool page_locked;
1261        bool migrated = false;
1262        int flags = 0;
1263
1264        ptl = pmd_lock(mm, pmdp);
1265        if (unlikely(!pmd_same(pmd, *pmdp)))
1266                goto out_unlock;
1267
1268        /*
1269         * If there are potential migrations, wait for completion and retry
1270         * without disrupting NUMA hinting information. Do not relock and
1271         * check_same as the page may no longer be mapped.
1272         */
1273        if (unlikely(pmd_trans_migrating(*pmdp))) {
1274                spin_unlock(ptl);
1275                wait_migrate_huge_page(vma->anon_vma, pmdp);
1276                goto out;
1277        }
1278
1279        page = pmd_page(pmd);
1280        BUG_ON(is_huge_zero_page(page));
1281        page_nid = page_to_nid(page);
1282        last_cpupid = page_cpupid_last(page);
1283        count_vm_numa_event(NUMA_HINT_FAULTS);
1284        if (page_nid == this_nid) {
1285                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1286                flags |= TNF_FAULT_LOCAL;
1287        }
1288
1289        /*
1290         * Avoid grouping on DSO/COW pages in particular, and RO pages
1291         * in general. RO pages shouldn't hurt as much anyway, since
1292         * they can be in a shared cache state.
1293         */
1294        if (!pmd_write(pmd))
1295                flags |= TNF_NO_GROUP;
1296
1297        /*
1298         * Acquire the page lock to serialise THP migrations but avoid dropping
1299         * page_table_lock if at all possible
1300         */
1301        page_locked = trylock_page(page);
1302        target_nid = mpol_misplaced(page, vma, haddr);
1303        if (target_nid == -1) {
1304                /* If the page was locked, there are no parallel migrations */
1305                if (page_locked)
1306                        goto clear_pmdnuma;
1307        }
1308
1309        /* Migration could have started since the pmd_trans_migrating check */
1310        if (!page_locked) {
1311                spin_unlock(ptl);
1312                wait_on_page_locked(page);
1313                page_nid = -1;
1314                goto out;
1315        }
1316
1317        /*
1318         * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1319         * to serialise splits
1320         */
1321        get_page(page);
1322        spin_unlock(ptl);
1323        anon_vma = page_lock_anon_vma_read(page);
1324
1325        /* Confirm the PMD did not change while page_table_lock was released */
1326        spin_lock(ptl);
1327        if (unlikely(!pmd_same(pmd, *pmdp))) {
1328                unlock_page(page);
1329                put_page(page);
1330                page_nid = -1;
1331                goto out_unlock;
1332        }
1333
1334        /* Bail if we fail to protect against THP splits for any reason */
1335        if (unlikely(!anon_vma)) {
1336                put_page(page);
1337                page_nid = -1;
1338                goto clear_pmdnuma;
1339        }
1340
1341        /*
1342         * Migrate the THP to the requested node; this returns with the page
1343         * unlocked and pmd_numa cleared.
1344         */
1345        spin_unlock(ptl);
1346        migrated = migrate_misplaced_transhuge_page(mm, vma,
1347                                pmdp, pmd, addr, page, target_nid);
1348        if (migrated) {
1349                flags |= TNF_MIGRATED;
1350                page_nid = target_nid;
1351        }
1352
1353        goto out;
1354clear_pmdnuma:
1355        BUG_ON(!PageLocked(page));
1356        pmd = pmd_mknonnuma(pmd);
1357        set_pmd_at(mm, haddr, pmdp, pmd);
1358        VM_BUG_ON(pmd_numa(*pmdp));
1359        update_mmu_cache_pmd(vma, addr, pmdp);
1360        unlock_page(page);
1361out_unlock:
1362        spin_unlock(ptl);
1363
1364out:
1365        if (anon_vma)
1366                page_unlock_anon_vma_read(anon_vma);
1367
1368        if (page_nid != -1)
1369                task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
1370
1371        return 0;
1372}
1373
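/*
 * Unmap a huge pmd for the zap/unmap paths: clear the pmd, drop the rmap
 * and counters and free the deposited page table. The huge zero page is
 * only unpinned here, never freed.
 */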
1374int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1375                 pmd_t *pmd, unsigned long addr)
1376{
1377        spinlock_t *ptl;
1378        int ret = 0;
1379
1380        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1381                struct page *page;
1382                pgtable_t pgtable;
1383                pmd_t orig_pmd;
1384                /*
1385                 * For architectures like ppc64 we look at the deposited pgtable
1386                 * when calling pmdp_get_and_clear, so do the
1387                 * pgtable_trans_huge_withdraw only after finishing the pmdp
1388                 * related operations.
1389                 */
1390                orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
1391                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1392                pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
1393                if (is_huge_zero_pmd(orig_pmd)) {
1394                        atomic_long_dec(&tlb->mm->nr_ptes);
1395                        spin_unlock(ptl);
1396                        put_huge_zero_page();
1397                } else {
1398                        page = pmd_page(orig_pmd);
1399                        page_remove_rmap(page);
1400                        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1401                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1402                        VM_BUG_ON_PAGE(!PageHead(page), page);
1403                        atomic_long_dec(&tlb->mm->nr_ptes);
1404                        spin_unlock(ptl);
1405                        tlb_remove_page(tlb, page);
1406                }
1407                pte_free(tlb->mm, pgtable);
1408                ret = 1;
1409        }
1410        return ret;
1411}
1412
1413int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1414                unsigned long addr, unsigned long end,
1415                unsigned char *vec)
1416{
1417        spinlock_t *ptl;
1418        int ret = 0;
1419
1420        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1421                /*
1422                 * All logical pages in the range are present
1423                 * if backed by a huge page.
1424                 */
1425                spin_unlock(ptl);
1426                memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1427                ret = 1;
1428        }
1429
1430        return ret;
1431}
1432
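/*
 * mremap() support: move a huge pmd from old_addr to new_addr when both
 * addresses are pmd aligned and the destination vma allows hugepages.
 * Returns 0 when the pmd could not be moved as a whole.
 */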
1433int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1434                  unsigned long old_addr,
1435                  unsigned long new_addr, unsigned long old_end,
1436                  pmd_t *old_pmd, pmd_t *new_pmd)
1437{
1438        spinlock_t *old_ptl, *new_ptl;
1439        int ret = 0;
1440        pmd_t pmd;
1441
1442        struct mm_struct *mm = vma->vm_mm;
1443
1444        if ((old_addr & ~HPAGE_PMD_MASK) ||
1445            (new_addr & ~HPAGE_PMD_MASK) ||
1446            old_end - old_addr < HPAGE_PMD_SIZE ||
1447            (new_vma->vm_flags & VM_NOHUGEPAGE))
1448                goto out;
1449
1450        /*
1451         * The destination pmd shouldn't be established, free_pgtables()
1452         * should have released it.
1453         */
1454        if (WARN_ON(!pmd_none(*new_pmd))) {
1455                VM_BUG_ON(pmd_trans_huge(*new_pmd));
1456                goto out;
1457        }
1458
1459        /*
1460         * We don't have to worry about the ordering of src and dst
1461         * ptlocks because exclusive mmap_sem prevents deadlock.
1462         */
1463        ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
1464        if (ret == 1) {
1465                new_ptl = pmd_lockptr(mm, new_pmd);
1466                if (new_ptl != old_ptl)
1467                        spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1468                pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1469                VM_BUG_ON(!pmd_none(*new_pmd));
1470
1471                if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1472                        pgtable_t pgtable;
1473                        pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1474                        pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1475                }
1476                set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1477                if (new_ptl != old_ptl)
1478                        spin_unlock(new_ptl);
1479                spin_unlock(old_ptl);
1480        }
1481out:
1482        return ret;
1483}
1484
1485/*
1486 * Returns
1487 *  - 0 if PMD could not be locked
1488 *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1489 *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1490 */
1491int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1492                unsigned long addr, pgprot_t newprot, int prot_numa)
1493{
1494        struct mm_struct *mm = vma->vm_mm;
1495        spinlock_t *ptl;
1496        int ret = 0;
1497
1498        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1499                pmd_t entry;
1500                ret = 1;
1501                if (!prot_numa) {
1502                        entry = pmdp_get_and_clear(mm, addr, pmd);
1503                        if (pmd_numa(entry))
1504                                entry = pmd_mknonnuma(entry);
1505                        entry = pmd_modify(entry, newprot);
1506                        ret = HPAGE_PMD_NR;
1507                        set_pmd_at(mm, addr, pmd, entry);
1508                        BUG_ON(pmd_write(entry));
1509                } else {
1510                        struct page *page = pmd_page(*pmd);
1511
1512                        /*
1513                         * Do not trap faults against the zero page. The
1514                         * read-only data is likely to be read-cached on the
1515                         * local CPU cache and it is less useful to know about
1516                         * local vs remote hits on the zero page.
1517                         */
1518                        if (!is_huge_zero_page(page) &&
1519                            !pmd_numa(*pmd)) {
1520                                pmdp_set_numa(mm, addr, pmd);
1521                                ret = HPAGE_PMD_NR;
1522                        }
1523                }
1524                spin_unlock(ptl);
1525        }
1526
1527        return ret;
1528}
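/*
 * Illustrative caller sketch (not part of the original file): roughly how a
 * change_protection()-style loop can consume the three return values of
 * change_huge_pmd().  The surrounding variables (pages, addr, next, newprot,
 * prot_numa) are hypothetical.
 *
 *	if (pmd_trans_huge(*pmd)) {
 *		if (next - addr != HPAGE_PMD_SIZE)
 *			split_huge_page_pmd(vma, addr, pmd);
 *		else {
 *			int nr_ptes = change_huge_pmd(vma, pmd, addr,
 *						      newprot, prot_numa);
 *			if (nr_ptes) {
 *				if (nr_ptes == HPAGE_PMD_NR)
 *					pages += HPAGE_PMD_NR;
 *				continue;	<- huge pmd fully handled
 *			}
 *		}
 *		<- otherwise fall through and handle the range pte by pte
 *	}
 */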
1529
1530/*
1531 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1532 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
1533 *
1534 * Note that if it returns 1, this routine returns without unlocking the page
1535 * table lock, so the caller must unlock it (it is passed back via @ptl).
1536 */
1537int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
1538                spinlock_t **ptl)
1539{
1540        *ptl = pmd_lock(vma->vm_mm, pmd);
1541        if (likely(pmd_trans_huge(*pmd))) {
1542                if (unlikely(pmd_trans_splitting(*pmd))) {
1543                        spin_unlock(*ptl);
1544                        wait_split_huge_page(vma->anon_vma, pmd);
1545                        return -1;
1546                } else {
1547                        /* Thp mapped by 'pmd' is stable, so we can
1548                         * handle it as it is. */
1549                        return 1;
1550                }
1551        }
1552        spin_unlock(*ptl);
1553        return 0;
1554}
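/*
 * Illustrative caller sketch (not from the original source): the usual
 * pattern for consuming the 1 / -1 / 0 convention described above.
 *
 *	switch (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
 *	case 1:			stable thp, ptl is held
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *		break;
 *	case -1:		we waited for the split to finish
 *	case 0:			not a huge pmd
 *		... fall back to the regular pte path ...
 *		break;
 *	}
 */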
1555
1556/*
1557 * This function returns whether a given @page is mapped onto the @address
1558 * in the virtual space of @mm.
1559 *
1560 * When it's true, this function returns the pmd with the page table lock
1561 * held, and passes the lock back to the caller via @ptl.
1562 * If it's false, it returns NULL without holding the page table lock.
1563 */
1564pmd_t *page_check_address_pmd(struct page *page,
1565                              struct mm_struct *mm,
1566                              unsigned long address,
1567                              enum page_check_address_pmd_flag flag,
1568                              spinlock_t **ptl)
1569{
1570        pgd_t *pgd;
1571        pud_t *pud;
1572        pmd_t *pmd;
1573
1574        if (address & ~HPAGE_PMD_MASK)
1575                return NULL;
1576
1577        pgd = pgd_offset(mm, address);
1578        if (!pgd_present(*pgd))
1579                return NULL;
1580        pud = pud_offset(pgd, address);
1581        if (!pud_present(*pud))
1582                return NULL;
1583        pmd = pmd_offset(pud, address);
1584
1585        *ptl = pmd_lock(mm, pmd);
1586        if (!pmd_present(*pmd))
1587                goto unlock;
1588        if (pmd_page(*pmd) != page)
1589                goto unlock;
1590        /*
1591         * split_vma() may create temporary aliased mappings. There is
1592         * no risk as long as all huge pmd are found and have their
1593         * splitting bit set before __split_huge_page_refcount
1594         * runs. Finding the same huge pmd more than once during the
1595         * same rmap walk is not a problem.
1596         */
1597        if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1598            pmd_trans_splitting(*pmd))
1599                goto unlock;
1600        if (pmd_trans_huge(*pmd)) {
1601                VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1602                          !pmd_trans_splitting(*pmd));
1603                return pmd;
1604        }
1605unlock:
1606        spin_unlock(*ptl);
1607        return NULL;
1608}
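/*
 * Illustrative caller sketch (not from the original source): on success the
 * page table lock is returned locked through @ptl and must be dropped by
 * the caller once it is done with the pmd.
 *
 *	pmd = page_check_address_pmd(page, mm, addr,
 *			PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
 *	if (pmd) {
 *		... inspect or modify *pmd ...
 *		spin_unlock(ptl);
 *	}
 */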
1609
1610static int __split_huge_page_splitting(struct page *page,
1611                                       struct vm_area_struct *vma,
1612                                       unsigned long address)
1613{
1614        struct mm_struct *mm = vma->vm_mm;
1615        spinlock_t *ptl;
1616        pmd_t *pmd;
1617        int ret = 0;
1618        /* For mmu_notifiers */
1619        const unsigned long mmun_start = address;
1620        const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
1621
1622        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1623        pmd = page_check_address_pmd(page, mm, address,
1624                        PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
1625        if (pmd) {
1626                /*
1627                 * We can't temporarily set the pmd to null in order
1628                 * to split it, the pmd must remain marked huge at all
1629                 * times or the VM won't take the pmd_trans_huge paths
1630                 * and it won't wait on the anon_vma->root->rwsem to
1631                 * serialize against split_huge_page*.
1632                 */
1633                pmdp_splitting_flush(vma, address, pmd);
1634                ret = 1;
1635                spin_unlock(ptl);
1636        }
1637        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1638
1639        return ret;
1640}
1641
1642static void __split_huge_page_refcount(struct page *page,
1643                                       struct list_head *list)
1644{
1645        int i;
1646        struct zone *zone = page_zone(page);
1647        struct lruvec *lruvec;
1648        int tail_count = 0;
1649
1650        /* prevent PageLRU from going away from under us, and freeze lru stats */
1651        spin_lock_irq(&zone->lru_lock);
1652        lruvec = mem_cgroup_page_lruvec(page, zone);
1653
1654        compound_lock(page);
1655        /* complete memcg work before adding pages to LRU */
1656        mem_cgroup_split_huge_fixup(page);
1657
1658        for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
1659                struct page *page_tail = page + i;
1660
1661                /* tail_page->_mapcount cannot change */
1662                BUG_ON(page_mapcount(page_tail) < 0);
1663                tail_count += page_mapcount(page_tail);
1664                /* check for overflow */
1665                BUG_ON(tail_count < 0);
1666                BUG_ON(atomic_read(&page_tail->_count) != 0);
1667                /*
1668                 * tail_page->_count is zero and not changing from
1669                 * under us. But get_page_unless_zero() may be running
1670                 * from under us on the tail_page. If we used
1671                 * atomic_set() below instead of atomic_add(), we
1672                 * would then run atomic_set() concurrently with
1673                 * get_page_unless_zero(), and atomic_set() is
1674                 * implemented in C not using locked ops. spin_unlock
1675                 * on x86 sometimes uses locked ops because of PPro
1676                 * errata 66, 92, so unless somebody can guarantee
1677                 * atomic_set() here would be safe on all archs (and
1678                 * not only on x86), it's safer to use atomic_add().
1679                 */
1680                atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1681                           &page_tail->_count);
1682
1683                /* after clearing PageTail the gup refcount can be released */
1684                smp_mb();
1685
1686                /*
1687                 * retain the hwpoison flag of the poisoned tail page:
1688                 *   this fixes the wrong process being killed on the
1689                 *   guest (KVM) by memory-failure.
1690                 */
1691                page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1692                page_tail->flags |= (page->flags &
1693                                     ((1L << PG_referenced) |
1694                                      (1L << PG_swapbacked) |
1695                                      (1L << PG_mlocked) |
1696                                      (1L << PG_uptodate) |
1697                                      (1L << PG_active) |
1698                                      (1L << PG_unevictable)));
1699                page_tail->flags |= (1L << PG_dirty);
1700
1701                /* clear PageTail before overwriting first_page */
1702                smp_wmb();
1703
1704                /*
1705                 * __split_huge_page_splitting() already set the
1706                 * splitting bit in all pmd that could map this
1707                 * hugepage, that will ensure no CPU can alter the
1708                 * mapcount on the head page. The mapcount is only
1709                 * accounted in the head page and it has to be
1710                 * transferred to all tail pages in the below code. So
1711                 * for this code to be safe, during the split the
1712                 * mapcount can't change. But that doesn't mean userland
1713                 * can't keep changing and reading the page contents
1714                 * while we transfer the mapcount, so the pmd splitting
1715                 * status is achieved by setting a reserved bit in the
1716                 * pmd, not by clearing the present bit.
1717                 */
1718                page_tail->_mapcount = page->_mapcount;
1719
1720                BUG_ON(page_tail->mapping);
1721                page_tail->mapping = page->mapping;
1722
1723                page_tail->index = page->index + i;
1724                page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
1725
1726                BUG_ON(!PageAnon(page_tail));
1727                BUG_ON(!PageUptodate(page_tail));
1728                BUG_ON(!PageDirty(page_tail));
1729                BUG_ON(!PageSwapBacked(page_tail));
1730
1731                lru_add_page_tail(page, page_tail, lruvec, list);
1732        }
1733        atomic_sub(tail_count, &page->_count);
1734        BUG_ON(atomic_read(&page->_count) <= 0);
1735
1736        __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
1737
1738        ClearPageCompound(page);
1739        compound_unlock(page);
1740        spin_unlock_irq(&zone->lru_lock);
1741
1742        for (i = 1; i < HPAGE_PMD_NR; i++) {
1743                struct page *page_tail = page + i;
1744                BUG_ON(page_count(page_tail) <= 0);
1745                /*
1746                 * Tail pages may be freed if there wasn't any mapping
1747                 * e.g. if add_to_swap() is running on an lru page that
1748                 * had its mapping zapped. And freeing these pages
1749                 * requires taking the lru_lock so we do the put_page
1750                 * of the tail pages after the split is complete.
1751                 */
1752                put_page(page_tail);
1753        }
1754
1755        /*
1756         * Only the head page (now become a regular page) is required
1757         * to be pinned by the caller.
1758         */
1759        BUG_ON(page_count(page) <= 0);
1760}
1761
1762static int __split_huge_page_map(struct page *page,
1763                                 struct vm_area_struct *vma,
1764                                 unsigned long address)
1765{
1766        struct mm_struct *mm = vma->vm_mm;
1767        spinlock_t *ptl;
1768        pmd_t *pmd, _pmd;
1769        int ret = 0, i;
1770        pgtable_t pgtable;
1771        unsigned long haddr;
1772
1773        pmd = page_check_address_pmd(page, mm, address,
1774                        PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
1775        if (pmd) {
1776                pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1777                pmd_populate(mm, &_pmd, pgtable);
1778
1779                haddr = address;
1780                for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1781                        pte_t *pte, entry;
1782                        BUG_ON(PageCompound(page+i));
1783                        entry = mk_pte(page + i, vma->vm_page_prot);
1784                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1785                        if (!pmd_write(*pmd))
1786                                entry = pte_wrprotect(entry);
1787                        else
1788                                BUG_ON(page_mapcount(page) != 1);
1789                        if (!pmd_young(*pmd))
1790                                entry = pte_mkold(entry);
1791                        if (pmd_numa(*pmd))
1792                                entry = pte_mknuma(entry);
1793                        pte = pte_offset_map(&_pmd, haddr);
1794                        BUG_ON(!pte_none(*pte));
1795                        set_pte_at(mm, haddr, pte, entry);
1796                        pte_unmap(pte);
1797                }
1798
1799                smp_wmb(); /* make pte visible before pmd */
1800                /*
1801                 * Up to this point the pmd is present and huge and
1802                 * userland has full access to the hugepage
1803                 * during the split (which happens in place). If we
1804                 * overwrite the pmd with the not-huge version
1805                 * pointing to the pte here (which of course we could
1806                 * if all CPUs were bug free), userland could trigger
1807                 * a small page size TLB miss on the small sized TLB
1808                 * while the hugepage TLB entry is still established
1809                 * in the huge TLB. Some CPUs don't like that. See
1810                 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1811                 * Erratum 383 on page 93. Intel should be safe but
1812                 * also warns that it's only safe if the permission
1813                 * and cache attributes of the two entries loaded in
1814                 * the two TLBs are identical (which should be the case
1815                 * here). But it is generally safer to never allow
1816                 * small and huge TLB entries for the same virtual
1817                 * address to be loaded simultaneously. So instead of
1818                 * doing "pmd_populate(); flush_tlb_range();" we first
1819                 * mark the current pmd notpresent (atomically because
1820                 * here the pmd_trans_huge and pmd_trans_splitting
1821                 * must remain set at all times on the pmd until the
1822                 * split is complete for this pmd), then we flush the
1823                 * SMP TLB and finally we write the non-huge version
1824                 * of the pmd entry with pmd_populate.
1825                 */
1826                pmdp_invalidate(vma, address, pmd);
1827                pmd_populate(mm, pmd, pgtable);
1828                ret = 1;
1829                spin_unlock(ptl);
1830        }
1831
1832        return ret;
1833}
1834
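/*
 * Summary added for readability (not in the original source): splitting is
 * done in three passes over the rmap.  First __split_huge_page_splitting()
 * marks every pmd mapping the page as splitting, then
 * __split_huge_page_refcount() distributes refcounts, mapcounts and flags
 * to the tail pages, and finally __split_huge_page_map() replaces each huge
 * pmd with a regular page table mapping the now-independent pages.
 */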
1835/* must be called with anon_vma->root->rwsem held */
1836static void __split_huge_page(struct page *page,
1837                              struct anon_vma *anon_vma,
1838                              struct list_head *list)
1839{
1840        int mapcount, mapcount2;
1841        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1842        struct anon_vma_chain *avc;
1843
1844        BUG_ON(!PageHead(page));
1845        BUG_ON(PageTail(page));
1846
1847        mapcount = 0;
1848        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1849                struct vm_area_struct *vma = avc->vma;
1850                unsigned long addr = vma_address(page, vma);
1851                BUG_ON(is_vma_temporary_stack(vma));
1852                mapcount += __split_huge_page_splitting(page, vma, addr);
1853        }
1854        /*
1855         * It is critical that new vmas are added to the tail of the
1856         * anon_vma list. This guarantees that if copy_huge_pmd() runs
1857         * and establishes a child pmd before
1858         * __split_huge_page_splitting() freezes the parent pmd (so if
1859         * we fail to prevent copy_huge_pmd() from running until the
1860         * whole __split_huge_page() is complete), we will still see
1861         * the newly established pmd of the child later during the
1862         * walk, to be able to set it as pmd_trans_splitting too.
1863         */
1864        if (mapcount != page_mapcount(page)) {
1865                pr_err("mapcount %d page_mapcount %d\n",
1866                        mapcount, page_mapcount(page));
1867                BUG();
1868        }
1869
1870        __split_huge_page_refcount(page, list);
1871
1872        mapcount2 = 0;
1873        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1874                struct vm_area_struct *vma = avc->vma;
1875                unsigned long addr = vma_address(page, vma);
1876                BUG_ON(is_vma_temporary_stack(vma));
1877                mapcount2 += __split_huge_page_map(page, vma, addr);
1878        }
1879        if (mapcount != mapcount2) {
1880                pr_err("mapcount %d mapcount2 %d page_mapcount %d\n",
1881                        mapcount, mapcount2, page_mapcount(page));
1882                BUG();
1883        }
1884}
1885
1886/*
1887 * Split a hugepage into normal pages. This doesn't change the position of head
1888 * page. If @list is null, tail pages will be added to LRU list, otherwise, to
1889 * @list. Both head page and tail pages will inherit mapping, flags, and so on
1890 * from the hugepage.
1891 * Return 0 if the hugepage is split successfully, otherwise return 1.
1892 */
1893int split_huge_page_to_list(struct page *page, struct list_head *list)
1894{
1895        struct anon_vma *anon_vma;
1896        int ret = 1;
1897
1898        BUG_ON(is_huge_zero_page(page));
1899        BUG_ON(!PageAnon(page));
1900
1901        /*
1902         * The caller does not necessarily hold an mmap_sem that would prevent
1903         * the anon_vma disappearing, so we first take a reference to it
1904         * and then lock the anon_vma for write. This is similar to
1905         * page_lock_anon_vma_read except the write lock is taken to serialise
1906         * against parallel split or collapse operations.
1907         */
1908        anon_vma = page_get_anon_vma(page);
1909        if (!anon_vma)
1910                goto out;
1911        anon_vma_lock_write(anon_vma);
1912
1913        ret = 0;
1914        if (!PageCompound(page))
1915                goto out_unlock;
1916
1917        BUG_ON(!PageSwapBacked(page));
1918        __split_huge_page(page, anon_vma, list);
1919        count_vm_event(THP_SPLIT);
1920
1921        BUG_ON(PageCompound(page));
1922out_unlock:
1923        anon_vma_unlock_write(anon_vma);
1924        put_anon_vma(anon_vma);
1925out:
1926        return ret;
1927}
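/*
 * Illustrative usage note (not from this file): split_huge_page(page) is
 * expected to be a thin wrapper that calls split_huge_page_to_list() with
 * list == NULL, so callers that only need the split can simply do:
 *
 *	if (PageTransHuge(page) && unlikely(split_huge_page(page)))
 *		goto keep;	<- split failed, page is still a huge page
 */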
1928
1929#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
1930
1931int hugepage_madvise(struct vm_area_struct *vma,
1932                     unsigned long *vm_flags, int advice)
1933{
1934        switch (advice) {
1935        case MADV_HUGEPAGE:
1936#ifdef CONFIG_S390
1937                /*
1938                 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1939                 * can't handle this properly after s390_enable_sie, so we simply
1940                 * ignore the madvise to prevent qemu from causing a SIGSEGV.
1941                 */
1942                if (mm_has_pgste(vma->vm_mm))
1943                        return 0;
1944#endif
1945                /*
1946                 * Be somewhat over-protective like KSM for now!
1947                 */
1948                if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1949                        return -EINVAL;
1950                *vm_flags &= ~VM_NOHUGEPAGE;
1951                *vm_flags |= VM_HUGEPAGE;
1952                /*
1953                 * If the vma becomes good for khugepaged to scan,
1954                 * register it here without waiting for a page fault that
1955                 * may not happen any time soon.
1956                 */
1957                if (unlikely(khugepaged_enter_vma_merge(vma)))
1958                        return -ENOMEM;
1959                break;
1960        case MADV_NOHUGEPAGE:
1961                /*
1962                 * Be somewhat over-protective like KSM for now!
1963                 */
1964                if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1965                        return -EINVAL;
1966                *vm_flags &= ~VM_HUGEPAGE;
1967                *vm_flags |= VM_NOHUGEPAGE;
1968                /*
1969                 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1970                 * this vma even if we leave the mm registered in khugepaged if
1971                 * it got registered before VM_NOHUGEPAGE was set.
1972                 */
1973                break;
1974        }
1975
1976        return 0;
1977}
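/*
 * Userspace view (illustrative example, not part of the original source):
 * the two advice values handled above are set per-range with madvise(2),
 * e.g.:
 *
 *	madvise(addr, length, MADV_HUGEPAGE);	prefer THP for this range
 *	madvise(addr, length, MADV_NOHUGEPAGE);	avoid THP for this range
 */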
1978
1979static int __init khugepaged_slab_init(void)
1980{
1981        mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1982                                          sizeof(struct mm_slot),
1983                                          __alignof__(struct mm_slot), 0, NULL);
1984        if (!mm_slot_cache)
1985                return -ENOMEM;
1986
1987        return 0;
1988}
1989
1990static inline struct mm_slot *alloc_mm_slot(void)
1991{
1992        if (!mm_slot_cache)     /* initialization failed */
1993                return NULL;
1994        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1995}
1996
1997static inline void free_mm_slot(struct mm_slot *mm_slot)
1998{
1999        kmem_cache_free(mm_slot_cache, mm_slot);
2000}
2001
2002static struct mm_slot *get_mm_slot(struct mm_struct *mm)
2003{
2004        struct mm_slot *mm_slot;
2005
2006        hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
2007                if (mm == mm_slot->mm)
2008                        return mm_slot;
2009
2010        return NULL;
2011}
2012
2013static void insert_to_mm_slots_hash(struct mm_struct *mm,
2014                                    struct mm_slot *mm_slot)
2015{
2016        mm_slot->mm = mm;
2017        hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
2018}
2019
2020static inline int khugepaged_test_exit(struct mm_struct *mm)
2021{
2022        return atomic_read(&mm->mm_users) == 0;
2023}
2024
2025int __khugepaged_enter(struct mm_struct *mm)
2026{
2027        struct mm_slot *mm_slot;
2028        int wakeup;
2029
2030        mm_slot = alloc_mm_slot();
2031        if (!mm_slot)
2032                return -ENOMEM;
2033
2034        /* __khugepaged_exit() must not run from under us */
2035        VM_BUG_ON(khugepaged_test_exit(mm));
2036        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
2037                free_mm_slot(mm_slot);
2038                return 0;
2039        }
2040
2041        spin_lock(&khugepaged_mm_lock);
2042        insert_to_mm_slots_hash(mm, mm_slot);
2043        /*
2044         * Insert just behind the scanning cursor, to let the area settle
2045         * down a little.
2046         */
2047        wakeup = list_empty(&khugepaged_scan.mm_head);
2048        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
2049        spin_unlock(&khugepaged_mm_lock);
2050
2051        atomic_inc(&mm->mm_count);
2052        if (wakeup)
2053                wake_up_interruptible(&khugepaged_wait);
2054
2055        return 0;
2056}
2057
2058int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
2059{
2060        unsigned long hstart, hend;
2061        if (!vma->anon_vma)
2062                /*
2063                 * Not yet faulted in so we will register later in the
2064                 * page fault if needed.
2065                 */
2066                return 0;
2067        if (vma->vm_ops)
2068                /* khugepaged not yet working on file or special mappings */
2069                return 0;
2070        VM_BUG_ON(vma->vm_flags & VM_NO_THP);
2071        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2072        hend = vma->vm_end & HPAGE_PMD_MASK;
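        /*
         * Worked example (added for clarity, assuming 2MB huge pages): a
         * vma spanning 0x201000-0x9ff000 gives hstart = 0x400000 and
         * hend = 0x800000, i.e. at least one fully aligned huge page
         * range fits, so the mm is worth registering with khugepaged.
         */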
2073        if (hstart < hend)
2074                return khugepaged_enter(vma);
2075        return 0;
2076}
2077
2078void __khugepaged_exit(struct mm_struct *mm)
2079{
2080        struct mm_slot *mm_slot;
2081        int free = 0;
2082
2083        spin_lock(&khugepaged_mm_lock);
2084        mm_slot = get_mm_slot(mm);
2085        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
2086                hash_del(&mm_slot->hash);
2087                list_del(&mm_slot->mm_node);
2088                free = 1;
2089        }
2090        spin_unlock(&khugepaged_mm_lock);
2091
2092        if (free) {
2093                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2094                free_mm_slot(mm_slot);
2095                mmdrop(mm);
2096        } else if (mm_slot) {
2097                /*
2098                 * This is required to serialize against
2099                 * khugepaged_test_exit() (which is guaranteed to run
2100                 * under mmap_sem read mode). Stop here (all pagetables
2101                 * will be destroyed after we return) until khugepaged
2102                 * has finished working on the pagetables under the
2103                 * mmap_sem.
2104                 */
2105                down_write(&mm->mmap_sem);
2106                up_write(&mm->mmap_sem);
2107        }
2108}
2109
2110static void release_pte_page(struct page *page)
2111{
2112        /* 0 stands for page_is_file_cache(page) == false */
2113        dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
2114        unlock_page(page);
2115        putback_lru_page(page);
2116}
2117
2118static void release_pte_pages(pte_t *pte, pte_t *_pte)
2119{
2120        while (--_pte >= pte) {
2121                pte_t pteval = *_pte;
2122                if (!pte_none(pteval))
2123                        release_pte_page(pte_page(pteval));
2124        }
2125}
2126
2127static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2128                                        unsigned long address,
2129                                        pte_t *pte)
2130{
2131        struct page *page;
2132        pte_t *_pte;
2133        int referenced = 0, none = 0;
2134        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2135             _pte++, address += PAGE_SIZE) {
2136                pte_t pteval = *_pte;
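                /*
                 * Note (added for clarity): up to khugepaged_max_ptes_none
                 * empty ptes are tolerated in the range; one more than that
                 * and the collapse of this pmd range is abandoned.
                 */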
2137                if (pte_none(pteval)) {
2138                        if (++none <= khugepaged_max_ptes_none)
2139                                continue;
2140                        else
2141                                goto out;
2142                }
2143                if (!pte_present(pteval) || !pte_write(pteval))
2144                        goto out;
2145                page = vm_normal_page(vma, address, pteval);
2146                if (unlikely(!page))
2147                        goto out;
2148
2149                VM_BUG_ON_PAGE(PageCompound(page), page);
2150                VM_BUG_ON_PAGE(!PageAnon(page), page);
2151                VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
2152
2153                /* cannot use mapcount: can't collapse if there's a gup pin */
2154                if (page_count(page) != 1)
2155                        goto out;
2156                /*
2157                 * We can do it before isolate_lru_page because the
2158                 * page can't be freed from under us. NOTE: PG_lock
2159                 * is needed to serialize against split_huge_page
2160                 * when invoked from the VM.
2161                 */
2162                if (!trylock_page(page))
2163                        goto out;
2164                /*
2165                 * Isolate the page to avoid collapsing an hugepage
2166                 * currently in use by the VM.
2167                 */
2168                if (isolate_lru_page(page)) {
2169                        unlock_page(page);
2170                        goto out;
2171                }
2172                /* 0 stands for page_is_file_cache(page) == false */
2173                inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2174                VM_BUG_ON_PAGE(!PageLocked(page), page);
2175                VM_BUG_ON_PAGE(PageLRU(page), page);
2176
2177                /* If no mapped pte is young, don't collapse the page */
2178                if (pte_young(pteval) || PageReferenced(page) ||
2179                    mmu_notifier_test_young(vma->vm_mm, address))
2180                        referenced = 1;
2181        }
2182        if (likely(referenced))
2183                return 1;
2184out:
2185        release_pte_pages(pte, _pte);
2186        return 0;
2187}
2188
2189static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2190                                      struct vm_area_struct *vma,
2191                                      unsigned long address,
2192                                      spinlock_t *ptl)
2193{
2194        pte_t *_pte;
2195        for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2196                pte_t pteval = *_pte;
2197                struct page *src_page;
2198
2199                if (pte_none(pteval)) {
2200                        clear_user_highpage(page, address);
2201                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
2202                } else {
2203                        src_page = pte_page(pteval);
2204                        copy_user_highpage(page, src_page, address, vma);
2205                        VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
2206                        release_pte_page(src_page);
2207                        /*
2208                         * ptl mostly unnecessary, but preempt has to
2209                         * be disabled to update the per-cpu stats
2210                         * inside page_remove_rmap().
2211                         */
2212                        spin_lock(ptl);
2213                        /*
2214                         * paravirt calls inside pte_clear here are
2215                         * superfluous.
2216                         */
2217                        pte_clear(vma->vm_mm, address, _pte);
2218                        page_remove_rmap(src_page);
2219                        spin_unlock(ptl);
2220                        free_page_and_swap_cache(src_page);
2221                }
2222
2223                address += PAGE_SIZE;
2224                page++;
2225        }
2226}
2227
2228static void khugepaged_alloc_sleep(void)
2229{
2230        wait_event_freezable_timeout(khugepaged_wait, false,
2231                        msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2232}
2233
2234static int khugepaged_node_load[MAX_NUMNODES];
2235
2236#ifdef CONFIG_NUMA
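/*
 * Worked example (added for clarity, not in the original source): with
 * khugepaged_node_load[] = { 3, 5, 5 } the first pass picks node 1 (first
 * node with the maximum hit count).  If node 1 was also the previous
 * target, the second pass advances to node 2, which has the same count,
 * so nodes with equal load are used round-robin across scans.
 */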
2237static int khugepaged_find_target_node(void)
2238{
2239        static int last_khugepaged_target_node = NUMA_NO_NODE;
2240        int nid, target_node = 0, max_value = 0;
2241
2242        /* find first node with max normal pages hit */
2243        for (nid = 0; nid < MAX_NUMNODES; nid++)
2244                if (khugepaged_node_load[nid] > max_value) {
2245                        max_value = khugepaged_node_load[nid];
2246                        target_node = nid;
2247                }
2248
2249        /* do some balancing if several nodes have the same hit record */
2250        if (target_node <= last_khugepaged_target_node)
2251                for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2252                                nid++)
2253                        if (max_value == khugepaged_node_load[nid]) {
2254                                target_node = nid;
2255                                break;
2256                        }
2257
2258        last_khugepaged_target_node = target_node;
2259        return target_node;
2260}
2261
2262static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2263{
2264        if (IS_ERR(*hpage)) {
2265                if (!*wait)
2266                        return false;
2267
2268                *wait = false;
2269                *hpage = NULL;
2270                khugepaged_alloc_sleep();
2271        } else if (*hpage) {
2272                put_page(*hpage);
2273                *hpage = NULL;
2274        }
2275
2276        return true;
2277}
2278
2279static struct page
2280*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
2281                       struct vm_area_struct *vma, unsigned long address,
2282                       int node)
2283{
2284        VM_BUG_ON_PAGE(*hpage, *hpage);
2285        /*
2286         * Allocate the page while the vma is still valid and under
2287         * the mmap_sem read mode so there is no memory allocation
2288         * later when we take the mmap_sem in write mode. This is more
2289         * friendly behavior (OTOH it may actually hide bugs) to
2290         * filesystems in userland with daemons allocating memory in
2291         * the userland I/O paths.  Allocating memory with the
2292         * mmap_sem in read mode is also a good idea to allow greater
2293         * scalability.
2294         */
2295        *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
2296                khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
2297        /*
2298         * After allocating the hugepage, release the mmap_sem read lock in
2299         * preparation for taking it in write mode.
2300         */
2301        up_read(&mm->mmap_sem);
2302        if (unlikely(!*hpage)) {
2303                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2304                *hpage = ERR_PTR(-ENOMEM);
2305                return NULL;
2306        }
2307
2308        count_vm_event(THP_COLLAPSE_ALLOC);
2309        return *hpage;
2310}
2311#else
2312static int khugepaged_find_target_node(void)
2313{
2314        return 0;
2315}
2316
2317static inline struct page *alloc_hugepage(int defrag)
2318{
2319        return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
2320                           HPAGE_PMD_ORDER);
2321}
2322
2323static struct page *khugepaged_alloc_hugepage(bool *wait)
2324{
2325        struct page *hpage;
2326
2327        do {
2328                hpage = alloc_hugepage(khugepaged_defrag());
2329                if (!hpage) {
2330                        count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2331                        if (!*wait)
2332                                return NULL;
2333
2334                        *wait = false;
2335                        khugepaged_alloc_sleep();
2336                } else
2337                        count_vm_event(THP_COLLAPSE_ALLOC);
2338        } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2339
2340        return hpage;
2341}
2342
2343static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2344{
2345        if (!*hpage)
2346                *hpage = khugepaged_alloc_hugepage(wait);
2347
2348        if (unlikely(!*hpage))
2349                return false;
2350
2351        return true;
2352}
2353
2354static struct page
2355*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
2356                       struct vm_area_struct *vma, unsigned long address,
2357                       int node)
2358{
2359        up_read(&mm->mmap_sem);
2360        VM_BUG_ON(!*hpage);
2361        return  *hpage;
2362}
2363#endif
2364
2365static bool hugepage_vma_check(struct vm_area_struct *vma)
2366{
2367        if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2368            (vma->vm_flags & VM_NOHUGEPAGE))
2369                return false;
2370
2371        if (!vma->anon_vma || vma->vm_ops)
2372                return false;
2373        if (is_vma_temporary_stack(vma))
2374                return false;
2375        VM_BUG_ON(vma->vm_flags & VM_NO_THP);
2376        return true;
2377}
2378
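/*
 * Summary added for readability (not in the original source): collapse the
 * HPAGE_PMD_NR small pages mapped at @address into a freshly allocated
 * hugepage.  khugepaged_alloc_page() drops the mmap_sem read lock, the
 * function then retakes mmap_sem for writing, revalidates the vma, clears
 * and flushes the pmd, isolates and copies the old pages into the new page
 * and finally installs the huge pmd.  On any failure the original mapping
 * is left intact.
 */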
2379static void collapse_huge_page(struct mm_struct *mm,
2380                                   unsigned long address,
2381                                   struct page **hpage,
2382                                   struct vm_area_struct *vma,
2383                                   int node)
2384{
2385        pmd_t *pmd, _pmd;
2386        pte_t *pte;
2387        pgtable_t pgtable;
2388        struct page *new_page;
2389        spinlock_t *pmd_ptl, *pte_ptl;
2390        int isolated;
2391        unsigned long hstart, hend;
2392        unsigned long mmun_start;       /* For mmu_notifiers */
2393        unsigned long mmun_end;         /* For mmu_notifiers */
2394
2395        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2396
2397        /* khugepaged_alloc_page() releases the mmap_sem read lock. */
2398        new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
2399        if (!new_page)
2400                return;
2401
2402        if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
2403                return;
2404
2405        /*
2406         * Prevent all access to pagetables with the exception of
2407         * gup_fast later handled by the ptep_clear_flush and the VM
2408         * handled by the anon_vma lock + PG_lock.
2409         */
2410        down_write(&mm->mmap_sem);
2411        if (unlikely(khugepaged_test_exit(mm)))
2412                goto out;
2413
2414        vma = find_vma(mm, address);
2415        if (!vma)
2416                goto out;
2417        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2418        hend = vma->vm_end & HPAGE_PMD_MASK;
2419        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
2420                goto out;
2421        if (!hugepage_vma_check(vma))
2422                goto out;
2423        pmd = mm_find_pmd(mm, address);
2424        if (!pmd)
2425                goto out;
2426
2427        anon_vma_lock_write(vma->anon_vma);
2428
2429        pte = pte_offset_map(pmd, address);
2430        pte_ptl = pte_lockptr(mm, pmd);
2431
2432        mmun_start = address;
2433        mmun_end   = address + HPAGE_PMD_SIZE;
2434        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2435        pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
2436        /*
2437         * After this gup_fast can't run anymore. This also removes
2438         * any huge TLB entry from the CPU so we won't allow
2439         * huge and small TLB entries for the same virtual address
2440         * to avoid the risk of CPU bugs in that area.
2441         */
2442        _pmd = pmdp_clear_flush(vma, address, pmd);
2443        spin_unlock(pmd_ptl);
2444        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2445
2446        spin_lock(pte_ptl);
2447        isolated = __collapse_huge_page_isolate(vma, address, pte);
2448        spin_unlock(pte_ptl);
2449
2450        if (unlikely(!isolated)) {
2451                pte_unmap(pte);
2452                spin_lock(pmd_ptl);
2453                BUG_ON(!pmd_none(*pmd));
2454                /*
2455                 * We can only use set_pmd_at when establishing
2456                 * hugepmds and never for establishing regular pmds that
2457                 * point to regular pagetables. Use pmd_populate for that.
2458                 */
2459                pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2460                spin_unlock(pmd_ptl);
2461                anon_vma_unlock_write(vma->anon_vma);
2462                goto out;
2463        }
2464
2465        /*
2466         * All pages are isolated and locked so anon_vma rmap
2467         * can't run anymore.
2468         */
2469        anon_vma_unlock_write(vma->anon_vma);
2470
2471        __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
2472        pte_unmap(pte);
2473        __SetPageUptodate(new_page);
2474        pgtable = pmd_pgtable(_pmd);
2475
2476        _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2477        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
2478
2479        /*
2480         * spin_lock() below is not the equivalent of smp_wmb(), so
2481         * this is needed to avoid the copy_huge_page writes to become
2482         * visible after the set_pmd_at() write.
2483         */
2484        smp_wmb();
2485
2486        spin_lock(pmd_ptl);
2487        BUG_ON(!pmd_none(*pmd));
2488        page_add_new_anon_rmap(new_page, vma, address);
2489        pgtable_trans_huge_deposit(mm, pmd, pgtable);
2490        set_pmd_at(mm, address, pmd, _pmd);
2491        update_mmu_cache_pmd(vma, address, pmd);
2492        spin_unlock(pmd_ptl);
2493
2494        *hpage = NULL;
2495
2496        khugepaged_pages_collapsed++;
2497out_up_write:
2498        up_write(&mm->mmap_sem);
2499        return;
2500
2501out:
2502        mem_cgroup_uncharge_page(new_page);
2503        goto out_up_write;
2504}
2505
2506static int khugepaged_scan_pmd(struct mm_struct *mm,
2507                               struct vm_area_struct *vma,
2508                               unsigned long address,
2509                               struct page **hpage)
2510{
2511        pmd_t *pmd;
2512        pte_t *pte, *_pte;
2513        int ret = 0, referenced = 0, none = 0;
2514        struct page *page;
2515        unsigned long _address;
2516        spinlock_t *ptl;
2517        int node = NUMA_NO_NODE;
2518
2519        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2520
2521        pmd = mm_find_pmd(mm, address);
2522        if (!pmd)
2523                goto out;
2524
2525        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2526        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2527        for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2528             _pte++, _address += PAGE_SIZE) {
2529                pte_t pteval = *_pte;
2530                if (pte_none(pteval)) {
2531                        if (++none <= khugepaged_max_ptes_none)
2532                                continue;
2533                        else
2534                                goto out_unmap;
2535                }
2536                if (!pte_present(pteval) || !pte_write(pteval))
2537                        goto out_unmap;
2538                page = vm_normal_page(vma, _address, pteval);
2539                if (unlikely(!page))
2540                        goto out_unmap;
2541                /*
2542                 * Record which node the original page is from and save this
2543                 * information to khugepaged_node_load[].
2544                 * Khugepaged will allocate the hugepage from the node that
2545                 * has the max hit record.
2546                 */
2547                node = page_to_nid(page);
2548                khugepaged_node_load[node]++;
2549                VM_BUG_ON_PAGE(PageCompound(page), page);
2550                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2551                        goto out_unmap;
2552                /* cannot use mapcount: can't collapse if there's a gup pin */
2553                if (page_count(page) != 1)
2554                        goto out_unmap;
2555                if (pte_young(pteval) || PageReferenced(page) ||
2556                    mmu_notifier_test_young(vma->vm_mm, address))
2557                        referenced = 1;
2558        }
2559        if (referenced)
2560                ret = 1;
2561out_unmap:
2562        pte_unmap_unlock(pte, ptl);
2563        if (ret) {
2564                node = khugepaged_find_target_node();
2565                /* collapse_huge_page will return with the mmap_sem released */
2566                collapse_huge_page(mm, address, hpage, vma, node);
2567        }
2568out:
2569        return ret;
2570}
2571
2572static void collect_mm_slot(struct mm_slot *mm_slot)
2573{
2574        struct mm_struct *mm = mm_slot->mm;
2575
2576        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2577
2578        if (khugepaged_test_exit(mm)) {
2579                /* free mm_slot */
2580                hash_del(&mm_slot->hash);
2581                list_del(&mm_slot->mm_node);
2582
2583                /*
2584                 * Not strictly needed because the mm exited already.
2585                 *
2586                 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2587                 */
2588
2589                /* khugepaged_mm_lock actually not necessary for the below */
2590                free_mm_slot(mm_slot);
2591                mmdrop(mm);
2592        }
2593}
2594
2595static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2596                                            struct page **hpage)
2597        __releases(&khugepaged_mm_lock)
2598        __acquires(&khugepaged_mm_lock)
2599{
2600        struct mm_slot *mm_slot;
2601        struct mm_struct *mm;
2602        struct vm_area_struct *vma;
2603        int progress = 0;
2604
2605        VM_BUG_ON(!pages);
2606        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2607
2608        if (khugepaged_scan.mm_slot)
2609                mm_slot = khugepaged_scan.mm_slot;
2610        else {
2611                mm_slot = list_entry(khugepaged_scan.mm_head.next,
2612                                     struct mm_slot, mm_node);
2613                khugepaged_scan.address = 0;
2614                khugepaged_scan.mm_slot = mm_slot;
2615        }
2616        spin_unlock(&khugepaged_mm_lock);
2617
2618        mm = mm_slot->mm;
2619        down_read(&mm->mmap_sem);
2620        if (unlikely(khugepaged_test_exit(mm)))
2621                vma = NULL;
2622        else
2623                vma = find_vma(mm, khugepaged_scan.address);
2624
2625        progress++;
2626        for (; vma; vma = vma->vm_next) {
2627                unsigned long hstart, hend;
2628
2629                cond_resched();
2630                if (unlikely(khugepaged_test_exit(mm))) {
2631                        progress++;
2632                        break;
2633                }
2634                if (!hugepage_vma_check(vma)) {
2635skip:
2636                        progress++;
2637                        continue;
2638                }
2639                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2640                hend = vma->vm_end & HPAGE_PMD_MASK;
2641                if (hstart >= hend)
2642                        goto skip;
2643                if (khugepaged_scan.address > hend)
2644                        goto skip;
2645                if (khugepaged_scan.address < hstart)
2646                        khugepaged_scan.address = hstart;
2647                VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2648
2649                while (khugepaged_scan.address < hend) {
2650                        int ret;
2651                        cond_resched();
2652                        if (unlikely(khugepaged_test_exit(mm)))
2653                                goto breakouterloop;
2654
2655                        VM_BUG_ON(khugepaged_scan.address < hstart ||
2656                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
2657                                  hend);
2658                        ret = khugepaged_scan_pmd(mm, vma,
2659                                                  khugepaged_scan.address,
2660                                                  hpage);
2661                        /* move to next address */
2662                        khugepaged_scan.address += HPAGE_PMD_SIZE;
2663                        progress += HPAGE_PMD_NR;
2664                        if (ret)
2665                                /* we released mmap_sem so break loop */
2666                                goto breakouterloop_mmap_sem;
2667                        if (progress >= pages)
2668                                goto breakouterloop;
2669                }
2670        }
2671breakouterloop:
2672        up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2673breakouterloop_mmap_sem:
2674
2675        spin_lock(&khugepaged_mm_lock);
2676        VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2677        /*
2678         * Release the current mm_slot if this mm is about to die, or
2679         * if we scanned all vmas of this mm.
2680         */
2681        if (khugepaged_test_exit(mm) || !vma) {
2682                /*
2683                 * Make sure that if mm_users is reaching zero while
2684                 * khugepaged runs here, khugepaged_exit will find
2685                 * mm_slot not pointing to the exiting mm.
2686                 */
2687                if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2688                        khugepaged_scan.mm_slot = list_entry(
2689                                mm_slot->mm_node.next,
2690                                struct mm_slot, mm_node);
2691                        khugepaged_scan.address = 0;
2692                } else {
2693                        khugepaged_scan.mm_slot = NULL;
2694                        khugepaged_full_scans++;
2695                }
2696
2697                collect_mm_slot(mm_slot);
2698        }
2699
2700        return progress;
2701}
2702
2703static int khugepaged_has_work(void)
2704{
2705        return !list_empty(&khugepaged_scan.mm_head) &&
2706                khugepaged_enabled();
2707}
2708
2709static int khugepaged_wait_event(void)
2710{
2711        return !list_empty(&khugepaged_scan.mm_head) ||
2712                kthread_should_stop();
2713}
2714
2715static void khugepaged_do_scan(void)
2716{
2717        struct page *hpage = NULL;
2718        unsigned int progress = 0, pass_through_head = 0;
2719        unsigned int pages = khugepaged_pages_to_scan;
2720        bool wait = true;
2721
2722        barrier(); /* write khugepaged_pages_to_scan to local stack */
2723
2724        while (progress < pages) {
2725                if (!khugepaged_prealloc_page(&hpage, &wait))
2726                        break;
2727
2728                cond_resched();
2729
2730                if (unlikely(kthread_should_stop() || freezing(current)))
2731                        break;
2732
2733                spin_lock(&khugepaged_mm_lock);
2734                if (!khugepaged_scan.mm_slot)
2735                        pass_through_head++;
2736                if (khugepaged_has_work() &&
2737                    pass_through_head < 2)
2738                        progress += khugepaged_scan_mm_slot(pages - progress,
2739                                                            &hpage);
2740                else
2741                        progress = pages;
2742                spin_unlock(&khugepaged_mm_lock);
2743        }
2744
2745        if (!IS_ERR_OR_NULL(hpage))
2746                put_page(hpage);
2747}
2748
2749static void khugepaged_wait_work(void)
2750{
2751        try_to_freeze();
2752
2753        if (khugepaged_has_work()) {
2754                if (!khugepaged_scan_sleep_millisecs)
2755                        return;
2756
2757                wait_event_freezable_timeout(khugepaged_wait,
2758                                             kthread_should_stop(),
2759                        msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2760                return;
2761        }
2762
2763        if (khugepaged_enabled())
2764                wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2765}
2766
2767static int khugepaged(void *none)
2768{
2769        struct mm_slot *mm_slot;
2770
2771        set_freezable();
2772        set_user_nice(current, MAX_NICE);
2773
2774        while (!kthread_should_stop()) {
2775                khugepaged_do_scan();
2776                khugepaged_wait_work();
2777        }
2778
2779        spin_lock(&khugepaged_mm_lock);
2780        mm_slot = khugepaged_scan.mm_slot;
2781        khugepaged_scan.mm_slot = NULL;
2782        if (mm_slot)
2783                collect_mm_slot(mm_slot);
2784        spin_unlock(&khugepaged_mm_lock);
2785        return 0;
2786}
2787
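/*
 * (Descriptive comment added for readability.)  Replace a huge zero-page
 * pmd with a page table in which every pte maps the small zero page, then
 * drop the reference on the huge zero page.
 */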
2788static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2789                unsigned long haddr, pmd_t *pmd)
2790{
2791        struct mm_struct *mm = vma->vm_mm;
2792        pgtable_t pgtable;
2793        pmd_t _pmd;
2794        int i;
2795
2796        pmdp_clear_flush(vma, haddr, pmd);
2797        /* leave pmd empty until pte is filled */
2798
2799        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2800        pmd_populate(mm, &_pmd, pgtable);
2801
2802        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2803                pte_t *pte, entry;
2804                entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2805                entry = pte_mkspecial(entry);
2806                pte = pte_offset_map(&_pmd, haddr);
2807                VM_BUG_ON(!pte_none(*pte));
2808                set_pte_at(mm, haddr, pte, entry);
2809                pte_unmap(pte);
2810        }
2811        smp_wmb(); /* make pte visible before pmd */
2812        pmd_populate(mm, pmd, pgtable);
2813        put_huge_zero_page();
2814}
2815
2816void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2817                pmd_t *pmd)
2818{
2819        spinlock_t *ptl;
2820        struct page *page;
2821        struct mm_struct *mm = vma->vm_mm;
2822        unsigned long haddr = address & HPAGE_PMD_MASK;
2823        unsigned long mmun_start;       /* For mmu_notifiers */
2824        unsigned long mmun_end;         /* For mmu_notifiers */
2825
2826        BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
2827
2828        mmun_start = haddr;
2829        mmun_end   = haddr + HPAGE_PMD_SIZE;
2830again:
2831        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2832        ptl = pmd_lock(mm, pmd);
2833        if (unlikely(!pmd_trans_huge(*pmd))) {
2834                spin_unlock(ptl);
2835                mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2836                return;
2837        }
2838        if (is_huge_zero_pmd(*pmd)) {
2839                __split_huge_zero_page_pmd(vma, haddr, pmd);
2840                spin_unlock(ptl);
2841                mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2842                return;
2843        }
2844        page = pmd_page(*pmd);
2845        VM_BUG_ON_PAGE(!page_count(page), page);
2846        get_page(page);
2847        spin_unlock(ptl);
2848        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2849
2850        split_huge_page(page);
2851
2852        put_page(page);
2853
2854        /*
2855         * We don't always have down_write of mmap_sem here: a racing
2856         * do_huge_pmd_wp_page() might have copied-on-write to another
2857         * huge page before our split_huge_page() got the anon_vma lock.
2858         */
2859        if (unlikely(pmd_trans_huge(*pmd)))
2860                goto again;
2861}
2862
2863void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
2864                pmd_t *pmd)
2865{
2866        struct vm_area_struct *vma;
2867
2868        vma = find_vma(mm, address);
2869        BUG_ON(vma == NULL);
2870        split_huge_page_pmd(vma, address, pmd);
2871}
2872
2873static void split_huge_page_address(struct mm_struct *mm,
2874                                    unsigned long address)
2875{
2876        pgd_t *pgd;
2877        pud_t *pud;
2878        pmd_t *pmd;
2879
2880        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2881
2882        pgd = pgd_offset(mm, address);
2883        if (!pgd_present(*pgd))
2884                return;
2885
2886        pud = pud_offset(pgd, address);
2887        if (!pud_present(*pud))
2888                return;
2889
2890        pmd = pmd_offset(pud, address);
2891        if (!pmd_present(*pmd))
2892                return;
2893        /*
2894         * Caller holds the mmap_sem write mode, so a huge pmd cannot
2895         * materialize from under us.
2896         */
2897        split_huge_page_pmd_mm(mm, address, pmd);
2898}
2899
2900void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2901                             unsigned long start,
2902                             unsigned long end,
2903                             long adjust_next)
2904{
2905        /*
2906         * If the new start address isn't hpage aligned and it could
2907         * previously contain an hugepage: check if we need to split
2908         * an huge pmd.
2909         */
2910        if (start & ~HPAGE_PMD_MASK &&
2911            (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2912            (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2913                split_huge_page_address(vma->vm_mm, start);
2914
2915        /*
2916         * If the new end address isn't hpage aligned and it could
2917         * previously contain an hugepage: check if we need to split
2918         * an huge pmd.
2919         */
2920        if (end & ~HPAGE_PMD_MASK &&
2921            (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2922            (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2923                split_huge_page_address(vma->vm_mm, end);
2924
2925        /*
2926         * If we're also updating the vma->vm_next->vm_start, if the new
2927         * vm_next->vm_start isn't page aligned and it could previously
2928         * contain an hugepage: check if we need to split an huge pmd.
2929         */
2930        if (adjust_next > 0) {
2931                struct vm_area_struct *next = vma->vm_next;
2932                unsigned long nstart = next->vm_start;
2933                nstart += adjust_next << PAGE_SHIFT;
2934                if (nstart & ~HPAGE_PMD_MASK &&
2935                    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2936                    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2937                        split_huge_page_address(next->vm_mm, nstart);
2938        }
2939}
2940