linux/mm/huge_memory.c
   1/*
   2 *  Copyright (C) 2009  Red Hat, Inc.
   3 *
   4 *  This work is licensed under the terms of the GNU GPL, version 2. See
   5 *  the COPYING file in the top-level directory.
   6 */
   7
   8#include <linux/mm.h>
   9#include <linux/sched.h>
  10#include <linux/highmem.h>
  11#include <linux/hugetlb.h>
  12#include <linux/mmu_notifier.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/mm_inline.h>
  16#include <linux/kthread.h>
  17#include <linux/khugepaged.h>
  18#include <linux/freezer.h>
  19#include <linux/mman.h>
  20#include <asm/tlb.h>
  21#include <asm/pgalloc.h>
  22#include "internal.h"
  23
  24/*
  25 * By default transparent hugepage support is enabled for all mappings
  26 * and khugepaged scans all mappings. Defrag is only invoked by
  27 * khugepaged hugepage allocations and by page faults inside
  28 * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
  29 * allocations.
  30 */
  31unsigned long transparent_hugepage_flags __read_mostly =
  32#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
  33        (1<<TRANSPARENT_HUGEPAGE_FLAG)|
  34#endif
  35#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
  36        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
  37#endif
  38        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
  39        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  40
   41/* default: scan 8*512 ptes (or vmas) every 10 seconds */
  42static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
  43static unsigned int khugepaged_pages_collapsed;
  44static unsigned int khugepaged_full_scans;
  45static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  46/* during fragmentation poll the hugepage allocator once every minute */
  47static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  48static struct task_struct *khugepaged_thread __read_mostly;
  49static DEFINE_MUTEX(khugepaged_mutex);
  50static DEFINE_SPINLOCK(khugepaged_mm_lock);
  51static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
   52/*
   53 * By default collapse hugepages if at least one pte is mapped, just
   54 * as it would have happened had the vma been large enough during the
   55 * page fault.
   56 */
  57static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
  58
  59static int khugepaged(void *none);
  60static int mm_slots_hash_init(void);
  61static int khugepaged_slab_init(void);
  62static void khugepaged_slab_free(void);
  63
  64#define MM_SLOTS_HASH_HEADS 1024
  65static struct hlist_head *mm_slots_hash __read_mostly;
  66static struct kmem_cache *mm_slot_cache __read_mostly;
  67
  68/**
  69 * struct mm_slot - hash lookup from mm to mm_slot
  70 * @hash: hash collision list
  71 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  72 * @mm: the mm that this information is valid for
  73 */
  74struct mm_slot {
  75        struct hlist_node hash;
  76        struct list_head mm_node;
  77        struct mm_struct *mm;
  78};
  79
  80/**
  81 * struct khugepaged_scan - cursor for scanning
  82 * @mm_head: the head of the mm list to scan
  83 * @mm_slot: the current mm_slot we are scanning
   84 * @address: the next address inside that mm_slot to be scanned
  85 *
  86 * There is only the one khugepaged_scan instance of this cursor structure.
  87 */
  88struct khugepaged_scan {
  89        struct list_head mm_head;
  90        struct mm_slot *mm_slot;
  91        unsigned long address;
  92};
  93static struct khugepaged_scan khugepaged_scan = {
  94        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
  95};
  96
  97
  98static int set_recommended_min_free_kbytes(void)
  99{
 100        struct zone *zone;
 101        int nr_zones = 0;
 102        unsigned long recommended_min;
 103        extern int min_free_kbytes;
 104
 105        if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
 106                      &transparent_hugepage_flags) &&
 107            !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 108                      &transparent_hugepage_flags))
 109                return 0;
 110
 111        for_each_populated_zone(zone)
 112                nr_zones++;
 113
 114        /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
 115        recommended_min = pageblock_nr_pages * nr_zones * 2;
 116
 117        /*
 118         * Make sure that on average at least two pageblocks are almost free
 119         * of another type, one for a migratetype to fall back to and a
  120         * second to avoid subsequent fallbacks of other types. There are 3
 121         * MIGRATE_TYPES we care about.
 122         */
 123        recommended_min += pageblock_nr_pages * nr_zones *
 124                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
 125
  126        /* don't ever allow reserving more than 5% of the lowmem */
 127        recommended_min = min(recommended_min,
 128                              (unsigned long) nr_free_buffer_pages() / 20);
 129        recommended_min <<= (PAGE_SHIFT-10);
 130
 131        if (recommended_min > min_free_kbytes)
 132                min_free_kbytes = recommended_min;
 133        setup_per_zone_wmarks();
 134        return 0;
 135}
 136late_initcall(set_recommended_min_free_kbytes);
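/*
 * Worked example (for illustration only, assuming x86-64 defaults: 4k
 * pages, 2M pageblocks so pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3,
 * and 3 populated zones):
 *
 *   recommended_min  = 512 * 3 * 2          =  3072 pages
 *   recommended_min += 512 * 3 * 3 * 3      = 16896 pages total
 *   capped at nr_free_buffer_pages() / 20   (5% of lowmem)
 *   recommended_min <<= (PAGE_SHIFT - 10)   -> 67584 kB
 *
 * so min_free_kbytes would be raised to roughly 66MB on such a machine,
 * unless it was already higher.
 */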
 137
 138static int start_khugepaged(void)
 139{
 140        int err = 0;
 141        if (khugepaged_enabled()) {
 142                int wakeup;
 143                if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
 144                        err = -ENOMEM;
 145                        goto out;
 146                }
 147                mutex_lock(&khugepaged_mutex);
 148                if (!khugepaged_thread)
 149                        khugepaged_thread = kthread_run(khugepaged, NULL,
 150                                                        "khugepaged");
 151                if (unlikely(IS_ERR(khugepaged_thread))) {
 152                        printk(KERN_ERR
 153                               "khugepaged: kthread_run(khugepaged) failed\n");
 154                        err = PTR_ERR(khugepaged_thread);
 155                        khugepaged_thread = NULL;
 156                }
 157                wakeup = !list_empty(&khugepaged_scan.mm_head);
 158                mutex_unlock(&khugepaged_mutex);
 159                if (wakeup)
 160                        wake_up_interruptible(&khugepaged_wait);
 161
 162                set_recommended_min_free_kbytes();
 163        } else
 164                /* wakeup to exit */
 165                wake_up_interruptible(&khugepaged_wait);
 166out:
 167        return err;
 168}
 169
 170#ifdef CONFIG_SYSFS
 171
 172static ssize_t double_flag_show(struct kobject *kobj,
 173                                struct kobj_attribute *attr, char *buf,
 174                                enum transparent_hugepage_flag enabled,
 175                                enum transparent_hugepage_flag req_madv)
 176{
 177        if (test_bit(enabled, &transparent_hugepage_flags)) {
 178                VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
 179                return sprintf(buf, "[always] madvise never\n");
 180        } else if (test_bit(req_madv, &transparent_hugepage_flags))
 181                return sprintf(buf, "always [madvise] never\n");
 182        else
 183                return sprintf(buf, "always madvise [never]\n");
 184}
 185static ssize_t double_flag_store(struct kobject *kobj,
 186                                 struct kobj_attribute *attr,
 187                                 const char *buf, size_t count,
 188                                 enum transparent_hugepage_flag enabled,
 189                                 enum transparent_hugepage_flag req_madv)
 190{
 191        if (!memcmp("always", buf,
 192                    min(sizeof("always")-1, count))) {
 193                set_bit(enabled, &transparent_hugepage_flags);
 194                clear_bit(req_madv, &transparent_hugepage_flags);
 195        } else if (!memcmp("madvise", buf,
 196                           min(sizeof("madvise")-1, count))) {
 197                clear_bit(enabled, &transparent_hugepage_flags);
 198                set_bit(req_madv, &transparent_hugepage_flags);
 199        } else if (!memcmp("never", buf,
 200                           min(sizeof("never")-1, count))) {
 201                clear_bit(enabled, &transparent_hugepage_flags);
 202                clear_bit(req_madv, &transparent_hugepage_flags);
 203        } else
 204                return -EINVAL;
 205
 206        return count;
 207}
 208
 209static ssize_t enabled_show(struct kobject *kobj,
 210                            struct kobj_attribute *attr, char *buf)
 211{
 212        return double_flag_show(kobj, attr, buf,
 213                                TRANSPARENT_HUGEPAGE_FLAG,
 214                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 215}
 216static ssize_t enabled_store(struct kobject *kobj,
 217                             struct kobj_attribute *attr,
 218                             const char *buf, size_t count)
 219{
 220        ssize_t ret;
 221
 222        ret = double_flag_store(kobj, attr, buf, count,
 223                                TRANSPARENT_HUGEPAGE_FLAG,
 224                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 225
 226        if (ret > 0) {
 227                int err = start_khugepaged();
 228                if (err)
 229                        ret = err;
 230        }
 231
 232        if (ret > 0 &&
 233            (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
 234                      &transparent_hugepage_flags) ||
 235             test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 236                      &transparent_hugepage_flags)))
 237                set_recommended_min_free_kbytes();
 238
 239        return ret;
 240}
 241static struct kobj_attribute enabled_attr =
 242        __ATTR(enabled, 0644, enabled_show, enabled_store);
 243
 244static ssize_t single_flag_show(struct kobject *kobj,
 245                                struct kobj_attribute *attr, char *buf,
 246                                enum transparent_hugepage_flag flag)
 247{
 248        return sprintf(buf, "%d\n",
 249                       !!test_bit(flag, &transparent_hugepage_flags));
 250}
 251
 252static ssize_t single_flag_store(struct kobject *kobj,
 253                                 struct kobj_attribute *attr,
 254                                 const char *buf, size_t count,
 255                                 enum transparent_hugepage_flag flag)
 256{
 257        unsigned long value;
 258        int ret;
 259
 260        ret = kstrtoul(buf, 10, &value);
 261        if (ret < 0)
 262                return ret;
 263        if (value > 1)
 264                return -EINVAL;
 265
 266        if (value)
 267                set_bit(flag, &transparent_hugepage_flags);
 268        else
 269                clear_bit(flag, &transparent_hugepage_flags);
 270
 271        return count;
 272}
 273
  274/*
  275 * Currently "defrag" only controls whether the allocation is allowed
  276 * to wait. A blind __GFP_REPEAT would be too aggressive: it's never
  277 * worth swapping tons of memory just to allocate one more hugepage.
  278 */
 279static ssize_t defrag_show(struct kobject *kobj,
 280                           struct kobj_attribute *attr, char *buf)
 281{
 282        return double_flag_show(kobj, attr, buf,
 283                                TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 284                                TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
 285}
 286static ssize_t defrag_store(struct kobject *kobj,
 287                            struct kobj_attribute *attr,
 288                            const char *buf, size_t count)
 289{
 290        return double_flag_store(kobj, attr, buf, count,
 291                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 292                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
 293}
 294static struct kobj_attribute defrag_attr =
 295        __ATTR(defrag, 0644, defrag_show, defrag_store);
 296
 297#ifdef CONFIG_DEBUG_VM
 298static ssize_t debug_cow_show(struct kobject *kobj,
 299                                struct kobj_attribute *attr, char *buf)
 300{
 301        return single_flag_show(kobj, attr, buf,
 302                                TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
 303}
 304static ssize_t debug_cow_store(struct kobject *kobj,
 305                               struct kobj_attribute *attr,
 306                               const char *buf, size_t count)
 307{
 308        return single_flag_store(kobj, attr, buf, count,
 309                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
 310}
 311static struct kobj_attribute debug_cow_attr =
 312        __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
 313#endif /* CONFIG_DEBUG_VM */
 314
 315static struct attribute *hugepage_attr[] = {
 316        &enabled_attr.attr,
 317        &defrag_attr.attr,
 318#ifdef CONFIG_DEBUG_VM
 319        &debug_cow_attr.attr,
 320#endif
 321        NULL,
 322};
 323
 324static struct attribute_group hugepage_attr_group = {
 325        .attrs = hugepage_attr,
 326};
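/*
 * These attributes are registered by hugepage_init() below under
 * /sys/kernel/mm/transparent_hugepage/, so the policy can be switched at
 * runtime, for example:
 *
 *   echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *   echo madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * (debug_cow is only present when CONFIG_DEBUG_VM is set).
 */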
 327
 328static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 329                                         struct kobj_attribute *attr,
 330                                         char *buf)
 331{
 332        return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 333}
 334
 335static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 336                                          struct kobj_attribute *attr,
 337                                          const char *buf, size_t count)
 338{
 339        unsigned long msecs;
 340        int err;
 341
 342        err = strict_strtoul(buf, 10, &msecs);
 343        if (err || msecs > UINT_MAX)
 344                return -EINVAL;
 345
 346        khugepaged_scan_sleep_millisecs = msecs;
 347        wake_up_interruptible(&khugepaged_wait);
 348
 349        return count;
 350}
 351static struct kobj_attribute scan_sleep_millisecs_attr =
 352        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 353               scan_sleep_millisecs_store);
 354
 355static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 356                                          struct kobj_attribute *attr,
 357                                          char *buf)
 358{
 359        return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 360}
 361
 362static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 363                                           struct kobj_attribute *attr,
 364                                           const char *buf, size_t count)
 365{
 366        unsigned long msecs;
 367        int err;
 368
 369        err = strict_strtoul(buf, 10, &msecs);
 370        if (err || msecs > UINT_MAX)
 371                return -EINVAL;
 372
 373        khugepaged_alloc_sleep_millisecs = msecs;
 374        wake_up_interruptible(&khugepaged_wait);
 375
 376        return count;
 377}
 378static struct kobj_attribute alloc_sleep_millisecs_attr =
 379        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 380               alloc_sleep_millisecs_store);
 381
 382static ssize_t pages_to_scan_show(struct kobject *kobj,
 383                                  struct kobj_attribute *attr,
 384                                  char *buf)
 385{
 386        return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
 387}
 388static ssize_t pages_to_scan_store(struct kobject *kobj,
 389                                   struct kobj_attribute *attr,
 390                                   const char *buf, size_t count)
 391{
 392        int err;
 393        unsigned long pages;
 394
 395        err = strict_strtoul(buf, 10, &pages);
 396        if (err || !pages || pages > UINT_MAX)
 397                return -EINVAL;
 398
 399        khugepaged_pages_to_scan = pages;
 400
 401        return count;
 402}
 403static struct kobj_attribute pages_to_scan_attr =
 404        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
 405               pages_to_scan_store);
 406
 407static ssize_t pages_collapsed_show(struct kobject *kobj,
 408                                    struct kobj_attribute *attr,
 409                                    char *buf)
 410{
 411        return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
 412}
 413static struct kobj_attribute pages_collapsed_attr =
 414        __ATTR_RO(pages_collapsed);
 415
 416static ssize_t full_scans_show(struct kobject *kobj,
 417                               struct kobj_attribute *attr,
 418                               char *buf)
 419{
 420        return sprintf(buf, "%u\n", khugepaged_full_scans);
 421}
 422static struct kobj_attribute full_scans_attr =
 423        __ATTR_RO(full_scans);
 424
 425static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 426                                      struct kobj_attribute *attr, char *buf)
 427{
 428        return single_flag_show(kobj, attr, buf,
 429                                TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 430}
 431static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 432                                       struct kobj_attribute *attr,
 433                                       const char *buf, size_t count)
 434{
 435        return single_flag_store(kobj, attr, buf, count,
 436                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 437}
 438static struct kobj_attribute khugepaged_defrag_attr =
 439        __ATTR(defrag, 0644, khugepaged_defrag_show,
 440               khugepaged_defrag_store);
 441
  442/*
  443 * max_ptes_none controls whether khugepaged may collapse a hugepage
  444 * even when some of its ptes are still unmapped, which can increase
  445 * the memory footprint of the vmas. When max_ptes_none is 0,
  446 * khugepaged will not reduce the free memory available in the system
  447 * as it runs. Increasing max_ptes_none instead potentially reduces
  448 * the free memory in the system during the khugepaged scan.
  449 */
 450static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 451                                             struct kobj_attribute *attr,
 452                                             char *buf)
 453{
 454        return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
 455}
 456static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 457                                              struct kobj_attribute *attr,
 458                                              const char *buf, size_t count)
 459{
 460        int err;
 461        unsigned long max_ptes_none;
 462
 463        err = strict_strtoul(buf, 10, &max_ptes_none);
 464        if (err || max_ptes_none > HPAGE_PMD_NR-1)
 465                return -EINVAL;
 466
 467        khugepaged_max_ptes_none = max_ptes_none;
 468
 469        return count;
 470}
 471static struct kobj_attribute khugepaged_max_ptes_none_attr =
 472        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 473               khugepaged_max_ptes_none_store);
 474
 475static struct attribute *khugepaged_attr[] = {
 476        &khugepaged_defrag_attr.attr,
 477        &khugepaged_max_ptes_none_attr.attr,
 478        &pages_to_scan_attr.attr,
 479        &pages_collapsed_attr.attr,
 480        &full_scans_attr.attr,
 481        &scan_sleep_millisecs_attr.attr,
 482        &alloc_sleep_millisecs_attr.attr,
 483        NULL,
 484};
 485
 486static struct attribute_group khugepaged_attr_group = {
 487        .attrs = khugepaged_attr,
 488        .name = "khugepaged",
 489};
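/*
 * The khugepaged group lives under the same kobject, i.e.
 * /sys/kernel/mm/transparent_hugepage/khugepaged/. For example:
 *
 *   echo 4096 > .../khugepaged/pages_to_scan
 *   cat .../khugepaged/full_scans
 *
 * tunes how many ptes a scan pass covers and reads how many full passes
 * over the mm list khugepaged has completed.
 */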
 490#endif /* CONFIG_SYSFS */
 491
 492static int __init hugepage_init(void)
 493{
 494        int err;
 495#ifdef CONFIG_SYSFS
 496        static struct kobject *hugepage_kobj;
 497#endif
 498
 499        err = -EINVAL;
 500        if (!has_transparent_hugepage()) {
 501                transparent_hugepage_flags = 0;
 502                goto out;
 503        }
 504
 505#ifdef CONFIG_SYSFS
 506        err = -ENOMEM;
 507        hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
 508        if (unlikely(!hugepage_kobj)) {
 509                printk(KERN_ERR "hugepage: failed kobject create\n");
 510                goto out;
 511        }
 512
 513        err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
 514        if (err) {
  515                printk(KERN_ERR "hugepage: failed to register hugepage group\n");
 516                goto out;
 517        }
 518
 519        err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
 520        if (err) {
  521                printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
 522                goto out;
 523        }
 524#endif
 525
 526        err = khugepaged_slab_init();
 527        if (err)
 528                goto out;
 529
 530        err = mm_slots_hash_init();
 531        if (err) {
 532                khugepaged_slab_free();
 533                goto out;
 534        }
 535
 536        /*
 537         * By default disable transparent hugepages on smaller systems,
 538         * where the extra memory used could hurt more than TLB overhead
 539         * is likely to save.  The admin can still enable it through /sys.
 540         */
 541        if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
 542                transparent_hugepage_flags = 0;
 543
 544        start_khugepaged();
 545
 546        set_recommended_min_free_kbytes();
 547
 548out:
 549        return err;
 550}
 551module_init(hugepage_init)
 552
 553static int __init setup_transparent_hugepage(char *str)
 554{
 555        int ret = 0;
 556        if (!str)
 557                goto out;
 558        if (!strcmp(str, "always")) {
 559                set_bit(TRANSPARENT_HUGEPAGE_FLAG,
 560                        &transparent_hugepage_flags);
 561                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 562                          &transparent_hugepage_flags);
 563                ret = 1;
 564        } else if (!strcmp(str, "madvise")) {
 565                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 566                          &transparent_hugepage_flags);
 567                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 568                        &transparent_hugepage_flags);
 569                ret = 1;
 570        } else if (!strcmp(str, "never")) {
 571                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 572                          &transparent_hugepage_flags);
 573                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 574                          &transparent_hugepage_flags);
 575                ret = 1;
 576        }
 577out:
 578        if (!ret)
 579                printk(KERN_WARNING
 580                       "transparent_hugepage= cannot parse, ignored\n");
 581        return ret;
 582}
 583__setup("transparent_hugepage=", setup_transparent_hugepage);
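/*
 * The same three policies can also be selected at boot, e.g. by adding
 *
 *   transparent_hugepage=madvise
 *
 * to the kernel command line; any other value is ignored with a warning
 * and the compile-time default is kept.
 */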
 584
 585static void prepare_pmd_huge_pte(pgtable_t pgtable,
 586                                 struct mm_struct *mm)
 587{
 588        assert_spin_locked(&mm->page_table_lock);
 589
 590        /* FIFO */
 591        if (!mm->pmd_huge_pte)
 592                INIT_LIST_HEAD(&pgtable->lru);
 593        else
 594                list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
 595        mm->pmd_huge_pte = pgtable;
 596}
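/*
 * Note: prepare_pmd_huge_pte() "deposits" the preallocated pte page table
 * on mm->pmd_huge_pte while a huge pmd is mapped, and get_pmd_huge_pte()
 * below withdraws it again when the huge pmd is zapped or split, so that
 * splitting a huge pmd never needs to allocate memory for the regular
 * page table.
 */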
 597
 598static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 599{
 600        if (likely(vma->vm_flags & VM_WRITE))
 601                pmd = pmd_mkwrite(pmd);
 602        return pmd;
 603}
 604
 605static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 606                                        struct vm_area_struct *vma,
 607                                        unsigned long haddr, pmd_t *pmd,
 608                                        struct page *page)
 609{
 610        int ret = 0;
 611        pgtable_t pgtable;
 612
 613        VM_BUG_ON(!PageCompound(page));
 614        pgtable = pte_alloc_one(mm, haddr);
 615        if (unlikely(!pgtable)) {
 616                mem_cgroup_uncharge_page(page);
 617                put_page(page);
 618                return VM_FAULT_OOM;
 619        }
 620
 621        clear_huge_page(page, haddr, HPAGE_PMD_NR);
 622        __SetPageUptodate(page);
 623
 624        spin_lock(&mm->page_table_lock);
 625        if (unlikely(!pmd_none(*pmd))) {
 626                spin_unlock(&mm->page_table_lock);
 627                mem_cgroup_uncharge_page(page);
 628                put_page(page);
 629                pte_free(mm, pgtable);
 630        } else {
 631                pmd_t entry;
 632                entry = mk_pmd(page, vma->vm_page_prot);
 633                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 634                entry = pmd_mkhuge(entry);
 635                /*
 636                 * The spinlocking to take the lru_lock inside
 637                 * page_add_new_anon_rmap() acts as a full memory
 638                 * barrier to be sure clear_huge_page writes become
  639                 * visible before the set_pmd_at() write.
 640                 */
 641                page_add_new_anon_rmap(page, vma, haddr);
 642                set_pmd_at(mm, haddr, pmd, entry);
 643                prepare_pmd_huge_pte(pgtable, mm);
 644                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 645                spin_unlock(&mm->page_table_lock);
 646        }
 647
 648        return ret;
 649}
 650
 651static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 652{
 653        return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 654}
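/*
 * In other words: with defrag enabled the allocation keeps __GFP_WAIT and
 * may sleep, reclaim and compact memory to produce a hugepage; with defrag
 * disabled __GFP_WAIT is cleared, so the allocator either finds a free
 * hugepage immediately or fails and the fault path falls back to regular
 * pages.
 */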
 655
 656static inline struct page *alloc_hugepage_vma(int defrag,
 657                                              struct vm_area_struct *vma,
 658                                              unsigned long haddr, int nd,
 659                                              gfp_t extra_gfp)
 660{
 661        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
 662                               HPAGE_PMD_ORDER, vma, haddr, nd);
 663}
 664
 665#ifndef CONFIG_NUMA
 666static inline struct page *alloc_hugepage(int defrag)
 667{
 668        return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
 669                           HPAGE_PMD_ORDER);
 670}
 671#endif
 672
 673int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 674                               unsigned long address, pmd_t *pmd,
 675                               unsigned int flags)
 676{
 677        struct page *page;
 678        unsigned long haddr = address & HPAGE_PMD_MASK;
 679        pte_t *pte;
 680
 681        if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
 682                if (unlikely(anon_vma_prepare(vma)))
 683                        return VM_FAULT_OOM;
 684                if (unlikely(khugepaged_enter(vma)))
 685                        return VM_FAULT_OOM;
 686                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 687                                          vma, haddr, numa_node_id(), 0);
 688                if (unlikely(!page)) {
 689                        count_vm_event(THP_FAULT_FALLBACK);
 690                        goto out;
 691                }
 692                count_vm_event(THP_FAULT_ALLOC);
 693                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 694                        put_page(page);
 695                        goto out;
 696                }
 697
 698                return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
 699        }
 700out:
 701        /*
 702         * Use __pte_alloc instead of pte_alloc_map, because we can't
  703         * run pte_offset_map on the pmd if a huge pmd could
 704         * materialize from under us from a different thread.
 705         */
 706        if (unlikely(__pte_alloc(mm, vma, pmd, address)))
 707                return VM_FAULT_OOM;
  708        /* if a huge pmd materialized from under us just retry later */
 709        if (unlikely(pmd_trans_huge(*pmd)))
 710                return 0;
 711        /*
 712         * A regular pmd is established and it can't morph into a huge pmd
 713         * from under us anymore at this point because we hold the mmap_sem
 714         * read mode and khugepaged takes it in write mode. So now it's
 715         * safe to run pte_offset_map().
 716         */
 717        pte = pte_offset_map(pmd, address);
 718        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 719}
 720
 721int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 722                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 723                  struct vm_area_struct *vma)
 724{
 725        struct page *src_page;
 726        pmd_t pmd;
 727        pgtable_t pgtable;
 728        int ret;
 729
 730        ret = -ENOMEM;
 731        pgtable = pte_alloc_one(dst_mm, addr);
 732        if (unlikely(!pgtable))
 733                goto out;
 734
 735        spin_lock(&dst_mm->page_table_lock);
 736        spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
 737
 738        ret = -EAGAIN;
 739        pmd = *src_pmd;
 740        if (unlikely(!pmd_trans_huge(pmd))) {
 741                pte_free(dst_mm, pgtable);
 742                goto out_unlock;
 743        }
 744        if (unlikely(pmd_trans_splitting(pmd))) {
 745                /* split huge page running from under us */
 746                spin_unlock(&src_mm->page_table_lock);
 747                spin_unlock(&dst_mm->page_table_lock);
 748                pte_free(dst_mm, pgtable);
 749
 750                wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
 751                goto out;
 752        }
 753        src_page = pmd_page(pmd);
 754        VM_BUG_ON(!PageHead(src_page));
 755        get_page(src_page);
 756        page_dup_rmap(src_page);
 757        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 758
 759        pmdp_set_wrprotect(src_mm, addr, src_pmd);
 760        pmd = pmd_mkold(pmd_wrprotect(pmd));
 761        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 762        prepare_pmd_huge_pte(pgtable, dst_mm);
 763
 764        ret = 0;
 765out_unlock:
 766        spin_unlock(&src_mm->page_table_lock);
 767        spin_unlock(&dst_mm->page_table_lock);
 768out:
 769        return ret;
 770}
 771
  772/* no "address" argument, so this destroys the page coloring of some archs */
 773pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
 774{
 775        pgtable_t pgtable;
 776
 777        assert_spin_locked(&mm->page_table_lock);
 778
 779        /* FIFO */
 780        pgtable = mm->pmd_huge_pte;
 781        if (list_empty(&pgtable->lru))
 782                mm->pmd_huge_pte = NULL;
 783        else {
 784                mm->pmd_huge_pte = list_entry(pgtable->lru.next,
 785                                              struct page, lru);
 786                list_del(&pgtable->lru);
 787        }
 788        return pgtable;
 789}
 790
 791static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 792                                        struct vm_area_struct *vma,
 793                                        unsigned long address,
 794                                        pmd_t *pmd, pmd_t orig_pmd,
 795                                        struct page *page,
 796                                        unsigned long haddr)
 797{
 798        pgtable_t pgtable;
 799        pmd_t _pmd;
 800        int ret = 0, i;
 801        struct page **pages;
 802
 803        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
 804                        GFP_KERNEL);
 805        if (unlikely(!pages)) {
 806                ret |= VM_FAULT_OOM;
 807                goto out;
 808        }
 809
 810        for (i = 0; i < HPAGE_PMD_NR; i++) {
 811                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
 812                                               __GFP_OTHER_NODE,
 813                                               vma, address, page_to_nid(page));
 814                if (unlikely(!pages[i] ||
 815                             mem_cgroup_newpage_charge(pages[i], mm,
 816                                                       GFP_KERNEL))) {
 817                        if (pages[i])
 818                                put_page(pages[i]);
 819                        mem_cgroup_uncharge_start();
 820                        while (--i >= 0) {
 821                                mem_cgroup_uncharge_page(pages[i]);
 822                                put_page(pages[i]);
 823                        }
 824                        mem_cgroup_uncharge_end();
 825                        kfree(pages);
 826                        ret |= VM_FAULT_OOM;
 827                        goto out;
 828                }
 829        }
 830
 831        for (i = 0; i < HPAGE_PMD_NR; i++) {
 832                copy_user_highpage(pages[i], page + i,
 833                                   haddr + PAGE_SIZE * i, vma);
 834                __SetPageUptodate(pages[i]);
 835                cond_resched();
 836        }
 837
 838        spin_lock(&mm->page_table_lock);
 839        if (unlikely(!pmd_same(*pmd, orig_pmd)))
 840                goto out_free_pages;
 841        VM_BUG_ON(!PageHead(page));
 842
 843        pmdp_clear_flush_notify(vma, haddr, pmd);
 844        /* leave pmd empty until pte is filled */
 845
 846        pgtable = get_pmd_huge_pte(mm);
 847        pmd_populate(mm, &_pmd, pgtable);
 848
 849        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 850                pte_t *pte, entry;
 851                entry = mk_pte(pages[i], vma->vm_page_prot);
 852                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 853                page_add_new_anon_rmap(pages[i], vma, haddr);
 854                pte = pte_offset_map(&_pmd, haddr);
 855                VM_BUG_ON(!pte_none(*pte));
 856                set_pte_at(mm, haddr, pte, entry);
 857                pte_unmap(pte);
 858        }
 859        kfree(pages);
 860
 861        mm->nr_ptes++;
 862        smp_wmb(); /* make pte visible before pmd */
 863        pmd_populate(mm, pmd, pgtable);
 864        page_remove_rmap(page);
 865        spin_unlock(&mm->page_table_lock);
 866
 867        ret |= VM_FAULT_WRITE;
 868        put_page(page);
 869
 870out:
 871        return ret;
 872
 873out_free_pages:
 874        spin_unlock(&mm->page_table_lock);
 875        mem_cgroup_uncharge_start();
 876        for (i = 0; i < HPAGE_PMD_NR; i++) {
 877                mem_cgroup_uncharge_page(pages[i]);
 878                put_page(pages[i]);
 879        }
 880        mem_cgroup_uncharge_end();
 881        kfree(pages);
 882        goto out;
 883}
 884
 885int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 886                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
 887{
 888        int ret = 0;
 889        struct page *page, *new_page;
 890        unsigned long haddr;
 891
 892        VM_BUG_ON(!vma->anon_vma);
 893        spin_lock(&mm->page_table_lock);
 894        if (unlikely(!pmd_same(*pmd, orig_pmd)))
 895                goto out_unlock;
 896
 897        page = pmd_page(orig_pmd);
 898        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
 899        haddr = address & HPAGE_PMD_MASK;
 900        if (page_mapcount(page) == 1) {
 901                pmd_t entry;
 902                entry = pmd_mkyoung(orig_pmd);
 903                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 904                if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
 905                        update_mmu_cache(vma, address, entry);
 906                ret |= VM_FAULT_WRITE;
 907                goto out_unlock;
 908        }
 909        get_page(page);
 910        spin_unlock(&mm->page_table_lock);
 911
 912        if (transparent_hugepage_enabled(vma) &&
 913            !transparent_hugepage_debug_cow())
 914                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 915                                              vma, haddr, numa_node_id(), 0);
 916        else
 917                new_page = NULL;
 918
 919        if (unlikely(!new_page)) {
 920                count_vm_event(THP_FAULT_FALLBACK);
 921                ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 922                                                   pmd, orig_pmd, page, haddr);
 923                put_page(page);
 924                goto out;
 925        }
 926        count_vm_event(THP_FAULT_ALLOC);
 927
 928        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 929                put_page(new_page);
 930                put_page(page);
 931                ret |= VM_FAULT_OOM;
 932                goto out;
 933        }
 934
 935        copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
 936        __SetPageUptodate(new_page);
 937
 938        spin_lock(&mm->page_table_lock);
 939        put_page(page);
 940        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
 941                mem_cgroup_uncharge_page(new_page);
 942                put_page(new_page);
 943        } else {
 944                pmd_t entry;
 945                VM_BUG_ON(!PageHead(page));
 946                entry = mk_pmd(new_page, vma->vm_page_prot);
 947                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 948                entry = pmd_mkhuge(entry);
 949                pmdp_clear_flush_notify(vma, haddr, pmd);
 950                page_add_new_anon_rmap(new_page, vma, haddr);
 951                set_pmd_at(mm, haddr, pmd, entry);
 952                update_mmu_cache(vma, address, entry);
 953                page_remove_rmap(page);
 954                put_page(page);
 955                ret |= VM_FAULT_WRITE;
 956        }
 957out_unlock:
 958        spin_unlock(&mm->page_table_lock);
 959out:
 960        return ret;
 961}
 962
 963struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 964                                   unsigned long addr,
 965                                   pmd_t *pmd,
 966                                   unsigned int flags)
 967{
 968        struct page *page = NULL;
 969
 970        assert_spin_locked(&mm->page_table_lock);
 971
 972        if (flags & FOLL_WRITE && !pmd_write(*pmd))
 973                goto out;
 974
 975        page = pmd_page(*pmd);
 976        VM_BUG_ON(!PageHead(page));
 977        if (flags & FOLL_TOUCH) {
 978                pmd_t _pmd;
  979                /*
  980                 * We should set the dirty bit only for FOLL_WRITE, but
  981                 * for now the dirty bit in the pmd is meaningless.
  982                 * If the dirty bit ever becomes meaningful and we
  983                 * only set it for FOLL_WRITE, an atomic set_bit will
  984                 * be required on the pmd to set the young bit,
  985                 * instead of the current set_pmd_at.
  986                 */
 987                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
 988                set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
 989        }
 990        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
 991        VM_BUG_ON(!PageCompound(page));
 992        if (flags & FOLL_GET)
 993                get_page_foll(page);
 994
 995out:
 996        return page;
 997}
 998
 999int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1000                 pmd_t *pmd)
1001{
1002        int ret = 0;
1003
1004        spin_lock(&tlb->mm->page_table_lock);
1005        if (likely(pmd_trans_huge(*pmd))) {
1006                if (unlikely(pmd_trans_splitting(*pmd))) {
1007                        spin_unlock(&tlb->mm->page_table_lock);
1008                        wait_split_huge_page(vma->anon_vma,
1009                                             pmd);
1010                } else {
1011                        struct page *page;
1012                        pgtable_t pgtable;
1013                        pgtable = get_pmd_huge_pte(tlb->mm);
1014                        page = pmd_page(*pmd);
1015                        pmd_clear(pmd);
1016                        page_remove_rmap(page);
1017                        VM_BUG_ON(page_mapcount(page) < 0);
1018                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1019                        VM_BUG_ON(!PageHead(page));
1020                        spin_unlock(&tlb->mm->page_table_lock);
1021                        tlb_remove_page(tlb, page);
1022                        pte_free(tlb->mm, pgtable);
1023                        ret = 1;
1024                }
1025        } else
1026                spin_unlock(&tlb->mm->page_table_lock);
1027
1028        return ret;
1029}
1030
1031int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1032                unsigned long addr, unsigned long end,
1033                unsigned char *vec)
1034{
1035        int ret = 0;
1036
1037        spin_lock(&vma->vm_mm->page_table_lock);
1038        if (likely(pmd_trans_huge(*pmd))) {
1039                ret = !pmd_trans_splitting(*pmd);
1040                spin_unlock(&vma->vm_mm->page_table_lock);
1041                if (unlikely(!ret))
1042                        wait_split_huge_page(vma->anon_vma, pmd);
1043                else {
1044                        /*
1045                         * All logical pages in the range are present
1046                         * if backed by a huge page.
1047                         */
1048                        memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1049                }
1050        } else
1051                spin_unlock(&vma->vm_mm->page_table_lock);
1052
1053        return ret;
1054}
1055
1056int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1057                  unsigned long old_addr,
1058                  unsigned long new_addr, unsigned long old_end,
1059                  pmd_t *old_pmd, pmd_t *new_pmd)
1060{
1061        int ret = 0;
1062        pmd_t pmd;
1063
1064        struct mm_struct *mm = vma->vm_mm;
1065
1066        if ((old_addr & ~HPAGE_PMD_MASK) ||
1067            (new_addr & ~HPAGE_PMD_MASK) ||
1068            old_end - old_addr < HPAGE_PMD_SIZE ||
1069            (new_vma->vm_flags & VM_NOHUGEPAGE))
1070                goto out;
1071
1072        /*
1073         * The destination pmd shouldn't be established, free_pgtables()
 1074         * should have released it.
1075         */
1076        if (WARN_ON(!pmd_none(*new_pmd))) {
1077                VM_BUG_ON(pmd_trans_huge(*new_pmd));
1078                goto out;
1079        }
1080
1081        spin_lock(&mm->page_table_lock);
1082        if (likely(pmd_trans_huge(*old_pmd))) {
1083                if (pmd_trans_splitting(*old_pmd)) {
1084                        spin_unlock(&mm->page_table_lock);
1085                        wait_split_huge_page(vma->anon_vma, old_pmd);
1086                        ret = -1;
1087                } else {
1088                        pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1089                        VM_BUG_ON(!pmd_none(*new_pmd));
1090                        set_pmd_at(mm, new_addr, new_pmd, pmd);
1091                        spin_unlock(&mm->page_table_lock);
1092                        ret = 1;
1093                }
1094        } else {
1095                spin_unlock(&mm->page_table_lock);
1096        }
1097out:
1098        return ret;
1099}
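/*
 * Return values above: 1 means the huge pmd was moved to the new address,
 * -1 means a concurrent split was in progress (we waited for it to finish
 * and moved nothing), and 0 means the preconditions weren't met (not a
 * huge pmd, misaligned addresses, or the destination vma disallows
 * hugepages).
 */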
1100
1101int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1102                unsigned long addr, pgprot_t newprot)
1103{
1104        struct mm_struct *mm = vma->vm_mm;
1105        int ret = 0;
1106
1107        spin_lock(&mm->page_table_lock);
1108        if (likely(pmd_trans_huge(*pmd))) {
1109                if (unlikely(pmd_trans_splitting(*pmd))) {
1110                        spin_unlock(&mm->page_table_lock);
1111                        wait_split_huge_page(vma->anon_vma, pmd);
1112                } else {
1113                        pmd_t entry;
1114
1115                        entry = pmdp_get_and_clear(mm, addr, pmd);
1116                        entry = pmd_modify(entry, newprot);
1117                        set_pmd_at(mm, addr, pmd, entry);
1118                        spin_unlock(&vma->vm_mm->page_table_lock);
1119                        flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
1120                        ret = 1;
1121                }
1122        } else
1123                spin_unlock(&vma->vm_mm->page_table_lock);
1124
1125        return ret;
1126}
1127
1128pmd_t *page_check_address_pmd(struct page *page,
1129                              struct mm_struct *mm,
1130                              unsigned long address,
1131                              enum page_check_address_pmd_flag flag)
1132{
1133        pgd_t *pgd;
1134        pud_t *pud;
1135        pmd_t *pmd, *ret = NULL;
1136
1137        if (address & ~HPAGE_PMD_MASK)
1138                goto out;
1139
1140        pgd = pgd_offset(mm, address);
1141        if (!pgd_present(*pgd))
1142                goto out;
1143
1144        pud = pud_offset(pgd, address);
1145        if (!pud_present(*pud))
1146                goto out;
1147
1148        pmd = pmd_offset(pud, address);
1149        if (pmd_none(*pmd))
1150                goto out;
1151        if (pmd_page(*pmd) != page)
1152                goto out;
1153        /*
1154         * split_vma() may create temporary aliased mappings. There is
1155         * no risk as long as all huge pmd are found and have their
1156         * splitting bit set before __split_huge_page_refcount
1157         * runs. Finding the same huge pmd more than once during the
1158         * same rmap walk is not a problem.
1159         */
1160        if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1161            pmd_trans_splitting(*pmd))
1162                goto out;
1163        if (pmd_trans_huge(*pmd)) {
1164                VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1165                          !pmd_trans_splitting(*pmd));
1166                ret = pmd;
1167        }
1168out:
1169        return ret;
1170}
1171
1172static int __split_huge_page_splitting(struct page *page,
1173                                       struct vm_area_struct *vma,
1174                                       unsigned long address)
1175{
1176        struct mm_struct *mm = vma->vm_mm;
1177        pmd_t *pmd;
1178        int ret = 0;
1179
1180        spin_lock(&mm->page_table_lock);
1181        pmd = page_check_address_pmd(page, mm, address,
1182                                     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1183        if (pmd) {
1184                /*
1185                 * We can't temporarily set the pmd to null in order
1186                 * to split it, the pmd must remain marked huge at all
1187                 * times or the VM won't take the pmd_trans_huge paths
1188                 * and it won't wait on the anon_vma->root->mutex to
1189                 * serialize against split_huge_page*.
1190                 */
1191                pmdp_splitting_flush_notify(vma, address, pmd);
1192                ret = 1;
1193        }
1194        spin_unlock(&mm->page_table_lock);
1195
1196        return ret;
1197}
1198
1199static void __split_huge_page_refcount(struct page *page)
1200{
1201        int i;
1202        unsigned long head_index = page->index;
1203        struct zone *zone = page_zone(page);
1204        int zonestat;
1205        int tail_count = 0;
1206
 1207        /* prevent PageLRU from going away from under us, and freeze lru stats */
1208        spin_lock_irq(&zone->lru_lock);
1209        compound_lock(page);
1210
1211        for (i = 1; i < HPAGE_PMD_NR; i++) {
1212                struct page *page_tail = page + i;
1213
1214                /* tail_page->_mapcount cannot change */
1215                BUG_ON(page_mapcount(page_tail) < 0);
1216                tail_count += page_mapcount(page_tail);
1217                /* check for overflow */
1218                BUG_ON(tail_count < 0);
1219                BUG_ON(atomic_read(&page_tail->_count) != 0);
1220                /*
1221                 * tail_page->_count is zero and not changing from
1222                 * under us. But get_page_unless_zero() may be running
1223                 * from under us on the tail_page. If we used
1224                 * atomic_set() below instead of atomic_add(), we
1225                 * would then run atomic_set() concurrently with
1226                 * get_page_unless_zero(), and atomic_set() is
1227                 * implemented in C not using locked ops. spin_unlock
 1228                 * on x86 sometimes uses locked ops because of PPro
1229                 * errata 66, 92, so unless somebody can guarantee
1230                 * atomic_set() here would be safe on all archs (and
1231                 * not only on x86), it's safer to use atomic_add().
1232                 */
1233                atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1234                           &page_tail->_count);
1235
1236                /* after clearing PageTail the gup refcount can be released */
1237                smp_mb();
1238
1239                /*
 1240                 * retain the hwpoison flag of the poisoned tail page:
 1241                 *   otherwise memory-failure could kill the wrong
 1242                 *   process on a guest machine (KVM).
1243                 */
1244                page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1245                page_tail->flags |= (page->flags &
1246                                     ((1L << PG_referenced) |
1247                                      (1L << PG_swapbacked) |
1248                                      (1L << PG_mlocked) |
1249                                      (1L << PG_uptodate)));
1250                page_tail->flags |= (1L << PG_dirty);
1251
1252                /* clear PageTail before overwriting first_page */
1253                smp_wmb();
1254
1255                /*
1256                 * __split_huge_page_splitting() already set the
1257                 * splitting bit in all pmd that could map this
1258                 * hugepage, that will ensure no CPU can alter the
1259                 * mapcount on the head page. The mapcount is only
1260                 * accounted in the head page and it has to be
1261                 * transferred to all tail pages in the below code. So
 1262                 * for this code to be safe, during the split the mapcount
1263                 * can't change. But that doesn't mean userland can't
1264                 * keep changing and reading the page contents while
1265                 * we transfer the mapcount, so the pmd splitting
1266                 * status is achieved setting a reserved bit in the
1267                 * pmd, not by clearing the present bit.
1268                */
1269                page_tail->_mapcount = page->_mapcount;
1270
1271                BUG_ON(page_tail->mapping);
1272                page_tail->mapping = page->mapping;
1273
1274                page_tail->index = ++head_index;
1275
1276                BUG_ON(!PageAnon(page_tail));
1277                BUG_ON(!PageUptodate(page_tail));
1278                BUG_ON(!PageDirty(page_tail));
1279                BUG_ON(!PageSwapBacked(page_tail));
1280
1281                mem_cgroup_split_huge_fixup(page, page_tail);
1282
1283                lru_add_page_tail(zone, page, page_tail);
1284        }
1285        atomic_sub(tail_count, &page->_count);
1286        BUG_ON(atomic_read(&page->_count) <= 0);
1287
1288        __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1289        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1290
1291        /*
1292         * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
1293         * so adjust those appropriately if this page is on the LRU.
1294         */
1295        if (PageLRU(page)) {
1296                zonestat = NR_LRU_BASE + page_lru(page);
1297                __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
1298        }
1299
1300        ClearPageCompound(page);
1301        compound_unlock(page);
1302        spin_unlock_irq(&zone->lru_lock);
1303
1304        for (i = 1; i < HPAGE_PMD_NR; i++) {
1305                struct page *page_tail = page + i;
1306                BUG_ON(page_count(page_tail) <= 0);
1307                /*
 1308                 * Tail pages may be freed if there wasn't any mapping,
 1309                 * e.g. if add_to_swap() is running on an lru page that
1310                 * had its mapping zapped. And freeing these pages
1311                 * requires taking the lru_lock so we do the put_page
1312                 * of the tail pages after the split is complete.
1313                 */
1314                put_page(page_tail);
1315        }
1316
1317        /*
1318         * Only the head page (now become a regular page) is required
1319         * to be pinned by the caller.
1320         */
1321        BUG_ON(page_count(page) <= 0);
1322}
1323
1324static int __split_huge_page_map(struct page *page,
1325                                 struct vm_area_struct *vma,
1326                                 unsigned long address)
1327{
1328        struct mm_struct *mm = vma->vm_mm;
1329        pmd_t *pmd, _pmd;
1330        int ret = 0, i;
1331        pgtable_t pgtable;
1332        unsigned long haddr;
1333
1334        spin_lock(&mm->page_table_lock);
1335        pmd = page_check_address_pmd(page, mm, address,
1336                                     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1337        if (pmd) {
1338                pgtable = get_pmd_huge_pte(mm);
1339                pmd_populate(mm, &_pmd, pgtable);
1340
1341                for (i = 0, haddr = address; i < HPAGE_PMD_NR;
1342                     i++, haddr += PAGE_SIZE) {
1343                        pte_t *pte, entry;
1344                        BUG_ON(PageCompound(page+i));
1345                        entry = mk_pte(page + i, vma->vm_page_prot);
1346                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1347                        if (!pmd_write(*pmd))
1348                                entry = pte_wrprotect(entry);
1349                        else
1350                                BUG_ON(page_mapcount(page) != 1);
1351                        if (!pmd_young(*pmd))
1352                                entry = pte_mkold(entry);
1353                        pte = pte_offset_map(&_pmd, haddr);
1354                        BUG_ON(!pte_none(*pte));
1355                        set_pte_at(mm, haddr, pte, entry);
1356                        pte_unmap(pte);
1357                }
1358
1359                mm->nr_ptes++;
1360                smp_wmb(); /* make pte visible before pmd */
1361                /*
1362                 * Up to this point the pmd is present and huge and
1363                 * userland has the whole access to the hugepage
1364                 * during the split (which happens in place). If we
1365                 * overwrite the pmd with the not-huge version
1366                 * pointing to the pte here (which of course we could
1367                 * if all CPUs were bug free), userland could trigger
1368                 * a small page size TLB miss on the small sized TLB
1369                 * while the hugepage TLB entry is still established
 1370                 * in the huge TLB. Some CPUs don't like that. See
 1371                 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
 1372                 * Erratum 383 on page 93. Intel should be safe but
 1373                 * also warns that it's only safe if the permission
 1374                 * and cache attributes of the two entries loaded in
 1375                 * the two TLBs are identical (which should be the case
1376                 * here). But it is generally safer to never allow
1377                 * small and huge TLB entries for the same virtual
1378                 * address to be loaded simultaneously. So instead of
1379                 * doing "pmd_populate(); flush_tlb_range();" we first
1380                 * mark the current pmd notpresent (atomically because
1381                 * here the pmd_trans_huge and pmd_trans_splitting
1382                 * must remain set at all times on the pmd until the
1383                 * split is complete for this pmd), then we flush the
1384                 * SMP TLB and finally we write the non-huge version
1385                 * of the pmd entry with pmd_populate.
1386                 */
1387                set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
1388                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1389                pmd_populate(mm, pmd, pgtable);
1390                ret = 1;
1391        }
1392        spin_unlock(&mm->page_table_lock);
1393
1394        return ret;
1395}
1396
1397/* must be called with anon_vma->root->mutex held */
1398static void __split_huge_page(struct page *page,
1399                              struct anon_vma *anon_vma)
1400{
1401        int mapcount, mapcount2;
1402        struct anon_vma_chain *avc;
1403
1404        BUG_ON(!PageHead(page));
1405        BUG_ON(PageTail(page));
1406
1407        mapcount = 0;
1408        list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1409                struct vm_area_struct *vma = avc->vma;
1410                unsigned long addr = vma_address(page, vma);
1411                BUG_ON(is_vma_temporary_stack(vma));
1412                if (addr == -EFAULT)
1413                        continue;
1414                mapcount += __split_huge_page_splitting(page, vma, addr);
1415        }
1416        /*
1417         * It is critical that new vmas are added to the tail of the
1418         * anon_vma list. This guarantees that if copy_huge_pmd() runs
1419         * and establishes a child pmd before
1420         * __split_huge_page_splitting() freezes the parent pmd (so if
1421         * we fail to prevent copy_huge_pmd() from running until the
1422         * whole __split_huge_page() is complete), we will still see
1423         * the newly established pmd of the child later during the
1424         * walk, to be able to set it as pmd_trans_splitting too.
1425         */
1426        if (mapcount != page_mapcount(page))
1427                printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1428                       mapcount, page_mapcount(page));
1429        BUG_ON(mapcount != page_mapcount(page));
1430
1431        __split_huge_page_refcount(page);
1432
1433        mapcount2 = 0;
1434        list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1435                struct vm_area_struct *vma = avc->vma;
1436                unsigned long addr = vma_address(page, vma);
1437                BUG_ON(is_vma_temporary_stack(vma));
1438                if (addr == -EFAULT)
1439                        continue;
1440                mapcount2 += __split_huge_page_map(page, vma, addr);
1441        }
1442        if (mapcount != mapcount2)
1443                printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1444                       mapcount, mapcount2, page_mapcount(page));
1445        BUG_ON(mapcount != mapcount2);
1446}
1447
1448int split_huge_page(struct page *page)
1449{
1450        struct anon_vma *anon_vma;
1451        int ret = 1;
1452
1453        BUG_ON(!PageAnon(page));
1454        anon_vma = page_lock_anon_vma(page);
1455        if (!anon_vma)
1456                goto out;
1457        ret = 0;
1458        if (!PageCompound(page))
1459                goto out_unlock;
1460
1461        BUG_ON(!PageSwapBacked(page));
1462        __split_huge_page(page, anon_vma);
1463        count_vm_event(THP_SPLIT);
1464
1465        BUG_ON(PageCompound(page));
1466out_unlock:
1467        page_unlock_anon_vma(anon_vma);
1468out:
1469        return ret;
1470}
1471
1472#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
1473                   VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
1474
1475int hugepage_madvise(struct vm_area_struct *vma,
1476                     unsigned long *vm_flags, int advice)
1477{
1478        switch (advice) {
1479        case MADV_HUGEPAGE:
1480                /*
1481                 * Be somewhat over-protective like KSM for now!
1482                 */
1483                if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1484                        return -EINVAL;
1485                *vm_flags &= ~VM_NOHUGEPAGE;
1486                *vm_flags |= VM_HUGEPAGE;
1487                /*
1488                 * If the vma becomes good for khugepaged to scan,
1489                 * register it here without waiting for a page fault that
1490                 * may not happen any time soon.
1491                 */
1492                if (unlikely(khugepaged_enter_vma_merge(vma)))
1493                        return -ENOMEM;
1494                break;
1495        case MADV_NOHUGEPAGE:
1496                /*
1497                 * Be somewhat over-protective like KSM for now!
1498                 */
1499                if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1500                        return -EINVAL;
1501                *vm_flags &= ~VM_HUGEPAGE;
1502                *vm_flags |= VM_NOHUGEPAGE;
1503                /*
1504                 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1505                 * this vma even if the mm stays registered in khugepaged
1506                 * (as it may have been registered before VM_NOHUGEPAGE was set).
1507                 */
1508                break;
1509        }
1510
1511        return 0;
1512}
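
/*
 * Illustrative userspace sketch (kept under #if 0, never built as part
 * of this file): exercising hugepage_madvise() through madvise(2).  It
 * assumes a THP-enabled kernel; the fallback MADV_* values mirror the
 * Linux uapi and are only defined in case an old <sys/mman.h> lacks them.
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE	14	/* value from the Linux uapi */
#endif
#ifndef MADV_NOHUGEPAGE
#define MADV_NOHUGEPAGE	15	/* value from the Linux uapi */
#endif

int main(void)
{
	size_t len = 8ul << 20;	/* a few huge page sized units */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* opt the range in: clears VM_NOHUGEPAGE, sets VM_HUGEPAGE */
	if (madvise(p, len, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");
	/* opt it back out: clears VM_HUGEPAGE, sets VM_NOHUGEPAGE */
	if (madvise(p, len, MADV_NOHUGEPAGE))
		perror("madvise(MADV_NOHUGEPAGE)");
	munmap(p, len);
	return 0;
}
#endif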
1513
1514static int __init khugepaged_slab_init(void)
1515{
1516        mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1517                                          sizeof(struct mm_slot),
1518                                          __alignof__(struct mm_slot), 0, NULL);
1519        if (!mm_slot_cache)
1520                return -ENOMEM;
1521
1522        return 0;
1523}
1524
1525static void __init khugepaged_slab_free(void)
1526{
1527        kmem_cache_destroy(mm_slot_cache);
1528        mm_slot_cache = NULL;
1529}
1530
1531static inline struct mm_slot *alloc_mm_slot(void)
1532{
1533        if (!mm_slot_cache)     /* initialization failed */
1534                return NULL;
1535        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1536}
1537
1538static inline void free_mm_slot(struct mm_slot *mm_slot)
1539{
1540        kmem_cache_free(mm_slot_cache, mm_slot);
1541}
1542
1543static int __init mm_slots_hash_init(void)
1544{
1545        mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1546                                GFP_KERNEL);
1547        if (!mm_slots_hash)
1548                return -ENOMEM;
1549        return 0;
1550}
1551
1552#if 0
1553static void __init mm_slots_hash_free(void)
1554{
1555        kfree(mm_slots_hash);
1556        mm_slots_hash = NULL;
1557}
1558#endif
1559
1560static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1561{
1562        struct mm_slot *mm_slot;
1563        struct hlist_head *bucket;
1564        struct hlist_node *node;
1565
1566        bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1567                                % MM_SLOTS_HASH_HEADS];
1568        hlist_for_each_entry(mm_slot, node, bucket, hash) {
1569                if (mm == mm_slot->mm)
1570                        return mm_slot;
1571        }
1572        return NULL;
1573}
1574
1575static void insert_to_mm_slots_hash(struct mm_struct *mm,
1576                                    struct mm_slot *mm_slot)
1577{
1578        struct hlist_head *bucket;
1579
1580        bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1581                                % MM_SLOTS_HASH_HEADS];
1582        mm_slot->mm = mm;
1583        hlist_add_head(&mm_slot->hash, bucket);
1584}
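
/*
 * Illustrative standalone sketch (kept under #if 0, never built here) of
 * the bucket selection used by get_mm_slot() and insert_to_mm_slots_hash()
 * above: hash the mm pointer scaled by the object size, modulo the number
 * of buckets.  "struct fake_mm" and slot_hash() are made-up names used
 * purely for illustration.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define NR_BUCKETS 1024			/* mirrors MM_SLOTS_HASH_HEADS */

struct fake_mm { char pad[896]; };	/* stand-in for struct mm_struct */

static unsigned long slot_hash(const struct fake_mm *mm)
{
	return ((uintptr_t)mm / sizeof(struct fake_mm)) % NR_BUCKETS;
}

int main(void)
{
	static struct fake_mm mms[4];
	int i;

	for (i = 0; i < 4; i++)
		printf("mm %p -> bucket %lu\n",
		       (void *)&mms[i], slot_hash(&mms[i]));
	return 0;
}
#endif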
1585
1586static inline int khugepaged_test_exit(struct mm_struct *mm)
1587{
1588        return atomic_read(&mm->mm_users) == 0;
1589}
1590
1591int __khugepaged_enter(struct mm_struct *mm)
1592{
1593        struct mm_slot *mm_slot;
1594        int wakeup;
1595
1596        mm_slot = alloc_mm_slot();
1597        if (!mm_slot)
1598                return -ENOMEM;
1599
1600        /* __khugepaged_exit() must not run from under us */
1601        VM_BUG_ON(khugepaged_test_exit(mm));
1602        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1603                free_mm_slot(mm_slot);
1604                return 0;
1605        }
1606
1607        spin_lock(&khugepaged_mm_lock);
1608        insert_to_mm_slots_hash(mm, mm_slot);
1609        /*
1610         * Insert just behind the scanning cursor, to let the area settle
1611         * down a little.
1612         */
1613        wakeup = list_empty(&khugepaged_scan.mm_head);
1614        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1615        spin_unlock(&khugepaged_mm_lock);
1616
1617        atomic_inc(&mm->mm_count);
1618        if (wakeup)
1619                wake_up_interruptible(&khugepaged_wait);
1620
1621        return 0;
1622}
1623
1624int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1625{
1626        unsigned long hstart, hend;
1627        if (!vma->anon_vma)
1628                /*
1629                 * Not yet faulted in so we will register later in the
1630                 * page fault if needed.
1631                 */
1632                return 0;
1633        if (vma->vm_ops)
1634                /* khugepaged not yet working on file or special mappings */
1635                return 0;
1636        /*
1637         * If is_linear_pfn_mapping() is true then is_pfn_mapping() must be
1638         * true too, verify it here.
1639         */
1640        VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1641        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1642        hend = vma->vm_end & HPAGE_PMD_MASK;
1643        if (hstart < hend)
1644                return khugepaged_enter(vma);
1645        return 0;
1646}
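
/*
 * Illustrative standalone sketch (kept under #if 0, never built here) of
 * the hstart/hend rounding done in khugepaged_enter_vma_merge() above.
 * It assumes a 2MB huge page size (the common x86-64 case); the vma
 * bounds are arbitrary example values.
 */
#if 0
#include <stdio.h>

#define EX_HPAGE_SIZE	(2ul << 20)
#define EX_HPAGE_MASK	(~(EX_HPAGE_SIZE - 1))

int main(void)
{
	unsigned long vm_start = 0x00601234ul;
	unsigned long vm_end   = 0x01a05000ul;
	/* round the start up and the end down to huge page boundaries */
	unsigned long hstart = (vm_start + ~EX_HPAGE_MASK) & EX_HPAGE_MASK;
	unsigned long hend   = vm_end & EX_HPAGE_MASK;

	printf("hstart=%#lx hend=%#lx -> %s\n", hstart, hend,
	       hstart < hend ? "room for at least one hugepage"
			     : "too small, khugepaged skips it");
	return 0;
}
#endif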
1647
1648void __khugepaged_exit(struct mm_struct *mm)
1649{
1650        struct mm_slot *mm_slot;
1651        int free = 0;
1652
1653        spin_lock(&khugepaged_mm_lock);
1654        mm_slot = get_mm_slot(mm);
1655        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1656                hlist_del(&mm_slot->hash);
1657                list_del(&mm_slot->mm_node);
1658                free = 1;
1659        }
1660        spin_unlock(&khugepaged_mm_lock);
1661
1662        if (free) {
1663                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1664                free_mm_slot(mm_slot);
1665                mmdrop(mm);
1666        } else if (mm_slot) {
1667                /*
1668                 * This is required to serialize against
1669                 * khugepaged_test_exit() (which is guaranteed to run
1670                 * under mmap_sem read mode). Stop here (once we
1671                 * return, all pagetables will be destroyed) until
1672                 * khugepaged has finished working on the pagetables
1673                 * under the mmap_sem.
1674                 */
1675                down_write(&mm->mmap_sem);
1676                up_write(&mm->mmap_sem);
1677        }
1678}
1679
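/*
 * Undo the isolation of a single small page during collapse: drop the
 * NR_ISOLATED_ANON accounting, unlock the page and put it back on its
 * LRU list.
 */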
1680static void release_pte_page(struct page *page)
1681{
1682        /* 0 stands for page_is_file_cache(page) == false */
1683        dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1684        unlock_page(page);
1685        putback_lru_page(page);
1686}
1687
1688static void release_pte_pages(pte_t *pte, pte_t *_pte)
1689{
1690        while (--_pte >= pte) {
1691                pte_t pteval = *_pte;
1692                if (!pte_none(pteval))
1693                        release_pte_page(pte_page(pteval));
1694        }
1695}
1696
1697static void release_all_pte_pages(pte_t *pte)
1698{
1699        release_pte_pages(pte, pte + HPAGE_PMD_NR);
1700}
1701
1702static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1703                                        unsigned long address,
1704                                        pte_t *pte)
1705{
1706        struct page *page;
1707        pte_t *_pte;
1708        int referenced = 0, isolated = 0, none = 0;
1709        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1710             _pte++, address += PAGE_SIZE) {
1711                pte_t pteval = *_pte;
1712                if (pte_none(pteval)) {
1713                        if (++none <= khugepaged_max_ptes_none)
1714                                continue;
1715                        else {
1716                                release_pte_pages(pte, _pte);
1717                                goto out;
1718                        }
1719                }
1720                if (!pte_present(pteval) || !pte_write(pteval)) {
1721                        release_pte_pages(pte, _pte);
1722                        goto out;
1723                }
1724                page = vm_normal_page(vma, address, pteval);
1725                if (unlikely(!page)) {
1726                        release_pte_pages(pte, _pte);
1727                        goto out;
1728                }
1729                VM_BUG_ON(PageCompound(page));
1730                BUG_ON(!PageAnon(page));
1731                VM_BUG_ON(!PageSwapBacked(page));
1732
1733                /* cannot use mapcount: can't collapse if there's a gup pin */
1734                if (page_count(page) != 1) {
1735                        release_pte_pages(pte, _pte);
1736                        goto out;
1737                }
1738                /*
1739                 * We can do it before isolate_lru_page because the
1740                 * page can't be freed from under us. NOTE: PG_lock
1741                 * is needed to serialize against split_huge_page
1742                 * when invoked from the VM.
1743                 */
1744                if (!trylock_page(page)) {
1745                        release_pte_pages(pte, _pte);
1746                        goto out;
1747                }
1748                /*
1749                 * Isolate the page to avoid collapsing an hugepage
1750                 * currently in use by the VM.
1751                 */
1752                if (isolate_lru_page(page)) {
1753                        unlock_page(page);
1754                        release_pte_pages(pte, _pte);
1755                        goto out;
1756                }
1757                /* 0 stands for page_is_file_cache(page) == false */
1758                inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1759                VM_BUG_ON(!PageLocked(page));
1760                VM_BUG_ON(PageLRU(page));
1761
1762                /* If no mapped pte is young, don't collapse the page */
1763                if (pte_young(pteval) || PageReferenced(page) ||
1764                    mmu_notifier_test_young(vma->vm_mm, address))
1765                        referenced = 1;
1766        }
1767        if (unlikely(!referenced))
1768                release_all_pte_pages(pte);
1769        else
1770                isolated = 1;
1771out:
1772        return isolated;
1773}
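
/*
 * Illustrative standalone sketch (kept under #if 0, never built here) of
 * the max_ptes_none policy applied above: a pmd range is only worth
 * collapsing if it has no more empty pte slots than the threshold.  The
 * array of flags and worth_collapsing() are toy stand-ins; 512 matches
 * HPAGE_PMD_NR for 2MB huge pages on x86-64.
 */
#if 0
#include <stdio.h>

#define PTES_PER_PMD 512

static int worth_collapsing(const unsigned char *mapped, int max_none)
{
	int i, none = 0;

	for (i = 0; i < PTES_PER_PMD; i++)
		if (!mapped[i] && ++none > max_none)
			return 0;	/* too sparse, give up */
	return 1;
}

int main(void)
{
	static unsigned char mapped[PTES_PER_PMD];

	mapped[0] = 1;			/* a single mapped pte */
	printf("max_none=511: %d\n", worth_collapsing(mapped, 511));
	printf("max_none=0:   %d\n", worth_collapsing(mapped, 0));
	return 0;
}
#endif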
1774
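/*
 * Copy the HPAGE_PMD_NR isolated small pages (or zero-fill the holes left
 * by pte_none entries) into the new hugepage, tearing down the old ptes,
 * rmap and LRU isolation as we go.
 */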
1775static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1776                                      struct vm_area_struct *vma,
1777                                      unsigned long address,
1778                                      spinlock_t *ptl)
1779{
1780        pte_t *_pte;
1781        for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1782                pte_t pteval = *_pte;
1783                struct page *src_page;
1784
1785                if (pte_none(pteval)) {
1786                        clear_user_highpage(page, address);
1787                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1788                } else {
1789                        src_page = pte_page(pteval);
1790                        copy_user_highpage(page, src_page, address, vma);
1791                        VM_BUG_ON(page_mapcount(src_page) != 1);
1792                        VM_BUG_ON(page_count(src_page) != 2);
1793                        release_pte_page(src_page);
1794                        /*
1795                         * ptl mostly unnecessary, but preempt has to
1796                         * be disabled to update the per-cpu stats
1797                         * inside page_remove_rmap().
1798                         */
1799                        spin_lock(ptl);
1800                        /*
1801                         * paravirt calls inside pte_clear here are
1802                         * superfluous.
1803                         */
1804                        pte_clear(vma->vm_mm, address, _pte);
1805                        page_remove_rmap(src_page);
1806                        spin_unlock(ptl);
1807                        free_page_and_swap_cache(src_page);
1808                }
1809
1810                address += PAGE_SIZE;
1811                page++;
1812        }
1813}
1814
1815static void collapse_huge_page(struct mm_struct *mm,
1816                               unsigned long address,
1817                               struct page **hpage,
1818                               struct vm_area_struct *vma,
1819                               int node)
1820{
1821        pgd_t *pgd;
1822        pud_t *pud;
1823        pmd_t *pmd, _pmd;
1824        pte_t *pte;
1825        pgtable_t pgtable;
1826        struct page *new_page;
1827        spinlock_t *ptl;
1828        int isolated;
1829        unsigned long hstart, hend;
1830
1831        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1832#ifndef CONFIG_NUMA
1833        up_read(&mm->mmap_sem);
1834        VM_BUG_ON(!*hpage);
1835        new_page = *hpage;
1836#else
1837        VM_BUG_ON(*hpage);
1838        /*
1839         * Allocate the page while the vma is still valid and under
1840         * the mmap_sem read mode so there is no memory allocation
1841         * later when we take the mmap_sem in write mode. This is more
1842         * friendly behavior (OTOH it may actually hide bugs) to
1843         * filesystems in userland with daemons allocating memory in
1844         * the userland I/O paths.  Allocating memory with the
1845         * mmap_sem in read mode is also a good idea to allow greater
1846         * scalability.
1847         */
1848        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1849                                      node, __GFP_OTHER_NODE);
1850
1851        /*
1852         * After allocating the hugepage, release the mmap_sem read lock in
1853         * preparation for taking it in write mode.
1854         */
1855        up_read(&mm->mmap_sem);
1856        if (unlikely(!new_page)) {
1857                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1858                *hpage = ERR_PTR(-ENOMEM);
1859                return;
1860        }
1861#endif
1862
1863        count_vm_event(THP_COLLAPSE_ALLOC);
1864        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1865#ifdef CONFIG_NUMA
1866                put_page(new_page);
1867#endif
1868                return;
1869        }
1870
1871        /*
1872         * Prevent all access to the pagetables, with the exception of
1873         * gup_fast (handled later by the ptep_clear_flush) and the VM
1874         * (handled by the anon_vma lock + PG_lock).
1875         */
1876        down_write(&mm->mmap_sem);
1877        if (unlikely(khugepaged_test_exit(mm)))
1878                goto out;
1879
1880        vma = find_vma(mm, address);
1881        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1882        hend = vma->vm_end & HPAGE_PMD_MASK;
1883        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1884                goto out;
1885
1886        if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
1887            (vma->vm_flags & VM_NOHUGEPAGE))
1888                goto out;
1889
1890        if (!vma->anon_vma || vma->vm_ops)
1891                goto out;
1892        if (is_vma_temporary_stack(vma))
1893                goto out;
1894        /*
1895         * If is_linear_pfn_mapping() is true then is_pfn_mapping() must be
1896         * true too, verify it here.
1897         */
1898        VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1899
1900        pgd = pgd_offset(mm, address);
1901        if (!pgd_present(*pgd))
1902                goto out;
1903
1904        pud = pud_offset(pgd, address);
1905        if (!pud_present(*pud))
1906                goto out;
1907
1908        pmd = pmd_offset(pud, address);
1909        /* pmd can't go away or become huge under us */
1910        if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1911                goto out;
1912
1913        anon_vma_lock(vma->anon_vma);
1914
1915        pte = pte_offset_map(pmd, address);
1916        ptl = pte_lockptr(mm, pmd);
1917
1918        spin_lock(&mm->page_table_lock); /* probably unnecessary */
1919        /*
1920         * After this gup_fast can't run anymore. This also removes
1921         * any huge TLB entry from the CPU so we won't allow
1922         * huge and small TLB entries for the same virtual address
1923         * to avoid the risk of CPU bugs in that area.
1924         */
1925        _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1926        spin_unlock(&mm->page_table_lock);
1927
1928        spin_lock(ptl);
1929        isolated = __collapse_huge_page_isolate(vma, address, pte);
1930        spin_unlock(ptl);
1931
1932        if (unlikely(!isolated)) {
1933                pte_unmap(pte);
1934                spin_lock(&mm->page_table_lock);
1935                BUG_ON(!pmd_none(*pmd));
1936                set_pmd_at(mm, address, pmd, _pmd);
1937                spin_unlock(&mm->page_table_lock);
1938                anon_vma_unlock(vma->anon_vma);
1939                goto out;
1940        }
1941
1942        /*
1943         * All pages are isolated and locked so anon_vma rmap
1944         * can't run anymore.
1945         */
1946        anon_vma_unlock(vma->anon_vma);
1947
1948        __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1949        pte_unmap(pte);
1950        __SetPageUptodate(new_page);
1951        pgtable = pmd_pgtable(_pmd);
1952        VM_BUG_ON(page_count(pgtable) != 1);
1953        VM_BUG_ON(page_mapcount(pgtable) != 0);
1954
1955        _pmd = mk_pmd(new_page, vma->vm_page_prot);
1956        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1957        _pmd = pmd_mkhuge(_pmd);
1958
1959        /*
1960         * spin_lock() below is not the equivalent of smp_wmb(), so
1961         * this is needed to prevent the copy_huge_page writes from
1962         * becoming visible after the set_pmd_at() write.
1963         */
1964        smp_wmb();
1965
1966        spin_lock(&mm->page_table_lock);
1967        BUG_ON(!pmd_none(*pmd));
1968        page_add_new_anon_rmap(new_page, vma, address);
1969        set_pmd_at(mm, address, pmd, _pmd);
1970        update_mmu_cache(vma, address, _pmd);
1971        prepare_pmd_huge_pte(pgtable, mm);
1972        mm->nr_ptes--;
1973        spin_unlock(&mm->page_table_lock);
1974
1975#ifndef CONFIG_NUMA
1976        *hpage = NULL;
1977#endif
1978        khugepaged_pages_collapsed++;
1979out_up_write:
1980        up_write(&mm->mmap_sem);
1981        return;
1982
1983out:
1984        mem_cgroup_uncharge_page(new_page);
1985#ifdef CONFIG_NUMA
1986        put_page(new_page);
1987#endif
1988        goto out_up_write;
1989}
1990
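/*
 * Scan one pmd-sized range of the vma: walk its ptes and decide whether
 * the range is worth collapsing (enough young, writable, unpinned anon
 * pages, and no more than khugepaged_max_ptes_none empty slots).  On
 * success this calls collapse_huge_page(), which returns with the
 * mmap_sem released.
 */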
1991static int khugepaged_scan_pmd(struct mm_struct *mm,
1992                               struct vm_area_struct *vma,
1993                               unsigned long address,
1994                               struct page **hpage)
1995{
1996        pgd_t *pgd;
1997        pud_t *pud;
1998        pmd_t *pmd;
1999        pte_t *pte, *_pte;
2000        int ret = 0, referenced = 0, none = 0;
2001        struct page *page;
2002        unsigned long _address;
2003        spinlock_t *ptl;
2004        int node = -1;
2005
2006        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2007
2008        pgd = pgd_offset(mm, address);
2009        if (!pgd_present(*pgd))
2010                goto out;
2011
2012        pud = pud_offset(pgd, address);
2013        if (!pud_present(*pud))
2014                goto out;
2015
2016        pmd = pmd_offset(pud, address);
2017        if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
2018                goto out;
2019
2020        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2021        for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2022             _pte++, _address += PAGE_SIZE) {
2023                pte_t pteval = *_pte;
2024                if (pte_none(pteval)) {
2025                        if (++none <= khugepaged_max_ptes_none)
2026                                continue;
2027                        else
2028                                goto out_unmap;
2029                }
2030                if (!pte_present(pteval) || !pte_write(pteval))
2031                        goto out_unmap;
2032                page = vm_normal_page(vma, _address, pteval);
2033                if (unlikely(!page))
2034                        goto out_unmap;
2035                /*
2036                 * Choose the node of the first page. This could
2037                 * be more sophisticated and look at more pages,
2038                 * but isn't for now.
2039                 */
2040                if (node == -1)
2041                        node = page_to_nid(page);
2042                VM_BUG_ON(PageCompound(page));
2043                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2044                        goto out_unmap;
2045                /* cannot use mapcount: can't collapse if there's a gup pin */
2046                if (page_count(page) != 1)
2047                        goto out_unmap;
2048                if (pte_young(pteval) || PageReferenced(page) ||
2049                    mmu_notifier_test_young(vma->vm_mm, _address))
2050                        referenced = 1;
2051        }
2052        if (referenced)
2053                ret = 1;
2054out_unmap:
2055        pte_unmap_unlock(pte, ptl);
2056        if (ret)
2057                /* collapse_huge_page will return with the mmap_sem released */
2058                collapse_huge_page(mm, address, hpage, vma, node);
2059out:
2060        return ret;
2061}
2062
2063static void collect_mm_slot(struct mm_slot *mm_slot)
2064{
2065        struct mm_struct *mm = mm_slot->mm;
2066
2067        VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
2068
2069        if (khugepaged_test_exit(mm)) {
2070                /* free mm_slot */
2071                hlist_del(&mm_slot->hash);
2072                list_del(&mm_slot->mm_node);
2073
2074                /*
2075                 * Not strictly needed because the mm exited already.
2076                 *
2077                 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2078                 */
2079
2080                /* khugepaged_mm_lock actually not necessary for the below */
2081                free_mm_slot(mm_slot);
2082                mmdrop(mm);
2083        }
2084}
2085
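/*
 * Scan up to 'pages' ptes starting from the global khugepaged_scan
 * cursor, possibly spanning several vmas of the current mm.  Called and
 * returns with khugepaged_mm_lock held, but drops it (and takes the
 * mmap_sem for reading) while the actual scanning runs; returns the
 * amount of progress made so the caller can account it against its quota.
 */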
2086static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2087                                            struct page **hpage)
2088        __releases(&khugepaged_mm_lock)
2089        __acquires(&khugepaged_mm_lock)
2090{
2091        struct mm_slot *mm_slot;
2092        struct mm_struct *mm;
2093        struct vm_area_struct *vma;
2094        int progress = 0;
2095
2096        VM_BUG_ON(!pages);
2097        VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
2098
2099        if (khugepaged_scan.mm_slot)
2100                mm_slot = khugepaged_scan.mm_slot;
2101        else {
2102                mm_slot = list_entry(khugepaged_scan.mm_head.next,
2103                                     struct mm_slot, mm_node);
2104                khugepaged_scan.address = 0;
2105                khugepaged_scan.mm_slot = mm_slot;
2106        }
2107        spin_unlock(&khugepaged_mm_lock);
2108
2109        mm = mm_slot->mm;
2110        down_read(&mm->mmap_sem);
2111        if (unlikely(khugepaged_test_exit(mm)))
2112                vma = NULL;
2113        else
2114                vma = find_vma(mm, khugepaged_scan.address);
2115
2116        progress++;
2117        for (; vma; vma = vma->vm_next) {
2118                unsigned long hstart, hend;
2119
2120                cond_resched();
2121                if (unlikely(khugepaged_test_exit(mm))) {
2122                        progress++;
2123                        break;
2124                }
2125
2126                if ((!(vma->vm_flags & VM_HUGEPAGE) &&
2127                     !khugepaged_always()) ||
2128                    (vma->vm_flags & VM_NOHUGEPAGE)) {
2129                skip:
2130                        progress++;
2131                        continue;
2132                }
2133                if (!vma->anon_vma || vma->vm_ops)
2134                        goto skip;
2135                if (is_vma_temporary_stack(vma))
2136                        goto skip;
2137                /*
2138                 * If is_linear_pfn_mapping() is true then is_pfn_mapping()
2139                 * must be true too, verify it here.
2140                 */
2141                VM_BUG_ON(is_linear_pfn_mapping(vma) ||
2142                          vma->vm_flags & VM_NO_THP);
2143
2144                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2145                hend = vma->vm_end & HPAGE_PMD_MASK;
2146                if (hstart >= hend)
2147                        goto skip;
2148                if (khugepaged_scan.address > hend)
2149                        goto skip;
2150                if (khugepaged_scan.address < hstart)
2151                        khugepaged_scan.address = hstart;
2152                VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2153
2154                while (khugepaged_scan.address < hend) {
2155                        int ret;
2156                        cond_resched();
2157                        if (unlikely(khugepaged_test_exit(mm)))
2158                                goto breakouterloop;
2159
2160                        VM_BUG_ON(khugepaged_scan.address < hstart ||
2161                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
2162                                  hend);
2163                        ret = khugepaged_scan_pmd(mm, vma,
2164                                                  khugepaged_scan.address,
2165                                                  hpage);
2166                        /* move to next address */
2167                        khugepaged_scan.address += HPAGE_PMD_SIZE;
2168                        progress += HPAGE_PMD_NR;
2169                        if (ret)
2170                                /* we released mmap_sem so break loop */
2171                                goto breakouterloop_mmap_sem;
2172                        if (progress >= pages)
2173                                goto breakouterloop;
2174                }
2175        }
2176breakouterloop:
2177        up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2178breakouterloop_mmap_sem:
2179
2180        spin_lock(&khugepaged_mm_lock);
2181        VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2182        /*
2183         * Release the current mm_slot if this mm is about to die, or
2184         * if we scanned all vmas of this mm.
2185         */
2186        if (khugepaged_test_exit(mm) || !vma) {
2187                /*
2188                 * Make sure that if mm_users is reaching zero while
2189                 * khugepaged runs here, khugepaged_exit will find
2190                 * mm_slot not pointing to the exiting mm.
2191                 */
2192                if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2193                        khugepaged_scan.mm_slot = list_entry(
2194                                mm_slot->mm_node.next,
2195                                struct mm_slot, mm_node);
2196                        khugepaged_scan.address = 0;
2197                } else {
2198                        khugepaged_scan.mm_slot = NULL;
2199                        khugepaged_full_scans++;
2200                }
2201
2202                collect_mm_slot(mm_slot);
2203        }
2204
2205        return progress;
2206}
2207
2208static int khugepaged_has_work(void)
2209{
2210        return !list_empty(&khugepaged_scan.mm_head) &&
2211                khugepaged_enabled();
2212}
2213
2214static int khugepaged_wait_event(void)
2215{
2216        return !list_empty(&khugepaged_scan.mm_head) ||
2217                !khugepaged_enabled();
2218}
2219
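/*
 * Run one scanning pass of up to khugepaged_pages_to_scan ptes,
 * preallocating the hugepage on !NUMA kernels and bailing out early if
 * the allocation failed, the thread was asked to stop or a freeze is
 * pending.  pass_through_head ends the pass once the scan cursor has
 * come back around to the head of the mm list a second time.
 */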
2220static void khugepaged_do_scan(struct page **hpage)
2221{
2222        unsigned int progress = 0, pass_through_head = 0;
2223        unsigned int pages = khugepaged_pages_to_scan;
2224
2225        barrier(); /* write khugepaged_pages_to_scan to local stack */
2226
2227        while (progress < pages) {
2228                cond_resched();
2229
2230#ifndef CONFIG_NUMA
2231                if (!*hpage) {
2232                        *hpage = alloc_hugepage(khugepaged_defrag());
2233                        if (unlikely(!*hpage)) {
2234                                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2235                                break;
2236                        }
2237                        count_vm_event(THP_COLLAPSE_ALLOC);
2238                }
2239#else
2240                if (IS_ERR(*hpage))
2241                        break;
2242#endif
2243
2244                if (unlikely(kthread_should_stop() || freezing(current)))
2245                        break;
2246
2247                spin_lock(&khugepaged_mm_lock);
2248                if (!khugepaged_scan.mm_slot)
2249                        pass_through_head++;
2250                if (khugepaged_has_work() &&
2251                    pass_through_head < 2)
2252                        progress += khugepaged_scan_mm_slot(pages - progress,
2253                                                            hpage);
2254                else
2255                        progress = pages;
2256                spin_unlock(&khugepaged_mm_lock);
2257        }
2258}
2259
2260static void khugepaged_alloc_sleep(void)
2261{
2262        wait_event_freezable_timeout(khugepaged_wait, false,
2263                        msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2264}
2265
2266#ifndef CONFIG_NUMA
2267static struct page *khugepaged_alloc_hugepage(void)
2268{
2269        struct page *hpage;
2270
2271        do {
2272                hpage = alloc_hugepage(khugepaged_defrag());
2273                if (!hpage) {
2274                        count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2275                        khugepaged_alloc_sleep();
2276                } else
2277                        count_vm_event(THP_COLLAPSE_ALLOC);
2278        } while (unlikely(!hpage) &&
2279                 likely(khugepaged_enabled()));
2280        return hpage;
2281}
2282#endif
2283
2284static void khugepaged_loop(void)
2285{
2286        struct page *hpage;
2287
2288#ifdef CONFIG_NUMA
2289        hpage = NULL;
2290#endif
2291        while (likely(khugepaged_enabled())) {
2292#ifndef CONFIG_NUMA
2293                hpage = khugepaged_alloc_hugepage();
2294                if (unlikely(!hpage))
2295                        break;
2296#else
2297                if (IS_ERR(hpage)) {
2298                        khugepaged_alloc_sleep();
2299                        hpage = NULL;
2300                }
2301#endif
2302
2303                khugepaged_do_scan(&hpage);
2304#ifndef CONFIG_NUMA
2305                if (hpage)
2306                        put_page(hpage);
2307#endif
2308                try_to_freeze();
2309                if (unlikely(kthread_should_stop()))
2310                        break;
2311                if (khugepaged_has_work()) {
2312                        if (!khugepaged_scan_sleep_millisecs)
2313                                continue;
2314                        wait_event_freezable_timeout(khugepaged_wait, false,
2315                            msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2316                } else if (khugepaged_enabled())
2317                        wait_event_freezable(khugepaged_wait,
2318                                             khugepaged_wait_event());
2319        }
2320}
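
/*
 * Illustrative userspace sketch (kept under #if 0, never built here):
 * reading the khugepaged tunables and counters that drive the loop
 * above.  It assumes the sysfs files documented for transparent
 * hugepages exist under /sys/kernel/mm/transparent_hugepage/khugepaged/
 * on the running kernel.
 */
#if 0
#include <stdio.h>

#define KDIR "/sys/kernel/mm/transparent_hugepage/khugepaged/"

static long read_tunable(const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), KDIR "%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("scan_sleep_millisecs:  %ld\n", read_tunable("scan_sleep_millisecs"));
	printf("alloc_sleep_millisecs: %ld\n", read_tunable("alloc_sleep_millisecs"));
	printf("pages_collapsed:       %ld\n", read_tunable("pages_collapsed"));
	printf("full_scans:            %ld\n", read_tunable("full_scans"));
	return 0;
}
#endif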
2321
2322static int khugepaged(void *none)
2323{
2324        struct mm_slot *mm_slot;
2325
2326        set_freezable();
2327        set_user_nice(current, 19);
2328
2329        /* serialize with start_khugepaged() */
2330        mutex_lock(&khugepaged_mutex);
2331
2332        for (;;) {
2333                mutex_unlock(&khugepaged_mutex);
2334                VM_BUG_ON(khugepaged_thread != current);
2335                khugepaged_loop();
2336                VM_BUG_ON(khugepaged_thread != current);
2337
2338                mutex_lock(&khugepaged_mutex);
2339                if (!khugepaged_enabled())
2340                        break;
2341                if (unlikely(kthread_should_stop()))
2342                        break;
2343        }
2344
2345        spin_lock(&khugepaged_mm_lock);
2346        mm_slot = khugepaged_scan.mm_slot;
2347        khugepaged_scan.mm_slot = NULL;
2348        if (mm_slot)
2349                collect_mm_slot(mm_slot);
2350        spin_unlock(&khugepaged_mm_lock);
2351
2352        khugepaged_thread = NULL;
2353        mutex_unlock(&khugepaged_mutex);
2354
2355        return 0;
2356}
2357
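/*
 * Split the huge pmd found at *pmd: pin the compound page under the
 * page_table_lock, then hand it to split_huge_page().  If someone else
 * already split it, simply return.
 */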
2358void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2359{
2360        struct page *page;
2361
2362        spin_lock(&mm->page_table_lock);
2363        if (unlikely(!pmd_trans_huge(*pmd))) {
2364                spin_unlock(&mm->page_table_lock);
2365                return;
2366        }
2367        page = pmd_page(*pmd);
2368        VM_BUG_ON(!page_count(page));
2369        get_page(page);
2370        spin_unlock(&mm->page_table_lock);
2371
2372        split_huge_page(page);
2373
2374        put_page(page);
2375        BUG_ON(pmd_trans_huge(*pmd));
2376}
2377
2378static void split_huge_page_address(struct mm_struct *mm,
2379                                    unsigned long address)
2380{
2381        pgd_t *pgd;
2382        pud_t *pud;
2383        pmd_t *pmd;
2384
2385        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2386
2387        pgd = pgd_offset(mm, address);
2388        if (!pgd_present(*pgd))
2389                return;
2390
2391        pud = pud_offset(pgd, address);
2392        if (!pud_present(*pud))
2393                return;
2394
2395        pmd = pmd_offset(pud, address);
2396        if (!pmd_present(*pmd))
2397                return;
2398        /*
2399         * Caller holds the mmap_sem write mode, so a huge pmd cannot
2400         * materialize from under us.
2401         */
2402        split_huge_page_pmd(mm, pmd);
2403}
2404
2405void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2406                             unsigned long start,
2407                             unsigned long end,
2408                             long adjust_next)
2409{
2410        /*
2411         * If the new start address isn't hpage aligned and it could
2412         * previously contain an hugepage: check if we need to split
2413         * an huge pmd.
2414         */
2415        if (start & ~HPAGE_PMD_MASK &&
2416            (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2417            (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2418                split_huge_page_address(vma->vm_mm, start);
2419
2420        /*
2421         * If the new end address isn't hpage aligned and it could
2422         * previously contain an hugepage: check if we need to split
2423         * an huge pmd.
2424         */
2425        if (end & ~HPAGE_PMD_MASK &&
2426            (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2427            (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2428                split_huge_page_address(vma->vm_mm, end);
2429
2430        /*
2431         * If we're also updating the vma->vm_next->vm_start, if the new
2432         * vm_next->vm_start isn't page aligned and it could previously
2433         * contain an hugepage: check if we need to split an huge pmd.
2434         */
2435        if (adjust_next > 0) {
2436                struct vm_area_struct *next = vma->vm_next;
2437                unsigned long nstart = next->vm_start;
2438                nstart += adjust_next << PAGE_SHIFT;
2439                if (nstart & ~HPAGE_PMD_MASK &&
2440                    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2441                    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2442                        split_huge_page_address(next->vm_mm, nstart);
2443        }
2444}
2445