linux/mm/hugetlb.c
   1/*
   2 * Generic hugetlb support.
   3 * (C) Nadia Yvette Chambers, April 2004
   4 */
   5#include <linux/list.h>
   6#include <linux/init.h>
   7#include <linux/module.h>
   8#include <linux/mm.h>
   9#include <linux/seq_file.h>
  10#include <linux/sysctl.h>
  11#include <linux/highmem.h>
  12#include <linux/mmu_notifier.h>
  13#include <linux/nodemask.h>
  14#include <linux/pagemap.h>
  15#include <linux/mempolicy.h>
  16#include <linux/compiler.h>
  17#include <linux/cpuset.h>
  18#include <linux/mutex.h>
  19#include <linux/bootmem.h>
  20#include <linux/sysfs.h>
  21#include <linux/slab.h>
  22#include <linux/rmap.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/page-isolation.h>
  26#include <linux/jhash.h>
  27
  28#include <asm/page.h>
  29#include <asm/pgtable.h>
  30#include <asm/tlb.h>
  31
  32#include <linux/io.h>
  33#include <linux/hugetlb.h>
  34#include <linux/hugetlb_cgroup.h>
  35#include <linux/node.h>
  36#include "internal.h"
  37
  38unsigned long hugepages_treat_as_movable;
  39
  40int hugetlb_max_hstate __read_mostly;
  41unsigned int default_hstate_idx;
  42struct hstate hstates[HUGE_MAX_HSTATE];
  43
  44__initdata LIST_HEAD(huge_boot_pages);
  45
  46/* for command line parsing */
  47static struct hstate * __initdata parsed_hstate;
  48static unsigned long __initdata default_hstate_max_huge_pages;
  49static unsigned long __initdata default_hstate_size;
  50
  51/*
  52 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
  53 * free_huge_pages, and surplus_huge_pages.
  54 */
  55DEFINE_SPINLOCK(hugetlb_lock);
  56
  57/*
  58 * Serializes faults on the same logical page.  This is used to
  59 * prevent spurious OOMs when the hugepage pool is fully utilized.
  60 */
  61static int num_fault_mutexes;
  62static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
  63
  64static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
  65{
  66        bool free = (spool->count == 0) && (spool->used_hpages == 0);
  67
  68        spin_unlock(&spool->lock);
  69
   70        /* If no pages are used, and no other handles to the subpool
   71         * remain, free the subpool. */
  72        if (free)
  73                kfree(spool);
  74}
  75
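/*
 * Allocate and initialise a subpool with room for nr_blocks huge pages
 * and a single initial reference.  Returns NULL if the allocation fails.
 */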
  76struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
  77{
  78        struct hugepage_subpool *spool;
  79
  80        spool = kmalloc(sizeof(*spool), GFP_KERNEL);
  81        if (!spool)
  82                return NULL;
  83
  84        spin_lock_init(&spool->lock);
  85        spool->count = 1;
  86        spool->max_hpages = nr_blocks;
  87        spool->used_hpages = 0;
  88
  89        return spool;
  90}
  91
  92void hugepage_put_subpool(struct hugepage_subpool *spool)
  93{
  94        spin_lock(&spool->lock);
  95        BUG_ON(!spool->count);
  96        spool->count--;
  97        unlock_or_release_subpool(spool);
  98}
  99
 100static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
 101                                      long delta)
 102{
 103        int ret = 0;
 104
 105        if (!spool)
 106                return 0;
 107
 108        spin_lock(&spool->lock);
 109        if ((spool->used_hpages + delta) <= spool->max_hpages) {
 110                spool->used_hpages += delta;
 111        } else {
 112                ret = -ENOMEM;
 113        }
 114        spin_unlock(&spool->lock);
 115
 116        return ret;
 117}
 118
 119static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
 120                                       long delta)
 121{
 122        if (!spool)
 123                return;
 124
 125        spin_lock(&spool->lock);
 126        spool->used_hpages -= delta;
 127        /* If hugetlbfs_put_super couldn't free spool due to
  128         * an outstanding quota reference, free it now. */
 129        unlock_or_release_subpool(spool);
 130}
 131
 132static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
 133{
 134        return HUGETLBFS_SB(inode->i_sb)->spool;
 135}
 136
 137static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
 138{
 139        return subpool_inode(file_inode(vma->vm_file));
 140}
 141
 142/*
 143 * Region tracking -- allows tracking of reservations and instantiated pages
 144 *                    across the pages in a mapping.
 145 *
 146 * The region data structures are embedded into a resv_map and
 147 * protected by a resv_map's lock
 148 */
 149struct file_region {
 150        struct list_head link;
 151        long from;
 152        long to;
 153};
 154
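/*
 * Add the huge page range [f, t) to the reserve map.  The region the
 * range falls into is expanded as needed and any regions it now subsumes
 * are deleted and freed.  Expects that region_chg() has already been
 * called for the same range, so the call cannot fail; always returns 0.
 */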
 155static long region_add(struct resv_map *resv, long f, long t)
 156{
 157        struct list_head *head = &resv->regions;
 158        struct file_region *rg, *nrg, *trg;
 159
 160        spin_lock(&resv->lock);
 161        /* Locate the region we are either in or before. */
 162        list_for_each_entry(rg, head, link)
 163                if (f <= rg->to)
 164                        break;
 165
 166        /* Round our left edge to the current segment if it encloses us. */
 167        if (f > rg->from)
 168                f = rg->from;
 169
 170        /* Check for and consume any regions we now overlap with. */
 171        nrg = rg;
 172        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
 173                if (&rg->link == head)
 174                        break;
 175                if (rg->from > t)
 176                        break;
 177
  178                /* If this area reaches higher than us, extend our area to
  179                 * include it completely.  If this is not the first area
  180                 * which we intend to reuse, free it. */
 181                if (rg->to > t)
 182                        t = rg->to;
 183                if (rg != nrg) {
 184                        list_del(&rg->link);
 185                        kfree(rg);
 186                }
 187        }
 188        nrg->from = f;
 189        nrg->to = t;
 190        spin_unlock(&resv->lock);
 191        return 0;
 192}
 193
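/*
 * Determine how many huge pages in the range [f, t) are not yet covered
 * by the reserve map.  When the range lies before the existing regions, a
 * zero-sized placeholder region is allocated and inserted so that a
 * subsequent region_add() cannot fail.  Returns the number of pages that
 * must be reserved, or -ENOMEM if the placeholder cannot be allocated.
 */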
 194static long region_chg(struct resv_map *resv, long f, long t)
 195{
 196        struct list_head *head = &resv->regions;
 197        struct file_region *rg, *nrg = NULL;
 198        long chg = 0;
 199
 200retry:
 201        spin_lock(&resv->lock);
 202        /* Locate the region we are before or in. */
 203        list_for_each_entry(rg, head, link)
 204                if (f <= rg->to)
 205                        break;
 206
  207        /* If we are below the current region then a new region is required.
  208         * Subtle: allocate a new region at that position but make it zero
  209         * size so that we are guaranteed to record the reservation. */
 210        if (&rg->link == head || t < rg->from) {
 211                if (!nrg) {
 212                        spin_unlock(&resv->lock);
 213                        nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
 214                        if (!nrg)
 215                                return -ENOMEM;
 216
 217                        nrg->from = f;
 218                        nrg->to   = f;
 219                        INIT_LIST_HEAD(&nrg->link);
 220                        goto retry;
 221                }
 222
 223                list_add(&nrg->link, rg->link.prev);
 224                chg = t - f;
 225                goto out_nrg;
 226        }
 227
 228        /* Round our left edge to the current segment if it encloses us. */
 229        if (f > rg->from)
 230                f = rg->from;
 231        chg = t - f;
 232
 233        /* Check for and consume any regions we now overlap with. */
 234        list_for_each_entry(rg, rg->link.prev, link) {
 235                if (&rg->link == head)
 236                        break;
 237                if (rg->from > t)
 238                        goto out;
 239
  240                /* We overlap with this area; if it extends further than
 241                 * us then we must extend ourselves.  Account for its
 242                 * existing reservation. */
 243                if (rg->to > t) {
 244                        chg += rg->to - t;
 245                        t = rg->to;
 246                }
 247                chg -= rg->to - rg->from;
 248        }
 249
 250out:
 251        spin_unlock(&resv->lock);
 252        /*  We already know we raced and no longer need the new region */
 253        kfree(nrg);
 254        return chg;
 255out_nrg:
 256        spin_unlock(&resv->lock);
 257        return chg;
 258}
 259
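/*
 * Truncate the reserve map at index 'end'.  A region straddling 'end' is
 * shrunk and all regions beyond it are deleted.  Returns the number of
 * huge pages removed from the map.
 */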
 260static long region_truncate(struct resv_map *resv, long end)
 261{
 262        struct list_head *head = &resv->regions;
 263        struct file_region *rg, *trg;
 264        long chg = 0;
 265
 266        spin_lock(&resv->lock);
 267        /* Locate the region we are either in or before. */
 268        list_for_each_entry(rg, head, link)
 269                if (end <= rg->to)
 270                        break;
 271        if (&rg->link == head)
 272                goto out;
 273
 274        /* If we are in the middle of a region then adjust it. */
 275        if (end > rg->from) {
 276                chg = rg->to - end;
 277                rg->to = end;
 278                rg = list_entry(rg->link.next, typeof(*rg), link);
 279        }
 280
 281        /* Drop any remaining regions. */
 282        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
 283                if (&rg->link == head)
 284                        break;
 285                chg += rg->to - rg->from;
 286                list_del(&rg->link);
 287                kfree(rg);
 288        }
 289
 290out:
 291        spin_unlock(&resv->lock);
 292        return chg;
 293}
 294
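/*
 * Count and return the number of huge pages in the reserve map that
 * intersect the range [f, t).
 */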
 295static long region_count(struct resv_map *resv, long f, long t)
 296{
 297        struct list_head *head = &resv->regions;
 298        struct file_region *rg;
 299        long chg = 0;
 300
 301        spin_lock(&resv->lock);
 302        /* Locate each segment we overlap with, and count that overlap. */
 303        list_for_each_entry(rg, head, link) {
 304                long seg_from;
 305                long seg_to;
 306
 307                if (rg->to <= f)
 308                        continue;
 309                if (rg->from >= t)
 310                        break;
 311
 312                seg_from = max(rg->from, f);
 313                seg_to = min(rg->to, t);
 314
 315                chg += seg_to - seg_from;
 316        }
 317        spin_unlock(&resv->lock);
 318
 319        return chg;
 320}
 321
 322/*
 323 * Convert the address within this vma to the page offset within
 324 * the mapping, in pagecache page units; huge pages here.
 325 */
 326static pgoff_t vma_hugecache_offset(struct hstate *h,
 327                        struct vm_area_struct *vma, unsigned long address)
 328{
 329        return ((address - vma->vm_start) >> huge_page_shift(h)) +
 330                        (vma->vm_pgoff >> huge_page_order(h));
 331}
 332
 333pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
 334                                     unsigned long address)
 335{
 336        return vma_hugecache_offset(hstate_vma(vma), vma, address);
 337}
 338
 339/*
 340 * Return the size of the pages allocated when backing a VMA. In the majority
  341 * of cases this will be the same size as used by the page table entries.
 342 */
 343unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 344{
 345        struct hstate *hstate;
 346
 347        if (!is_vm_hugetlb_page(vma))
 348                return PAGE_SIZE;
 349
 350        hstate = hstate_vma(vma);
 351
 352        return 1UL << huge_page_shift(hstate);
 353}
 354EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 355
 356/*
 357 * Return the page size being used by the MMU to back a VMA. In the majority
 358 * of cases, the page size used by the kernel matches the MMU size. On
 359 * architectures where it differs, an architecture-specific version of this
 360 * function is required.
 361 */
 362#ifndef vma_mmu_pagesize
 363unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 364{
 365        return vma_kernel_pagesize(vma);
 366}
 367#endif
 368
 369/*
 370 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 371 * bits of the reservation map pointer, which are always clear due to
 372 * alignment.
 373 */
 374#define HPAGE_RESV_OWNER    (1UL << 0)
 375#define HPAGE_RESV_UNMAPPED (1UL << 1)
 376#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
 377
 378/*
 379 * These helpers are used to track how many pages are reserved for
 380 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 381 * is guaranteed to have their future faults succeed.
 382 *
 383 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 384 * the reserve counters are updated with the hugetlb_lock held. It is safe
 385 * to reset the VMA at fork() time as it is not in use yet and there is no
  386 * chance of the global counters being corrupted by stale values.
 387 *
 388 * The private mapping reservation is represented in a subtly different
 389 * manner to a shared mapping.  A shared mapping has a region map associated
  390 * with the underlying file; this region map represents the backing file
  391 * pages which have ever had a reservation assigned, and it persists even
  392 * after the page is instantiated.  A private mapping has a region map
  393 * associated with the original mmap which is attached to all VMAs which
  394 * reference it; this region map represents those offsets which have consumed
  395 * a reservation, i.e. where pages have been instantiated.
 396 */
 397static unsigned long get_vma_private_data(struct vm_area_struct *vma)
 398{
 399        return (unsigned long)vma->vm_private_data;
 400}
 401
 402static void set_vma_private_data(struct vm_area_struct *vma,
 403                                                        unsigned long value)
 404{
 405        vma->vm_private_data = (void *)value;
 406}
 407
 408struct resv_map *resv_map_alloc(void)
 409{
 410        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
 411        if (!resv_map)
 412                return NULL;
 413
 414        kref_init(&resv_map->refs);
 415        spin_lock_init(&resv_map->lock);
 416        INIT_LIST_HEAD(&resv_map->regions);
 417
 418        return resv_map;
 419}
 420
 421void resv_map_release(struct kref *ref)
 422{
 423        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 424
 425        /* Clear out any active regions before we release the map. */
 426        region_truncate(resv_map, 0);
 427        kfree(resv_map);
 428}
 429
 430static inline struct resv_map *inode_resv_map(struct inode *inode)
 431{
 432        return inode->i_mapping->private_data;
 433}
 434
 435static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 436{
 437        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 438        if (vma->vm_flags & VM_MAYSHARE) {
 439                struct address_space *mapping = vma->vm_file->f_mapping;
 440                struct inode *inode = mapping->host;
 441
 442                return inode_resv_map(inode);
 443
 444        } else {
 445                return (struct resv_map *)(get_vma_private_data(vma) &
 446                                                        ~HPAGE_RESV_MASK);
 447        }
 448}
 449
 450static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 451{
 452        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 453        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 454
 455        set_vma_private_data(vma, (get_vma_private_data(vma) &
 456                                HPAGE_RESV_MASK) | (unsigned long)map);
 457}
 458
 459static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 460{
 461        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 462        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 463
 464        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 465}
 466
 467static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 468{
 469        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 470
 471        return (get_vma_private_data(vma) & flag) != 0;
 472}
 473
 474/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 475void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 476{
 477        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 478        if (!(vma->vm_flags & VM_MAYSHARE))
 479                vma->vm_private_data = (void *)0;
 480}
 481
 482/* Returns true if the VMA has associated reserve pages */
 483static int vma_has_reserves(struct vm_area_struct *vma, long chg)
 484{
 485        if (vma->vm_flags & VM_NORESERVE) {
 486                /*
  487                 * This address is already reserved by another process (chg == 0),
  488                 * so we should decrement the reserved count.  Without decrementing,
  489                 * the reserve count would remain after releasing the inode, because
  490                 * this allocated page will go into the page cache and be regarded as
  491                 * coming from the reserved pool when it is released.  Currently we
  492                 * don't have any other way to deal with this situation
  493                 * properly, so add a work-around here.
 494                 */
 495                if (vma->vm_flags & VM_MAYSHARE && chg == 0)
 496                        return 1;
 497                else
 498                        return 0;
 499        }
 500
 501        /* Shared mappings always use reserves */
 502        if (vma->vm_flags & VM_MAYSHARE)
 503                return 1;
 504
 505        /*
 506         * Only the process that called mmap() has reserves for
 507         * private mappings.
 508         */
 509        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 510                return 1;
 511
 512        return 0;
 513}
 514
 515static void enqueue_huge_page(struct hstate *h, struct page *page)
 516{
 517        int nid = page_to_nid(page);
 518        list_move(&page->lru, &h->hugepage_freelists[nid]);
 519        h->free_huge_pages++;
 520        h->free_huge_pages_node[nid]++;
 521}
 522
 523static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 524{
 525        struct page *page;
 526
 527        list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
 528                if (!is_migrate_isolate_page(page))
 529                        break;
 530        /*
  531         * If no non-isolated free hugepage is found on the list,
 532         * the allocation fails.
 533         */
 534        if (&h->hugepage_freelists[nid] == &page->lru)
 535                return NULL;
 536        list_move(&page->lru, &h->hugepage_activelist);
 537        set_page_refcounted(page);
 538        h->free_huge_pages--;
 539        h->free_huge_pages_node[nid]--;
 540        return page;
 541}
 542
 543/* Movability of hugepages depends on migration support. */
 544static inline gfp_t htlb_alloc_mask(struct hstate *h)
 545{
 546        if (hugepages_treat_as_movable || hugepage_migration_supported(h))
 547                return GFP_HIGHUSER_MOVABLE;
 548        else
 549                return GFP_HIGHUSER;
 550}
 551
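/*
 * Dequeue a free huge page for a fault in 'vma' at 'address'.  The
 * zonelist is chosen according to the VMA's memory policy, zones not
 * allowed by the current cpuset are skipped, and the reserve count is
 * decremented when a reserved page is handed out.
 */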
 552static struct page *dequeue_huge_page_vma(struct hstate *h,
 553                                struct vm_area_struct *vma,
 554                                unsigned long address, int avoid_reserve,
 555                                long chg)
 556{
 557        struct page *page = NULL;
 558        struct mempolicy *mpol;
 559        nodemask_t *nodemask;
 560        struct zonelist *zonelist;
 561        struct zone *zone;
 562        struct zoneref *z;
 563        unsigned int cpuset_mems_cookie;
 564
 565        /*
  566         * A child process with MAP_PRIVATE mappings created by its parent
  567         * has no page reserves. This check ensures that reservations are
  568         * not "stolen". The child may still get SIGKILLed.
 569         */
 570        if (!vma_has_reserves(vma, chg) &&
 571                        h->free_huge_pages - h->resv_huge_pages == 0)
 572                goto err;
 573
 574        /* If reserves cannot be used, ensure enough pages are in the pool */
 575        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
 576                goto err;
 577
 578retry_cpuset:
 579        cpuset_mems_cookie = read_mems_allowed_begin();
 580        zonelist = huge_zonelist(vma, address,
 581                                        htlb_alloc_mask(h), &mpol, &nodemask);
 582
 583        for_each_zone_zonelist_nodemask(zone, z, zonelist,
 584                                                MAX_NR_ZONES - 1, nodemask) {
 585                if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
 586                        page = dequeue_huge_page_node(h, zone_to_nid(zone));
 587                        if (page) {
 588                                if (avoid_reserve)
 589                                        break;
 590                                if (!vma_has_reserves(vma, chg))
 591                                        break;
 592
 593                                SetPagePrivate(page);
 594                                h->resv_huge_pages--;
 595                                break;
 596                        }
 597                }
 598        }
 599
 600        mpol_cond_put(mpol);
 601        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 602                goto retry_cpuset;
 603        return page;
 604
 605err:
 606        return NULL;
 607}
 608
 609/*
 610 * common helper functions for hstate_next_node_to_{alloc|free}.
 611 * We may have allocated or freed a huge page based on a different
 612 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 613 * be outside of *nodes_allowed.  Ensure that we use an allowed
 614 * node for alloc or free.
 615 */
 616static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 617{
 618        nid = next_node(nid, *nodes_allowed);
 619        if (nid == MAX_NUMNODES)
 620                nid = first_node(*nodes_allowed);
 621        VM_BUG_ON(nid >= MAX_NUMNODES);
 622
 623        return nid;
 624}
 625
 626static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
 627{
 628        if (!node_isset(nid, *nodes_allowed))
 629                nid = next_node_allowed(nid, nodes_allowed);
 630        return nid;
 631}
 632
 633/*
 634 * returns the previously saved node ["this node"] from which to
 635 * allocate a persistent huge page for the pool and advance the
 636 * next node from which to allocate, handling wrap at end of node
 637 * mask.
 638 */
 639static int hstate_next_node_to_alloc(struct hstate *h,
 640                                        nodemask_t *nodes_allowed)
 641{
 642        int nid;
 643
 644        VM_BUG_ON(!nodes_allowed);
 645
 646        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
 647        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
 648
 649        return nid;
 650}
 651
 652/*
 653 * helper for free_pool_huge_page() - return the previously saved
 654 * node ["this node"] from which to free a huge page.  Advance the
 655 * next node id whether or not we find a free huge page to free so
 656 * that the next attempt to free addresses the next node.
 657 */
 658static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 659{
 660        int nid;
 661
 662        VM_BUG_ON(!nodes_allowed);
 663
 664        nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
 665        h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
 666
 667        return nid;
 668}
 669
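/*
 * Iterate over at most nodes_weight(*mask) nodes, picking the next
 * round-robin node to allocate from (or free to) on each iteration.
 */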
 670#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
 671        for (nr_nodes = nodes_weight(*mask);                            \
 672                nr_nodes > 0 &&                                         \
 673                ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
 674                nr_nodes--)
 675
 676#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
 677        for (nr_nodes = nodes_weight(*mask);                            \
 678                nr_nodes > 0 &&                                         \
 679                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
 680                nr_nodes--)
 681
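/*
 * Runtime allocation of gigantic pages is built on alloc_contig_range()
 * and is therefore only compiled in when CONFIG_CMA is enabled on x86_64.
 */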
 682#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
 683static void destroy_compound_gigantic_page(struct page *page,
 684                                        unsigned long order)
 685{
 686        int i;
 687        int nr_pages = 1 << order;
 688        struct page *p = page + 1;
 689
 690        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
 691                __ClearPageTail(p);
 692                set_page_refcounted(p);
 693                p->first_page = NULL;
 694        }
 695
 696        set_compound_order(page, 0);
 697        __ClearPageHead(page);
 698}
 699
 700static void free_gigantic_page(struct page *page, unsigned order)
 701{
 702        free_contig_range(page_to_pfn(page), 1 << order);
 703}
 704
 705static int __alloc_gigantic_page(unsigned long start_pfn,
 706                                unsigned long nr_pages)
 707{
 708        unsigned long end_pfn = start_pfn + nr_pages;
 709        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 710}
 711
 712static bool pfn_range_valid_gigantic(unsigned long start_pfn,
 713                                unsigned long nr_pages)
 714{
 715        unsigned long i, end_pfn = start_pfn + nr_pages;
 716        struct page *page;
 717
 718        for (i = start_pfn; i < end_pfn; i++) {
 719                if (!pfn_valid(i))
 720                        return false;
 721
 722                page = pfn_to_page(i);
 723
 724                if (PageReserved(page))
 725                        return false;
 726
 727                if (page_count(page) > 0)
 728                        return false;
 729
 730                if (PageHuge(page))
 731                        return false;
 732        }
 733
 734        return true;
 735}
 736
 737static bool zone_spans_last_pfn(const struct zone *zone,
 738                        unsigned long start_pfn, unsigned long nr_pages)
 739{
 740        unsigned long last_pfn = start_pfn + nr_pages - 1;
 741        return zone_spans_pfn(zone, last_pfn);
 742}
 743
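/*
 * Scan the zones of node 'nid' for a naturally aligned range of free
 * pfns large enough for a gigantic page and try to claim it with
 * alloc_contig_range() via __alloc_gigantic_page().
 */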
 744static struct page *alloc_gigantic_page(int nid, unsigned order)
 745{
 746        unsigned long nr_pages = 1 << order;
 747        unsigned long ret, pfn, flags;
 748        struct zone *z;
 749
 750        z = NODE_DATA(nid)->node_zones;
 751        for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
 752                spin_lock_irqsave(&z->lock, flags);
 753
 754                pfn = ALIGN(z->zone_start_pfn, nr_pages);
 755                while (zone_spans_last_pfn(z, pfn, nr_pages)) {
 756                        if (pfn_range_valid_gigantic(pfn, nr_pages)) {
 757                                /*
 758                                 * We release the zone lock here because
 759                                 * alloc_contig_range() will also lock the zone
 760                                 * at some point. If there's an allocation
 761                                 * spinning on this lock, it may win the race
 762                                 * and cause alloc_contig_range() to fail...
 763                                 */
 764                                spin_unlock_irqrestore(&z->lock, flags);
 765                                ret = __alloc_gigantic_page(pfn, nr_pages);
 766                                if (!ret)
 767                                        return pfn_to_page(pfn);
 768                                spin_lock_irqsave(&z->lock, flags);
 769                        }
 770                        pfn += nr_pages;
 771                }
 772
 773                spin_unlock_irqrestore(&z->lock, flags);
 774        }
 775
 776        return NULL;
 777}
 778
 779static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 780static void prep_compound_gigantic_page(struct page *page, unsigned long order);
 781
 782static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
 783{
 784        struct page *page;
 785
 786        page = alloc_gigantic_page(nid, huge_page_order(h));
 787        if (page) {
 788                prep_compound_gigantic_page(page, huge_page_order(h));
 789                prep_new_huge_page(h, page, nid);
 790        }
 791
 792        return page;
 793}
 794
 795static int alloc_fresh_gigantic_page(struct hstate *h,
 796                                nodemask_t *nodes_allowed)
 797{
 798        struct page *page = NULL;
 799        int nr_nodes, node;
 800
 801        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 802                page = alloc_fresh_gigantic_page_node(h, node);
 803                if (page)
 804                        return 1;
 805        }
 806
 807        return 0;
 808}
 809
 810static inline bool gigantic_page_supported(void) { return true; }
 811#else
 812static inline bool gigantic_page_supported(void) { return false; }
 813static inline void free_gigantic_page(struct page *page, unsigned order) { }
 814static inline void destroy_compound_gigantic_page(struct page *page,
 815                                                unsigned long order) { }
 816static inline int alloc_fresh_gigantic_page(struct hstate *h,
 817                                        nodemask_t *nodes_allowed) { return 0; }
 818#endif
 819
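/*
 * Tear down a huge page: drop the hstate accounting, clear the per-page
 * flags and compound destructor, and return the memory to the buddy
 * allocator (or to the contiguous range allocator for gigantic pages).
 */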
 820static void update_and_free_page(struct hstate *h, struct page *page)
 821{
 822        int i;
 823
 824        if (hstate_is_gigantic(h) && !gigantic_page_supported())
 825                return;
 826
 827        h->nr_huge_pages--;
 828        h->nr_huge_pages_node[page_to_nid(page)]--;
 829        for (i = 0; i < pages_per_huge_page(h); i++) {
 830                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 831                                1 << PG_referenced | 1 << PG_dirty |
 832                                1 << PG_active | 1 << PG_private |
 833                                1 << PG_writeback);
 834        }
 835        VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
 836        set_compound_page_dtor(page, NULL);
 837        set_page_refcounted(page);
 838        if (hstate_is_gigantic(h)) {
 839                destroy_compound_gigantic_page(page, huge_page_order(h));
 840                free_gigantic_page(page, huge_page_order(h));
 841        } else {
 842                arch_release_hugepage(page);
 843                __free_pages(page, huge_page_order(h));
 844        }
 845}
 846
 847struct hstate *size_to_hstate(unsigned long size)
 848{
 849        struct hstate *h;
 850
 851        for_each_hstate(h) {
 852                if (huge_page_size(h) == size)
 853                        return h;
 854        }
 855        return NULL;
 856}
 857
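/*
 * Compound page destructor for hugetlb pages.  Surplus pages are freed
 * back to the buddy allocator, all others go back on the hstate free
 * lists, and the subpool reference taken at allocation time is dropped.
 */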
 858void free_huge_page(struct page *page)
 859{
 860        /*
 861         * Can't pass hstate in here because it is called from the
 862         * compound page destructor.
 863         */
 864        struct hstate *h = page_hstate(page);
 865        int nid = page_to_nid(page);
 866        struct hugepage_subpool *spool =
 867                (struct hugepage_subpool *)page_private(page);
 868        bool restore_reserve;
 869
 870        set_page_private(page, 0);
 871        page->mapping = NULL;
 872        BUG_ON(page_count(page));
 873        BUG_ON(page_mapcount(page));
 874        restore_reserve = PagePrivate(page);
 875        ClearPagePrivate(page);
 876
 877        spin_lock(&hugetlb_lock);
 878        hugetlb_cgroup_uncharge_page(hstate_index(h),
 879                                     pages_per_huge_page(h), page);
 880        if (restore_reserve)
 881                h->resv_huge_pages++;
 882
 883        if (h->surplus_huge_pages_node[nid]) {
 884                /* remove the page from active list */
 885                list_del(&page->lru);
 886                update_and_free_page(h, page);
 887                h->surplus_huge_pages--;
 888                h->surplus_huge_pages_node[nid]--;
 889        } else {
 890                arch_clear_hugepage_flags(page);
 891                enqueue_huge_page(h, page);
 892        }
 893        spin_unlock(&hugetlb_lock);
 894        hugepage_subpool_put_pages(spool, 1);
 895}
 896
 897static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 898{
 899        INIT_LIST_HEAD(&page->lru);
 900        set_compound_page_dtor(page, free_huge_page);
 901        spin_lock(&hugetlb_lock);
 902        set_hugetlb_cgroup(page, NULL);
 903        h->nr_huge_pages++;
 904        h->nr_huge_pages_node[nid]++;
 905        spin_unlock(&hugetlb_lock);
 906        put_page(page); /* free it into the hugepage allocator */
 907}
 908
 909static void prep_compound_gigantic_page(struct page *page, unsigned long order)
 910{
 911        int i;
 912        int nr_pages = 1 << order;
 913        struct page *p = page + 1;
 914
 915        /* we rely on prep_new_huge_page to set the destructor */
 916        set_compound_order(page, order);
 917        __SetPageHead(page);
 918        __ClearPageReserved(page);
 919        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
 920                __SetPageTail(p);
 921                /*
 922                 * For gigantic hugepages allocated through bootmem at
 923                 * boot, it's safer to be consistent with the not-gigantic
 924                 * hugepages and clear the PG_reserved bit from all tail pages
  925                 * too.  Otherwise drivers using get_user_pages() to access tail
 926                 * pages may get the reference counting wrong if they see
 927                 * PG_reserved set on a tail page (despite the head page not
 928                 * having PG_reserved set).  Enforcing this consistency between
 929                 * head and tail pages allows drivers to optimize away a check
  930                 * on the head page when they need to know if put_page() is needed
 931                 * after get_user_pages().
 932                 */
 933                __ClearPageReserved(p);
 934                set_page_count(p, 0);
 935                p->first_page = page;
 936        }
 937}
 938
 939/*
 940 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 941 * transparent huge pages.  See the PageTransHuge() documentation for more
 942 * details.
 943 */
 944int PageHuge(struct page *page)
 945{
 946        if (!PageCompound(page))
 947                return 0;
 948
 949        page = compound_head(page);
 950        return get_compound_page_dtor(page) == free_huge_page;
 951}
 952EXPORT_SYMBOL_GPL(PageHuge);
 953
 954/*
 955 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 956 * normal or transparent huge pages.
 957 */
 958int PageHeadHuge(struct page *page_head)
 959{
 960        if (!PageHead(page_head))
 961                return 0;
 962
 963        return get_compound_page_dtor(page_head) == free_huge_page;
 964}
 965
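/*
 * Return the index, in base page units, of a (possibly tail) page within
 * its mapping.  For pages that are not part of a huge page this is just
 * page_index().
 */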
 966pgoff_t __basepage_index(struct page *page)
 967{
 968        struct page *page_head = compound_head(page);
 969        pgoff_t index = page_index(page_head);
 970        unsigned long compound_idx;
 971
 972        if (!PageHuge(page_head))
 973                return page_index(page);
 974
 975        if (compound_order(page_head) >= MAX_ORDER)
 976                compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
 977        else
 978                compound_idx = page - page_head;
 979
 980        return (index << compound_order(page_head)) + compound_idx;
 981}
 982
 983static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 984{
 985        struct page *page;
 986
 987        page = alloc_pages_exact_node(nid,
 988                htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 989                                                __GFP_REPEAT|__GFP_NOWARN,
 990                huge_page_order(h));
 991        if (page) {
 992                if (arch_prepare_hugepage(page)) {
 993                        __free_pages(page, huge_page_order(h));
 994                        return NULL;
 995                }
 996                prep_new_huge_page(h, page, nid);
 997        }
 998
 999        return page;
1000}
1001
1002static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1003{
1004        struct page *page;
1005        int nr_nodes, node;
1006        int ret = 0;
1007
1008        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1009                page = alloc_fresh_huge_page_node(h, node);
1010                if (page) {
1011                        ret = 1;
1012                        break;
1013                }
1014        }
1015
1016        if (ret)
1017                count_vm_event(HTLB_BUDDY_PGALLOC);
1018        else
1019                count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1020
1021        return ret;
1022}
1023
1024/*
1025 * Free huge page from pool from next node to free.
1026 * Attempt to keep persistent huge pages more or less
1027 * balanced over allowed nodes.
1028 * Called with hugetlb_lock locked.
1029 */
1030static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1031                                                         bool acct_surplus)
1032{
1033        int nr_nodes, node;
1034        int ret = 0;
1035
1036        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1037                /*
1038                 * If we're returning unused surplus pages, only examine
1039                 * nodes with surplus pages.
1040                 */
1041                if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1042                    !list_empty(&h->hugepage_freelists[node])) {
1043                        struct page *page =
1044                                list_entry(h->hugepage_freelists[node].next,
1045                                          struct page, lru);
1046                        list_del(&page->lru);
1047                        h->free_huge_pages--;
1048                        h->free_huge_pages_node[node]--;
1049                        if (acct_surplus) {
1050                                h->surplus_huge_pages--;
1051                                h->surplus_huge_pages_node[node]--;
1052                        }
1053                        update_and_free_page(h, page);
1054                        ret = 1;
1055                        break;
1056                }
1057        }
1058
1059        return ret;
1060}
1061
1062/*
1063 * Dissolve a given free hugepage into free buddy pages. This function does
1064 * nothing for in-use (including surplus) hugepages.
1065 */
1066static void dissolve_free_huge_page(struct page *page)
1067{
1068        spin_lock(&hugetlb_lock);
1069        if (PageHuge(page) && !page_count(page)) {
1070                struct hstate *h = page_hstate(page);
1071                int nid = page_to_nid(page);
1072                list_del(&page->lru);
1073                h->free_huge_pages--;
1074                h->free_huge_pages_node[nid]--;
1075                update_and_free_page(h, page);
1076        }
1077        spin_unlock(&hugetlb_lock);
1078}
1079
1080/*
1081 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1082 * make specified memory blocks removable from the system.
 1083 * Note that start_pfn should be aligned to the (minimum) hugepage size.
1084 */
1085void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1086{
1087        unsigned int order = 8 * sizeof(void *);
1088        unsigned long pfn;
1089        struct hstate *h;
1090
1091        if (!hugepages_supported())
1092                return;
1093
1094        /* Set scan step to minimum hugepage size */
1095        for_each_hstate(h)
1096                if (order > huge_page_order(h))
1097                        order = huge_page_order(h);
1098        VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
1099        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
1100                dissolve_free_huge_page(pfn_to_page(pfn));
1101}
1102
1103static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
1104{
1105        struct page *page;
1106        unsigned int r_nid;
1107
1108        if (hstate_is_gigantic(h))
1109                return NULL;
1110
1111        /*
1112         * Assume we will successfully allocate the surplus page to
1113         * prevent racing processes from causing the surplus to exceed
1114         * overcommit
1115         *
1116         * This however introduces a different race, where a process B
1117         * tries to grow the static hugepage pool while alloc_pages() is
1118         * called by process A. B will only examine the per-node
1119         * counters in determining if surplus huge pages can be
1120         * converted to normal huge pages in adjust_pool_surplus(). A
1121         * won't be able to increment the per-node counter, until the
1122         * lock is dropped by B, but B doesn't drop hugetlb_lock until
1123         * no more huge pages can be converted from surplus to normal
1124         * state (and doesn't try to convert again). Thus, we have a
1125         * case where a surplus huge page exists, the pool is grown, and
1126         * the surplus huge page still exists after, even though it
1127         * should just have been converted to a normal huge page. This
1128         * does not leak memory, though, as the hugepage will be freed
1129         * once it is out of use. It also does not allow the counters to
1130         * go out of whack in adjust_pool_surplus() as we don't modify
1131         * the node values until we've gotten the hugepage and only the
1132         * per-node value is checked there.
1133         */
1134        spin_lock(&hugetlb_lock);
1135        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1136                spin_unlock(&hugetlb_lock);
1137                return NULL;
1138        } else {
1139                h->nr_huge_pages++;
1140                h->surplus_huge_pages++;
1141        }
1142        spin_unlock(&hugetlb_lock);
1143
1144        if (nid == NUMA_NO_NODE)
1145                page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1146                                   __GFP_REPEAT|__GFP_NOWARN,
1147                                   huge_page_order(h));
1148        else
1149                page = alloc_pages_exact_node(nid,
1150                        htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1151                        __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1152
1153        if (page && arch_prepare_hugepage(page)) {
1154                __free_pages(page, huge_page_order(h));
1155                page = NULL;
1156        }
1157
1158        spin_lock(&hugetlb_lock);
1159        if (page) {
1160                INIT_LIST_HEAD(&page->lru);
1161                r_nid = page_to_nid(page);
1162                set_compound_page_dtor(page, free_huge_page);
1163                set_hugetlb_cgroup(page, NULL);
1164                /*
1165                 * We incremented the global counters already
1166                 */
1167                h->nr_huge_pages_node[r_nid]++;
1168                h->surplus_huge_pages_node[r_nid]++;
1169                __count_vm_event(HTLB_BUDDY_PGALLOC);
1170        } else {
1171                h->nr_huge_pages--;
1172                h->surplus_huge_pages--;
1173                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1174        }
1175        spin_unlock(&hugetlb_lock);
1176
1177        return page;
1178}
1179
1180/*
1181 * This allocation function is useful in the context where vma is irrelevant.
 1182 * E.g. soft-offlining uses this function because it only cares about the
 1183 * physical address of the error page.
1184 */
1185struct page *alloc_huge_page_node(struct hstate *h, int nid)
1186{
1187        struct page *page = NULL;
1188
1189        spin_lock(&hugetlb_lock);
1190        if (h->free_huge_pages - h->resv_huge_pages > 0)
1191                page = dequeue_huge_page_node(h, nid);
1192        spin_unlock(&hugetlb_lock);
1193
1194        if (!page)
1195                page = alloc_buddy_huge_page(h, nid);
1196
1197        return page;
1198}
1199
1200/*
1201 * Increase the hugetlb pool such that it can accommodate a reservation
1202 * of size 'delta'.
1203 */
1204static int gather_surplus_pages(struct hstate *h, int delta)
1205{
1206        struct list_head surplus_list;
1207        struct page *page, *tmp;
1208        int ret, i;
1209        int needed, allocated;
1210        bool alloc_ok = true;
1211
1212        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1213        if (needed <= 0) {
1214                h->resv_huge_pages += delta;
1215                return 0;
1216        }
1217
1218        allocated = 0;
1219        INIT_LIST_HEAD(&surplus_list);
1220
1221        ret = -ENOMEM;
1222retry:
1223        spin_unlock(&hugetlb_lock);
1224        for (i = 0; i < needed; i++) {
1225                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1226                if (!page) {
1227                        alloc_ok = false;
1228                        break;
1229                }
1230                list_add(&page->lru, &surplus_list);
1231        }
1232        allocated += i;
1233
1234        /*
1235         * After retaking hugetlb_lock, we need to recalculate 'needed'
1236         * because either resv_huge_pages or free_huge_pages may have changed.
1237         */
1238        spin_lock(&hugetlb_lock);
1239        needed = (h->resv_huge_pages + delta) -
1240                        (h->free_huge_pages + allocated);
1241        if (needed > 0) {
1242                if (alloc_ok)
1243                        goto retry;
1244                /*
1245                 * We were not able to allocate enough pages to
1246                 * satisfy the entire reservation so we free what
1247                 * we've allocated so far.
1248                 */
1249                goto free;
1250        }
1251        /*
1252         * The surplus_list now contains _at_least_ the number of extra pages
1253         * needed to accommodate the reservation.  Add the appropriate number
1254         * of pages to the hugetlb pool and free the extras back to the buddy
1255         * allocator.  Commit the entire reservation here to prevent another
1256         * process from stealing the pages as they are added to the pool but
1257         * before they are reserved.
1258         */
1259        needed += allocated;
1260        h->resv_huge_pages += delta;
1261        ret = 0;
1262
1263        /* Free the needed pages to the hugetlb pool */
1264        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1265                if ((--needed) < 0)
1266                        break;
1267                /*
1268                 * This page is now managed by the hugetlb allocator and has
1269                 * no users -- drop the buddy allocator's reference.
1270                 */
1271                put_page_testzero(page);
1272                VM_BUG_ON_PAGE(page_count(page), page);
1273                enqueue_huge_page(h, page);
1274        }
1275free:
1276        spin_unlock(&hugetlb_lock);
1277
1278        /* Free unnecessary surplus pages to the buddy allocator */
1279        list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1280                put_page(page);
1281        spin_lock(&hugetlb_lock);
1282
1283        return ret;
1284}
1285
1286/*
1287 * When releasing a hugetlb pool reservation, any surplus pages that were
1288 * allocated to satisfy the reservation must be explicitly freed if they were
1289 * never used.
1290 * Called with hugetlb_lock held.
1291 */
1292static void return_unused_surplus_pages(struct hstate *h,
1293                                        unsigned long unused_resv_pages)
1294{
1295        unsigned long nr_pages;
1296
1297        /* Uncommit the reservation */
1298        h->resv_huge_pages -= unused_resv_pages;
1299
1300        /* Cannot return gigantic pages currently */
1301        if (hstate_is_gigantic(h))
1302                return;
1303
1304        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1305
1306        /*
1307         * We want to release as many surplus pages as possible, spread
1308         * evenly across all nodes with memory. Iterate across these nodes
1309         * until we can no longer free unreserved surplus pages. This occurs
1310         * when the nodes with surplus pages have no free pages.
 1311 * free_pool_huge_page() will balance the freed pages across the
1312         * on-line nodes with memory and will handle the hstate accounting.
1313         */
1314        while (nr_pages--) {
1315                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1316                        break;
1317                cond_resched_lock(&hugetlb_lock);
1318        }
1319}
1320
1321/*
1322 * Determine if the huge page at addr within the vma has an associated
1323 * reservation.  Where it does not we will need to logically increase
1324 * reservation and actually increase subpool usage before an allocation
1325 * can occur.  Where any new reservation would be required the
1326 * reservation change is prepared, but not committed.  Once the page
1327 * has been allocated from the subpool and instantiated the change should
1328 * be committed via vma_commit_reservation.  No action is required on
1329 * failure.
1330 */
1331static long vma_needs_reservation(struct hstate *h,
1332                        struct vm_area_struct *vma, unsigned long addr)
1333{
1334        struct resv_map *resv;
1335        pgoff_t idx;
1336        long chg;
1337
1338        resv = vma_resv_map(vma);
1339        if (!resv)
1340                return 1;
1341
1342        idx = vma_hugecache_offset(h, vma, addr);
1343        chg = region_chg(resv, idx, idx + 1);
1344
1345        if (vma->vm_flags & VM_MAYSHARE)
1346                return chg;
1347        else
1348                return chg < 0 ? chg : 0;
1349}
1350static void vma_commit_reservation(struct hstate *h,
1351                        struct vm_area_struct *vma, unsigned long addr)
1352{
1353        struct resv_map *resv;
1354        pgoff_t idx;
1355
1356        resv = vma_resv_map(vma);
1357        if (!resv)
1358                return;
1359
1360        idx = vma_hugecache_offset(h, vma, addr);
1361        region_add(resv, idx, idx + 1);
1362}
1363
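/*
 * Allocate a huge page for a fault in 'vma' at 'addr': charge the
 * subpool and the hugetlb cgroup, try the free lists first, fall back to
 * a surplus page from the buddy allocator, and finally commit the
 * reservation prepared by vma_needs_reservation().
 */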
1364static struct page *alloc_huge_page(struct vm_area_struct *vma,
1365                                    unsigned long addr, int avoid_reserve)
1366{
1367        struct hugepage_subpool *spool = subpool_vma(vma);
1368        struct hstate *h = hstate_vma(vma);
1369        struct page *page;
1370        long chg;
1371        int ret, idx;
1372        struct hugetlb_cgroup *h_cg;
1373
1374        idx = hstate_index(h);
1375        /*
1376         * Processes that did not create the mapping will have no
 1377         * reserves and will not have accounted against the subpool
 1378         * limit. Check that the subpool limit can be met before
 1379         * satisfying the allocation.  MAP_NORESERVE mappings may also
 1380         * need pages and the subpool limit allocated if no reserve
 1381         * mapping overlaps.
1382         */
1383        chg = vma_needs_reservation(h, vma, addr);
1384        if (chg < 0)
1385                return ERR_PTR(-ENOMEM);
1386        if (chg || avoid_reserve)
1387                if (hugepage_subpool_get_pages(spool, 1))
1388                        return ERR_PTR(-ENOSPC);
1389
1390        ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1391        if (ret)
1392                goto out_subpool_put;
1393
1394        spin_lock(&hugetlb_lock);
1395        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1396        if (!page) {
1397                spin_unlock(&hugetlb_lock);
1398                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1399                if (!page)
1400                        goto out_uncharge_cgroup;
1401
1402                spin_lock(&hugetlb_lock);
1403                list_move(&page->lru, &h->hugepage_activelist);
1404                /* Fall through */
1405        }
1406        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1407        spin_unlock(&hugetlb_lock);
1408
1409        set_page_private(page, (unsigned long)spool);
1410
1411        vma_commit_reservation(h, vma, addr);
1412        return page;
1413
1414out_uncharge_cgroup:
1415        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1416out_subpool_put:
1417        if (chg || avoid_reserve)
1418                hugepage_subpool_put_pages(spool, 1);
1419        return ERR_PTR(-ENOSPC);
1420}
1421
1422/*
1423 * alloc_huge_page()'s wrapper which simply returns the page if allocation
1424 * succeeds, otherwise NULL. This function is called from new_vma_page(),
1425 * where no ERR_VALUE is expected to be returned.
1426 */
1427struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1428                                unsigned long addr, int avoid_reserve)
1429{
1430        struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1431        if (IS_ERR(page))
1432                page = NULL;
1433        return page;
1434}
1435
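/*
 * Boot-time allocation of a gigantic page from memblock.  The start of
 * the page temporarily holds the huge_bootmem_page descriptor, which is
 * queued on huge_boot_pages until gather_bootmem_prealloc() runs once
 * mem_map is available.
 */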
1436int __weak alloc_bootmem_huge_page(struct hstate *h)
1437{
1438        struct huge_bootmem_page *m;
1439        int nr_nodes, node;
1440
1441        for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1442                void *addr;
1443
1444                addr = memblock_virt_alloc_try_nid_nopanic(
1445                                huge_page_size(h), huge_page_size(h),
1446                                0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1447                if (addr) {
1448                        /*
1449                         * Use the beginning of the huge page to store the
1450                         * huge_bootmem_page struct (until gather_bootmem
1451                         * puts them into the mem_map).
1452                         */
1453                        m = addr;
1454                        goto found;
1455                }
1456        }
1457        return 0;
1458
1459found:
1460        BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1461        /* Put them into a private list first because mem_map is not up yet */
1462        list_add(&m->list, &huge_boot_pages);
1463        m->hstate = h;
1464        return 1;
1465}
1466
1467static void __init prep_compound_huge_page(struct page *page, int order)
1468{
1469        if (unlikely(order > (MAX_ORDER - 1)))
1470                prep_compound_gigantic_page(page, order);
1471        else
1472                prep_compound_page(page, order);
1473}
1474
1475/* Put bootmem huge pages into the standard lists after mem_map is up */
1476static void __init gather_bootmem_prealloc(void)
1477{
1478        struct huge_bootmem_page *m;
1479
1480        list_for_each_entry(m, &huge_boot_pages, list) {
1481                struct hstate *h = m->hstate;
1482                struct page *page;
1483
1484#ifdef CONFIG_HIGHMEM
1485                page = pfn_to_page(m->phys >> PAGE_SHIFT);
1486                memblock_free_late(__pa(m),
1487                                   sizeof(struct huge_bootmem_page));
1488#else
1489                page = virt_to_page(m);
1490#endif
1491                WARN_ON(page_count(page) != 1);
1492                prep_compound_huge_page(page, h->order);
1493                WARN_ON(PageReserved(page));
1494                prep_new_huge_page(h, page, page_to_nid(page));
1495                /*
1496                 * If we had gigantic hugepages allocated at boot time, we need
1497                 * to restore the 'stolen' pages to totalram_pages in order to
 1498 * fix confusing memory reports from free(1) and other
 1499 * side effects, like CommitLimit going negative.
1500                 */
1501                if (hstate_is_gigantic(h))
1502                        adjust_managed_page_count(page, 1 << h->order);
1503        }
1504}
1505
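/*
 * Allocate the boot-requested number of huge pages for 'h', stopping at
 * the first failure and clamping max_huge_pages to the number actually
 * obtained.
 */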
1506static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1507{
1508        unsigned long i;
1509
1510        for (i = 0; i < h->max_huge_pages; ++i) {
1511                if (hstate_is_gigantic(h)) {
1512                        if (!alloc_bootmem_huge_page(h))
1513                                break;
1514                } else if (!alloc_fresh_huge_page(h,
1515                                         &node_states[N_MEMORY]))
1516                        break;
1517        }
1518        h->max_huge_pages = i;
1519}
1520
1521static void __init hugetlb_init_hstates(void)
1522{
1523        struct hstate *h;
1524
1525        for_each_hstate(h) {
1526                /* oversize hugepages were init'ed in early boot */
1527                if (!hstate_is_gigantic(h))
1528                        hugetlb_hstate_alloc_pages(h);
1529        }
1530}
1531
1532static char * __init memfmt(char *buf, unsigned long n)
1533{
1534        if (n >= (1UL << 30))
1535                sprintf(buf, "%lu GB", n >> 30);
1536        else if (n >= (1UL << 20))
1537                sprintf(buf, "%lu MB", n >> 20);
1538        else
1539                sprintf(buf, "%lu KB", n >> 10);
1540        return buf;
1541}
1542
1543static void __init report_hugepages(void)
1544{
1545        struct hstate *h;
1546
1547        for_each_hstate(h) {
1548                char buf[32];
1549                pr_info("HugeTLB registered %s page size, pre-allocated %lu pages\n",
1550                        memfmt(buf, huge_page_size(h)),
1551                        h->free_huge_pages);
1552        }
1553}
1554
1555#ifdef CONFIG_HIGHMEM
1556static void try_to_free_low(struct hstate *h, unsigned long count,
1557                                                nodemask_t *nodes_allowed)
1558{
1559        int i;
1560
1561        if (hstate_is_gigantic(h))
1562                return;
1563
1564        for_each_node_mask(i, *nodes_allowed) {
1565                struct page *page, *next;
1566                struct list_head *freel = &h->hugepage_freelists[i];
1567                list_for_each_entry_safe(page, next, freel, lru) {
1568                        if (count >= h->nr_huge_pages)
1569                                return;
1570                        if (PageHighMem(page))
1571                                continue;
1572                        list_del(&page->lru);
1573                        update_and_free_page(h, page);
1574                        h->free_huge_pages--;
1575                        h->free_huge_pages_node[page_to_nid(page)]--;
1576                }
1577        }
1578}
1579#else
1580static inline void try_to_free_low(struct hstate *h, unsigned long count,
1581                                                nodemask_t *nodes_allowed)
1582{
1583}
1584#endif
1585
1586/*
1587 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1588 * balanced by operating on them in a round-robin fashion.
1589 * Returns 1 if an adjustment was made.
1590 */
1591static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1592                                int delta)
1593{
1594        int nr_nodes, node;
1595
1596        VM_BUG_ON(delta != -1 && delta != 1);
1597
1598        if (delta < 0) {
1599                for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1600                        if (h->surplus_huge_pages_node[node])
1601                                goto found;
1602                }
1603        } else {
1604                for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1605                        if (h->surplus_huge_pages_node[node] <
1606                                        h->nr_huge_pages_node[node])
1607                                goto found;
1608                }
1609        }
1610        return 0;
1611
1612found:
1613        h->surplus_huge_pages += delta;
1614        h->surplus_huge_pages_node[node] += delta;
1615        return 1;
1616}
1617
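/*
 * persistent_huge_pages() is the part of the pool that is not surplus.
 * set_max_huge_pages() grows or shrinks that persistent pool towards
 * 'count', restricted to nodes_allowed, and returns the resulting
 * persistent page count.
 */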
1618#define persistent_huge_pages(h) ((h)->nr_huge_pages - (h)->surplus_huge_pages)
1619static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1620                                                nodemask_t *nodes_allowed)
1621{
1622        unsigned long min_count, ret;
1623
1624        if (hstate_is_gigantic(h) && !gigantic_page_supported())
1625                return h->max_huge_pages;
1626
1627        /*
1628         * Increase the pool size
1629         * First take pages out of surplus state.  Then make up the
1630         * remaining difference by allocating fresh huge pages.
1631         *
1632         * We might race with alloc_buddy_huge_page() here and be unable
1633         * to convert a surplus huge page to a normal huge page. That is
1634         * not critical, though, it just means the overall size of the
1635         * pool might be one hugepage larger than it needs to be, but
1636         * within all the constraints specified by the sysctls.
1637         */
1638        spin_lock(&hugetlb_lock);
1639        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1640                if (!adjust_pool_surplus(h, nodes_allowed, -1))
1641                        break;
1642        }
1643
1644        while (count > persistent_huge_pages(h)) {
1645                /*
1646                 * If this allocation races such that we no longer need the
1647                 * page, free_huge_page will handle it by freeing the page
1648                 * and reducing the surplus.
1649                 */
1650                spin_unlock(&hugetlb_lock);
1651                if (hstate_is_gigantic(h))
1652                        ret = alloc_fresh_gigantic_page(h, nodes_allowed);
1653                else
1654                        ret = alloc_fresh_huge_page(h, nodes_allowed);
1655                spin_lock(&hugetlb_lock);
1656                if (!ret)
1657                        goto out;
1658
1659                /* Bail for signals. Probably ctrl-c from user */
1660                if (signal_pending(current))
1661                        goto out;
1662        }
1663
1664        /*
1665         * Decrease the pool size
1666         * First return free pages to the buddy allocator (being careful
1667         * to keep enough around to satisfy reservations).  Then place
1668         * pages into surplus state as needed so the pool will shrink
1669         * to the desired size as pages become free.
1670         *
1671         * By placing pages into the surplus state independent of the
1672         * overcommit value, we are allowing the surplus pool size to
1673         * exceed overcommit. There are few sane options here. Since
1674         * alloc_buddy_huge_page() is checking the global counter,
1675         * though, we'll note that we're not allowed to exceed surplus
1676         * and won't grow the pool anywhere else. Not until one of the
1677         * sysctls is changed, or the surplus pages go out of use.
1678         */
1679        min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1680        min_count = max(count, min_count);
1681        try_to_free_low(h, min_count, nodes_allowed);
1682        while (min_count < persistent_huge_pages(h)) {
1683                if (!free_pool_huge_page(h, nodes_allowed, 0))
1684                        break;
1685                cond_resched_lock(&hugetlb_lock);
1686        }
1687        while (count < persistent_huge_pages(h)) {
1688                if (!adjust_pool_surplus(h, nodes_allowed, 1))
1689                        break;
1690        }
1691out:
1692        ret = persistent_huge_pages(h);
1693        spin_unlock(&hugetlb_lock);
1694        return ret;
1695}
1696
1697#define HSTATE_ATTR_RO(_name) \
1698        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1699
1700#define HSTATE_ATTR(_name) \
1701        static struct kobj_attribute _name##_attr = \
1702                __ATTR(_name, 0644, _name##_show, _name##_store)
1703
1704static struct kobject *hugepages_kobj;
1705static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1706
1707static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1708
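/*
 * Map a sysfs kobject back to its hstate.  Global hstate kobjects report
 * NUMA_NO_NODE through *nidp; per-node kobjects are resolved via
 * kobj_to_node_hstate() and report the owning node id instead.
 */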
1709static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1710{
1711        int i;
1712
1713        for (i = 0; i < HUGE_MAX_HSTATE; i++)
1714                if (hstate_kobjs[i] == kobj) {
1715                        if (nidp)
1716                                *nidp = NUMA_NO_NODE;
1717                        return &hstates[i];
1718                }
1719
1720        return kobj_to_node_hstate(kobj, nidp);
1721}
1722
1723static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1724                                        struct kobj_attribute *attr, char *buf)
1725{
1726        struct hstate *h;
1727        unsigned long nr_huge_pages;
1728        int nid;
1729
1730        h = kobj_to_hstate(kobj, &nid);
1731        if (nid == NUMA_NO_NODE)
1732                nr_huge_pages = h->nr_huge_pages;
1733        else
1734                nr_huge_pages = h->nr_huge_pages_node[nid];
1735
1736        return sprintf(buf, "%lu\n", nr_huge_pages);
1737}
1738
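/*
 * Common backend for the nr_hugepages writers (sysfs and sysctl).  Work
 * out which nodes the allocation/free may touch -- a single node, the
 * task's mempolicy nodes, or all memory nodes -- then resize the
 * persistent pool with set_max_huge_pages().
 */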
1739static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
1740                                           struct hstate *h, int nid,
1741                                           unsigned long count, size_t len)
1742{
1743        int err;
1744        NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1745
1746        if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
1747                err = -EINVAL;
1748                goto out;
1749        }
1750
1751        if (nid == NUMA_NO_NODE) {
1752                /*
1753                 * global hstate attribute
1754                 */
1755                if (!(obey_mempolicy &&
1756                                init_nodemask_of_mempolicy(nodes_allowed))) {
1757                        NODEMASK_FREE(nodes_allowed);
1758                        nodes_allowed = &node_states[N_MEMORY];
1759                }
1760        } else if (nodes_allowed) {
1761                /*
1762                 * per node hstate attribute: adjust count to global,
1763                 * but restrict alloc/free to the specified node.
1764                 */
1765                count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1766                init_nodemask_of_node(nodes_allowed, nid);
1767        } else
1768                nodes_allowed = &node_states[N_MEMORY];
1769
1770        h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1771
1772        if (nodes_allowed != &node_states[N_MEMORY])
1773                NODEMASK_FREE(nodes_allowed);
1774
1775        return len;
1776out:
1777        NODEMASK_FREE(nodes_allowed);
1778        return err;
1779}
1780
1781static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1782                                         struct kobject *kobj, const char *buf,
1783                                         size_t len)
1784{
1785        struct hstate *h;
1786        unsigned long count;
1787        int nid;
1788        int err;
1789
1790        err = kstrtoul(buf, 10, &count);
1791        if (err)
1792                return err;
1793
1794        h = kobj_to_hstate(kobj, &nid);
1795        return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1796}
1797
1798static ssize_t nr_hugepages_show(struct kobject *kobj,
1799                                       struct kobj_attribute *attr, char *buf)
1800{
1801        return nr_hugepages_show_common(kobj, attr, buf);
1802}
1803
1804static ssize_t nr_hugepages_store(struct kobject *kobj,
1805               struct kobj_attribute *attr, const char *buf, size_t len)
1806{
1807        return nr_hugepages_store_common(false, kobj, buf, len);
1808}
1809HSTATE_ATTR(nr_hugepages);
1810
1811#ifdef CONFIG_NUMA
1812
1813/*
1814 * hstate attribute for optionally mempolicy-based constraint on persistent
1815 * huge page alloc/free.
1816 */
1817static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1818                                       struct kobj_attribute *attr, char *buf)
1819{
1820        return nr_hugepages_show_common(kobj, attr, buf);
1821}
1822
1823static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1824               struct kobj_attribute *attr, const char *buf, size_t len)
1825{
1826        return nr_hugepages_store_common(true, kobj, buf, len);
1827}
1828HSTATE_ATTR(nr_hugepages_mempolicy);
1829#endif
1830
1831
1832static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1833                                        struct kobj_attribute *attr, char *buf)
1834{
1835        struct hstate *h = kobj_to_hstate(kobj, NULL);
1836        return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1837}
1838
1839static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1840                struct kobj_attribute *attr, const char *buf, size_t count)
1841{
1842        int err;
1843        unsigned long input;
1844        struct hstate *h = kobj_to_hstate(kobj, NULL);
1845
1846        if (hstate_is_gigantic(h))
1847                return -EINVAL;
1848
1849        err = kstrtoul(buf, 10, &input);
1850        if (err)
1851                return err;
1852
1853        spin_lock(&hugetlb_lock);
1854        h->nr_overcommit_huge_pages = input;
1855        spin_unlock(&hugetlb_lock);
1856
1857        return count;
1858}
1859HSTATE_ATTR(nr_overcommit_hugepages);
1860
1861static ssize_t free_hugepages_show(struct kobject *kobj,
1862                                        struct kobj_attribute *attr, char *buf)
1863{
1864        struct hstate *h;
1865        unsigned long free_huge_pages;
1866        int nid;
1867
1868        h = kobj_to_hstate(kobj, &nid);
1869        if (nid == NUMA_NO_NODE)
1870                free_huge_pages = h->free_huge_pages;
1871        else
1872                free_huge_pages = h->free_huge_pages_node[nid];
1873
1874        return sprintf(buf, "%lu\n", free_huge_pages);
1875}
1876HSTATE_ATTR_RO(free_hugepages);
1877
1878static ssize_t resv_hugepages_show(struct kobject *kobj,
1879                                        struct kobj_attribute *attr, char *buf)
1880{
1881        struct hstate *h = kobj_to_hstate(kobj, NULL);
1882        return sprintf(buf, "%lu\n", h->resv_huge_pages);
1883}
1884HSTATE_ATTR_RO(resv_hugepages);
1885
1886static ssize_t surplus_hugepages_show(struct kobject *kobj,
1887                                        struct kobj_attribute *attr, char *buf)
1888{
1889        struct hstate *h;
1890        unsigned long surplus_huge_pages;
1891        int nid;
1892
1893        h = kobj_to_hstate(kobj, &nid);
1894        if (nid == NUMA_NO_NODE)
1895                surplus_huge_pages = h->surplus_huge_pages;
1896        else
1897                surplus_huge_pages = h->surplus_huge_pages_node[nid];
1898
1899        return sprintf(buf, "%lu\n", surplus_huge_pages);
1900}
1901HSTATE_ATTR_RO(surplus_hugepages);
1902
1903static struct attribute *hstate_attrs[] = {
1904        &nr_hugepages_attr.attr,
1905        &nr_overcommit_hugepages_attr.attr,
1906        &free_hugepages_attr.attr,
1907        &resv_hugepages_attr.attr,
1908        &surplus_hugepages_attr.attr,
1909#ifdef CONFIG_NUMA
1910        &nr_hugepages_mempolicy_attr.attr,
1911#endif
1912        NULL,
1913};
1914
1915static struct attribute_group hstate_attr_group = {
1916        .attrs = hstate_attrs,
1917};
1918
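/*
 * Create the per-hstate sysfs directory (named after the hstate, e.g.
 * "hugepages-2048kB" for 2MB pages) under 'parent' and populate it with
 * the given attribute group.
 */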
1919static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1920                                    struct kobject **hstate_kobjs,
1921                                    struct attribute_group *hstate_attr_group)
1922{
1923        int retval;
1924        int hi = hstate_index(h);
1925
1926        hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1927        if (!hstate_kobjs[hi])
1928                return -ENOMEM;
1929
1930        retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1931        if (retval)
1932                kobject_put(hstate_kobjs[hi]);
1933
1934        return retval;
1935}
1936
1937static void __init hugetlb_sysfs_init(void)
1938{
1939        struct hstate *h;
1940        int err;
1941
1942        hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1943        if (!hugepages_kobj)
1944                return;
1945
1946        for_each_hstate(h) {
1947                err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1948                                         hstate_kobjs, &hstate_attr_group);
1949                if (err)
1950                        pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
1951        }
1952}
1953
1954#ifdef CONFIG_NUMA
1955
1956/*
1957 * node_hstate/s - associate per node hstate attributes, via their kobjects,
1958 * with node devices in node_devices[] using a parallel array.  The array
1959 * index of a node device or _hstate == node id.
1960 * This is here to avoid any static dependency of the node device driver, in
1961 * the base kernel, on the hugetlb module.
1962 */
1963struct node_hstate {
1964        struct kobject          *hugepages_kobj;
1965        struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1966};
1967struct node_hstate node_hstates[MAX_NUMNODES];
1968
1969/*
1970 * A subset of global hstate attributes for node devices
1971 */
1972static struct attribute *per_node_hstate_attrs[] = {
1973        &nr_hugepages_attr.attr,
1974        &free_hugepages_attr.attr,
1975        &surplus_hugepages_attr.attr,
1976        NULL,
1977};
1978
1979static struct attribute_group per_node_hstate_attr_group = {
1980        .attrs = per_node_hstate_attrs,
1981};
1982
1983/*
1984 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1985 * Returns node id via non-NULL nidp.
1986 */
1987static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1988{
1989        int nid;
1990
1991        for (nid = 0; nid < nr_node_ids; nid++) {
1992                struct node_hstate *nhs = &node_hstates[nid];
1993                int i;
1994                for (i = 0; i < HUGE_MAX_HSTATE; i++)
1995                        if (nhs->hstate_kobjs[i] == kobj) {
1996                                if (nidp)
1997                                        *nidp = nid;
1998                                return &hstates[i];
1999                        }
2000        }
2001
2002        BUG();
2003        return NULL;
2004}
2005
2006/*
2007 * Unregister hstate attributes from a single node device.
2008 * No-op if no hstate attributes attached.
2009 */
2010static void hugetlb_unregister_node(struct node *node)
2011{
2012        struct hstate *h;
2013        struct node_hstate *nhs = &node_hstates[node->dev.id];
2014
2015        if (!nhs->hugepages_kobj)
2016                return;         /* no hstate attributes */
2017
2018        for_each_hstate(h) {
2019                int idx = hstate_index(h);
2020                if (nhs->hstate_kobjs[idx]) {
2021                        kobject_put(nhs->hstate_kobjs[idx]);
2022                        nhs->hstate_kobjs[idx] = NULL;
2023                }
2024        }
2025
2026        kobject_put(nhs->hugepages_kobj);
2027        nhs->hugepages_kobj = NULL;
2028}
2029
2030/*
2031 * hugetlb module exit:  unregister hstate attributes from node devices
2032 * that have them.
2033 */
2034static void hugetlb_unregister_all_nodes(void)
2035{
2036        int nid;
2037
2038        /*
2039         * disable node device registrations.
2040         */
2041        register_hugetlbfs_with_node(NULL, NULL);
2042
2043        /*
2044         * remove hstate attributes from any nodes that have them.
2045         */
2046        for (nid = 0; nid < nr_node_ids; nid++)
2047                hugetlb_unregister_node(node_devices[nid]);
2048}
2049
2050/*
2051 * Register hstate attributes for a single node device.
2052 * No-op if attributes already registered.
2053 */
2054static void hugetlb_register_node(struct node *node)
2055{
2056        struct hstate *h;
2057        struct node_hstate *nhs = &node_hstates[node->dev.id];
2058        int err;
2059
2060        if (nhs->hugepages_kobj)
2061                return;         /* already allocated */
2062
2063        nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2064                                                        &node->dev.kobj);
2065        if (!nhs->hugepages_kobj)
2066                return;
2067
2068        for_each_hstate(h) {
2069                err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2070                                                nhs->hstate_kobjs,
2071                                                &per_node_hstate_attr_group);
2072                if (err) {
2073                        pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2074                                h->name, node->dev.id);
2075                        hugetlb_unregister_node(node);
2076                        break;
2077                }
2078        }
2079}
2080
2081/*
2082 * hugetlb init time:  register hstate attributes for all registered node
2083 * devices of nodes that have memory.  All on-line nodes should have
2084 * registered their associated device by this time.
2085 */
2086static void __init hugetlb_register_all_nodes(void)
2087{
2088        int nid;
2089
2090        for_each_node_state(nid, N_MEMORY) {
2091                struct node *node = node_devices[nid];
2092                if (node->dev.id == nid)
2093                        hugetlb_register_node(node);
2094        }
2095
2096        /*
2097         * Let the node device driver know we're here so it can
2098         * [un]register hstate attributes on node hotplug.
2099         */
2100        register_hugetlbfs_with_node(hugetlb_register_node,
2101                                     hugetlb_unregister_node);
2102}
2103#else   /* !CONFIG_NUMA */
2104
2105static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2106{
2107        BUG();
2108        if (nidp)
2109                *nidp = -1;
2110        return NULL;
2111}
2112
2113static void hugetlb_unregister_all_nodes(void) { }
2114
2115static void hugetlb_register_all_nodes(void) { }
2116
2117#endif
2118
2119static void __exit hugetlb_exit(void)
2120{
2121        struct hstate *h;
2122
2123        hugetlb_unregister_all_nodes();
2124
2125        for_each_hstate(h) {
2126                kobject_put(hstate_kobjs[hstate_index(h)]);
2127        }
2128
2129        kobject_put(hugepages_kobj);
2130        kfree(htlb_fault_mutex_table);
2131}
2132module_exit(hugetlb_exit);
2133
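/*
 * Boot-time initialization: make sure a default hstate exists, perform the
 * requested pre-allocation, register the sysfs hierarchy (under
 * /sys/kernel/mm/hugepages plus the per-node variants) and the hugetlb
 * cgroup files, and allocate the table of fault mutexes used to serialize
 * faults on the same logical page.
 */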
2134static int __init hugetlb_init(void)
2135{
2136        int i;
2137
2138        if (!hugepages_supported())
2139                return 0;
2140
2141        if (!size_to_hstate(default_hstate_size)) {
2142                default_hstate_size = HPAGE_SIZE;
2143                if (!size_to_hstate(default_hstate_size))
2144                        hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2145        }
2146        default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2147        if (default_hstate_max_huge_pages)
2148                default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2149
2150        hugetlb_init_hstates();
2151        gather_bootmem_prealloc();
2152        report_hugepages();
2153
2154        hugetlb_sysfs_init();
2155        hugetlb_register_all_nodes();
2156        hugetlb_cgroup_file_init();
2157
2158#ifdef CONFIG_SMP
2159        num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2160#else
2161        num_fault_mutexes = 1;
2162#endif
2163        htlb_fault_mutex_table =
2164                kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2165        BUG_ON(!htlb_fault_mutex_table);
2166
2167        for (i = 0; i < num_fault_mutexes; i++)
2168                mutex_init(&htlb_fault_mutex_table[i]);
2169        return 0;
2170}
2171module_init(hugetlb_init);
2172
2173/* Should be called on processing a hugepagesz=... option */
2174void __init hugetlb_add_hstate(unsigned order)
2175{
2176        struct hstate *h;
2177        unsigned long i;
2178
2179        if (size_to_hstate(PAGE_SIZE << order)) {
2180                pr_warning("hugepagesz= specified twice, ignoring\n");
2181                return;
2182        }
2183        BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2184        BUG_ON(order == 0);
2185        h = &hstates[hugetlb_max_hstate++];
2186        h->order = order;
2187        h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2188        h->nr_huge_pages = 0;
2189        h->free_huge_pages = 0;
2190        for (i = 0; i < MAX_NUMNODES; ++i)
2191                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2192        INIT_LIST_HEAD(&h->hugepage_activelist);
2193        h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2194        h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2195        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2196                                        huge_page_size(h)/1024);
2197
2198        parsed_hstate = h;
2199}
2200
2201static int __init hugetlb_nrpages_setup(char *s)
2202{
2203        unsigned long *mhp;
2204        static unsigned long *last_mhp;
2205
2206        /*
2207         * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2208         * so this hugepages= parameter goes to the "default hstate".
2209         */
2210        if (!hugetlb_max_hstate)
2211                mhp = &default_hstate_max_huge_pages;
2212        else
2213                mhp = &parsed_hstate->max_huge_pages;
2214
2215        if (mhp == last_mhp) {
2216                pr_warning("hugepages= specified twice without "
2217                           "interleaving hugepagesz=, ignoring\n");
2218                return 1;
2219        }
2220
2221        if (sscanf(s, "%lu", mhp) <= 0)
2222                *mhp = 0;
2223
2224        /*
2225         * Global state is always initialized later in hugetlb_init.
2226         * But we need to allocate gigantic hstates (order >= MAX_ORDER)
2227         * here early, while the bootmem allocator is still usable.
2228         */
2229        if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2230                hugetlb_hstate_alloc_pages(parsed_hstate);
2231
2232        last_mhp = mhp;
2233
2234        return 1;
2235}
2236__setup("hugepages=", hugetlb_nrpages_setup);
2237
2238static int __init hugetlb_default_setup(char *s)
2239{
2240        default_hstate_size = memparse(s, &s);
2241        return 1;
2242}
2243__setup("default_hugepagesz=", hugetlb_default_setup);
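/*
 * Example command line combining the options parsed above; each hugepages=
 * count applies to the most recently specified hugepagesz=:
 *
 *     default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 */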
2244
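/*
 * Sum a per-node counter array (e.g. free_huge_pages_node[]) over the
 * nodes allowed by the current task's cpuset.
 */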
2245static unsigned int cpuset_mems_nr(unsigned int *array)
2246{
2247        int node;
2248        unsigned int nr = 0;
2249
2250        for_each_node_mask(node, cpuset_current_mems_allowed)
2251                nr += array[node];
2252
2253        return nr;
2254}
2255
2256#ifdef CONFIG_SYSCTL
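/*
 * Handle the vm.nr_hugepages (and vm.nr_hugepages_mempolicy) sysctls for
 * the default hstate: read the value with proc_doulongvec_minmax() and,
 * on a write, resize the pool via __nr_hugepages_store_common().
 */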
2257static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2258                         struct ctl_table *table, int write,
2259                         void __user *buffer, size_t *length, loff_t *ppos)
2260{
2261        struct hstate *h = &default_hstate;
2262        unsigned long tmp = h->max_huge_pages;
2263        int ret;
2264
2265        if (!hugepages_supported())
2266                return -ENOTSUPP;
2267
2268        table->data = &tmp;
2269        table->maxlen = sizeof(unsigned long);
2270        ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2271        if (ret)
2272                goto out;
2273
2274        if (write)
2275                ret = __nr_hugepages_store_common(obey_mempolicy, h,
2276                                                  NUMA_NO_NODE, tmp, *length);
2277out:
2278        return ret;
2279}
2280
2281int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2282                          void __user *buffer, size_t *length, loff_t *ppos)
2283{
2284
2285        return hugetlb_sysctl_handler_common(false, table, write,
2286                                                        buffer, length, ppos);
2287}
2288
2289#ifdef CONFIG_NUMA
2290int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2291                          void __user *buffer, size_t *length, loff_t *ppos)
2292{
2293        return hugetlb_sysctl_handler_common(true, table, write,
2294                                                        buffer, length, ppos);
2295}
2296#endif /* CONFIG_NUMA */
2297
2298int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2299                        void __user *buffer,
2300                        size_t *length, loff_t *ppos)
2301{
2302        struct hstate *h = &default_hstate;
2303        unsigned long tmp;
2304        int ret;
2305
2306        if (!hugepages_supported())
2307                return -ENOTSUPP;
2308
2309        tmp = h->nr_overcommit_huge_pages;
2310
2311        if (write && hstate_is_gigantic(h))
2312                return -EINVAL;
2313
2314        table->data = &tmp;
2315        table->maxlen = sizeof(unsigned long);
2316        ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2317        if (ret)
2318                goto out;
2319
2320        if (write) {
2321                spin_lock(&hugetlb_lock);
2322                h->nr_overcommit_huge_pages = tmp;
2323                spin_unlock(&hugetlb_lock);
2324        }
2325out:
2326        return ret;
2327}
2328
2329#endif /* CONFIG_SYSCTL */
2330
2331void hugetlb_report_meminfo(struct seq_file *m)
2332{
2333        struct hstate *h = &default_hstate;
2334        if (!hugepages_supported())
2335                return;
2336        seq_printf(m,
2337                        "HugePages_Total:   %5lu\n"
2338                        "HugePages_Free:    %5lu\n"
2339                        "HugePages_Rsvd:    %5lu\n"
2340                        "HugePages_Surp:    %5lu\n"
2341                        "Hugepagesize:   %8lu kB\n",
2342                        h->nr_huge_pages,
2343                        h->free_huge_pages,
2344                        h->resv_huge_pages,
2345                        h->surplus_huge_pages,
2346                        1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2347}
2348
2349int hugetlb_report_node_meminfo(int nid, char *buf)
2350{
2351        struct hstate *h = &default_hstate;
2352        if (!hugepages_supported())
2353                return 0;
2354        return sprintf(buf,
2355                "Node %d HugePages_Total: %5u\n"
2356                "Node %d HugePages_Free:  %5u\n"
2357                "Node %d HugePages_Surp:  %5u\n",
2358                nid, h->nr_huge_pages_node[nid],
2359                nid, h->free_huge_pages_node[nid],
2360                nid, h->surplus_huge_pages_node[nid]);
2361}
2362
2363void hugetlb_show_meminfo(void)
2364{
2365        struct hstate *h;
2366        int nid;
2367
2368        if (!hugepages_supported())
2369                return;
2370
2371        for_each_node_state(nid, N_MEMORY)
2372                for_each_hstate(h)
2373                        pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2374                                nid,
2375                                h->nr_huge_pages_node[nid],
2376                                h->free_huge_pages_node[nid],
2377                                h->surplus_huge_pages_node[nid],
2378                                1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2379}
2380
2381/* Return the number of pages of physical memory we have, in PAGE_SIZE units. */
2382unsigned long hugetlb_total_pages(void)
2383{
2384        struct hstate *h;
2385        unsigned long nr_total_pages = 0;
2386
2387        for_each_hstate(h)
2388                nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2389        return nr_total_pages;
2390}
2391
2392static int hugetlb_acct_memory(struct hstate *h, long delta)
2393{
2394        int ret = -ENOMEM;
2395
2396        spin_lock(&hugetlb_lock);
2397        /*
2398         * When cpuset is configured, it breaks strict hugetlb page
2399         * reservation because the accounting is done on a global variable.
2400         * Such a reservation is of little use with cpusets because it is
2401         * never checked against page availability in the task's current
2402         * cpuset: an application can still be OOM-killed by the kernel if
2403         * the cpuset it runs in has no free hugetlb pages.
2404         * Enforcing strict accounting with cpusets is almost impossible
2405         * (or too ugly) because cpusets are fluid: tasks and memory nodes
2406         * can be moved between cpusets at any time.
2407         *
2408         * Changing the semantics of shared hugetlb mappings under cpusets
2409         * is undesirable.  However, to preserve some of the semantics, we
2410         * fall back to checking the current free page count as a best
2411         * effort, hopefully minimizing the impact of the semantic change
2412         * that cpusets introduce.
2413         */
2414        if (delta > 0) {
2415                if (gather_surplus_pages(h, delta) < 0)
2416                        goto out;
2417
2418                if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2419                        return_unused_surplus_pages(h, delta);
2420                        goto out;
2421                }
2422        }
2423
2424        ret = 0;
2425        if (delta < 0)
2426                return_unused_surplus_pages(h, (unsigned long) -delta);
2427
2428out:
2429        spin_unlock(&hugetlb_lock);
2430        return ret;
2431}
2432
2433static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2434{
2435        struct resv_map *resv = vma_resv_map(vma);
2436
2437        /*
2438         * This new VMA should share its sibling's reservation map if present.
2439         * The VMA will only ever have a valid reservation map pointer where
2440         * it is being copied for another still existing VMA.  As that VMA
2441         * has a reference to the reservation map it cannot disappear until
2442         * after this open call completes.  It is therefore safe to take a
2443         * new reference here without additional locking.
2444         */
2445        if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2446                kref_get(&resv->refs);
2447}
2448
2449static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2450{
2451        struct hstate *h = hstate_vma(vma);
2452        struct resv_map *resv = vma_resv_map(vma);
2453        struct hugepage_subpool *spool = subpool_vma(vma);
2454        unsigned long reserve, start, end;
2455
2456        if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2457                return;
2458
2459        start = vma_hugecache_offset(h, vma, vma->vm_start);
2460        end = vma_hugecache_offset(h, vma, vma->vm_end);
2461
2462        reserve = (end - start) - region_count(resv, start, end);
2463
2464        kref_put(&resv->refs, resv_map_release);
2465
2466        if (reserve) {
2467                hugetlb_acct_memory(h, -reserve);
2468                hugepage_subpool_put_pages(spool, reserve);
2469        }
2470}
2471
2472/*
2473 * We cannot handle pagefaults against hugetlb pages at all.  They cause
2474 * handle_mm_fault() to try to instantiate regular-sized pages in the
2475 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2476 * this far.
2477 */
2478static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2479{
2480        BUG();
2481        return 0;
2482}
2483
2484const struct vm_operations_struct hugetlb_vm_ops = {
2485        .fault = hugetlb_vm_op_fault,
2486        .open = hugetlb_vm_op_open,
2487        .close = hugetlb_vm_op_close,
2488};
2489
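/*
 * Build the huge PTE for 'page' in 'vma': marked young and huge, and
 * either writable+dirty or write-protected depending on 'writable'.
 */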
2490static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2491                                int writable)
2492{
2493        pte_t entry;
2494
2495        if (writable) {
2496                entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2497                                         vma->vm_page_prot)));
2498        } else {
2499                entry = huge_pte_wrprotect(mk_huge_pte(page,
2500                                           vma->vm_page_prot));
2501        }
2502        entry = pte_mkyoung(entry);
2503        entry = pte_mkhuge(entry);
2504        entry = arch_make_huge_pte(entry, vma, page, writable);
2505
2506        return entry;
2507}
2508
2509static void set_huge_ptep_writable(struct vm_area_struct *vma,
2510                                   unsigned long address, pte_t *ptep)
2511{
2512        pte_t entry;
2513
2514        entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2515        if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2516                update_mmu_cache(vma, address, ptep);
2517}
2518
2519static int is_hugetlb_entry_migration(pte_t pte)
2520{
2521        swp_entry_t swp;
2522
2523        if (huge_pte_none(pte) || pte_present(pte))
2524                return 0;
2525        swp = pte_to_swp_entry(pte);
2526        if (non_swap_entry(swp) && is_migration_entry(swp))
2527                return 1;
2528        else
2529                return 0;
2530}
2531
2532static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2533{
2534        swp_entry_t swp;
2535
2536        if (huge_pte_none(pte) || pte_present(pte))
2537                return 0;
2538        swp = pte_to_swp_entry(pte);
2539        if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2540                return 1;
2541        else
2542                return 0;
2543}
2544
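/*
 * Copy the huge page table entries of 'vma' from the parent mm 'src' to
 * the child mm 'dst' at fork time.  Shared page tables are skipped,
 * migration/hwpoison swap entries are propagated, and for private
 * writable mappings both parent and child PTEs are write-protected to
 * force COW.
 */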
2545int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2546                            struct vm_area_struct *vma)
2547{
2548        pte_t *src_pte, *dst_pte, entry;
2549        struct page *ptepage;
2550        unsigned long addr;
2551        int cow;
2552        struct hstate *h = hstate_vma(vma);
2553        unsigned long sz = huge_page_size(h);
2554        unsigned long mmun_start;       /* For mmu_notifiers */
2555        unsigned long mmun_end;         /* For mmu_notifiers */
2556        int ret = 0;
2557
2558        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2559
2560        mmun_start = vma->vm_start;
2561        mmun_end = vma->vm_end;
2562        if (cow)
2563                mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2564
2565        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2566                spinlock_t *src_ptl, *dst_ptl;
2567                src_pte = huge_pte_offset(src, addr);
2568                if (!src_pte)
2569                        continue;
2570                dst_pte = huge_pte_alloc(dst, addr, sz);
2571                if (!dst_pte) {
2572                        ret = -ENOMEM;
2573                        break;
2574                }
2575
2576                /* If the pagetables are shared don't copy or take references */
2577                if (dst_pte == src_pte)
2578                        continue;
2579
2580                dst_ptl = huge_pte_lock(h, dst, dst_pte);
2581                src_ptl = huge_pte_lockptr(h, src, src_pte);
2582                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2583                entry = huge_ptep_get(src_pte);
2584                if (huge_pte_none(entry)) { /* skip none entry */
2585                        ;
2586                } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2587                                    is_hugetlb_entry_hwpoisoned(entry))) {
2588                        swp_entry_t swp_entry = pte_to_swp_entry(entry);
2589
2590                        if (is_write_migration_entry(swp_entry) && cow) {
2591                                /*
2592                                 * COW mappings require pages in both
2593                                 * parent and child to be set to read.
2594                                 */
2595                                make_migration_entry_read(&swp_entry);
2596                                entry = swp_entry_to_pte(swp_entry);
2597                                set_huge_pte_at(src, addr, src_pte, entry);
2598                        }
2599                        set_huge_pte_at(dst, addr, dst_pte, entry);
2600                } else {
2601                        if (cow) {
2602                                huge_ptep_set_wrprotect(src, addr, src_pte);
2603                                mmu_notifier_invalidate_range(src, mmun_start,
2604                                                                   mmun_end);
2605                        }
2606                        entry = huge_ptep_get(src_pte);
2607                        ptepage = pte_page(entry);
2608                        get_page(ptepage);
2609                        page_dup_rmap(ptepage);
2610                        set_huge_pte_at(dst, addr, dst_pte, entry);
2611                }
2612                spin_unlock(src_ptl);
2613                spin_unlock(dst_ptl);
2614        }
2615
2616        if (cow)
2617                mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2618
2619        return ret;
2620}
2621
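/*
 * Tear down the huge PTEs in [start, end) of 'vma', batching the pages in
 * the mmu_gather for a combined TLB flush and free.  If ref_page is
 * supplied, only that page is unmapped and the VMA is marked
 * HPAGE_RESV_UNMAPPED so later faults fail instead of silently mapping a
 * zeroed page.
 */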
2622void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2623                            unsigned long start, unsigned long end,
2624                            struct page *ref_page)
2625{
2626        int force_flush = 0;
2627        struct mm_struct *mm = vma->vm_mm;
2628        unsigned long address;
2629        pte_t *ptep;
2630        pte_t pte;
2631        spinlock_t *ptl;
2632        struct page *page;
2633        struct hstate *h = hstate_vma(vma);
2634        unsigned long sz = huge_page_size(h);
2635        const unsigned long mmun_start = start; /* For mmu_notifiers */
2636        const unsigned long mmun_end   = end;   /* For mmu_notifiers */
2637
2638        WARN_ON(!is_vm_hugetlb_page(vma));
2639        BUG_ON(start & ~huge_page_mask(h));
2640        BUG_ON(end & ~huge_page_mask(h));
2641
2642        tlb_start_vma(tlb, vma);
2643        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2644        address = start;
2645again:
2646        for (; address < end; address += sz) {
2647                ptep = huge_pte_offset(mm, address);
2648                if (!ptep)
2649                        continue;
2650
2651                ptl = huge_pte_lock(h, mm, ptep);
2652                if (huge_pmd_unshare(mm, &address, ptep))
2653                        goto unlock;
2654
2655                pte = huge_ptep_get(ptep);
2656                if (huge_pte_none(pte))
2657                        goto unlock;
2658
2659                /*
2660                 * A HWPoisoned hugepage has already been unmapped and its ref dropped
2661                 */
2662                if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
2663                        huge_pte_clear(mm, address, ptep);
2664                        goto unlock;
2665                }
2666
2667                page = pte_page(pte);
2668                /*
2669                 * If a reference page is supplied, it is because a specific
2670                 * page is being unmapped, not a range. Ensure the page we
2671                 * are about to unmap is the actual page of interest.
2672                 */
2673                if (ref_page) {
2674                        if (page != ref_page)
2675                                goto unlock;
2676
2677                        /*
2678                         * Mark the VMA as having unmapped its page so that
2679                         * future faults in this VMA will fail rather than
2680                         * looking like data was lost
2681                         */
2682                        set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2683                }
2684
2685                pte = huge_ptep_get_and_clear(mm, address, ptep);
2686                tlb_remove_tlb_entry(tlb, ptep, address);
2687                if (huge_pte_dirty(pte))
2688                        set_page_dirty(page);
2689
2690                page_remove_rmap(page);
2691                force_flush = !__tlb_remove_page(tlb, page);
2692                if (force_flush) {
2693                        address += sz;
2694                        spin_unlock(ptl);
2695                        break;
2696                }
2697                /* Bail out after unmapping reference page if supplied */
2698                if (ref_page) {
2699                        spin_unlock(ptl);
2700                        break;
2701                }
2702unlock:
2703                spin_unlock(ptl);
2704        }
2705        /*
2706         * mmu_gather ran out of room to batch pages; we break out of
2707         * the PTE lock to avoid doing the potentially expensive TLB invalidate
2708         * and page-free while holding it.
2709         */
2710        if (force_flush) {
2711                force_flush = 0;
2712                tlb_flush_mmu(tlb);
2713                if (address < end && !ref_page)
2714                        goto again;
2715        }
2716        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2717        tlb_end_vma(tlb, vma);
2718}
2719
2720void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2721                          struct vm_area_struct *vma, unsigned long start,
2722                          unsigned long end, struct page *ref_page)
2723{
2724        __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2725
2726        /*
2727         * Clear this flag so that x86's huge_pmd_share page_table_shareable
2728         * test will fail on a vma being torn down, and not grab a page table
2729         * on its way out.  We're lucky that the flag has such an appropriate
2730         * name, and can in fact be safely cleared here. We could clear it
2731         * before the __unmap_hugepage_range above, but all that's necessary
2732         * is to clear it before releasing the i_mmap_rwsem. This works
2733         * because in the context this is called, the VMA is about to be
2734         * destroyed and the i_mmap_rwsem is held.
2735         */
2736        vma->vm_flags &= ~VM_MAYSHARE;
2737}
2738
2739void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2740                          unsigned long end, struct page *ref_page)
2741{
2742        struct mm_struct *mm;
2743        struct mmu_gather tlb;
2744
2745        mm = vma->vm_mm;
2746
2747        tlb_gather_mmu(&tlb, mm, start, end);
2748        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2749        tlb_finish_mmu(&tlb, start, end);
2750}
2751
2752/*
2753 * This is called when the original mapper fails to COW a MAP_PRIVATE
2754 * mapping it owns the reserve page for. The intention is to unmap the page
2755 * from other VMAs and let the children be SIGKILLed if they are faulting the
2756 * same region.
2757 */
2758static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2759                              struct page *page, unsigned long address)
2760{
2761        struct hstate *h = hstate_vma(vma);
2762        struct vm_area_struct *iter_vma;
2763        struct address_space *mapping;
2764        pgoff_t pgoff;
2765
2766        /*
2767         * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2768         * from page cache lookup which is in HPAGE_SIZE units.
2769         */
2770        address = address & huge_page_mask(h);
2771        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2772                        vma->vm_pgoff;
2773        mapping = file_inode(vma->vm_file)->i_mapping;
2774
2775        /*
2776         * Take the mapping lock for the duration of the table walk. As
2777         * this mapping should be shared between all the VMAs,
2778         * __unmap_hugepage_range() is called as the lock is already held
2779         */
2780        i_mmap_lock_write(mapping);
2781        vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2782                /* Do not unmap the current VMA */
2783                if (iter_vma == vma)
2784                        continue;
2785
2786                /*
2787                 * Unmap the page from other VMAs without their own reserves.
2788                 * They get marked to be SIGKILLed if they fault in these
2789                 * areas. This is because a future no-page fault on this VMA
2790                 * could insert a zeroed page instead of the data existing
2791                 * from the time of fork. This would look like data corruption.
2792                 */
2793                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2794                        unmap_hugepage_range(iter_vma, address,
2795                                             address + huge_page_size(h), page);
2796        }
2797        i_mmap_unlock_write(mapping);
2798}
2799
2800/*
2801 * hugetlb_cow() should be called with the page lock of the original hugepage held.
2802 * Called with the hugetlb fault mutex held and pte_page locked so we
2803 * cannot race with other handlers or page migration.
2804 * Keep the pte_same checks anyway to make transition from the mutex easier.
2805 */
2806static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2807                        unsigned long address, pte_t *ptep, pte_t pte,
2808                        struct page *pagecache_page, spinlock_t *ptl)
2809{
2810        struct hstate *h = hstate_vma(vma);
2811        struct page *old_page, *new_page;
2812        int ret = 0, outside_reserve = 0;
2813        unsigned long mmun_start;       /* For mmu_notifiers */
2814        unsigned long mmun_end;         /* For mmu_notifiers */
2815
2816        old_page = pte_page(pte);
2817
2818retry_avoidcopy:
2819        /* If no-one else is actually using this page, avoid the copy
2820         * and just make the page writable */
2821        if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2822                page_move_anon_rmap(old_page, vma, address);
2823                set_huge_ptep_writable(vma, address, ptep);
2824                return 0;
2825        }
2826
2827        /*
2828         * If the process that created a MAP_PRIVATE mapping is about to
2829         * perform a COW due to a shared page count, attempt to satisfy
2830         * the allocation without using the existing reserves. The pagecache
2831         * page is used to determine if the reserve at this address was
2832         * consumed or not. If reserves were used, a partial faulted mapping
2833         * at the time of fork() could consume its reserves on COW instead
2834         * of the full address range.
2835         */
2836        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2837                        old_page != pagecache_page)
2838                outside_reserve = 1;
2839
2840        page_cache_get(old_page);
2841
2842        /*
2843         * Drop page table lock as buddy allocator may be called. It will
2844         * be acquired again before returning to the caller, as expected.
2845         */
2846        spin_unlock(ptl);
2847        new_page = alloc_huge_page(vma, address, outside_reserve);
2848
2849        if (IS_ERR(new_page)) {
2850                /*
2851                 * If a process owning a MAP_PRIVATE mapping fails to COW,
2852                 * it is due to references held by a child and an insufficient
2853                 * huge page pool. To guarantee the original mappers
2854                 * reliability, unmap the page from child processes. The child
2855                 * may get SIGKILLed if it later faults.
2856                 */
2857                if (outside_reserve) {
2858                        page_cache_release(old_page);
2859                        BUG_ON(huge_pte_none(pte));
2860                        unmap_ref_private(mm, vma, old_page, address);
2861                        BUG_ON(huge_pte_none(pte));
2862                        spin_lock(ptl);
2863                        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2864                        if (likely(ptep &&
2865                                   pte_same(huge_ptep_get(ptep), pte)))
2866                                goto retry_avoidcopy;
2867                        /*
2868                         * A race occurred while re-acquiring the page
2869                         * table lock, and our job is done.
2870                         */
2871                        return 0;
2872                }
2873
2874                ret = (PTR_ERR(new_page) == -ENOMEM) ?
2875                        VM_FAULT_OOM : VM_FAULT_SIGBUS;
2876                goto out_release_old;
2877        }
2878
2879        /*
2880         * When the original hugepage is a shared one, it does not have
2881         * anon_vma prepared.
2882         */
2883        if (unlikely(anon_vma_prepare(vma))) {
2884                ret = VM_FAULT_OOM;
2885                goto out_release_all;
2886        }
2887
2888        copy_user_huge_page(new_page, old_page, address, vma,
2889                            pages_per_huge_page(h));
2890        __SetPageUptodate(new_page);
2891
2892        mmun_start = address & huge_page_mask(h);
2893        mmun_end = mmun_start + huge_page_size(h);
2894        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2895
2896        /*
2897         * Retake the page table lock to check for racing updates
2898         * before the page tables are altered
2899         */
2900        spin_lock(ptl);
2901        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2902        if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
2903                ClearPagePrivate(new_page);
2904
2905                /* Break COW */
2906                huge_ptep_clear_flush(vma, address, ptep);
2907                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
2908                set_huge_pte_at(mm, address, ptep,
2909                                make_huge_pte(vma, new_page, 1));
2910                page_remove_rmap(old_page);
2911                hugepage_add_new_anon_rmap(new_page, vma, address);
2912                /* Make the old page be freed below */
2913                new_page = old_page;
2914        }
2915        spin_unlock(ptl);
2916        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2917out_release_all:
2918        page_cache_release(new_page);
2919out_release_old:
2920        page_cache_release(old_page);
2921
2922        spin_lock(ptl); /* Caller expects lock to be held */
2923        return ret;
2924}
2925
2926/* Return the pagecache page at a given address within a VMA */
2927static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2928                        struct vm_area_struct *vma, unsigned long address)
2929{
2930        struct address_space *mapping;
2931        pgoff_t idx;
2932
2933        mapping = vma->vm_file->f_mapping;
2934        idx = vma_hugecache_offset(h, vma, address);
2935
2936        return find_lock_page(mapping, idx);
2937}
2938
2939/*
2940 * Return whether there is a pagecache page backing the given address in the VMA.
2941 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2942 */
2943static bool hugetlbfs_pagecache_present(struct hstate *h,
2944                        struct vm_area_struct *vma, unsigned long address)
2945{
2946        struct address_space *mapping;
2947        pgoff_t idx;
2948        struct page *page;
2949
2950        mapping = vma->vm_file->f_mapping;
2951        idx = vma_hugecache_offset(h, vma, address);
2952
2953        page = find_get_page(mapping, idx);
2954        if (page)
2955                put_page(page);
2956        return page != NULL;
2957}
2958
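/*
 * Handle a fault on a huge page that is not yet mapped: find the page in
 * the page cache or allocate a fresh one (adding it to the page cache for
 * shared mappings, or preparing anon rmap for private ones), then install
 * the new PTE under the page table lock after re-checking i_size and that
 * the PTE is still empty.
 */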
2959static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2960                           struct address_space *mapping, pgoff_t idx,
2961                           unsigned long address, pte_t *ptep, unsigned int flags)
2962{
2963        struct hstate *h = hstate_vma(vma);
2964        int ret = VM_FAULT_SIGBUS;
2965        int anon_rmap = 0;
2966        unsigned long size;
2967        struct page *page;
2968        pte_t new_pte;
2969        spinlock_t *ptl;
2970
2971        /*
2972         * Currently, we are forced to kill the process in the event the
2973         * original mapper has unmapped pages from the child due to a failed
2974         * COW. Warn that such a situation has occurred as it may not be obvious.
2975         */
2976        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2977                pr_warning("PID %d killed due to inadequate hugepage pool\n",
2978                           current->pid);
2979                return ret;
2980        }
2981
2982        /*
2983         * Use page lock to guard against racing truncation
2984         * before we get page_table_lock.
2985         */
2986retry:
2987        page = find_lock_page(mapping, idx);
2988        if (!page) {
2989                size = i_size_read(mapping->host) >> huge_page_shift(h);
2990                if (idx >= size)
2991                        goto out;
2992                page = alloc_huge_page(vma, address, 0);
2993                if (IS_ERR(page)) {
2994                        ret = PTR_ERR(page);
2995                        if (ret == -ENOMEM)
2996                                ret = VM_FAULT_OOM;
2997                        else
2998                                ret = VM_FAULT_SIGBUS;
2999                        goto out;
3000                }
3001                clear_huge_page(page, address, pages_per_huge_page(h));
3002                __SetPageUptodate(page);
3003
3004                if (vma->vm_flags & VM_MAYSHARE) {
3005                        int err;
3006                        struct inode *inode = mapping->host;
3007
3008                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3009                        if (err) {
3010                                put_page(page);
3011                                if (err == -EEXIST)
3012                                        goto retry;
3013                                goto out;
3014                        }
3015                        ClearPagePrivate(page);
3016
3017                        spin_lock(&inode->i_lock);
3018                        inode->i_blocks += blocks_per_huge_page(h);
3019                        spin_unlock(&inode->i_lock);
3020                } else {
3021                        lock_page(page);
3022                        if (unlikely(anon_vma_prepare(vma))) {
3023                                ret = VM_FAULT_OOM;
3024                                goto backout_unlocked;
3025                        }
3026                        anon_rmap = 1;
3027                }
3028        } else {
3029                /*
3030                 * If a memory error occurs between mmap() and fault, some processes
3031                 * don't have a hwpoisoned swap entry for the errored virtual address.
3032                 * So we need to block the hugepage fault by checking the PG_hwpoison bit.
3033                 */
3034                if (unlikely(PageHWPoison(page))) {
3035                        ret = VM_FAULT_HWPOISON |
3036                                VM_FAULT_SET_HINDEX(hstate_index(h));
3037                        goto backout_unlocked;
3038                }
3039        }
3040
3041        /*
3042         * If we are going to COW a private mapping later, we examine the
3043         * pending reservations for this page now. This will ensure that
3044         * any allocations necessary to record that reservation occur outside
3045         * the spinlock.
3046         */
3047        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
3048                if (vma_needs_reservation(h, vma, address) < 0) {
3049                        ret = VM_FAULT_OOM;
3050                        goto backout_unlocked;
3051                }
3052
3053        ptl = huge_pte_lockptr(h, mm, ptep);
3054        spin_lock(ptl);
3055        size = i_size_read(mapping->host) >> huge_page_shift(h);
3056        if (idx >= size)
3057                goto backout;
3058
3059        ret = 0;
3060        if (!huge_pte_none(huge_ptep_get(ptep)))
3061                goto backout;
3062
3063        if (anon_rmap) {
3064                ClearPagePrivate(page);
3065                hugepage_add_new_anon_rmap(page, vma, address);
3066        } else
3067                page_dup_rmap(page);
3068        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3069                                && (vma->vm_flags & VM_SHARED)));
3070        set_huge_pte_at(mm, address, ptep, new_pte);
3071
3072        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3073                /* Optimization, do the COW without a second fault */
3074                ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3075        }
3076
3077        spin_unlock(ptl);
3078        unlock_page(page);
3079out:
3080        return ret;
3081
3082backout:
3083        spin_unlock(ptl);
3084backout_unlocked:
3085        unlock_page(page);
3086        put_page(page);
3087        goto out;
3088}
3089
3090#ifdef CONFIG_SMP
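/*
 * Pick a fault mutex for the faulting page.  Shared mappings hash on
 * (mapping, index) so that all faults on the same file page serialize;
 * private mappings hash on the mm and the hugepage index of the address.
 * The result is masked with num_fault_mutexes - 1, so the table size is
 * expected to be a power of two.
 */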
3091static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3092                            struct vm_area_struct *vma,
3093                            struct address_space *mapping,
3094                            pgoff_t idx, unsigned long address)
3095{
3096        unsigned long key[2];
3097        u32 hash;
3098
3099        if (vma->vm_flags & VM_SHARED) {
3100                key[0] = (unsigned long) mapping;
3101                key[1] = idx;
3102        } else {
3103                key[0] = (unsigned long) mm;
3104                key[1] = address >> huge_page_shift(h);
3105        }
3106
3107        hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3108
3109        return hash & (num_fault_mutexes - 1);
3110}
3111#else
3112/*
3113 * For uniprocessor systems we always use a single mutex, so just
3114 * return 0 and avoid the hashing overhead.
3115 */
3116static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3117                            struct vm_area_struct *vma,
3118                            struct address_space *mapping,
3119                            pgoff_t idx, unsigned long address)
3120{
3121        return 0;
3122}
3123#endif
3124
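/*
 * Main hugetlb fault handler.  Waits for in-progress migration, reports
 * hwpoison, and serializes against concurrent faults on the same logical
 * page via the fault mutex table; missing pages are handled by
 * hugetlb_no_page() and write faults on read-only ptes by hugetlb_cow().
 * Returns 0 or a VM_FAULT_* code.
 */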
3125int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3126                        unsigned long address, unsigned int flags)
3127{
3128        pte_t *ptep, entry;
3129        spinlock_t *ptl;
3130        int ret;
3131        u32 hash;
3132        pgoff_t idx;
3133        struct page *page = NULL;
3134        struct page *pagecache_page = NULL;
3135        struct hstate *h = hstate_vma(vma);
3136        struct address_space *mapping;
3137
3138        address &= huge_page_mask(h);
3139
3140        ptep = huge_pte_offset(mm, address);
3141        if (ptep) {
3142                entry = huge_ptep_get(ptep);
3143                if (unlikely(is_hugetlb_entry_migration(entry))) {
3144                        migration_entry_wait_huge(vma, mm, ptep);
3145                        return 0;
3146                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3147                        return VM_FAULT_HWPOISON_LARGE |
3148                                VM_FAULT_SET_HINDEX(hstate_index(h));
3149        }
3150
3151        ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3152        if (!ptep)
3153                return VM_FAULT_OOM;
3154
3155        mapping = vma->vm_file->f_mapping;
3156        idx = vma_hugecache_offset(h, vma, address);
3157
3158        /*
3159         * Serialize hugepage allocation and instantiation, so that we don't
3160         * get spurious allocation failures if two CPUs race to instantiate
3161         * the same page in the page cache.
3162         */
3163        hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3164        mutex_lock(&htlb_fault_mutex_table[hash]);
3165
3166        entry = huge_ptep_get(ptep);
3167        if (huge_pte_none(entry)) {
3168                ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3169                goto out_mutex;
3170        }
3171
3172        ret = 0;
3173
3174        /*
3175         * If we are going to COW the mapping later, we examine the pending
3176         * reservations for this page now. This will ensure that any
3177         * allocations necessary to record that reservation occur outside the
3178         * spinlock. For private mappings, we also lookup the pagecache
3179         * page now as it is used to determine if a reservation has been
3180         * consumed.
3181         */
3182        if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3183                if (vma_needs_reservation(h, vma, address) < 0) {
3184                        ret = VM_FAULT_OOM;
3185                        goto out_mutex;
3186                }
3187
3188                if (!(vma->vm_flags & VM_MAYSHARE))
3189                        pagecache_page = hugetlbfs_pagecache_page(h,
3190                                                                vma, address);
3191        }
3192
3193        /*
3194         * hugetlb_cow() requires page locks of pte_page(entry) and
3195         * pagecache_page, so here we need to take the former one
3196         * when page != pagecache_page or !pagecache_page.
3197         * Note that the locking order is always pagecache_page -> page,
3198         * so there is no worry about deadlock.
3199         */
3200        page = pte_page(entry);
3201        get_page(page);
3202        if (page != pagecache_page)
3203                lock_page(page);
3204
3205        ptl = huge_pte_lockptr(h, mm, ptep);
3206        spin_lock(ptl);
3207        /* Check for a racing update before calling hugetlb_cow */
3208        if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3209                goto out_ptl;
3210
3211
3212        if (flags & FAULT_FLAG_WRITE) {
3213                if (!huge_pte_write(entry)) {
3214                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
3215                                        pagecache_page, ptl);
3216                        goto out_ptl;
3217                }
3218                entry = huge_pte_mkdirty(entry);
3219        }
3220        entry = pte_mkyoung(entry);
3221        if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3222                                                flags & FAULT_FLAG_WRITE))
3223                update_mmu_cache(vma, address, ptep);
3224
3225out_ptl:
3226        spin_unlock(ptl);
3227
3228        if (pagecache_page) {
3229                unlock_page(pagecache_page);
3230                put_page(pagecache_page);
3231        }
3232        if (page != pagecache_page)
3233                unlock_page(page);
3234        put_page(page);
3235
3236out_mutex:
3237        mutex_unlock(&htlb_fault_mutex_table[hash]);
3238        return ret;
3239}
3240
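/*
 * Core of get_user_pages() for hugetlb VMAs: walk [*position, vma->vm_end)
 * and fill in @pages and/or @vmas, faulting pages in as needed.  On return,
 * *position and *nr_pages describe the unprocessed remainder.  Returns the
 * updated page count @i, or -EFAULT if no pages were processed.
 */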
3241long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3242                         struct page **pages, struct vm_area_struct **vmas,
3243                         unsigned long *position, unsigned long *nr_pages,
3244                         long i, unsigned int flags)
3245{
3246        unsigned long pfn_offset;
3247        unsigned long vaddr = *position;
3248        unsigned long remainder = *nr_pages;
3249        struct hstate *h = hstate_vma(vma);
3250
3251        while (vaddr < vma->vm_end && remainder) {
3252                pte_t *pte;
3253                spinlock_t *ptl = NULL;
3254                int absent;
3255                struct page *page;
3256
3257                /*
3258                 * Some archs (sparc64, sh*) have multiple pte_ts for
3259                 * each hugepage.  We have to make sure we get the
3260                 * first, for the page indexing below to work.
3261                 *
3262                 * Note that page table lock is not held when pte is null.
3263                 */
3264                pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3265                if (pte)
3266                        ptl = huge_pte_lock(h, mm, pte);
3267                absent = !pte || huge_pte_none(huge_ptep_get(pte));
3268
3269                /*
3270                 * When coredumping, it suits get_dump_page if we just return
3271                 * an error where there's an empty slot with no huge pagecache
3272                 * to back it.  This way, we avoid allocating a hugepage, and
3273                 * the sparse dumpfile avoids allocating disk blocks, but its
3274                 * huge holes still show up with zeroes where they need to be.
3275                 */
3276                if (absent && (flags & FOLL_DUMP) &&
3277                    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3278                        if (pte)
3279                                spin_unlock(ptl);
3280                        remainder = 0;
3281                        break;
3282                }
3283
3284                /*
3285                 * We need to call hugetlb_fault for both hugepages under migration
3286                 * (in which case hugetlb_fault waits for the migration) and
3287                 * hwpoisoned hugepages (in which case we need to prevent the
3288                 * caller from accessing them). To do this, we use is_swap_pte
3289                 * here instead of is_hugetlb_entry_migration and
3290                 * is_hugetlb_entry_hwpoisoned, because it simply covers
3291                 * both cases, and because we can't follow correct pages
3292                 * directly from any kind of swap entry.
3293                 */
3294                if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3295                    ((flags & FOLL_WRITE) &&
3296                      !huge_pte_write(huge_ptep_get(pte)))) {
3297                        int ret;
3298
3299                        if (pte)
3300                                spin_unlock(ptl);
3301                        ret = hugetlb_fault(mm, vma, vaddr,
3302                                (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3303                        if (!(ret & VM_FAULT_ERROR))
3304                                continue;
3305
3306                        remainder = 0;
3307                        break;
3308                }
3309
3310                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3311                page = pte_page(huge_ptep_get(pte));
3312same_page:
3313                if (pages) {
3314                        pages[i] = mem_map_offset(page, pfn_offset);
3315                        get_page_foll(pages[i]);
3316                }
3317
3318                if (vmas)
3319                        vmas[i] = vma;
3320
3321                vaddr += PAGE_SIZE;
3322                ++pfn_offset;
3323                --remainder;
3324                ++i;
3325                if (vaddr < vma->vm_end && remainder &&
3326                                pfn_offset < pages_per_huge_page(h)) {
3327                        /*
3328                         * We use pfn_offset to avoid touching the pageframes
3329                         * of this compound page.
3330                         */
3331                        goto same_page;
3332                }
3333                spin_unlock(ptl);
3334        }
3335        *nr_pages = remainder;
3336        *position = vaddr;
3337
3338        return i ? i : -EFAULT;
3339}
3340
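/*
 * Change the protection of all hugepage ptes in [address, end) to @newprot.
 * Shared pmds are unshared rather than modified in place.  Returns the
 * number of base pages whose mapping was effectively changed.
 */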
3341unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3342                unsigned long address, unsigned long end, pgprot_t newprot)
3343{
3344        struct mm_struct *mm = vma->vm_mm;
3345        unsigned long start = address;
3346        pte_t *ptep;
3347        pte_t pte;
3348        struct hstate *h = hstate_vma(vma);
3349        unsigned long pages = 0;
3350
3351        BUG_ON(address >= end);
3352        flush_cache_range(vma, address, end);
3353
3354        mmu_notifier_invalidate_range_start(mm, start, end);
3355        i_mmap_lock_write(vma->vm_file->f_mapping);
3356        for (; address < end; address += huge_page_size(h)) {
3357                spinlock_t *ptl;
3358                ptep = huge_pte_offset(mm, address);
3359                if (!ptep)
3360                        continue;
3361                ptl = huge_pte_lock(h, mm, ptep);
3362                if (huge_pmd_unshare(mm, &address, ptep)) {
3363                        pages++;
3364                        spin_unlock(ptl);
3365                        continue;
3366                }
3367                if (!huge_pte_none(huge_ptep_get(ptep))) {
3368                        pte = huge_ptep_get_and_clear(mm, address, ptep);
3369                        pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3370                        pte = arch_make_huge_pte(pte, vma, NULL, 0);
3371                        set_huge_pte_at(mm, address, ptep, pte);
3372                        pages++;
3373                }
3374                spin_unlock(ptl);
3375        }
3376        /*
3377         * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3378         * may have cleared our pud entry and done put_page on the page table:
3379         * once we release i_mmap_rwsem, another task can do the final put_page
3380         * and that page table be reused and filled with junk.
3381         */
3382        flush_tlb_range(vma, start, end);
3383        mmu_notifier_invalidate_range(mm, start, end);
3384        i_mmap_unlock_write(vma->vm_file->f_mapping);
3385        mmu_notifier_invalidate_range_end(mm, start, end);
3386
3387        return pages << h->order;
3388}
3389
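/*
 * Reserve hugepages for the file range [from, to) at mapping setup time.
 * Shared mappings charge only the pages not already covered by the inode's
 * region map; private mappings reserve the whole range and attach a fresh
 * resv_map to the VMA.  Returns 0 on success or a negative errno.
 */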
3390int hugetlb_reserve_pages(struct inode *inode,
3391                                        long from, long to,
3392                                        struct vm_area_struct *vma,
3393                                        vm_flags_t vm_flags)
3394{
3395        long ret, chg;
3396        struct hstate *h = hstate_inode(inode);
3397        struct hugepage_subpool *spool = subpool_inode(inode);
3398        struct resv_map *resv_map;
3399
3400        /*
3401         * Only apply hugepage reservation if asked. At fault time, an
3402         * attempt will be made for VM_NORESERVE to allocate a page
3403         * without using reserves
3404         */
3405        if (vm_flags & VM_NORESERVE)
3406                return 0;
3407
3408        /*
3409         * Shared mappings base their reservation on the number of pages that
3410         * are already allocated on behalf of the file. Private mappings need
3411         * to reserve the full area even if read-only as mprotect() may be
3412         * called to make the mapping read-write. Assume !vma is a shm mapping
3413         */
3414        if (!vma || vma->vm_flags & VM_MAYSHARE) {
3415                resv_map = inode_resv_map(inode);
3416
3417                chg = region_chg(resv_map, from, to);
3418
3419        } else {
3420                resv_map = resv_map_alloc();
3421                if (!resv_map)
3422                        return -ENOMEM;
3423
3424                chg = to - from;
3425
3426                set_vma_resv_map(vma, resv_map);
3427                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3428        }
3429
3430        if (chg < 0) {
3431                ret = chg;
3432                goto out_err;
3433        }
3434
3435        /* There must be enough pages in the subpool for the mapping */
3436        if (hugepage_subpool_get_pages(spool, chg)) {
3437                ret = -ENOSPC;
3438                goto out_err;
3439        }
3440
3441        /*
3442         * Check that enough hugepages are available for the reservation.
3443         * Hand the pages back to the subpool if there are not.
3444         */
3445        ret = hugetlb_acct_memory(h, chg);
3446        if (ret < 0) {
3447                hugepage_subpool_put_pages(spool, chg);
3448                goto out_err;
3449        }
3450
3451        /*
3452         * Account for the reservations made. Shared mappings record regions
3453         * that have reservations as they are shared by multiple VMAs.
3454         * When the last VMA disappears, the region map says how much
3455         * the reservation was and the page cache tells how much of
3456         * the reservation was consumed. Private mappings are per-VMA and
3457         * only the consumed reservations are tracked. When the VMA
3458         * disappears, the original reservation is the VMA size and the
3459         * consumed reservations are stored in the map. Hence, nothing
3460         * else has to be done for private mappings here
3461         */
3462        if (!vma || vma->vm_flags & VM_MAYSHARE)
3463                region_add(resv_map, from, to);
3464        return 0;
3465out_err:
3466        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3467                kref_put(&resv_map->refs, resv_map_release);
3468        return ret;
3469}
3470
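/*
 * Called on truncation of a hugetlbfs inode: drop reservations from
 * @offset onward in the region map, adjust i_blocks by the @freed pages,
 * and return the unused reservation to the subpool and the global pool.
 */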
3471void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3472{
3473        struct hstate *h = hstate_inode(inode);
3474        struct resv_map *resv_map = inode_resv_map(inode);
3475        long chg = 0;
3476        struct hugepage_subpool *spool = subpool_inode(inode);
3477
3478        if (resv_map)
3479                chg = region_truncate(resv_map, offset);
3480        spin_lock(&inode->i_lock);
3481        inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3482        spin_unlock(&inode->i_lock);
3483
3484        hugepage_subpool_put_pages(spool, (chg - freed));
3485        hugetlb_acct_memory(h, -(chg - freed));
3486}
3487
3488#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
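/*
 * For pmd sharing: return the address in @svma that covers the same
 * PUD-sized region as @addr does in @vma, or 0 if the two mappings are not
 * compatible (mismatched offsets or flags, or the candidate VMA does not
 * span the whole PUD range).
 */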
3489static unsigned long page_table_shareable(struct vm_area_struct *svma,
3490                                struct vm_area_struct *vma,
3491                                unsigned long addr, pgoff_t idx)
3492{
3493        unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3494                                svma->vm_start;
3495        unsigned long sbase = saddr & PUD_MASK;
3496        unsigned long s_end = sbase + PUD_SIZE;
3497
3498        /* Allow segments to share if only one is marked locked */
3499        unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3500        unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3501
3502        /*
3503         * Match the virtual addresses, permissions and the alignment of the
3504         * page table page.
3505         */
3506        if (pmd_index(addr) != pmd_index(saddr) ||
3507            vm_flags != svm_flags ||
3508            sbase < svma->vm_start || svma->vm_end < s_end)
3509                return 0;
3510
3511        return saddr;
3512}
3513
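/*
 * A pmd page table can only be shared if the VMA is shareable (VM_MAYSHARE)
 * and spans the entire PUD_SIZE-aligned region containing @addr.
 */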
3514static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3515{
3516        unsigned long base = addr & PUD_MASK;
3517        unsigned long end = base + PUD_SIZE;
3518
3519        /*
3520         * Check for proper vm_flags and page table alignment.
3521         */
3522        if (vma->vm_flags & VM_MAYSHARE &&
3523            vma->vm_start <= base && end <= vma->vm_end)
3524                return 1;
3525        return 0;
3526}
3527
3528/*
3529 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3530 * and returns the corresponding pte. While this is not necessary for the
3531 * !shared pmd case because we can allocate the pmd later as well, it makes the
3532 * code much cleaner. pmd allocation is essential for the shared case because
3533 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3534 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3535 * bad pmd for sharing.
3536 */
3537pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3538{
3539        struct vm_area_struct *vma = find_vma(mm, addr);
3540        struct address_space *mapping = vma->vm_file->f_mapping;
3541        pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3542                        vma->vm_pgoff;
3543        struct vm_area_struct *svma;
3544        unsigned long saddr;
3545        pte_t *spte = NULL;
3546        pte_t *pte;
3547        spinlock_t *ptl;
3548
3549        if (!vma_shareable(vma, addr))
3550                return (pte_t *)pmd_alloc(mm, pud, addr);
3551
3552        i_mmap_lock_write(mapping);
3553        vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3554                if (svma == vma)
3555                        continue;
3556
3557                saddr = page_table_shareable(svma, vma, addr, idx);
3558                if (saddr) {
3559                        spte = huge_pte_offset(svma->vm_mm, saddr);
3560                        if (spte) {
3561                                get_page(virt_to_page(spte));
3562                                break;
3563                        }
3564                }
3565        }
3566
3567        if (!spte)
3568                goto out;
3569
3570        ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3571        spin_lock(ptl);
3572        if (pud_none(*pud))
3573                pud_populate(mm, pud,
3574                                (pmd_t *)((unsigned long)spte & PAGE_MASK));
3575        else
3576                put_page(virt_to_page(spte));
3577        spin_unlock(ptl);
3578out:
3579        pte = (pte_t *)pmd_alloc(mm, pud, addr);
3580        i_mmap_unlock_write(mapping);
3581        return pte;
3582}
3583
3584/*
3585 * Unmap a huge page backed by a shared pte.
3586 *
3587 * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
3588 * shared, as indicated by page_count > 1, unmap is achieved by clearing the pud and
3589 * decrementing the ref count. If count == 1, the pte page is not shared.
3590 *
3591 * called with page table lock held.
3592 *
3593 * returns: 1 successfully unmapped a shared pte page
3594 *          0 the underlying pte page is not shared, or it is the last user
3595 */
3596int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3597{
3598        pgd_t *pgd = pgd_offset(mm, *addr);
3599        pud_t *pud = pud_offset(pgd, *addr);
3600
3601        BUG_ON(page_count(virt_to_page(ptep)) == 0);
3602        if (page_count(virt_to_page(ptep)) == 1)
3603                return 0;
3604
3605        pud_clear(pud);
3606        put_page(virt_to_page(ptep));
3607        *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3608        return 1;
3609}
3610#define want_pmd_share()        (1)
3611#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3612pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3613{
3614        return NULL;
3615}
3616#define want_pmd_share()        (0)
3617#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3618
3619#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
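/*
 * Allocate the page table entry used to map a hugepage at @addr: the pud
 * itself for PUD_SIZE pages, otherwise a pmd, shared with other mappings
 * of the same file when pmd sharing is possible.
 */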
3620pte_t *huge_pte_alloc(struct mm_struct *mm,
3621                        unsigned long addr, unsigned long sz)
3622{
3623        pgd_t *pgd;
3624        pud_t *pud;
3625        pte_t *pte = NULL;
3626
3627        pgd = pgd_offset(mm, addr);
3628        pud = pud_alloc(mm, pgd, addr);
3629        if (pud) {
3630                if (sz == PUD_SIZE) {
3631                        pte = (pte_t *)pud;
3632                } else {
3633                        BUG_ON(sz != PMD_SIZE);
3634                        if (want_pmd_share() && pud_none(*pud))
3635                                pte = huge_pmd_share(mm, addr, pud);
3636                        else
3637                                pte = (pte_t *)pmd_alloc(mm, pud, addr);
3638                }
3639        }
3640        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3641
3642        return pte;
3643}
3644
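/*
 * Walk the page tables for @addr without allocating.  Returns the pud for
 * a PUD-sized hugepage, otherwise the pmd slot, or NULL if the pgd/pud
 * level is not present.
 */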
3645pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3646{
3647        pgd_t *pgd;
3648        pud_t *pud;
3649        pmd_t *pmd = NULL;
3650
3651        pgd = pgd_offset(mm, addr);
3652        if (pgd_present(*pgd)) {
3653                pud = pud_offset(pgd, addr);
3654                if (pud_present(*pud)) {
3655                        if (pud_huge(*pud))
3656                                return (pte_t *)pud;
3657                        pmd = pmd_offset(pud, addr);
3658                }
3659        }
3660        return (pte_t *) pmd;
3661}
3662
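/*
 * Return the subpage of a pmd- or pud-mapped hugepage that corresponds to
 * @address; used when following hugepage page table entries.
 */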
3663struct page *
3664follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3665                pmd_t *pmd, int write)
3666{
3667        struct page *page;
3668
3669        page = pte_page(*(pte_t *)pmd);
3670        if (page)
3671                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
3672        return page;
3673}
3674
3675struct page *
3676follow_huge_pud(struct mm_struct *mm, unsigned long address,
3677                pud_t *pud, int write)
3678{
3679        struct page *page;
3680
3681        page = pte_page(*(pte_t *)pud);
3682        if (page)
3683                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
3684        return page;
3685}
3686
3687#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3688
3689/* Can be overridden by architectures */
3690struct page * __weak
3691follow_huge_pud(struct mm_struct *mm, unsigned long address,
3692               pud_t *pud, int write)
3693{
3694        BUG();
3695        return NULL;
3696}
3697
3698#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3699
3700#ifdef CONFIG_MEMORY_FAILURE
3701
3702/* Should be called with hugetlb_lock held */
3703static int is_hugepage_on_freelist(struct page *hpage)
3704{
3705        struct page *page;
3706        struct page *tmp;
3707        struct hstate *h = page_hstate(hpage);
3708        int nid = page_to_nid(hpage);
3709
3710        list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3711                if (page == hpage)
3712                        return 1;
3713        return 0;
3714}
3715
3716/*
3717 * This function is called from the memory-failure code.
3718 * Assume the caller holds the page lock of the head page.
3719 */
3720int dequeue_hwpoisoned_huge_page(struct page *hpage)
3721{
3722        struct hstate *h = page_hstate(hpage);
3723        int nid = page_to_nid(hpage);
3724        int ret = -EBUSY;
3725
3726        spin_lock(&hugetlb_lock);
3727        if (is_hugepage_on_freelist(hpage)) {
3728                /*
3729                 * A hwpoisoned hugepage isn't linked to the activelist or freelist,
3730                 * but dangling hpage->lru can trigger list-debug warnings
3731                 * (this happens when we call unpoison_memory() on it),
3732                 * so let it point to itself with list_del_init().
3733                 */
3734                list_del_init(&hpage->lru);
3735                set_page_refcounted(hpage);
3736                h->free_huge_pages--;
3737                h->free_huge_pages_node[nid]--;
3738                ret = 0;
3739        }
3740        spin_unlock(&hugetlb_lock);
3741        return ret;
3742}
3743#endif
3744
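/*
 * Isolate a hugepage for migration: take a reference and move it from the
 * hstate's active list onto the caller's @list.  Returns false if the page
 * is already being freed (refcount was zero).
 */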
3745bool isolate_huge_page(struct page *page, struct list_head *list)
3746{
3747        VM_BUG_ON_PAGE(!PageHead(page), page);
3748        if (!get_page_unless_zero(page))
3749                return false;
3750        spin_lock(&hugetlb_lock);
3751        list_move_tail(&page->lru, list);
3752        spin_unlock(&hugetlb_lock);
3753        return true;
3754}
3755
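/*
 * Undo isolate_huge_page(): move the page back onto its hstate's active
 * list and drop the reference taken at isolation time.
 */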
3756void putback_active_hugepage(struct page *page)
3757{
3758        VM_BUG_ON_PAGE(!PageHead(page), page);
3759        spin_lock(&hugetlb_lock);
3760        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3761        spin_unlock(&hugetlb_lock);
3762        put_page(page);
3763}
3764
3765bool is_hugepage_active(struct page *page)
3766{
3767        VM_BUG_ON_PAGE(!PageHuge(page), page);
3768        /*
3769         * This function can be called for a tail page because the caller,
3770         * scan_movable_pages, scans through a given pfn-range which typically
3771         * covers one memory block. In systems using gigantic hugepages (1GB
3772         * on x86_64), a hugepage is larger than a memory block, and we don't
3773         * support migrating such large hugepages for now, so return false
3774         * when called for tail pages.
3775         */
3776        if (PageTail(page))
3777                return false;
3778        /*
3779         * The refcount of hwpoisoned hugepages is 1, but they are not active,
3780         * so we should return false for them.
3781         */
3782        if (unlikely(PageHWPoison(page)))
3783                return false;
3784        return page_count(page) > 0;
3785}
3786