linux/mm/hugetlb.c
   1/*
   2 * Generic hugetlb support.
   3 * (C) Nadia Yvette Chambers, April 2004
   4 */
   5#include <linux/list.h>
   6#include <linux/init.h>
   7#include <linux/mm.h>
   8#include <linux/seq_file.h>
   9#include <linux/sysctl.h>
  10#include <linux/highmem.h>
  11#include <linux/mmu_notifier.h>
  12#include <linux/nodemask.h>
  13#include <linux/pagemap.h>
  14#include <linux/mempolicy.h>
  15#include <linux/compiler.h>
  16#include <linux/cpuset.h>
  17#include <linux/mutex.h>
  18#include <linux/bootmem.h>
  19#include <linux/sysfs.h>
  20#include <linux/slab.h>
  21#include <linux/rmap.h>
  22#include <linux/swap.h>
  23#include <linux/swapops.h>
  24#include <linux/page-isolation.h>
  25#include <linux/jhash.h>
  26
  27#include <asm/page.h>
  28#include <asm/pgtable.h>
  29#include <asm/tlb.h>
  30
  31#include <linux/io.h>
  32#include <linux/hugetlb.h>
  33#include <linux/hugetlb_cgroup.h>
  34#include <linux/node.h>
  35#include "internal.h"
  36
  37int hugepages_treat_as_movable;
  38
  39int hugetlb_max_hstate __read_mostly;
  40unsigned int default_hstate_idx;
  41struct hstate hstates[HUGE_MAX_HSTATE];
  42/*
  43 * Minimum page order among possible hugepage sizes, set to a proper value
  44 * at boot time.
  45 */
  46static unsigned int minimum_order __read_mostly = UINT_MAX;
  47
  48__initdata LIST_HEAD(huge_boot_pages);
  49
  50/* for command line parsing */
  51static struct hstate * __initdata parsed_hstate;
  52static unsigned long __initdata default_hstate_max_huge_pages;
  53static unsigned long __initdata default_hstate_size;
  54static bool __initdata parsed_valid_hugepagesz = true;
  55
  56/*
  57 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
  58 * free_huge_pages, and surplus_huge_pages.
  59 */
  60DEFINE_SPINLOCK(hugetlb_lock);
  61
  62/*
  63 * Serializes faults on the same logical page.  This is used to
  64 * prevent spurious OOMs when the hugepage pool is fully utilized.
  65 */
  66static int num_fault_mutexes;
  67struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
  68
  69/* Forward declaration */
  70static int hugetlb_acct_memory(struct hstate *h, long delta);
  71
  72static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
  73{
  74        bool free = (spool->count == 0) && (spool->used_hpages == 0);
  75
  76        spin_unlock(&spool->lock);
  77
  78        /* If no pages are used, and no other handles to the subpool
   79         * remain, give up any reservations based on minimum size and
  80         * free the subpool */
  81        if (free) {
  82                if (spool->min_hpages != -1)
  83                        hugetlb_acct_memory(spool->hstate,
  84                                                -spool->min_hpages);
  85                kfree(spool);
  86        }
  87}
  88
  89struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
  90                                                long min_hpages)
  91{
  92        struct hugepage_subpool *spool;
  93
  94        spool = kzalloc(sizeof(*spool), GFP_KERNEL);
  95        if (!spool)
  96                return NULL;
  97
  98        spin_lock_init(&spool->lock);
  99        spool->count = 1;
 100        spool->max_hpages = max_hpages;
 101        spool->hstate = h;
 102        spool->min_hpages = min_hpages;
 103
 104        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
 105                kfree(spool);
 106                return NULL;
 107        }
 108        spool->rsv_hpages = min_hpages;
 109
 110        return spool;
 111}
 112
 113void hugepage_put_subpool(struct hugepage_subpool *spool)
 114{
 115        spin_lock(&spool->lock);
 116        BUG_ON(!spool->count);
 117        spool->count--;
 118        unlock_or_release_subpool(spool);
 119}
 120
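/*
 * Illustrative usage sketch (added for clarity; not part of the original
 * source): a subpool is created with one reference and released with
 * hugepage_put_subpool().  The final put frees it via
 * unlock_or_release_subpool() once no pages are in use, returning any
 * minimum-size reservation to the global pool.
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */
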
 121/*
 122 * Subpool accounting for allocating and reserving pages.
  123 * Return -ENOMEM if there are not enough resources to satisfy
  124 * the request.  Otherwise, return the number of pages by which the
 125 * global pools must be adjusted (upward).  The returned value may
 126 * only be different than the passed value (delta) in the case where
  127 * a subpool minimum size must be maintained.
 128 */
 129static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
 130                                      long delta)
 131{
 132        long ret = delta;
 133
 134        if (!spool)
 135                return ret;
 136
 137        spin_lock(&spool->lock);
 138
 139        if (spool->max_hpages != -1) {          /* maximum size accounting */
 140                if ((spool->used_hpages + delta) <= spool->max_hpages)
 141                        spool->used_hpages += delta;
 142                else {
 143                        ret = -ENOMEM;
 144                        goto unlock_ret;
 145                }
 146        }
 147
 148        /* minimum size accounting */
 149        if (spool->min_hpages != -1 && spool->rsv_hpages) {
 150                if (delta > spool->rsv_hpages) {
 151                        /*
 152                         * Asking for more reserves than those already taken on
 153                         * behalf of subpool.  Return difference.
 154                         */
 155                        ret = delta - spool->rsv_hpages;
 156                        spool->rsv_hpages = 0;
 157                } else {
 158                        ret = 0;        /* reserves already accounted for */
 159                        spool->rsv_hpages -= delta;
 160                }
 161        }
 162
 163unlock_ret:
 164        spin_unlock(&spool->lock);
 165        return ret;
 166}
 167
 168/*
 169 * Subpool accounting for freeing and unreserving pages.
 170 * Return the number of global page reservations that must be dropped.
 171 * The return value may only be different than the passed value (delta)
 172 * in the case where a subpool minimum size must be maintained.
 173 */
 174static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
 175                                       long delta)
 176{
 177        long ret = delta;
 178
 179        if (!spool)
 180                return delta;
 181
 182        spin_lock(&spool->lock);
 183
 184        if (spool->max_hpages != -1)            /* maximum size accounting */
 185                spool->used_hpages -= delta;
 186
  187        /* minimum size accounting */
 188        if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
 189                if (spool->rsv_hpages + delta <= spool->min_hpages)
 190                        ret = 0;
 191                else
 192                        ret = spool->rsv_hpages + delta - spool->min_hpages;
 193
 194                spool->rsv_hpages += delta;
 195                if (spool->rsv_hpages > spool->min_hpages)
 196                        spool->rsv_hpages = spool->min_hpages;
 197        }
 198
 199        /*
 200         * If hugetlbfs_put_super couldn't free spool due to an outstanding
 201         * quota reference, free it now.
 202         */
 203        unlock_or_release_subpool(spool);
 204
 205        return ret;
 206}
 207
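/*
 * Worked example (illustrative only): consider a subpool with
 * min_hpages == 4 and rsv_hpages == 4, i.e. four huge pages are already
 * reserved globally on the subpool's behalf (and assume any max_hpages
 * limit is not exceeded):
 *
 *	hugepage_subpool_get_pages(spool, 2) returns 0
 *		(covered entirely by the reserve; rsv_hpages becomes 2)
 *	hugepage_subpool_get_pages(spool, 6) returns 4
 *		(2 pages come from the reserve, 4 must be charged globally;
 *		 rsv_hpages becomes 0)
 *
 * hugepage_subpool_put_pages() performs the inverse adjustment, refilling
 * the reserve up to min_hpages before any global reservations are dropped.
 */
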
 208static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
 209{
 210        return HUGETLBFS_SB(inode->i_sb)->spool;
 211}
 212
 213static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
 214{
 215        return subpool_inode(file_inode(vma->vm_file));
 216}
 217
 218/*
 219 * Region tracking -- allows tracking of reservations and instantiated pages
 220 *                    across the pages in a mapping.
 221 *
 222 * The region data structures are embedded into a resv_map and protected
 223 * by a resv_map's lock.  The set of regions within the resv_map represent
 224 * reservations for huge pages, or huge pages that have already been
 225 * instantiated within the map.  The from and to elements are huge page
  226 * indices into the associated mapping.  from indicates the starting index
  227 * of the region.  to represents the first index past the end of the region.
 228 *
 229 * For example, a file region structure with from == 0 and to == 4 represents
 230 * four huge pages in a mapping.  It is important to note that the to element
 231 * represents the first element past the end of the region. This is used in
 232 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 233 *
 234 * Interval notation of the form [from, to) will be used to indicate that
 235 * the endpoint from is inclusive and to is exclusive.
 236 */
 237struct file_region {
 238        struct list_head link;
 239        long from;
 240        long to;
 241};
 242
 243/*
 244 * Add the huge page range represented by [f, t) to the reserve
 245 * map.  In the normal case, existing regions will be expanded
 246 * to accommodate the specified range.  Sufficient regions should
 247 * exist for expansion due to the previous call to region_chg
 248 * with the same range.  However, it is possible that region_del
  249 * could have been called after region_chg and modified the map
 250 * in such a way that no region exists to be expanded.  In this
 251 * case, pull a region descriptor from the cache associated with
 252 * the map and use that for the new range.
 253 *
 254 * Return the number of new huge pages added to the map.  This
 255 * number is greater than or equal to zero.
 256 */
 257static long region_add(struct resv_map *resv, long f, long t)
 258{
 259        struct list_head *head = &resv->regions;
 260        struct file_region *rg, *nrg, *trg;
 261        long add = 0;
 262
 263        spin_lock(&resv->lock);
 264        /* Locate the region we are either in or before. */
 265        list_for_each_entry(rg, head, link)
 266                if (f <= rg->to)
 267                        break;
 268
 269        /*
 270         * If no region exists which can be expanded to include the
 271         * specified range, the list must have been modified by an
  272 * interleaving call to region_del().  Pull a region descriptor
 273         * from the cache and use it for this range.
 274         */
 275        if (&rg->link == head || t < rg->from) {
 276                VM_BUG_ON(resv->region_cache_count <= 0);
 277
 278                resv->region_cache_count--;
 279                nrg = list_first_entry(&resv->region_cache, struct file_region,
 280                                        link);
 281                list_del(&nrg->link);
 282
 283                nrg->from = f;
 284                nrg->to = t;
 285                list_add(&nrg->link, rg->link.prev);
 286
 287                add += t - f;
 288                goto out_locked;
 289        }
 290
 291        /* Round our left edge to the current segment if it encloses us. */
 292        if (f > rg->from)
 293                f = rg->from;
 294
 295        /* Check for and consume any regions we now overlap with. */
 296        nrg = rg;
 297        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
 298                if (&rg->link == head)
 299                        break;
 300                if (rg->from > t)
 301                        break;
 302
  303                /* If this area reaches higher, extend our area to
 304                 * include it completely.  If this is not the first area
 305                 * which we intend to reuse, free it. */
 306                if (rg->to > t)
 307                        t = rg->to;
 308                if (rg != nrg) {
 309                        /* Decrement return value by the deleted range.
  310                         * Another range will span this area so that by
  311                         * the end of the routine, add will be >= zero.
 312                         */
 313                        add -= (rg->to - rg->from);
 314                        list_del(&rg->link);
 315                        kfree(rg);
 316                }
 317        }
 318
 319        add += (nrg->from - f);         /* Added to beginning of region */
 320        nrg->from = f;
 321        add += t - nrg->to;             /* Added to end of region */
 322        nrg->to = t;
 323
 324out_locked:
 325        resv->adds_in_progress--;
 326        spin_unlock(&resv->lock);
 327        VM_BUG_ON(add < 0);
 328        return add;
 329}
 330
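/*
 * Worked example (illustrative only): if the reserve map holds the regions
 * [0, 4) and [5, 8) and region_add() is called with [2, 6), the two regions
 * are coalesced into a single region [0, 8) and the return value is 1,
 * since only page index 4 was not previously represented (7 pages were
 * covered before the call, 8 after).
 */
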
 331/*
 332 * Examine the existing reserve map and determine how many
 333 * huge pages in the specified range [f, t) are NOT currently
 334 * represented.  This routine is called before a subsequent
 335 * call to region_add that will actually modify the reserve
 336 * map to add the specified range [f, t).  region_chg does
 337 * not change the number of huge pages represented by the
 338 * map.  However, if the existing regions in the map can not
 339 * be expanded to represent the new range, a new file_region
 340 * structure is added to the map as a placeholder.  This is
 341 * so that the subsequent region_add call will have all the
 342 * regions it needs and will not fail.
 343 *
 344 * Upon entry, region_chg will also examine the cache of region descriptors
 345 * associated with the map.  If there are not enough descriptors cached, one
 346 * will be allocated for the in progress add operation.
 347 *
 348 * Returns the number of huge pages that need to be added to the existing
 349 * reservation map for the range [f, t).  This number is greater or equal to
 350 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 351 * is needed and can not be allocated.
 352 */
 353static long region_chg(struct resv_map *resv, long f, long t)
 354{
 355        struct list_head *head = &resv->regions;
 356        struct file_region *rg, *nrg = NULL;
 357        long chg = 0;
 358
 359retry:
 360        spin_lock(&resv->lock);
 361retry_locked:
 362        resv->adds_in_progress++;
 363
 364        /*
 365         * Check for sufficient descriptors in the cache to accommodate
 366         * the number of in progress add operations.
 367         */
 368        if (resv->adds_in_progress > resv->region_cache_count) {
 369                struct file_region *trg;
 370
 371                VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
 372                /* Must drop lock to allocate a new descriptor. */
 373                resv->adds_in_progress--;
 374                spin_unlock(&resv->lock);
 375
 376                trg = kmalloc(sizeof(*trg), GFP_KERNEL);
 377                if (!trg) {
 378                        kfree(nrg);
 379                        return -ENOMEM;
 380                }
 381
 382                spin_lock(&resv->lock);
 383                list_add(&trg->link, &resv->region_cache);
 384                resv->region_cache_count++;
 385                goto retry_locked;
 386        }
 387
 388        /* Locate the region we are before or in. */
 389        list_for_each_entry(rg, head, link)
 390                if (f <= rg->to)
 391                        break;
 392
 393        /* If we are below the current region then a new region is required.
  394         * Subtle: allocate a new region at the position but make it zero
 395         * size such that we can guarantee to record the reservation. */
 396        if (&rg->link == head || t < rg->from) {
 397                if (!nrg) {
 398                        resv->adds_in_progress--;
 399                        spin_unlock(&resv->lock);
 400                        nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
 401                        if (!nrg)
 402                                return -ENOMEM;
 403
 404                        nrg->from = f;
 405                        nrg->to   = f;
 406                        INIT_LIST_HEAD(&nrg->link);
 407                        goto retry;
 408                }
 409
 410                list_add(&nrg->link, rg->link.prev);
 411                chg = t - f;
 412                goto out_nrg;
 413        }
 414
 415        /* Round our left edge to the current segment if it encloses us. */
 416        if (f > rg->from)
 417                f = rg->from;
 418        chg = t - f;
 419
 420        /* Check for and consume any regions we now overlap with. */
 421        list_for_each_entry(rg, rg->link.prev, link) {
 422                if (&rg->link == head)
 423                        break;
 424                if (rg->from > t)
 425                        goto out;
 426
 427                /* We overlap with this area, if it extends further than
 428                 * us then we must extend ourselves.  Account for its
 429                 * existing reservation. */
 430                if (rg->to > t) {
 431                        chg += rg->to - t;
 432                        t = rg->to;
 433                }
 434                chg -= rg->to - rg->from;
 435        }
 436
 437out:
 438        spin_unlock(&resv->lock);
 439        /*  We already know we raced and no longer need the new region */
 440        kfree(nrg);
 441        return chg;
 442out_nrg:
 443        spin_unlock(&resv->lock);
 444        return chg;
 445}
 446
 447/*
 448 * Abort the in progress add operation.  The adds_in_progress field
 449 * of the resv_map keeps track of the operations in progress between
 450 * calls to region_chg and region_add.  Operations are sometimes
 451 * aborted after the call to region_chg.  In such cases, region_abort
 452 * is called to decrement the adds_in_progress counter.
 453 *
 454 * NOTE: The range arguments [f, t) are not needed or used in this
 455 * routine.  They are kept to make reading the calling code easier as
 456 * arguments will match the associated region_chg call.
 457 */
 458static void region_abort(struct resv_map *resv, long f, long t)
 459{
 460        spin_lock(&resv->lock);
 461        VM_BUG_ON(!resv->region_cache_count);
 462        resv->adds_in_progress--;
 463        spin_unlock(&resv->lock);
 464}
 465
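/*
 * Typical calling protocol (sketch added for clarity): reservations are
 * made in two steps.  region_chg() computes how many pages are missing and
 * guarantees a descriptor will be available; the caller then either commits
 * with region_add() or backs out with region_abort(), using the same range:
 *
 *	chg = region_chg(resv, f, t);
 *	if (chg < 0)
 *		return chg;
 *	... charge counters/quota by chg ...
 *	if (charging failed)
 *		region_abort(resv, f, t);
 *	else
 *		region_add(resv, f, t);
 */
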
 466/*
 467 * Delete the specified range [f, t) from the reserve map.  If the
 468 * t parameter is LONG_MAX, this indicates that ALL regions after f
 469 * should be deleted.  Locate the regions which intersect [f, t)
 470 * and either trim, delete or split the existing regions.
 471 *
 472 * Returns the number of huge pages deleted from the reserve map.
 473 * In the normal case, the return value is zero or more.  In the
 474 * case where a region must be split, a new region descriptor must
 475 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 476 * NOTE: If the parameter t == LONG_MAX, then we will never split
 477 * a region and possibly return -ENOMEM.  Callers specifying
 478 * t == LONG_MAX do not need to check for -ENOMEM error.
 479 */
 480static long region_del(struct resv_map *resv, long f, long t)
 481{
 482        struct list_head *head = &resv->regions;
 483        struct file_region *rg, *trg;
 484        struct file_region *nrg = NULL;
 485        long del = 0;
 486
 487retry:
 488        spin_lock(&resv->lock);
 489        list_for_each_entry_safe(rg, trg, head, link) {
 490                /*
 491                 * Skip regions before the range to be deleted.  file_region
 492                 * ranges are normally of the form [from, to).  However, there
 493                 * may be a "placeholder" entry in the map which is of the form
 494                 * (from, to) with from == to.  Check for placeholder entries
 495                 * at the beginning of the range to be deleted.
 496                 */
 497                if (rg->to <= f && (rg->to != rg->from || rg->to != f))
 498                        continue;
 499
 500                if (rg->from >= t)
 501                        break;
 502
 503                if (f > rg->from && t < rg->to) { /* Must split region */
 504                        /*
 505                         * Check for an entry in the cache before dropping
 506                         * lock and attempting allocation.
 507                         */
 508                        if (!nrg &&
 509                            resv->region_cache_count > resv->adds_in_progress) {
 510                                nrg = list_first_entry(&resv->region_cache,
 511                                                        struct file_region,
 512                                                        link);
 513                                list_del(&nrg->link);
 514                                resv->region_cache_count--;
 515                        }
 516
 517                        if (!nrg) {
 518                                spin_unlock(&resv->lock);
 519                                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
 520                                if (!nrg)
 521                                        return -ENOMEM;
 522                                goto retry;
 523                        }
 524
 525                        del += t - f;
 526
 527                        /* New entry for end of split region */
 528                        nrg->from = t;
 529                        nrg->to = rg->to;
 530                        INIT_LIST_HEAD(&nrg->link);
 531
 532                        /* Original entry is trimmed */
 533                        rg->to = f;
 534
 535                        list_add(&nrg->link, &rg->link);
 536                        nrg = NULL;
 537                        break;
 538                }
 539
 540                if (f <= rg->from && t >= rg->to) { /* Remove entire region */
 541                        del += rg->to - rg->from;
 542                        list_del(&rg->link);
 543                        kfree(rg);
 544                        continue;
 545                }
 546
 547                if (f <= rg->from) {    /* Trim beginning of region */
 548                        del += t - rg->from;
 549                        rg->from = t;
 550                } else {                /* Trim end of region */
 551                        del += rg->to - f;
 552                        rg->to = f;
 553                }
 554        }
 555
 556        spin_unlock(&resv->lock);
 557        kfree(nrg);
 558        return del;
 559}
 560
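/*
 * Worked example (illustrative only): if the map contains the single region
 * [0, 8) and region_del() is called with [2, 6), the region is split into
 * [0, 2) and [6, 8) and the return value is 4.  The split consumes one
 * extra descriptor, taken from the region cache or allocated with the lock
 * dropped.
 */
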
 561/*
 562 * A rare out of memory error was encountered which prevented removal of
  563 * the reserve map region for a page.  The huge page itself was freed
 564 * and removed from the page cache.  This routine will adjust the subpool
 565 * usage count, and the global reserve count if needed.  By incrementing
 566 * these counts, the reserve map entry which could not be deleted will
 567 * appear as a "reserved" entry instead of simply dangling with incorrect
 568 * counts.
 569 */
 570void hugetlb_fix_reserve_counts(struct inode *inode)
 571{
 572        struct hugepage_subpool *spool = subpool_inode(inode);
 573        long rsv_adjust;
 574
 575        rsv_adjust = hugepage_subpool_get_pages(spool, 1);
 576        if (rsv_adjust) {
 577                struct hstate *h = hstate_inode(inode);
 578
 579                hugetlb_acct_memory(h, 1);
 580        }
 581}
 582
 583/*
 584 * Count and return the number of huge pages in the reserve map
 585 * that intersect with the range [f, t).
 586 */
 587static long region_count(struct resv_map *resv, long f, long t)
 588{
 589        struct list_head *head = &resv->regions;
 590        struct file_region *rg;
 591        long chg = 0;
 592
 593        spin_lock(&resv->lock);
 594        /* Locate each segment we overlap with, and count that overlap. */
 595        list_for_each_entry(rg, head, link) {
 596                long seg_from;
 597                long seg_to;
 598
 599                if (rg->to <= f)
 600                        continue;
 601                if (rg->from >= t)
 602                        break;
 603
 604                seg_from = max(rg->from, f);
 605                seg_to = min(rg->to, t);
 606
 607                chg += seg_to - seg_from;
 608        }
 609        spin_unlock(&resv->lock);
 610
 611        return chg;
 612}
 613
 614/*
 615 * Convert the address within this vma to the page offset within
 616 * the mapping, in pagecache page units; huge pages here.
 617 */
 618static pgoff_t vma_hugecache_offset(struct hstate *h,
 619                        struct vm_area_struct *vma, unsigned long address)
 620{
 621        return ((address - vma->vm_start) >> huge_page_shift(h)) +
 622                        (vma->vm_pgoff >> huge_page_order(h));
 623}
 624
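/*
 * Worked example (illustrative; assumes 2MB huge pages on a 4KB base page
 * size, i.e. huge_page_shift == 21 and huge_page_order == 9): for a VMA
 * with vm_start == 0x40000000 and vm_pgoff == 512 (one huge page into the
 * file), address 0x40400000 yields ((0x400000) >> 21) + (512 >> 9) =
 * 2 + 1 = 3, i.e. pagecache index 3 in huge page units.
 */
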
 625pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
 626                                     unsigned long address)
 627{
 628        return vma_hugecache_offset(hstate_vma(vma), vma, address);
 629}
 630EXPORT_SYMBOL_GPL(linear_hugepage_index);
 631
 632/*
  633 * Return the size of the pages allocated when backing a VMA. In the majority
  634 * of cases this will be the same size as used by the page table entries.
 635 */
 636unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 637{
 638        struct hstate *hstate;
 639
 640        if (!is_vm_hugetlb_page(vma))
 641                return PAGE_SIZE;
 642
 643        hstate = hstate_vma(vma);
 644
 645        return 1UL << huge_page_shift(hstate);
 646}
 647EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 648
 649/*
 650 * Return the page size being used by the MMU to back a VMA. In the majority
 651 * of cases, the page size used by the kernel matches the MMU size. On
 652 * architectures where it differs, an architecture-specific version of this
 653 * function is required.
 654 */
 655#ifndef vma_mmu_pagesize
 656unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 657{
 658        return vma_kernel_pagesize(vma);
 659}
 660#endif
 661
 662/*
 663 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 664 * bits of the reservation map pointer, which are always clear due to
 665 * alignment.
 666 */
 667#define HPAGE_RESV_OWNER    (1UL << 0)
 668#define HPAGE_RESV_UNMAPPED (1UL << 1)
 669#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
 670
 671/*
 672 * These helpers are used to track how many pages are reserved for
 673 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
  674 * is guaranteed to have its future faults succeed.
 675 *
 676 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 677 * the reserve counters are updated with the hugetlb_lock held. It is safe
 678 * to reset the VMA at fork() time as it is not in use yet and there is no
 679 * chance of the global counters getting corrupted as a result of the values.
 680 *
 681 * The private mapping reservation is represented in a subtly different
 682 * manner to a shared mapping.  A shared mapping has a region map associated
  683 * with the underlying file; this region map represents the backing file
  684 * pages which have ever had a reservation assigned, and this persists even
  685 * after the page is instantiated.  A private mapping has a region map
  686 * associated with the original mmap which is attached to all VMAs which
  687 * reference it; this region map represents those offsets which have consumed
  688 * a reservation, i.e. where pages have been instantiated.
 689 */
 690static unsigned long get_vma_private_data(struct vm_area_struct *vma)
 691{
 692        return (unsigned long)vma->vm_private_data;
 693}
 694
 695static void set_vma_private_data(struct vm_area_struct *vma,
 696                                                        unsigned long value)
 697{
 698        vma->vm_private_data = (void *)value;
 699}
 700
 701struct resv_map *resv_map_alloc(void)
 702{
 703        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
 704        struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
 705
 706        if (!resv_map || !rg) {
 707                kfree(resv_map);
 708                kfree(rg);
 709                return NULL;
 710        }
 711
 712        kref_init(&resv_map->refs);
 713        spin_lock_init(&resv_map->lock);
 714        INIT_LIST_HEAD(&resv_map->regions);
 715
 716        resv_map->adds_in_progress = 0;
 717
 718        INIT_LIST_HEAD(&resv_map->region_cache);
 719        list_add(&rg->link, &resv_map->region_cache);
 720        resv_map->region_cache_count = 1;
 721
 722        return resv_map;
 723}
 724
 725void resv_map_release(struct kref *ref)
 726{
 727        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 728        struct list_head *head = &resv_map->region_cache;
 729        struct file_region *rg, *trg;
 730
 731        /* Clear out any active regions before we release the map. */
 732        region_del(resv_map, 0, LONG_MAX);
 733
 734        /* ... and any entries left in the cache */
 735        list_for_each_entry_safe(rg, trg, head, link) {
 736                list_del(&rg->link);
 737                kfree(rg);
 738        }
 739
 740        VM_BUG_ON(resv_map->adds_in_progress);
 741
 742        kfree(resv_map);
 743}
 744
 745static inline struct resv_map *inode_resv_map(struct inode *inode)
 746{
 747        return inode->i_mapping->private_data;
 748}
 749
 750static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 751{
 752        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 753        if (vma->vm_flags & VM_MAYSHARE) {
 754                struct address_space *mapping = vma->vm_file->f_mapping;
 755                struct inode *inode = mapping->host;
 756
 757                return inode_resv_map(inode);
 758
 759        } else {
 760                return (struct resv_map *)(get_vma_private_data(vma) &
 761                                                        ~HPAGE_RESV_MASK);
 762        }
 763}
 764
 765static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 766{
 767        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 768        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 769
 770        set_vma_private_data(vma, (get_vma_private_data(vma) &
 771                                HPAGE_RESV_MASK) | (unsigned long)map);
 772}
 773
 774static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 775{
 776        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 777        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 778
 779        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 780}
 781
 782static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 783{
 784        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 785
 786        return (get_vma_private_data(vma) & flag) != 0;
 787}
 788
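/*
 * Illustrative sketch (added for clarity; not part of the original source):
 * for a private mapping, vm_private_data packs the resv_map pointer and the
 * HPAGE_RESV_* flags into one word.  The pointer's alignment keeps the low
 * bits clear, so the helpers above can mask them apart:
 *
 *	set_vma_resv_map(vma, resv_map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *	map = vma_resv_map(vma);		(flags masked off)
 *	owner = is_vma_resv_set(vma, HPAGE_RESV_OWNER);
 */
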
 789/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 790void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 791{
 792        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 793        if (!(vma->vm_flags & VM_MAYSHARE))
 794                vma->vm_private_data = (void *)0;
 795}
 796
 797/* Returns true if the VMA has associated reserve pages */
 798static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
 799{
 800        if (vma->vm_flags & VM_NORESERVE) {
 801                /*
  802                 * This address is already reserved by another process
  803                 * (chg == 0), so we should decrement the reserved count.
  804                 * Without decrementing, the reserve count remains after
  805                 * releasing the inode, because the allocated page will go
  806                 * into the page cache and be regarded as coming from the
  807                 * reserved pool when it is released.  Currently, we don't
  808                 * have any better solution, so add this work-around here.
 809                 */
 810                if (vma->vm_flags & VM_MAYSHARE && chg == 0)
 811                        return true;
 812                else
 813                        return false;
 814        }
 815
 816        /* Shared mappings always use reserves */
 817        if (vma->vm_flags & VM_MAYSHARE) {
 818                /*
 819                 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
 820                 * be a region map for all pages.  The only situation where
 821                 * there is no region map is if a hole was punched via
  822                 * fallocate.  In this case, there really are no reserves to
 823                 * use.  This situation is indicated if chg != 0.
 824                 */
 825                if (chg)
 826                        return false;
 827                else
 828                        return true;
 829        }
 830
 831        /*
 832         * Only the process that called mmap() has reserves for
 833         * private mappings.
 834         */
 835        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
 836                /*
 837                 * Like the shared case above, a hole punch or truncate
 838                 * could have been performed on the private mapping.
 839                 * Examine the value of chg to determine if reserves
 840                 * actually exist or were previously consumed.
 841                 * Very Subtle - The value of chg comes from a previous
 842                 * call to vma_needs_reserves().  The reserve map for
 843                 * private mappings has different (opposite) semantics
 844                 * than that of shared mappings.  vma_needs_reserves()
 845                 * has already taken this difference in semantics into
 846                 * account.  Therefore, the meaning of chg is the same
 847                 * as in the shared case above.  Code could easily be
 848                 * combined, but keeping it separate draws attention to
 849                 * subtle differences.
 850                 */
 851                if (chg)
 852                        return false;
 853                else
 854                        return true;
 855        }
 856
 857        return false;
 858}
 859
 860static void enqueue_huge_page(struct hstate *h, struct page *page)
 861{
 862        int nid = page_to_nid(page);
 863        list_move(&page->lru, &h->hugepage_freelists[nid]);
 864        h->free_huge_pages++;
 865        h->free_huge_pages_node[nid]++;
 866}
 867
 868static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 869{
 870        struct page *page;
 871
 872        list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
 873                if (!is_migrate_isolate_page(page))
 874                        break;
 875        /*
  876         * If a non-isolated free hugepage is not found on the list,
  877         * the allocation fails.
 878         */
 879        if (&h->hugepage_freelists[nid] == &page->lru)
 880                return NULL;
 881        list_move(&page->lru, &h->hugepage_activelist);
 882        set_page_refcounted(page);
 883        h->free_huge_pages--;
 884        h->free_huge_pages_node[nid]--;
 885        return page;
 886}
 887
 888/* Movability of hugepages depends on migration support. */
 889static inline gfp_t htlb_alloc_mask(struct hstate *h)
 890{
 891        if (hugepages_treat_as_movable || hugepage_migration_supported(h))
 892                return GFP_HIGHUSER_MOVABLE;
 893        else
 894                return GFP_HIGHUSER;
 895}
 896
 897static struct page *dequeue_huge_page_vma(struct hstate *h,
 898                                struct vm_area_struct *vma,
 899                                unsigned long address, int avoid_reserve,
 900                                long chg)
 901{
 902        struct page *page = NULL;
 903        struct mempolicy *mpol;
 904        nodemask_t *nodemask;
 905        struct zonelist *zonelist;
 906        struct zone *zone;
 907        struct zoneref *z;
 908        unsigned int cpuset_mems_cookie;
 909
 910        /*
  911         * A child process with MAP_PRIVATE mappings created by its parent
  912         * has no page reserves. This check ensures that reservations are
  913         * not "stolen". The child may still get SIGKILLed.
 914         */
 915        if (!vma_has_reserves(vma, chg) &&
 916                        h->free_huge_pages - h->resv_huge_pages == 0)
 917                goto err;
 918
 919        /* If reserves cannot be used, ensure enough pages are in the pool */
 920        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
 921                goto err;
 922
 923retry_cpuset:
 924        cpuset_mems_cookie = read_mems_allowed_begin();
 925        zonelist = huge_zonelist(vma, address,
 926                                        htlb_alloc_mask(h), &mpol, &nodemask);
 927
 928        for_each_zone_zonelist_nodemask(zone, z, zonelist,
 929                                                MAX_NR_ZONES - 1, nodemask) {
 930                if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
 931                        page = dequeue_huge_page_node(h, zone_to_nid(zone));
 932                        if (page) {
 933                                if (avoid_reserve)
 934                                        break;
 935                                if (!vma_has_reserves(vma, chg))
 936                                        break;
 937
 938                                SetPagePrivate(page);
 939                                h->resv_huge_pages--;
 940                                break;
 941                        }
 942                }
 943        }
 944
 945        mpol_cond_put(mpol);
 946        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 947                goto retry_cpuset;
 948        return page;
 949
 950err:
 951        return NULL;
 952}
 953
 954/*
 955 * common helper functions for hstate_next_node_to_{alloc|free}.
 956 * We may have allocated or freed a huge page based on a different
 957 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 958 * be outside of *nodes_allowed.  Ensure that we use an allowed
 959 * node for alloc or free.
 960 */
 961static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 962{
 963        nid = next_node_in(nid, *nodes_allowed);
 964        VM_BUG_ON(nid >= MAX_NUMNODES);
 965
 966        return nid;
 967}
 968
 969static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
 970{
 971        if (!node_isset(nid, *nodes_allowed))
 972                nid = next_node_allowed(nid, nodes_allowed);
 973        return nid;
 974}
 975
 976/*
 977 * returns the previously saved node ["this node"] from which to
 978 * allocate a persistent huge page for the pool and advance the
 979 * next node from which to allocate, handling wrap at end of node
 980 * mask.
 981 */
 982static int hstate_next_node_to_alloc(struct hstate *h,
 983                                        nodemask_t *nodes_allowed)
 984{
 985        int nid;
 986
 987        VM_BUG_ON(!nodes_allowed);
 988
 989        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
 990        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
 991
 992        return nid;
 993}
 994
 995/*
 996 * helper for free_pool_huge_page() - return the previously saved
 997 * node ["this node"] from which to free a huge page.  Advance the
 998 * next node id whether or not we find a free huge page to free so
 999 * that the next attempt to free addresses the next node.
1000 */
1001static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1002{
1003        int nid;
1004
1005        VM_BUG_ON(!nodes_allowed);
1006
1007        nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1008        h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1009
1010        return nid;
1011}
1012
1013#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1014        for (nr_nodes = nodes_weight(*mask);                            \
1015                nr_nodes > 0 &&                                         \
1016                ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1017                nr_nodes--)
1018
1019#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1020        for (nr_nodes = nodes_weight(*mask);                            \
1021                nr_nodes > 0 &&                                         \
1022                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1023                nr_nodes--)
1024
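/*
 * Usage sketch (added for clarity): these macros implement a round-robin
 * walk over the allowed nodes.  For instance, alloc_fresh_huge_page() below
 * does
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page_node(h, node);
 *		if (page)
 *			break;
 *	}
 *
 * so that successive pool allocations are spread across nodes, starting at
 * h->next_nid_to_alloc and wrapping at the end of the node mask.
 */
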
1025#if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && \
1026        ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
1027        defined(CONFIG_CMA))
1028static void destroy_compound_gigantic_page(struct page *page,
1029                                        unsigned int order)
1030{
1031        int i;
1032        int nr_pages = 1 << order;
1033        struct page *p = page + 1;
1034
1035        atomic_set(compound_mapcount_ptr(page), 0);
1036        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1037                clear_compound_head(p);
1038                set_page_refcounted(p);
1039        }
1040
1041        set_compound_order(page, 0);
1042        __ClearPageHead(page);
1043}
1044
1045static void free_gigantic_page(struct page *page, unsigned int order)
1046{
1047        free_contig_range(page_to_pfn(page), 1 << order);
1048}
1049
1050static int __alloc_gigantic_page(unsigned long start_pfn,
1051                                unsigned long nr_pages)
1052{
1053        unsigned long end_pfn = start_pfn + nr_pages;
1054        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1055}
1056
1057static bool pfn_range_valid_gigantic(struct zone *z,
1058                        unsigned long start_pfn, unsigned long nr_pages)
1059{
1060        unsigned long i, end_pfn = start_pfn + nr_pages;
1061        struct page *page;
1062
1063        for (i = start_pfn; i < end_pfn; i++) {
1064                if (!pfn_valid(i))
1065                        return false;
1066
1067                page = pfn_to_page(i);
1068
1069                if (page_zone(page) != z)
1070                        return false;
1071
1072                if (PageReserved(page))
1073                        return false;
1074
1075                if (page_count(page) > 0)
1076                        return false;
1077
1078                if (PageHuge(page))
1079                        return false;
1080        }
1081
1082        return true;
1083}
1084
1085static bool zone_spans_last_pfn(const struct zone *zone,
1086                        unsigned long start_pfn, unsigned long nr_pages)
1087{
1088        unsigned long last_pfn = start_pfn + nr_pages - 1;
1089        return zone_spans_pfn(zone, last_pfn);
1090}
1091
1092static struct page *alloc_gigantic_page(int nid, unsigned int order)
1093{
1094        unsigned long nr_pages = 1 << order;
1095        unsigned long ret, pfn, flags;
1096        struct zone *z;
1097
1098        z = NODE_DATA(nid)->node_zones;
1099        for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1100                spin_lock_irqsave(&z->lock, flags);
1101
1102                pfn = ALIGN(z->zone_start_pfn, nr_pages);
1103                while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1104                        if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
1105                                /*
1106                                 * We release the zone lock here because
1107                                 * alloc_contig_range() will also lock the zone
1108                                 * at some point. If there's an allocation
1109                                 * spinning on this lock, it may win the race
1110                                 * and cause alloc_contig_range() to fail...
1111                                 */
1112                                spin_unlock_irqrestore(&z->lock, flags);
1113                                ret = __alloc_gigantic_page(pfn, nr_pages);
1114                                if (!ret)
1115                                        return pfn_to_page(pfn);
1116                                spin_lock_irqsave(&z->lock, flags);
1117                        }
1118                        pfn += nr_pages;
1119                }
1120
1121                spin_unlock_irqrestore(&z->lock, flags);
1122        }
1123
1124        return NULL;
1125}
1126
1127static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1128static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1129
1130static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1131{
1132        struct page *page;
1133
1134        page = alloc_gigantic_page(nid, huge_page_order(h));
1135        if (page) {
1136                prep_compound_gigantic_page(page, huge_page_order(h));
1137                prep_new_huge_page(h, page, nid);
1138        }
1139
1140        return page;
1141}
1142
1143static int alloc_fresh_gigantic_page(struct hstate *h,
1144                                nodemask_t *nodes_allowed)
1145{
1146        struct page *page = NULL;
1147        int nr_nodes, node;
1148
1149        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1150                page = alloc_fresh_gigantic_page_node(h, node);
1151                if (page)
1152                        return 1;
1153        }
1154
1155        return 0;
1156}
1157
1158static inline bool gigantic_page_supported(void) { return true; }
1159#else
1160static inline bool gigantic_page_supported(void) { return false; }
1161static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1162static inline void destroy_compound_gigantic_page(struct page *page,
1163                                                unsigned int order) { }
1164static inline int alloc_fresh_gigantic_page(struct hstate *h,
1165                                        nodemask_t *nodes_allowed) { return 0; }
1166#endif
1167
1168static void update_and_free_page(struct hstate *h, struct page *page)
1169{
1170        int i;
1171
1172        if (hstate_is_gigantic(h) && !gigantic_page_supported())
1173                return;
1174
1175        h->nr_huge_pages--;
1176        h->nr_huge_pages_node[page_to_nid(page)]--;
1177        for (i = 0; i < pages_per_huge_page(h); i++) {
1178                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1179                                1 << PG_referenced | 1 << PG_dirty |
1180                                1 << PG_active | 1 << PG_private |
1181                                1 << PG_writeback);
1182        }
1183        VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1184        set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1185        set_page_refcounted(page);
1186        if (hstate_is_gigantic(h)) {
1187                destroy_compound_gigantic_page(page, huge_page_order(h));
1188                free_gigantic_page(page, huge_page_order(h));
1189        } else {
1190                __free_pages(page, huge_page_order(h));
1191        }
1192}
1193
1194struct hstate *size_to_hstate(unsigned long size)
1195{
1196        struct hstate *h;
1197
1198        for_each_hstate(h) {
1199                if (huge_page_size(h) == size)
1200                        return h;
1201        }
1202        return NULL;
1203}
1204
1205/*
1206 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 1207 * to hstate->hugepage_activelist).
1208 *
1209 * This function can be called for tail pages, but never returns true for them.
1210 */
1211bool page_huge_active(struct page *page)
1212{
1213        VM_BUG_ON_PAGE(!PageHuge(page), page);
1214        return PageHead(page) && PagePrivate(&page[1]);
1215}
1216
1217/* never called for tail page */
1218static void set_page_huge_active(struct page *page)
1219{
1220        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1221        SetPagePrivate(&page[1]);
1222}
1223
1224static void clear_page_huge_active(struct page *page)
1225{
1226        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1227        ClearPagePrivate(&page[1]);
1228}
1229
1230void free_huge_page(struct page *page)
1231{
1232        /*
1233         * Can't pass hstate in here because it is called from the
1234         * compound page destructor.
1235         */
1236        struct hstate *h = page_hstate(page);
1237        int nid = page_to_nid(page);
1238        struct hugepage_subpool *spool =
1239                (struct hugepage_subpool *)page_private(page);
1240        bool restore_reserve;
1241
1242        set_page_private(page, 0);
1243        page->mapping = NULL;
1244        VM_BUG_ON_PAGE(page_count(page), page);
1245        VM_BUG_ON_PAGE(page_mapcount(page), page);
1246        restore_reserve = PagePrivate(page);
1247        ClearPagePrivate(page);
1248
1249        /*
1250         * A return code of zero implies that the subpool will be under its
1251         * minimum size if the reservation is not restored after page is free.
1252         * Therefore, force restore_reserve operation.
1253         */
1254        if (hugepage_subpool_put_pages(spool, 1) == 0)
1255                restore_reserve = true;
1256
1257        spin_lock(&hugetlb_lock);
1258        clear_page_huge_active(page);
1259        hugetlb_cgroup_uncharge_page(hstate_index(h),
1260                                     pages_per_huge_page(h), page);
1261        if (restore_reserve)
1262                h->resv_huge_pages++;
1263
1264        if (h->surplus_huge_pages_node[nid]) {
1265                /* remove the page from active list */
1266                list_del(&page->lru);
1267                update_and_free_page(h, page);
1268                h->surplus_huge_pages--;
1269                h->surplus_huge_pages_node[nid]--;
1270        } else {
1271                arch_clear_hugepage_flags(page);
1272                enqueue_huge_page(h, page);
1273        }
1274        spin_unlock(&hugetlb_lock);
1275}
1276
1277static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1278{
1279        INIT_LIST_HEAD(&page->lru);
1280        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1281        spin_lock(&hugetlb_lock);
1282        set_hugetlb_cgroup(page, NULL);
1283        h->nr_huge_pages++;
1284        h->nr_huge_pages_node[nid]++;
1285        spin_unlock(&hugetlb_lock);
1286        put_page(page); /* free it into the hugepage allocator */
1287}
1288
1289static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1290{
1291        int i;
1292        int nr_pages = 1 << order;
1293        struct page *p = page + 1;
1294
1295        /* we rely on prep_new_huge_page to set the destructor */
1296        set_compound_order(page, order);
1297        __ClearPageReserved(page);
1298        __SetPageHead(page);
1299        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1300                /*
1301                 * For gigantic hugepages allocated through bootmem at
1302                 * boot, it's safer to be consistent with the not-gigantic
1303                 * hugepages and clear the PG_reserved bit from all tail pages
 1304                 * too.  Otherwise drivers using get_user_pages() to access tail
1305                 * pages may get the reference counting wrong if they see
1306                 * PG_reserved set on a tail page (despite the head page not
1307                 * having PG_reserved set).  Enforcing this consistency between
1308                 * head and tail pages allows drivers to optimize away a check
 1309                 * on the head page when they need to know if put_page() is needed
1310                 * after get_user_pages().
1311                 */
1312                __ClearPageReserved(p);
1313                set_page_count(p, 0);
1314                set_compound_head(p, page);
1315        }
1316        atomic_set(compound_mapcount_ptr(page), -1);
1317}
1318
1319/*
1320 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1321 * transparent huge pages.  See the PageTransHuge() documentation for more
1322 * details.
1323 */
1324int PageHuge(struct page *page)
1325{
1326        if (!PageCompound(page))
1327                return 0;
1328
1329        page = compound_head(page);
1330        return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1331}
1332EXPORT_SYMBOL_GPL(PageHuge);
1333
1334/*
1335 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1336 * normal or transparent huge pages.
1337 */
1338int PageHeadHuge(struct page *page_head)
1339{
1340        if (!PageHead(page_head))
1341                return 0;
1342
1343        return get_compound_page_dtor(page_head) == free_huge_page;
1344}
1345
1346pgoff_t __basepage_index(struct page *page)
1347{
1348        struct page *page_head = compound_head(page);
1349        pgoff_t index = page_index(page_head);
1350        unsigned long compound_idx;
1351
1352        if (!PageHuge(page_head))
1353                return page_index(page);
1354
1355        if (compound_order(page_head) >= MAX_ORDER)
1356                compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1357        else
1358                compound_idx = page - page_head;
1359
1360        return (index << compound_order(page_head)) + compound_idx;
1361}
1362
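/*
 * Worked example (illustrative; assumes 2MB huge pages with a 4KB base page
 * size, i.e. compound_order == 9): if the head page sits at huge page index
 * 3 in the file and the caller passes the tail page 100 base pages past the
 * head, the returned base page index is (3 << 9) + 100 = 1636.
 */
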
1363static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1364{
1365        struct page *page;
1366
1367        page = __alloc_pages_node(nid,
1368                htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1369                                                __GFP_REPEAT|__GFP_NOWARN,
1370                huge_page_order(h));
1371        if (page) {
1372                prep_new_huge_page(h, page, nid);
1373        }
1374
1375        return page;
1376}
1377
1378static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1379{
1380        struct page *page;
1381        int nr_nodes, node;
1382        int ret = 0;
1383
1384        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1385                page = alloc_fresh_huge_page_node(h, node);
1386                if (page) {
1387                        ret = 1;
1388                        break;
1389                }
1390        }
1391
1392        if (ret)
1393                count_vm_event(HTLB_BUDDY_PGALLOC);
1394        else
1395                count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1396
1397        return ret;
1398}
1399
1400/*
 1401 * Free a huge page from the pool, taking it from the next node to free.
1402 * Attempt to keep persistent huge pages more or less
1403 * balanced over allowed nodes.
1404 * Called with hugetlb_lock locked.
1405 */
1406static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1407                                                         bool acct_surplus)
1408{
1409        int nr_nodes, node;
1410        int ret = 0;
1411
1412        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1413                /*
1414                 * If we're returning unused surplus pages, only examine
1415                 * nodes with surplus pages.
1416                 */
1417                if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1418                    !list_empty(&h->hugepage_freelists[node])) {
1419                        struct page *page =
1420                                list_entry(h->hugepage_freelists[node].next,
1421                                          struct page, lru);
1422                        list_del(&page->lru);
1423                        h->free_huge_pages--;
1424                        h->free_huge_pages_node[node]--;
1425                        if (acct_surplus) {
1426                                h->surplus_huge_pages--;
1427                                h->surplus_huge_pages_node[node]--;
1428                        }
1429                        update_and_free_page(h, page);
1430                        ret = 1;
1431                        break;
1432                }
1433        }
1434
1435        return ret;
1436}
1437
1438/*
1439 * Dissolve a given free hugepage into free buddy pages. This function does
1440 * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1441 * number of free hugepages would be reduced below the number of reserved
1442 * hugepages.
1443 */
1444static int dissolve_free_huge_page(struct page *page)
1445{
1446        int rc = 0;
1447
1448        spin_lock(&hugetlb_lock);
1449        if (PageHuge(page) && !page_count(page)) {
1450                struct page *head = compound_head(page);
1451                struct hstate *h = page_hstate(head);
1452                int nid = page_to_nid(head);
1453                if (h->free_huge_pages - h->resv_huge_pages == 0) {
1454                        rc = -EBUSY;
1455                        goto out;
1456                }
1457                list_del(&head->lru);
1458                h->free_huge_pages--;
1459                h->free_huge_pages_node[nid]--;
1460                h->max_huge_pages--;
1461                update_and_free_page(h, head);
1462        }
1463out:
1464        spin_unlock(&hugetlb_lock);
1465        return rc;
1466}
1467
1468/*
1469 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1470 * make specified memory blocks removable from the system.
1471 * Note that this will dissolve a free gigantic hugepage completely, if any
1472 * part of it lies within the given range.
1473 * Also note that if dissolve_free_huge_page() returns with an error, all
1474 * free hugepages that were dissolved before that error are lost.
1475 */
1476int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1477{
1478        unsigned long pfn;
1479        struct page *page;
1480        int rc = 0;
1481
1482        if (!hugepages_supported())
1483                return rc;
1484
1485        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1486                page = pfn_to_page(pfn);
1487                if (PageHuge(page) && !page_count(page)) {
1488                        rc = dissolve_free_huge_page(page);
1489                        if (rc)
1490                                break;
1491                }
1492        }
1493
1494        return rc;
1495}
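/*
 * Illustrative sketch (not an actual call site in this file): a memory
 * offline path would typically invoke the helper above over the pfn range
 * of the block being removed, e.g.
 *
 *	ret = dissolve_free_huge_pages(start_pfn, start_pfn + nr_pages);
 *
 * and abort the offline if ret is -EBUSY, meaning a free hugepage could
 * not be dissolved without dipping below the reserved count.
 */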
1496
1497/*
1498 * There are 3 ways this can get called:
1499 * 1. With vma+addr: we use the VMA's memory policy
1500 * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1501 *    page from any node, and let the buddy allocator itself figure
1502 *    it out.
1503 * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1504 *    strictly from 'nid'
1505 */
1506static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1507                struct vm_area_struct *vma, unsigned long addr, int nid)
1508{
1509        int order = huge_page_order(h);
1510        gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1511        unsigned int cpuset_mems_cookie;
1512
1513        /*
1514         * We need a VMA to get a memory policy.  If we do not
1515         * have one, we use the 'nid' argument.
1516         *
1517         * The mempolicy stuff below has some non-inlined bits
1518         * and calls ->vm_ops.  That makes it hard to optimize at
1519         * compile-time, even when NUMA is off and it does
1520         * nothing.  This helps the compiler optimize it out.
1521         */
1522        if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1523                /*
1524                 * If a specific node is requested, make sure to
1525                 * get memory from there, but only when a node
1526                 * is explicitly specified.
1527                 */
1528                if (nid != NUMA_NO_NODE)
1529                        gfp |= __GFP_THISNODE;
1530                /*
1531                 * Make sure to call something that can handle
1532                 * nid=NUMA_NO_NODE
1533                 */
1534                return alloc_pages_node(nid, gfp, order);
1535        }
1536
1537        /*
1538         * OK, so we have a VMA.  Fetch the mempolicy and try to
1539         * allocate a huge page with it.  We will only reach this
1540         * when CONFIG_NUMA=y.
1541         */
1542        do {
1543                struct page *page;
1544                struct mempolicy *mpol;
1545                struct zonelist *zl;
1546                nodemask_t *nodemask;
1547
1548                cpuset_mems_cookie = read_mems_allowed_begin();
1549                zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1550                mpol_cond_put(mpol);
1551                page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1552                if (page)
1553                        return page;
1554        } while (read_mems_allowed_retry(cpuset_mems_cookie));
1555
1556        return NULL;
1557}
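/*
 * For illustration only, the three calling modes described above would
 * look roughly like this at hypothetical call sites:
 *
 *	__hugetlb_alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);	(1) VMA policy
 *	__hugetlb_alloc_buddy_huge_page(h, NULL, -1, NUMA_NO_NODE);	(2) any node
 *	__hugetlb_alloc_buddy_huge_page(h, NULL, -1, nid);		(3) only 'nid'
 */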
1558
1559/*
1560 * There are two ways to allocate a huge page:
1561 * 1. When you have a VMA and an address (like a fault)
1562 * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1563 *
1564 * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1565 * this case which signifies that the allocation should be done with
1566 * respect for the VMA's memory policy.
1567 *
1568 * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1569 * implies that memory policies will not be taken into account.
1570 */
1571static struct page *__alloc_buddy_huge_page(struct hstate *h,
1572                struct vm_area_struct *vma, unsigned long addr, int nid)
1573{
1574        struct page *page;
1575        unsigned int r_nid;
1576
1577        if (hstate_is_gigantic(h))
1578                return NULL;
1579
1580        /*
1581         * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1582         * This makes sure the caller is picking _one_ of the modes with which
1583         * we can call this function, not both.
1584         */
1585        if (vma || (addr != -1)) {
1586                VM_WARN_ON_ONCE(addr == -1);
1587                VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1588        }
1589        /*
1590         * Assume we will successfully allocate the surplus page to
1591         * prevent racing processes from causing the surplus to exceed
1592         * overcommit
1593         *
1594         * This however introduces a different race, where a process B
1595         * tries to grow the static hugepage pool while alloc_pages() is
1596         * called by process A. B will only examine the per-node
1597         * counters in determining if surplus huge pages can be
1598         * converted to normal huge pages in adjust_pool_surplus(). A
1599         * won't be able to increment the per-node counter, until the
1600         * lock is dropped by B, but B doesn't drop hugetlb_lock until
1601         * no more huge pages can be converted from surplus to normal
1602         * state (and doesn't try to convert again). Thus, we have a
1603         * case where a surplus huge page exists, the pool is grown, and
1604         * the surplus huge page still exists after, even though it
1605         * should just have been converted to a normal huge page. This
1606         * does not leak memory, though, as the hugepage will be freed
1607         * once it is out of use. It also does not allow the counters to
1608         * go out of whack in adjust_pool_surplus() as we don't modify
1609         * the node values until we've gotten the hugepage and only the
1610         * per-node value is checked there.
1611         */
1612        spin_lock(&hugetlb_lock);
1613        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1614                spin_unlock(&hugetlb_lock);
1615                return NULL;
1616        } else {
1617                h->nr_huge_pages++;
1618                h->surplus_huge_pages++;
1619        }
1620        spin_unlock(&hugetlb_lock);
1621
1622        page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1623
1624        spin_lock(&hugetlb_lock);
1625        if (page) {
1626                INIT_LIST_HEAD(&page->lru);
1627                r_nid = page_to_nid(page);
1628                set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1629                set_hugetlb_cgroup(page, NULL);
1630                /*
1631                 * We incremented the global counters already
1632                 */
1633                h->nr_huge_pages_node[r_nid]++;
1634                h->surplus_huge_pages_node[r_nid]++;
1635                __count_vm_event(HTLB_BUDDY_PGALLOC);
1636        } else {
1637                h->nr_huge_pages--;
1638                h->surplus_huge_pages--;
1639                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1640        }
1641        spin_unlock(&hugetlb_lock);
1642
1643        return page;
1644}
1645
1646/*
1647 * Allocate a huge page from 'nid'.  Note, 'nid' may be
1648 * NUMA_NO_NODE, which means that it may be allocated
1649 * anywhere.
1650 */
1651static
1652struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1653{
1654        unsigned long addr = -1;
1655
1656        return __alloc_buddy_huge_page(h, NULL, addr, nid);
1657}
1658
1659/*
1660 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1661 */
1662static
1663struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1664                struct vm_area_struct *vma, unsigned long addr)
1665{
1666        return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1667}
1668
1669/*
1670 * This allocation function is useful in the context where vma is irrelevant.
1671 * E.g. soft-offlining uses this function because it only cares about the
1672 * physical address of the error page.
1673 */
1674struct page *alloc_huge_page_node(struct hstate *h, int nid)
1675{
1676        struct page *page = NULL;
1677
1678        spin_lock(&hugetlb_lock);
1679        if (h->free_huge_pages - h->resv_huge_pages > 0)
1680                page = dequeue_huge_page_node(h, nid);
1681        spin_unlock(&hugetlb_lock);
1682
1683        if (!page)
1684                page = __alloc_buddy_huge_page_no_mpol(h, nid);
1685
1686        return page;
1687}
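/*
 * Hedged usage sketch: a soft-offline style caller that only knows the
 * physical page could allocate a migration target with something like
 *
 *	new = alloc_huge_page_node(page_hstate(compound_head(old)), nid);
 *
 * where 'old' is the erroring page and 'nid' is its node, or NUMA_NO_NODE
 * to let the allocator pick any node.
 */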
1688
1689/*
1690 * Increase the hugetlb pool such that it can accommodate a reservation
1691 * of size 'delta'.
1692 */
1693static int gather_surplus_pages(struct hstate *h, int delta)
1694{
1695        struct list_head surplus_list;
1696        struct page *page, *tmp;
1697        int ret, i;
1698        int needed, allocated;
1699        bool alloc_ok = true;
1700
1701        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1702        if (needed <= 0) {
1703                h->resv_huge_pages += delta;
1704                return 0;
1705        }
1706
1707        allocated = 0;
1708        INIT_LIST_HEAD(&surplus_list);
1709
1710        ret = -ENOMEM;
1711retry:
1712        spin_unlock(&hugetlb_lock);
1713        for (i = 0; i < needed; i++) {
1714                page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1715                if (!page) {
1716                        alloc_ok = false;
1717                        break;
1718                }
1719                list_add(&page->lru, &surplus_list);
1720        }
1721        allocated += i;
1722
1723        /*
1724         * After retaking hugetlb_lock, we need to recalculate 'needed'
1725         * because either resv_huge_pages or free_huge_pages may have changed.
1726         */
1727        spin_lock(&hugetlb_lock);
1728        needed = (h->resv_huge_pages + delta) -
1729                        (h->free_huge_pages + allocated);
1730        if (needed > 0) {
1731                if (alloc_ok)
1732                        goto retry;
1733                /*
1734                 * We were not able to allocate enough pages to
1735                 * satisfy the entire reservation so we free what
1736                 * we've allocated so far.
1737                 */
1738                goto free;
1739        }
1740        /*
1741         * The surplus_list now contains _at_least_ the number of extra pages
1742         * needed to accommodate the reservation.  Add the appropriate number
1743         * of pages to the hugetlb pool and free the extras back to the buddy
1744         * allocator.  Commit the entire reservation here to prevent another
1745         * process from stealing the pages as they are added to the pool but
1746         * before they are reserved.
1747         */
1748        needed += allocated;
1749        h->resv_huge_pages += delta;
1750        ret = 0;
1751
1752        /* Free the needed pages to the hugetlb pool */
1753        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1754                if ((--needed) < 0)
1755                        break;
1756                /*
1757                 * This page is now managed by the hugetlb allocator and has
1758                 * no users -- drop the buddy allocator's reference.
1759                 */
1760                put_page_testzero(page);
1761                VM_BUG_ON_PAGE(page_count(page), page);
1762                enqueue_huge_page(h, page);
1763        }
1764free:
1765        spin_unlock(&hugetlb_lock);
1766
1767        /* Free unnecessary surplus pages to the buddy allocator */
1768        list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1769                put_page(page);
1770        spin_lock(&hugetlb_lock);
1771
1772        return ret;
1773}
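/*
 * Worked example of the accounting above (illustrative numbers): with
 * free_huge_pages = 2, resv_huge_pages = 1 and delta = 3, needed starts
 * at (1 + 3) - 2 = 2, so two surplus pages are allocated.  If another
 * thread freed a hugepage while the lock was dropped (free_huge_pages is
 * now 3), the recalculated needed is (1 + 3) - (3 + 2) = -1, so after
 * "needed += allocated" only one of the two surplus pages is enqueued to
 * the pool and the other is returned to the buddy allocator.
 */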
1774
1775/*
1776 * When releasing a hugetlb pool reservation, any surplus pages that were
1777 * allocated to satisfy the reservation must be explicitly freed if they were
1778 * never used.
1779 * Called with hugetlb_lock held.
1780 */
1781static void return_unused_surplus_pages(struct hstate *h,
1782                                        unsigned long unused_resv_pages)
1783{
1784        unsigned long nr_pages;
1785
1786        /* Uncommit the reservation */
1787        h->resv_huge_pages -= unused_resv_pages;
1788
1789        /* Cannot return gigantic pages currently */
1790        if (hstate_is_gigantic(h))
1791                return;
1792
1793        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1794
1795        /*
1796         * We want to release as many surplus pages as possible, spread
1797         * evenly across all nodes with memory. Iterate across these nodes
1798         * until we can no longer free unreserved surplus pages. This occurs
1799         * when the nodes with surplus pages have no free pages.
1800         * free_pool_huge_page() will balance the freed pages across the
1801         * on-line nodes with memory and will handle the hstate accounting.
1802         */
1803        while (nr_pages--) {
1804                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1805                        break;
1806                cond_resched_lock(&hugetlb_lock);
1807        }
1808}
1809
1810
1811/*
1812 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1813 * are used by the huge page allocation routines to manage reservations.
1814 *
1815 * vma_needs_reservation is called to determine if the huge page at addr
1816 * within the vma has an associated reservation.  If a reservation is
1817 * needed, the value 1 is returned.  The caller is then responsible for
1818 * managing the global reservation and subpool usage counts.  After
1819 * the huge page has been allocated, vma_commit_reservation is called
1820 * to add the page to the reservation map.  If the page allocation fails,
1821 * the reservation must be ended instead of committed.  vma_end_reservation
1822 * is called in such cases.
1823 *
1824 * In the normal case, vma_commit_reservation returns the same value
1825 * as the preceding vma_needs_reservation call.  The only time this
1826 * is not the case is if a reserve map was changed between calls.  It
1827 * is the responsibility of the caller to notice the difference and
1828 * take appropriate action.
1829 *
1830 * vma_add_reservation is used in error paths where a reservation must
1831 * be restored when a newly allocated huge page must be freed.  It is
1832 * to be called after calling vma_needs_reservation to determine if a
1833 * reservation exists.
1834 */
1835enum vma_resv_mode {
1836        VMA_NEEDS_RESV,
1837        VMA_COMMIT_RESV,
1838        VMA_END_RESV,
1839        VMA_ADD_RESV,
1840};
1841static long __vma_reservation_common(struct hstate *h,
1842                                struct vm_area_struct *vma, unsigned long addr,
1843                                enum vma_resv_mode mode)
1844{
1845        struct resv_map *resv;
1846        pgoff_t idx;
1847        long ret;
1848
1849        resv = vma_resv_map(vma);
1850        if (!resv)
1851                return 1;
1852
1853        idx = vma_hugecache_offset(h, vma, addr);
1854        switch (mode) {
1855        case VMA_NEEDS_RESV:
1856                ret = region_chg(resv, idx, idx + 1);
1857                break;
1858        case VMA_COMMIT_RESV:
1859                ret = region_add(resv, idx, idx + 1);
1860                break;
1861        case VMA_END_RESV:
1862                region_abort(resv, idx, idx + 1);
1863                ret = 0;
1864                break;
1865        case VMA_ADD_RESV:
1866                if (vma->vm_flags & VM_MAYSHARE)
1867                        ret = region_add(resv, idx, idx + 1);
1868                else {
1869                        region_abort(resv, idx, idx + 1);
1870                        ret = region_del(resv, idx, idx + 1);
1871                }
1872                break;
1873        default:
1874                BUG();
1875        }
1876
1877        if (vma->vm_flags & VM_MAYSHARE)
1878                return ret;
1879        else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1880                /*
1881                 * In most cases, reserves always exist for private mappings.
1882                 * However, a file associated with the mapping could have been
1883                 * hole punched or truncated after reserves were consumed.  A
1884                 * subsequent fault on such a range will then not use reserves.
1885                 * Subtle - The reserve map for private mappings has the
1886                 * opposite meaning than that of shared mappings.  If NO
1887                 * entry is in the reserve map, it means a reservation exists.
1888                 * If an entry exists in the reserve map, it means the
1889                 * reservation has already been consumed.  As a result, the
1890                 * return value of this routine is the opposite of the
1891                 * value returned from reserve map manipulation routines above.
1892                 */
1893                if (ret)
1894                        return 0;
1895                else
1896                        return 1;
1897        }
1898        else
1899                return ret < 0 ? ret : 0;
1900}
1901
1902static long vma_needs_reservation(struct hstate *h,
1903                        struct vm_area_struct *vma, unsigned long addr)
1904{
1905        return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1906}
1907
1908static long vma_commit_reservation(struct hstate *h,
1909                        struct vm_area_struct *vma, unsigned long addr)
1910{
1911        return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1912}
1913
1914static void vma_end_reservation(struct hstate *h,
1915                        struct vm_area_struct *vma, unsigned long addr)
1916{
1917        (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1918}
1919
1920static long vma_add_reservation(struct hstate *h,
1921                        struct vm_area_struct *vma, unsigned long addr)
1922{
1923        return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1924}
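/*
 * Quick reference derived from the comments in __vma_reservation_common()
 * above, for the VMA_NEEDS_RESV case:
 *
 *	mapping               map entry   return   meaning
 *	shared                absent      1        reservation needed
 *	shared                present     0        reservation exists
 *	private (resv owner)  absent      0        reservation exists
 *	private (resv owner)  present     1        reservation needed
 *	                                           (already consumed)
 */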
1925
1926/*
1927 * This routine is called to restore a reservation on error paths.  In the
1928 * specific error paths, a huge page was allocated (via alloc_huge_page)
1929 * and is about to be freed.  If a reservation for the page existed,
1930 * alloc_huge_page would have consumed the reservation and set PagePrivate
1931 * in the newly allocated page.  When the page is freed via free_huge_page,
1932 * the global reservation count will be incremented if PagePrivate is set.
1933 * However, free_huge_page can not adjust the reserve map.  Adjust the
1934 * reserve map here to be consistent with global reserve count adjustments
1935 * to be made by free_huge_page.
1936 */
1937static void restore_reserve_on_error(struct hstate *h,
1938                        struct vm_area_struct *vma, unsigned long address,
1939                        struct page *page)
1940{
1941        if (unlikely(PagePrivate(page))) {
1942                long rc = vma_needs_reservation(h, vma, address);
1943
1944                if (unlikely(rc < 0)) {
1945                        /*
1946                         * Rare out of memory condition in reserve map
1947                         * manipulation.  Clear PagePrivate so that
1948                         * global reserve count will not be incremented
1949                         * by free_huge_page.  This will make it appear
1950                         * as though the reservation for this page was
1951                         * consumed.  This may prevent the task from
1952                         * faulting in the page at a later time.  This
1953                         * is better than inconsistent global huge page
1954                         * accounting of reserve counts.
1955                         */
1956                        ClearPagePrivate(page);
1957                } else if (rc) {
1958                        rc = vma_add_reservation(h, vma, address);
1959                        if (unlikely(rc < 0))
1960                                /*
1961                                 * See above comment about rare out of
1962                                 * memory condition.
1963                                 */
1964                                ClearPagePrivate(page);
1965                } else
1966                        vma_end_reservation(h, vma, address);
1967        }
1968}
1969
1970struct page *alloc_huge_page(struct vm_area_struct *vma,
1971                                    unsigned long addr, int avoid_reserve)
1972{
1973        struct hugepage_subpool *spool = subpool_vma(vma);
1974        struct hstate *h = hstate_vma(vma);
1975        struct page *page;
1976        long map_chg, map_commit;
1977        long gbl_chg;
1978        int ret, idx;
1979        struct hugetlb_cgroup *h_cg;
1980
1981        idx = hstate_index(h);
1982        /*
1983         * Examine the region/reserve map to determine if the process
1984         * has a reservation for the page to be allocated.  A return
1985         * code of zero indicates a reservation exists (no change).
1986         */
1987        map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1988        if (map_chg < 0)
1989                return ERR_PTR(-ENOMEM);
1990
1991        /*
1992         * Processes that did not create the mapping will have no
1993         * reserves as indicated by the region/reserve map. Check
1994         * that the allocation will not exceed the subpool limit.
1995         * Allocations for MAP_NORESERVE mappings also need to be
1996         * checked against any subpool limit.
1997         */
1998        if (map_chg || avoid_reserve) {
1999                gbl_chg = hugepage_subpool_get_pages(spool, 1);
2000                if (gbl_chg < 0) {
2001                        vma_end_reservation(h, vma, addr);
2002                        return ERR_PTR(-ENOSPC);
2003                }
2004
2005                /*
2006                 * Even though there was no reservation in the region/reserve
2007                 * map, there could be reservations associated with the
2008                 * subpool that can be used.  This would be indicated if the
2009                 * return value of hugepage_subpool_get_pages() is zero.
2010                 * However, if avoid_reserve is specified we still avoid even
2011                 * the subpool reservations.
2012                 */
2013                if (avoid_reserve)
2014                        gbl_chg = 1;
2015        }
2016
2017        ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2018        if (ret)
2019                goto out_subpool_put;
2020
2021        spin_lock(&hugetlb_lock);
2022        /*
2023         * gbl_chg is passed to indicate whether or not a page must be taken
2024         * from the global free pool (global change).  gbl_chg == 0 indicates
2025         * a reservation exists for the allocation.
2026         */
2027        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2028        if (!page) {
2029                spin_unlock(&hugetlb_lock);
2030                page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
2031                if (!page)
2032                        goto out_uncharge_cgroup;
2033                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2034                        SetPagePrivate(page);
2035                        h->resv_huge_pages--;
2036                }
2037                spin_lock(&hugetlb_lock);
2038                list_move(&page->lru, &h->hugepage_activelist);
2039                /* Fall through */
2040        }
2041        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2042        spin_unlock(&hugetlb_lock);
2043
2044        set_page_private(page, (unsigned long)spool);
2045
2046        map_commit = vma_commit_reservation(h, vma, addr);
2047        if (unlikely(map_chg > map_commit)) {
2048                /*
2049                 * The page was added to the reservation map between
2050                 * vma_needs_reservation and vma_commit_reservation.
2051                 * This indicates a race with hugetlb_reserve_pages.
2052                 * Adjust for the subpool count incremented above AND
2053                 * in hugetlb_reserve_pages for the same page.  Also,
2054                 * the reservation count added in hugetlb_reserve_pages
2055                 * no longer applies.
2056                 */
2057                long rsv_adjust;
2058
2059                rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2060                hugetlb_acct_memory(h, -rsv_adjust);
2061        }
2062        return page;
2063
2064out_uncharge_cgroup:
2065        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2066out_subpool_put:
2067        if (map_chg || avoid_reserve)
2068                hugepage_subpool_put_pages(spool, 1);
2069        vma_end_reservation(h, vma, addr);
2070        return ERR_PTR(-ENOSPC);
2071}
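/*
 * Hedged usage sketch (names 'ret' and the 'backout' label are assumed to
 * exist in the caller): a fault-path caller typically does
 *
 *	page = alloc_huge_page(vma, address, 0);
 *	if (IS_ERR(page)) {
 *		ret = VM_FAULT_SIGBUS;
 *		goto backout;
 *	}
 *
 * with avoid_reserve == 0 so that an existing reservation is consumed.
 */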
2072
2073/*
2074 * alloc_huge_page()'s wrapper which simply returns the page if allocation
2075 * succeeds, otherwise NULL. This function is called from new_vma_page(),
2076 * where no ERR_PTR()-encoded error value is expected to be returned.
2077 */
2078struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
2079                                unsigned long addr, int avoid_reserve)
2080{
2081        struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
2082        if (IS_ERR(page))
2083                page = NULL;
2084        return page;
2085}
2086
2087int __weak alloc_bootmem_huge_page(struct hstate *h)
2088{
2089        struct huge_bootmem_page *m;
2090        int nr_nodes, node;
2091
2092        for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2093                void *addr;
2094
2095                addr = memblock_virt_alloc_try_nid_nopanic(
2096                                huge_page_size(h), huge_page_size(h),
2097                                0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2098                if (addr) {
2099                        /*
2100                         * Use the beginning of the huge page to store the
2101                         * huge_bootmem_page struct (until gather_bootmem
2102                         * puts them into the mem_map).
2103                         */
2104                        m = addr;
2105                        goto found;
2106                }
2107        }
2108        return 0;
2109
2110found:
2111        BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2112        /* Put them into a private list first because mem_map is not up yet */
2113        list_add(&m->list, &huge_boot_pages);
2114        m->hstate = h;
2115        return 1;
2116}
2117
2118static void __init prep_compound_huge_page(struct page *page,
2119                unsigned int order)
2120{
2121        if (unlikely(order > (MAX_ORDER - 1)))
2122                prep_compound_gigantic_page(page, order);
2123        else
2124                prep_compound_page(page, order);
2125}
2126
2127/* Put bootmem huge pages into the standard lists after mem_map is up */
2128static void __init gather_bootmem_prealloc(void)
2129{
2130        struct huge_bootmem_page *m;
2131
2132        list_for_each_entry(m, &huge_boot_pages, list) {
2133                struct hstate *h = m->hstate;
2134                struct page *page;
2135
2136#ifdef CONFIG_HIGHMEM
2137                page = pfn_to_page(m->phys >> PAGE_SHIFT);
2138                memblock_free_late(__pa(m),
2139                                   sizeof(struct huge_bootmem_page));
2140#else
2141                page = virt_to_page(m);
2142#endif
2143                WARN_ON(page_count(page) != 1);
2144                prep_compound_huge_page(page, h->order);
2145                WARN_ON(PageReserved(page));
2146                prep_new_huge_page(h, page, page_to_nid(page));
2147                /*
2148                 * If we had gigantic hugepages allocated at boot time, we need
2149                 * to restore the 'stolen' pages to totalram_pages in order to
2150                 * fix confusing memory reports from free(1) and other
2151                 * side effects, like CommitLimit going negative.
2152                 */
2153                if (hstate_is_gigantic(h))
2154                        adjust_managed_page_count(page, 1 << h->order);
2155        }
2156}
2157
2158static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2159{
2160        unsigned long i;
2161
2162        for (i = 0; i < h->max_huge_pages; ++i) {
2163                if (hstate_is_gigantic(h)) {
2164                        if (!alloc_bootmem_huge_page(h))
2165                                break;
2166                } else if (!alloc_fresh_huge_page(h,
2167                                         &node_states[N_MEMORY]))
2168                        break;
2169        }
2170        h->max_huge_pages = i;
2171}
2172
2173static void __init hugetlb_init_hstates(void)
2174{
2175        struct hstate *h;
2176
2177        for_each_hstate(h) {
2178                if (minimum_order > huge_page_order(h))
2179                        minimum_order = huge_page_order(h);
2180
2181                /* oversize hugepages were init'ed in early boot */
2182                if (!hstate_is_gigantic(h))
2183                        hugetlb_hstate_alloc_pages(h);
2184        }
2185        VM_BUG_ON(minimum_order == UINT_MAX);
2186}
2187
2188static char * __init memfmt(char *buf, unsigned long n)
2189{
2190        if (n >= (1UL << 30))
2191                sprintf(buf, "%lu GB", n >> 30);
2192        else if (n >= (1UL << 20))
2193                sprintf(buf, "%lu MB", n >> 20);
2194        else
2195                sprintf(buf, "%lu KB", n >> 10);
2196        return buf;
2197}
2198
2199static void __init report_hugepages(void)
2200{
2201        struct hstate *h;
2202
2203        for_each_hstate(h) {
2204                char buf[32];
2205                pr_info("HugeTLB registered %s page size, pre-allocated %lu pages\n",
2206                        memfmt(buf, huge_page_size(h)),
2207                        h->free_huge_pages);
2208        }
2209}
2210
2211#ifdef CONFIG_HIGHMEM
2212static void try_to_free_low(struct hstate *h, unsigned long count,
2213                                                nodemask_t *nodes_allowed)
2214{
2215        int i;
2216
2217        if (hstate_is_gigantic(h))
2218                return;
2219
2220        for_each_node_mask(i, *nodes_allowed) {
2221                struct page *page, *next;
2222                struct list_head *freel = &h->hugepage_freelists[i];
2223                list_for_each_entry_safe(page, next, freel, lru) {
2224                        if (count >= h->nr_huge_pages)
2225                                return;
2226                        if (PageHighMem(page))
2227                                continue;
2228                        list_del(&page->lru);
2229                        update_and_free_page(h, page);
2230                        h->free_huge_pages--;
2231                        h->free_huge_pages_node[page_to_nid(page)]--;
2232                }
2233        }
2234}
2235#else
2236static inline void try_to_free_low(struct hstate *h, unsigned long count,
2237                                                nodemask_t *nodes_allowed)
2238{
2239}
2240#endif
2241
2242/*
2243 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2244 * balanced by operating on them in a round-robin fashion.
2245 * Returns 1 if an adjustment was made.
2246 */
2247static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2248                                int delta)
2249{
2250        int nr_nodes, node;
2251
2252        VM_BUG_ON(delta != -1 && delta != 1);
2253
2254        if (delta < 0) {
2255                for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2256                        if (h->surplus_huge_pages_node[node])
2257                                goto found;
2258                }
2259        } else {
2260                for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2261                        if (h->surplus_huge_pages_node[node] <
2262                                        h->nr_huge_pages_node[node])
2263                                goto found;
2264                }
2265        }
2266        return 0;
2267
2268found:
2269        h->surplus_huge_pages += delta;
2270        h->surplus_huge_pages_node[node] += delta;
2271        return 1;
2272}
2273
2274#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2275static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2276                                                nodemask_t *nodes_allowed)
2277{
2278        unsigned long min_count, ret;
2279
2280        if (hstate_is_gigantic(h) && !gigantic_page_supported())
2281                return h->max_huge_pages;
2282
2283        /*
2284         * Increase the pool size
2285         * First take pages out of surplus state.  Then make up the
2286         * remaining difference by allocating fresh huge pages.
2287         *
2288         * We might race with __alloc_buddy_huge_page() here and be unable
2289         * to convert a surplus huge page to a normal huge page. That is
2290         * not critical, though, it just means the overall size of the
2291         * pool might be one hugepage larger than it needs to be, but
2292         * within all the constraints specified by the sysctls.
2293         */
2294        spin_lock(&hugetlb_lock);
2295        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2296                if (!adjust_pool_surplus(h, nodes_allowed, -1))
2297                        break;
2298        }
2299
2300        while (count > persistent_huge_pages(h)) {
2301                /*
2302                 * If this allocation races such that we no longer need the
2303                 * page, free_huge_page will handle it by freeing the page
2304                 * and reducing the surplus.
2305                 */
2306                spin_unlock(&hugetlb_lock);
2307
2308                /* yield cpu to avoid soft lockup */
2309                cond_resched();
2310
2311                if (hstate_is_gigantic(h))
2312                        ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2313                else
2314                        ret = alloc_fresh_huge_page(h, nodes_allowed);
2315                spin_lock(&hugetlb_lock);
2316                if (!ret)
2317                        goto out;
2318
2319                /* Bail for signals. Probably ctrl-c from user */
2320                if (signal_pending(current))
2321                        goto out;
2322        }
2323
2324        /*
2325         * Decrease the pool size
2326         * First return free pages to the buddy allocator (being careful
2327         * to keep enough around to satisfy reservations).  Then place
2328         * pages into surplus state as needed so the pool will shrink
2329         * to the desired size as pages become free.
2330         *
2331         * By placing pages into the surplus state independent of the
2332         * overcommit value, we are allowing the surplus pool size to
2333         * exceed overcommit. There are few sane options here. Since
2334         * __alloc_buddy_huge_page() is checking the global counter,
2335         * though, we'll note that we're not allowed to exceed surplus
2336         * and won't grow the pool anywhere else. Not until one of the
2337         * sysctls is changed, or the surplus pages go out of use.
2338         */
2339        min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2340        min_count = max(count, min_count);
2341        try_to_free_low(h, min_count, nodes_allowed);
2342        while (min_count < persistent_huge_pages(h)) {
2343                if (!free_pool_huge_page(h, nodes_allowed, 0))
2344                        break;
2345                cond_resched_lock(&hugetlb_lock);
2346        }
2347        while (count < persistent_huge_pages(h)) {
2348                if (!adjust_pool_surplus(h, nodes_allowed, 1))
2349                        break;
2350        }
2351out:
2352        ret = persistent_huge_pages(h);
2353        spin_unlock(&hugetlb_lock);
2354        return ret;
2355}
2356
2357#define HSTATE_ATTR_RO(_name) \
2358        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2359
2360#define HSTATE_ATTR(_name) \
2361        static struct kobj_attribute _name##_attr = \
2362                __ATTR(_name, 0644, _name##_show, _name##_store)
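/*
 * For illustration, HSTATE_ATTR(nr_hugepages) further below expands to
 * roughly:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 */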
2363
2364static struct kobject *hugepages_kobj;
2365static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2366
2367static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2368
2369static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2370{
2371        int i;
2372
2373        for (i = 0; i < HUGE_MAX_HSTATE; i++)
2374                if (hstate_kobjs[i] == kobj) {
2375                        if (nidp)
2376                                *nidp = NUMA_NO_NODE;
2377                        return &hstates[i];
2378                }
2379
2380        return kobj_to_node_hstate(kobj, nidp);
2381}
2382
2383static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2384                                        struct kobj_attribute *attr, char *buf)
2385{
2386        struct hstate *h;
2387        unsigned long nr_huge_pages;
2388        int nid;
2389
2390        h = kobj_to_hstate(kobj, &nid);
2391        if (nid == NUMA_NO_NODE)
2392                nr_huge_pages = h->nr_huge_pages;
2393        else
2394                nr_huge_pages = h->nr_huge_pages_node[nid];
2395
2396        return sprintf(buf, "%lu\n", nr_huge_pages);
2397}
2398
2399static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2400                                           struct hstate *h, int nid,
2401                                           unsigned long count, size_t len)
2402{
2403        int err;
2404        NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2405
2406        if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2407                err = -EINVAL;
2408                goto out;
2409        }
2410
2411        if (nid == NUMA_NO_NODE) {
2412                /*
2413                 * global hstate attribute
2414                 */
2415                if (!(obey_mempolicy &&
2416                                init_nodemask_of_mempolicy(nodes_allowed))) {
2417                        NODEMASK_FREE(nodes_allowed);
2418                        nodes_allowed = &node_states[N_MEMORY];
2419                }
2420        } else if (nodes_allowed) {
2421                /*
2422                 * per node hstate attribute: adjust count to global,
2423                 * but restrict alloc/free to the specified node.
2424                 */
2425                count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2426                init_nodemask_of_node(nodes_allowed, nid);
2427        } else
2428                nodes_allowed = &node_states[N_MEMORY];
2429
2430        h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2431
2432        if (nodes_allowed != &node_states[N_MEMORY])
2433                NODEMASK_FREE(nodes_allowed);
2434
2435        return len;
2436out:
2437        NODEMASK_FREE(nodes_allowed);
2438        return err;
2439}
2440
2441static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2442                                         struct kobject *kobj, const char *buf,
2443                                         size_t len)
2444{
2445        struct hstate *h;
2446        unsigned long count;
2447        int nid;
2448        int err;
2449
2450        err = kstrtoul(buf, 10, &count);
2451        if (err)
2452                return err;
2453
2454        h = kobj_to_hstate(kobj, &nid);
2455        return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2456}
2457
2458static ssize_t nr_hugepages_show(struct kobject *kobj,
2459                                       struct kobj_attribute *attr, char *buf)
2460{
2461        return nr_hugepages_show_common(kobj, attr, buf);
2462}
2463
2464static ssize_t nr_hugepages_store(struct kobject *kobj,
2465               struct kobj_attribute *attr, const char *buf, size_t len)
2466{
2467        return nr_hugepages_store_common(false, kobj, buf, len);
2468}
2469HSTATE_ATTR(nr_hugepages);
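/*
 * Userspace view of the attribute defined above (illustrative): each
 * hstate gets a directory named after h->name under the "hugepages"
 * kobject created in hugetlb_sysfs_init(), so on x86_64 with 2MB pages:
 *
 *	# echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	# cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	64
 */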
2470
2471#ifdef CONFIG_NUMA
2472
2473/*
2474 * hstate attribute for optionally mempolicy-based constraint on persistent
2475 * huge page alloc/free.
2476 */
2477static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2478                                       struct kobj_attribute *attr, char *buf)
2479{
2480        return nr_hugepages_show_common(kobj, attr, buf);
2481}
2482
2483static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2484               struct kobj_attribute *attr, const char *buf, size_t len)
2485{
2486        return nr_hugepages_store_common(true, kobj, buf, len);
2487}
2488HSTATE_ATTR(nr_hugepages_mempolicy);
2489#endif
2490
2491
2492static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2493                                        struct kobj_attribute *attr, char *buf)
2494{
2495        struct hstate *h = kobj_to_hstate(kobj, NULL);
2496        return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2497}
2498
2499static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2500                struct kobj_attribute *attr, const char *buf, size_t count)
2501{
2502        int err;
2503        unsigned long input;
2504        struct hstate *h = kobj_to_hstate(kobj, NULL);
2505
2506        if (hstate_is_gigantic(h))
2507                return -EINVAL;
2508
2509        err = kstrtoul(buf, 10, &input);
2510        if (err)
2511                return err;
2512
2513        spin_lock(&hugetlb_lock);
2514        h->nr_overcommit_huge_pages = input;
2515        spin_unlock(&hugetlb_lock);
2516
2517        return count;
2518}
2519HSTATE_ATTR(nr_overcommit_hugepages);
2520
2521static ssize_t free_hugepages_show(struct kobject *kobj,
2522                                        struct kobj_attribute *attr, char *buf)
2523{
2524        struct hstate *h;
2525        unsigned long free_huge_pages;
2526        int nid;
2527
2528        h = kobj_to_hstate(kobj, &nid);
2529        if (nid == NUMA_NO_NODE)
2530                free_huge_pages = h->free_huge_pages;
2531        else
2532                free_huge_pages = h->free_huge_pages_node[nid];
2533
2534        return sprintf(buf, "%lu\n", free_huge_pages);
2535}
2536HSTATE_ATTR_RO(free_hugepages);
2537
2538static ssize_t resv_hugepages_show(struct kobject *kobj,
2539                                        struct kobj_attribute *attr, char *buf)
2540{
2541        struct hstate *h = kobj_to_hstate(kobj, NULL);
2542        return sprintf(buf, "%lu\n", h->resv_huge_pages);
2543}
2544HSTATE_ATTR_RO(resv_hugepages);
2545
2546static ssize_t surplus_hugepages_show(struct kobject *kobj,
2547                                        struct kobj_attribute *attr, char *buf)
2548{
2549        struct hstate *h;
2550        unsigned long surplus_huge_pages;
2551        int nid;
2552
2553        h = kobj_to_hstate(kobj, &nid);
2554        if (nid == NUMA_NO_NODE)
2555                surplus_huge_pages = h->surplus_huge_pages;
2556        else
2557                surplus_huge_pages = h->surplus_huge_pages_node[nid];
2558
2559        return sprintf(buf, "%lu\n", surplus_huge_pages);
2560}
2561HSTATE_ATTR_RO(surplus_hugepages);
2562
2563static struct attribute *hstate_attrs[] = {
2564        &nr_hugepages_attr.attr,
2565        &nr_overcommit_hugepages_attr.attr,
2566        &free_hugepages_attr.attr,
2567        &resv_hugepages_attr.attr,
2568        &surplus_hugepages_attr.attr,
2569#ifdef CONFIG_NUMA
2570        &nr_hugepages_mempolicy_attr.attr,
2571#endif
2572        NULL,
2573};
2574
2575static struct attribute_group hstate_attr_group = {
2576        .attrs = hstate_attrs,
2577};
2578
2579static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2580                                    struct kobject **hstate_kobjs,
2581                                    struct attribute_group *hstate_attr_group)
2582{
2583        int retval;
2584        int hi = hstate_index(h);
2585
2586        hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2587        if (!hstate_kobjs[hi])
2588                return -ENOMEM;
2589
2590        retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2591        if (retval)
2592                kobject_put(hstate_kobjs[hi]);
2593
2594        return retval;
2595}
2596
2597static void __init hugetlb_sysfs_init(void)
2598{
2599        struct hstate *h;
2600        int err;
2601
2602        hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2603        if (!hugepages_kobj)
2604                return;
2605
2606        for_each_hstate(h) {
2607                err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2608                                         hstate_kobjs, &hstate_attr_group);
2609                if (err)
2610                        pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2611        }
2612}
2613
2614#ifdef CONFIG_NUMA
2615
2616/*
2617 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2618 * with node devices in node_devices[] using a parallel array.  The array
2619 * index of a node device or node_hstate equals the node id.
2620 * This is here to avoid any static dependency of the node device driver, in
2621 * the base kernel, on the hugetlb module.
2622 */
2623struct node_hstate {
2624        struct kobject          *hugepages_kobj;
2625        struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2626};
2627static struct node_hstate node_hstates[MAX_NUMNODES];
2628
2629/*
2630 * A subset of global hstate attributes for node devices
2631 */
2632static struct attribute *per_node_hstate_attrs[] = {
2633        &nr_hugepages_attr.attr,
2634        &free_hugepages_attr.attr,
2635        &surplus_hugepages_attr.attr,
2636        NULL,
2637};
2638
2639static struct attribute_group per_node_hstate_attr_group = {
2640        .attrs = per_node_hstate_attrs,
2641};
2642
2643/*
2644 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2645 * Returns node id via non-NULL nidp.
2646 */
2647static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2648{
2649        int nid;
2650
2651        for (nid = 0; nid < nr_node_ids; nid++) {
2652                struct node_hstate *nhs = &node_hstates[nid];
2653                int i;
2654                for (i = 0; i < HUGE_MAX_HSTATE; i++)
2655                        if (nhs->hstate_kobjs[i] == kobj) {
2656                                if (nidp)
2657                                        *nidp = nid;
2658                                return &hstates[i];
2659                        }
2660        }
2661
2662        BUG();
2663        return NULL;
2664}
2665
2666/*
2667 * Unregister hstate attributes from a single node device.
2668 * No-op if no hstate attributes attached.
2669 */
2670static void hugetlb_unregister_node(struct node *node)
2671{
2672        struct hstate *h;
2673        struct node_hstate *nhs = &node_hstates[node->dev.id];
2674
2675        if (!nhs->hugepages_kobj)
2676                return;         /* no hstate attributes */
2677
2678        for_each_hstate(h) {
2679                int idx = hstate_index(h);
2680                if (nhs->hstate_kobjs[idx]) {
2681                        kobject_put(nhs->hstate_kobjs[idx]);
2682                        nhs->hstate_kobjs[idx] = NULL;
2683                }
2684        }
2685
2686        kobject_put(nhs->hugepages_kobj);
2687        nhs->hugepages_kobj = NULL;
2688}
2689
2690
2691/*
2692 * Register hstate attributes for a single node device.
2693 * No-op if attributes already registered.
2694 */
2695static void hugetlb_register_node(struct node *node)
2696{
2697        struct hstate *h;
2698        struct node_hstate *nhs = &node_hstates[node->dev.id];
2699        int err;
2700
2701        if (nhs->hugepages_kobj)
2702                return;         /* already allocated */
2703
2704        nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2705                                                        &node->dev.kobj);
2706        if (!nhs->hugepages_kobj)
2707                return;
2708
2709        for_each_hstate(h) {
2710                err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2711                                                nhs->hstate_kobjs,
2712                                                &per_node_hstate_attr_group);
2713                if (err) {
2714                        pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2715                                h->name, node->dev.id);
2716                        hugetlb_unregister_node(node);
2717                        break;
2718                }
2719        }
2720}
2721
2722/*
2723 * hugetlb init time:  register hstate attributes for all registered node
2724 * devices of nodes that have memory.  All on-line nodes should have
2725 * registered their associated device by this time.
2726 */
2727static void __init hugetlb_register_all_nodes(void)
2728{
2729        int nid;
2730
2731        for_each_node_state(nid, N_MEMORY) {
2732                struct node *node = node_devices[nid];
2733                if (node->dev.id == nid)
2734                        hugetlb_register_node(node);
2735        }
2736
2737        /*
2738         * Let the node device driver know we're here so it can
2739         * [un]register hstate attributes on node hotplug.
2740         */
2741        register_hugetlbfs_with_node(hugetlb_register_node,
2742                                     hugetlb_unregister_node);
2743}
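/*
 * Userspace view (illustrative): with the per-node registration above,
 * the same subset of attributes also appears under each memory node, e.g.
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */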
2744#else   /* !CONFIG_NUMA */
2745
2746static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2747{
2748        BUG();
2749        if (nidp)
2750                *nidp = -1;
2751        return NULL;
2752}
2753
2754static void hugetlb_register_all_nodes(void) { }
2755
2756#endif
2757
2758static int __init hugetlb_init(void)
2759{
2760        int i;
2761
2762        if (!hugepages_supported())
2763                return 0;
2764
2765        if (!size_to_hstate(default_hstate_size)) {
2766                default_hstate_size = HPAGE_SIZE;
2767                if (!size_to_hstate(default_hstate_size))
2768                        hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2769        }
2770        default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2771        if (default_hstate_max_huge_pages) {
2772                if (!default_hstate.max_huge_pages)
2773                        default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2774        }
2775
2776        hugetlb_init_hstates();
2777        gather_bootmem_prealloc();
2778        report_hugepages();
2779
2780        hugetlb_sysfs_init();
2781        hugetlb_register_all_nodes();
2782        hugetlb_cgroup_file_init();
2783
2784#ifdef CONFIG_SMP
2785        num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2786#else
2787        num_fault_mutexes = 1;
2788#endif
2789        hugetlb_fault_mutex_table =
2790                kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2791        BUG_ON(!hugetlb_fault_mutex_table);
2792
2793        for (i = 0; i < num_fault_mutexes; i++)
2794                mutex_init(&hugetlb_fault_mutex_table[i]);
2795        return 0;
2796}
2797subsys_initcall(hugetlb_init);
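/*
 * Sizing example for the fault mutex table set up in hugetlb_init()
 * (illustrative): on an SMP kernel with 6 possible CPUs, 8 * 6 = 48 is
 * rounded up to the next power of two, giving num_fault_mutexes = 64.
 */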
2798
2799/* Should be called on processing a hugepagesz=... option */
2800void __init hugetlb_bad_size(void)
2801{
2802        parsed_valid_hugepagesz = false;
2803}
2804
2805void __init hugetlb_add_hstate(unsigned int order)
2806{
2807        struct hstate *h;
2808        unsigned long i;
2809
2810        if (size_to_hstate(PAGE_SIZE << order)) {
2811                pr_warn("hugepagesz= specified twice, ignoring\n");
2812                return;
2813        }
2814        BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2815        BUG_ON(order == 0);
2816        h = &hstates[hugetlb_max_hstate++];
2817        h->order = order;
2818        h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2819        h->nr_huge_pages = 0;
2820        h->free_huge_pages = 0;
2821        for (i = 0; i < MAX_NUMNODES; ++i)
2822                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2823        INIT_LIST_HEAD(&h->hugepage_activelist);
2824        h->next_nid_to_alloc = first_memory_node;
2825        h->next_nid_to_free = first_memory_node;
2826        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2827                                        huge_page_size(h)/1024);
2828
2829        parsed_hstate = h;
2830}
2831
2832static int __init hugetlb_nrpages_setup(char *s)
2833{
2834        unsigned long *mhp;
2835        static unsigned long *last_mhp;
2836
2837        if (!parsed_valid_hugepagesz) {
2838                pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n",
2839                        s);
2840                parsed_valid_hugepagesz = true;
2841                return 1;
2842        }
2843        /*
2844         * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2845         * so this hugepages= parameter goes to the "default hstate".
2846         */
2847        else if (!hugetlb_max_hstate)
2848                mhp = &default_hstate_max_huge_pages;
2849        else
2850                mhp = &parsed_hstate->max_huge_pages;
2851
2852        if (mhp == last_mhp) {
2853                pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2854                return 1;
2855        }
2856
2857        if (sscanf(s, "%lu", mhp) <= 0)
2858                *mhp = 0;
2859
2860        /*
2861         * Global state is always initialized later in hugetlb_init.
2862         * But we need to allocate >= MAX_ORDER hstates here early to still
2863         * use the bootmem allocator.
2864         */
2865        if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2866                hugetlb_hstate_alloc_pages(parsed_hstate);
2867
2868        last_mhp = mhp;
2869
2870        return 1;
2871}
2872__setup("hugepages=", hugetlb_nrpages_setup);
2873
2874static int __init hugetlb_default_setup(char *s)
2875{
2876        default_hstate_size = memparse(s, &s);
2877        return 1;
2878}
2879__setup("default_hugepagesz=", hugetlb_default_setup);
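/*
 * Boot command line example (illustrative, assuming an architecture that
 * also parses hugepagesz=, e.g. x86_64):
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * pre-allocates four 1GB gigantic pages via the bootmem path in
 * hugetlb_hstate_alloc_pages(), allocates 512 2MB pages at init time, and
 * makes 1GB the default huge page size.
 */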
2880
2881static unsigned int cpuset_mems_nr(unsigned int *array)
2882{
2883        int node;
2884        unsigned int nr = 0;
2885
2886        for_each_node_mask(node, cpuset_current_mems_allowed)
2887                nr += array[node];
2888
2889        return nr;
2890}
2891
2892#ifdef CONFIG_SYSCTL
2893static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2894                         struct ctl_table *table, int write,
2895                         void __user *buffer, size_t *length, loff_t *ppos)
2896{
2897        struct hstate *h = &default_hstate;
2898        unsigned long tmp = h->max_huge_pages;
2899        int ret;
2900
2901        if (!hugepages_supported())
2902                return -EOPNOTSUPP;
2903
2904        table->data = &tmp;
2905        table->maxlen = sizeof(unsigned long);
2906        ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2907        if (ret)
2908                goto out;
2909
2910        if (write)
2911                ret = __nr_hugepages_store_common(obey_mempolicy, h,
2912                                                  NUMA_NO_NODE, tmp, *length);
2913out:
2914        return ret;
2915}
2916
2917int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2918                          void __user *buffer, size_t *length, loff_t *ppos)
2919{
2920
2921        return hugetlb_sysctl_handler_common(false, table, write,
2922                                                        buffer, length, ppos);
2923}
2924
2925#ifdef CONFIG_NUMA
2926int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2927                          void __user *buffer, size_t *length, loff_t *ppos)
2928{
2929        return hugetlb_sysctl_handler_common(true, table, write,
2930                                                        buffer, length, ppos);
2931}
2932#endif /* CONFIG_NUMA */
2933
2934int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2935                        void __user *buffer,
2936                        size_t *length, loff_t *ppos)
2937{
2938        struct hstate *h = &default_hstate;
2939        unsigned long tmp;
2940        int ret;
2941
2942        if (!hugepages_supported())
2943                return -EOPNOTSUPP;
2944
2945        tmp = h->nr_overcommit_huge_pages;
2946
2947        if (write && hstate_is_gigantic(h))
2948                return -EINVAL;
2949
2950        table->data = &tmp;
2951        table->maxlen = sizeof(unsigned long);
2952        ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2953        if (ret)
2954                goto out;
2955
2956        if (write) {
2957                spin_lock(&hugetlb_lock);
2958                h->nr_overcommit_huge_pages = tmp;
2959                spin_unlock(&hugetlb_lock);
2960        }
2961out:
2962        return ret;
2963}
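/*
 * Illustrative note (an assumption about the conventional sysctl wiring,
 * which lives outside this file): the handlers above typically back these
 * /proc/sys/vm/ files, e.g.
 *
 *   echo 128 > /proc/sys/vm/nr_hugepages             # hugetlb_sysctl_handler
 *   echo 128 > /proc/sys/vm/nr_hugepages_mempolicy   # hugetlb_mempolicy_sysctl_handler
 *   echo 64  > /proc/sys/vm/nr_overcommit_hugepages  # hugetlb_overcommit_handler
 *
 * Writes to nr_hugepages resize the persistent pool via
 * __nr_hugepages_store_common(); nr_overcommit_hugepages only updates
 * h->nr_overcommit_huge_pages under hugetlb_lock.
 */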
2964
2965#endif /* CONFIG_SYSCTL */
2966
2967void hugetlb_report_meminfo(struct seq_file *m)
2968{
2969        struct hstate *h = &default_hstate;
2970        if (!hugepages_supported())
2971                return;
2972        seq_printf(m,
2973                        "HugePages_Total:   %5lu\n"
2974                        "HugePages_Free:    %5lu\n"
2975                        "HugePages_Rsvd:    %5lu\n"
2976                        "HugePages_Surp:    %5lu\n"
2977                        "Hugepagesize:   %8lu kB\n",
2978                        h->nr_huge_pages,
2979                        h->free_huge_pages,
2980                        h->resv_huge_pages,
2981                        h->surplus_huge_pages,
2982                        1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2983}
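/*
 * Example /proc/meminfo output produced by the format strings above
 * (illustrative only; the values are arbitrary):
 *
 *   HugePages_Total:       4
 *   HugePages_Free:        3
 *   HugePages_Rsvd:        1
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 */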
2984
2985int hugetlb_report_node_meminfo(int nid, char *buf)
2986{
2987        struct hstate *h = &default_hstate;
2988        if (!hugepages_supported())
2989                return 0;
2990        return sprintf(buf,
2991                "Node %d HugePages_Total: %5u\n"
2992                "Node %d HugePages_Free:  %5u\n"
2993                "Node %d HugePages_Surp:  %5u\n",
2994                nid, h->nr_huge_pages_node[nid],
2995                nid, h->free_huge_pages_node[nid],
2996                nid, h->surplus_huge_pages_node[nid]);
2997}
2998
2999void hugetlb_show_meminfo(void)
3000{
3001        struct hstate *h;
3002        int nid;
3003
3004        if (!hugepages_supported())
3005                return;
3006
3007        for_each_node_state(nid, N_MEMORY)
3008                for_each_hstate(h)
3009                        pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3010                                nid,
3011                                h->nr_huge_pages_node[nid],
3012                                h->free_huge_pages_node[nid],
3013                                h->surplus_huge_pages_node[nid],
3014                                1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3015}
3016
3017void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3018{
3019        seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3020                   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3021}
3022
3023/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3024unsigned long hugetlb_total_pages(void)
3025{
3026        struct hstate *h;
3027        unsigned long nr_total_pages = 0;
3028
3029        for_each_hstate(h)
3030                nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3031        return nr_total_pages;
3032}
3033
3034static int hugetlb_acct_memory(struct hstate *h, long delta)
3035{
3036        int ret = -ENOMEM;
3037
3038        spin_lock(&hugetlb_lock);
3039        /*
3040         * When cpuset is configured, it breaks the strict hugetlb page
3041         * reservation as the accounting is done on a global variable. Such
3042         * a reservation is essentially meaningless in the presence of
3043         * cpusets because it is not checked against page availability for
3044         * the current cpuset. The application can still be OOM'ed by the
3045         * kernel if the cpuset it runs in lacks free hugetlb pages.
3046         * Attempting to enforce strict accounting with cpusets is almost
3047         * impossible (or too ugly) because cpusets are too fluid: tasks
3048         * and memory nodes can be moved between cpusets dynamically.
3049         *
3050         * Changing the semantics of shared hugetlb mappings with cpusets
3051         * is undesirable. However, to preserve some of the semantics, we
3052         * fall back to checking against the current free page availability
3053         * as a best-effort attempt, hopefully minimizing the impact of the
3054         * changed semantics that cpusets introduce.
3055         */
3056        if (delta > 0) {
3057                if (gather_surplus_pages(h, delta) < 0)
3058                        goto out;
3059
3060                if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3061                        return_unused_surplus_pages(h, delta);
3062                        goto out;
3063                }
3064        }
3065
3066        ret = 0;
3067        if (delta < 0)
3068                return_unused_surplus_pages(h, (unsigned long) -delta);
3069
3070out:
3071        spin_unlock(&hugetlb_lock);
3072        return ret;
3073}
3074
3075static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3076{
3077        struct resv_map *resv = vma_resv_map(vma);
3078
3079        /*
3080         * This new VMA should share its sibling's reservation map if present.
3081         * The VMA will only ever have a valid reservation map pointer where
3082         * it is being copied for another still existing VMA.  As that VMA
3083         * has a reference to the reservation map it cannot disappear until
3084         * after this open call completes.  It is therefore safe to take a
3085         * new reference here without additional locking.
3086         */
3087        if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3088                kref_get(&resv->refs);
3089}
3090
3091static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3092{
3093        struct hstate *h = hstate_vma(vma);
3094        struct resv_map *resv = vma_resv_map(vma);
3095        struct hugepage_subpool *spool = subpool_vma(vma);
3096        unsigned long reserve, start, end;
3097        long gbl_reserve;
3098
3099        if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3100                return;
3101
3102        start = vma_hugecache_offset(h, vma, vma->vm_start);
3103        end = vma_hugecache_offset(h, vma, vma->vm_end);
3104
3105        reserve = (end - start) - region_count(resv, start, end);
3106
3107        kref_put(&resv->refs, resv_map_release);
3108
3109        if (reserve) {
3110                /*
3111                 * Decrement reserve counts.  The global reserve count may be
3112                 * adjusted if the subpool has a minimum size.
3113                 */
3114                gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3115                hugetlb_acct_memory(h, -gbl_reserve);
3116        }
3117}
3118
3119/*
3120 * We cannot handle pagefaults against hugetlb pages at all.  They cause
3121 * handle_mm_fault() to try to instantiate regular-sized pages in the
3122 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3123 * this far.
3124 */
3125static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3126{
3127        BUG();
3128        return 0;
3129}
3130
3131const struct vm_operations_struct hugetlb_vm_ops = {
3132        .fault = hugetlb_vm_op_fault,
3133        .open = hugetlb_vm_op_open,
3134        .close = hugetlb_vm_op_close,
3135};
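/*
 * Note (assumption, the wiring lives outside this file, e.g. in
 * fs/hugetlbfs): hugetlbfs installs these ops when a hugetlbfs file is
 * mmap()ed, roughly:
 *
 *   vma->vm_ops = &hugetlb_vm_ops;      // sketch of hugetlbfs_file_mmap()
 *
 * Faults on such VMAs are routed to hugetlb_fault() by the generic fault
 * path, which is why hugetlb_vm_op_fault() above can simply BUG().
 */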
3136
3137static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3138                                int writable)
3139{
3140        pte_t entry;
3141
3142        if (writable) {
3143                entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3144                                         vma->vm_page_prot)));
3145        } else {
3146                entry = huge_pte_wrprotect(mk_huge_pte(page,
3147                                           vma->vm_page_prot));
3148        }
3149        entry = pte_mkyoung(entry);
3150        entry = pte_mkhuge(entry);
3151        entry = arch_make_huge_pte(entry, vma, page, writable);
3152
3153        return entry;
3154}
3155
3156static void set_huge_ptep_writable(struct vm_area_struct *vma,
3157                                   unsigned long address, pte_t *ptep)
3158{
3159        pte_t entry;
3160
3161        entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3162        if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3163                update_mmu_cache(vma, address, ptep);
3164}
3165
3166static int is_hugetlb_entry_migration(pte_t pte)
3167{
3168        swp_entry_t swp;
3169
3170        if (huge_pte_none(pte) || pte_present(pte))
3171                return 0;
3172        swp = pte_to_swp_entry(pte);
3173        if (non_swap_entry(swp) && is_migration_entry(swp))
3174                return 1;
3175        else
3176                return 0;
3177}
3178
3179static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3180{
3181        swp_entry_t swp;
3182
3183        if (huge_pte_none(pte) || pte_present(pte))
3184                return 0;
3185        swp = pte_to_swp_entry(pte);
3186        if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3187                return 1;
3188        else
3189                return 0;
3190}
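/*
 * Clarifying note: a hugetlb PTE that is neither none nor present encodes a
 * swp_entry_t.  The two helpers above distinguish the migration case (an
 * entry installed while the hugepage is being migrated; hugetlb_fault()
 * waits on it) from the hwpoison case (an entry left behind for a poisoned
 * hugepage, on which faults must fail).
 */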
3191
3192int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3193                            struct vm_area_struct *vma)
3194{
3195        pte_t *src_pte, *dst_pte, entry;
3196        struct page *ptepage;
3197        unsigned long addr;
3198        int cow;
3199        struct hstate *h = hstate_vma(vma);
3200        unsigned long sz = huge_page_size(h);
3201        unsigned long mmun_start;       /* For mmu_notifiers */
3202        unsigned long mmun_end;         /* For mmu_notifiers */
3203        int ret = 0;
3204
3205        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3206
3207        mmun_start = vma->vm_start;
3208        mmun_end = vma->vm_end;
3209        if (cow)
3210                mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3211
3212        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3213                spinlock_t *src_ptl, *dst_ptl;
3214                src_pte = huge_pte_offset(src, addr);
3215                if (!src_pte)
3216                        continue;
3217                dst_pte = huge_pte_alloc(dst, addr, sz);
3218                if (!dst_pte) {
3219                        ret = -ENOMEM;
3220                        break;
3221                }
3222
3223                /* If the pagetables are shared don't copy or take references */
3224                if (dst_pte == src_pte)
3225                        continue;
3226
3227                dst_ptl = huge_pte_lock(h, dst, dst_pte);
3228                src_ptl = huge_pte_lockptr(h, src, src_pte);
3229                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3230                entry = huge_ptep_get(src_pte);
3231                if (huge_pte_none(entry)) { /* skip none entry */
3232                        ;
3233                } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3234                                    is_hugetlb_entry_hwpoisoned(entry))) {
3235                        swp_entry_t swp_entry = pte_to_swp_entry(entry);
3236
3237                        if (is_write_migration_entry(swp_entry) && cow) {
3238                                /*
3239                                 * COW mappings require pages in both
3240                                 * parent and child to be set to read.
3241                                 */
3242                                make_migration_entry_read(&swp_entry);
3243                                entry = swp_entry_to_pte(swp_entry);
3244                                set_huge_pte_at(src, addr, src_pte, entry);
3245                        }
3246                        set_huge_pte_at(dst, addr, dst_pte, entry);
3247                } else {
3248                        if (cow) {
3249                                huge_ptep_set_wrprotect(src, addr, src_pte);
3250                                mmu_notifier_invalidate_range(src, mmun_start,
3251                                                                   mmun_end);
3252                        }
3253                        entry = huge_ptep_get(src_pte);
3254                        ptepage = pte_page(entry);
3255                        get_page(ptepage);
3256                        page_dup_rmap(ptepage, true);
3257                        set_huge_pte_at(dst, addr, dst_pte, entry);
3258                        hugetlb_count_add(pages_per_huge_page(h), dst);
3259                }
3260                spin_unlock(src_ptl);
3261                spin_unlock(dst_ptl);
3262        }
3263
3264        if (cow)
3265                mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3266
3267        return ret;
3268}
3269
3270void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3271                            unsigned long start, unsigned long end,
3272                            struct page *ref_page)
3273{
3274        struct mm_struct *mm = vma->vm_mm;
3275        unsigned long address;
3276        pte_t *ptep;
3277        pte_t pte;
3278        spinlock_t *ptl;
3279        struct page *page;
3280        struct hstate *h = hstate_vma(vma);
3281        unsigned long sz = huge_page_size(h);
3282        const unsigned long mmun_start = start; /* For mmu_notifiers */
3283        const unsigned long mmun_end   = end;   /* For mmu_notifiers */
3284
3285        WARN_ON(!is_vm_hugetlb_page(vma));
3286        BUG_ON(start & ~huge_page_mask(h));
3287        BUG_ON(end & ~huge_page_mask(h));
3288
3289        tlb_start_vma(tlb, vma);
3290        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3291        address = start;
3292        for (; address < end; address += sz) {
3293                ptep = huge_pte_offset(mm, address);
3294                if (!ptep)
3295                        continue;
3296
3297                ptl = huge_pte_lock(h, mm, ptep);
3298                if (huge_pmd_unshare(mm, &address, ptep)) {
3299                        spin_unlock(ptl);
3300                        continue;
3301                }
3302
3303                pte = huge_ptep_get(ptep);
3304                if (huge_pte_none(pte)) {
3305                        spin_unlock(ptl);
3306                        continue;
3307                }
3308
3309                /*
3310                 * A migrating or HWPoisoned hugepage is already unmapped
3311                 * and its refcount dropped, so just clear the pte here.
3312                 */
3313                if (unlikely(!pte_present(pte))) {
3314                        huge_pte_clear(mm, address, ptep);
3315                        spin_unlock(ptl);
3316                        continue;
3317                }
3318
3319                page = pte_page(pte);
3320                /*
3321                 * If a reference page is supplied, it is because a specific
3322                 * page is being unmapped, not a range. Ensure the page we
3323                 * are about to unmap is the actual page of interest.
3324                 */
3325                if (ref_page) {
3326                        if (page != ref_page) {
3327                                spin_unlock(ptl);
3328                                continue;
3329                        }
3330                        /*
3331                         * Mark the VMA as having unmapped its page so that
3332                         * future faults in this VMA will fail rather than
3333                         * looking like data was lost
3334                         */
3335                        set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3336                }
3337
3338                pte = huge_ptep_get_and_clear(mm, address, ptep);
3339                tlb_remove_tlb_entry(tlb, ptep, address);
3340                if (huge_pte_dirty(pte))
3341                        set_page_dirty(page);
3342
3343                hugetlb_count_sub(pages_per_huge_page(h), mm);
3344                page_remove_rmap(page, true);
3345
3346                spin_unlock(ptl);
3347                tlb_remove_page_size(tlb, page, huge_page_size(h));
3348                /*
3349                 * Bail out after unmapping reference page if supplied
3350                 */
3351                if (ref_page)
3352                        break;
3353        }
3354        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3355        tlb_end_vma(tlb, vma);
3356}
3357
3358void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3359                          struct vm_area_struct *vma, unsigned long start,
3360                          unsigned long end, struct page *ref_page)
3361{
3362        __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3363
3364        /*
3365         * Clear this flag so that x86's huge_pmd_share page_table_shareable
3366         * test will fail on a vma being torn down, and not grab a page table
3367         * on its way out.  We're lucky that the flag has such an appropriate
3368         * name, and can in fact be safely cleared here. We could clear it
3369         * before the __unmap_hugepage_range above, but all that's necessary
3370         * is to clear it before releasing the i_mmap_rwsem. This works
3371         * because in the context this is called, the VMA is about to be
3372         * destroyed and the i_mmap_rwsem is held.
3373         */
3374        vma->vm_flags &= ~VM_MAYSHARE;
3375}
3376
3377void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3378                          unsigned long end, struct page *ref_page)
3379{
3380        struct mm_struct *mm;
3381        struct mmu_gather tlb;
3382
3383        mm = vma->vm_mm;
3384
3385        tlb_gather_mmu(&tlb, mm, start, end);
3386        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3387        tlb_finish_mmu(&tlb, start, end);
3388}
3389
3390/*
3391 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3392 * mapping it owns the reserve page for. The intention is to unmap the page
3393 * from other VMAs and let the children be SIGKILLed if they are faulting the
3394 * same region.
3395 */
3396static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3397                              struct page *page, unsigned long address)
3398{
3399        struct hstate *h = hstate_vma(vma);
3400        struct vm_area_struct *iter_vma;
3401        struct address_space *mapping;
3402        pgoff_t pgoff;
3403
3404        /*
3405         * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3406         * from page cache lookup which is in HPAGE_SIZE units.
3407         */
3408        address = address & huge_page_mask(h);
3409        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3410                        vma->vm_pgoff;
3411        mapping = vma->vm_file->f_mapping;
3412
3413        /*
3414         * Take the mapping lock for the duration of the table walk. As
3415         * this mapping should be shared between all the VMAs,
3416         * __unmap_hugepage_range() is called as the lock is already held
3417         */
3418        i_mmap_lock_write(mapping);
3419        vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3420                /* Do not unmap the current VMA */
3421                if (iter_vma == vma)
3422                        continue;
3423
3424                /*
3425                 * Shared VMAs have their own reserves and do not affect
3426                 * MAP_PRIVATE accounting but it is possible that a shared
3427                 * VMA is using the same page so check and skip such VMAs.
3428                 */
3429                if (iter_vma->vm_flags & VM_MAYSHARE)
3430                        continue;
3431
3432                /*
3433                 * Unmap the page from other VMAs without their own reserves.
3434                 * They get marked to be SIGKILLed if they fault in these
3435                 * areas. This is because a future no-page fault on this VMA
3436                 * could insert a zeroed page instead of the data existing
3437                 * from the time of fork. This would look like data corruption
3438                 */
3439                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3440                        unmap_hugepage_range(iter_vma, address,
3441                                             address + huge_page_size(h), page);
3442        }
3443        i_mmap_unlock_write(mapping);
3444}
3445
3446/*
3447 * Hugetlb_cow() should be called with page lock of the original hugepage held.
3448 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3449 * cannot race with other handlers or page migration.
3450 * Keep the pte_same checks anyway to make transition from the mutex easier.
3451 */
3452static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3453                        unsigned long address, pte_t *ptep, pte_t pte,
3454                        struct page *pagecache_page, spinlock_t *ptl)
3455{
3456        struct hstate *h = hstate_vma(vma);
3457        struct page *old_page, *new_page;
3458        int ret = 0, outside_reserve = 0;
3459        unsigned long mmun_start;       /* For mmu_notifiers */
3460        unsigned long mmun_end;         /* For mmu_notifiers */
3461
3462        old_page = pte_page(pte);
3463
3464retry_avoidcopy:
3465        /* If no-one else is actually using this page, avoid the copy
3466         * and just make the page writable */
3467        if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3468                page_move_anon_rmap(old_page, vma);
3469                set_huge_ptep_writable(vma, address, ptep);
3470                return 0;
3471        }
3472
3473        /*
3474         * If the process that created a MAP_PRIVATE mapping is about to
3475         * perform a COW due to a shared page count, attempt to satisfy
3476         * the allocation without using the existing reserves. The pagecache
3477         * page is used to determine if the reserve at this address was
3478         * consumed or not. If reserves were used, a partial faulted mapping
3479         * at the time of fork() could consume its reserves on COW instead
3480         * of the full address range.
3481         */
3482        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3483                        old_page != pagecache_page)
3484                outside_reserve = 1;
3485
3486        get_page(old_page);
3487
3488        /*
3489         * Drop page table lock as buddy allocator may be called. It will
3490         * be acquired again before returning to the caller, as expected.
3491         */
3492        spin_unlock(ptl);
3493        new_page = alloc_huge_page(vma, address, outside_reserve);
3494
3495        if (IS_ERR(new_page)) {
3496                /*
3497                 * If a process owning a MAP_PRIVATE mapping fails to COW,
3498                 * it is due to references held by a child and an insufficient
3499                 * huge page pool. To guarantee the original mappers
3500                 * reliability, unmap the page from child processes. The child
3501                 * may get SIGKILLed if it later faults.
3502                 */
3503                if (outside_reserve) {
3504                        put_page(old_page);
3505                        BUG_ON(huge_pte_none(pte));
3506                        unmap_ref_private(mm, vma, old_page, address);
3507                        BUG_ON(huge_pte_none(pte));
3508                        spin_lock(ptl);
3509                        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3510                        if (likely(ptep &&
3511                                   pte_same(huge_ptep_get(ptep), pte)))
3512                                goto retry_avoidcopy;
3513                        /*
3514                         * A race occurred while re-acquiring the page
3515                         * table lock; our job is done.
3516                         */
3517                        return 0;
3518                }
3519
3520                ret = (PTR_ERR(new_page) == -ENOMEM) ?
3521                        VM_FAULT_OOM : VM_FAULT_SIGBUS;
3522                goto out_release_old;
3523        }
3524
3525        /*
3526         * When the original hugepage is a shared one, it does not have
3527         * an anon_vma prepared.
3528         */
3529        if (unlikely(anon_vma_prepare(vma))) {
3530                ret = VM_FAULT_OOM;
3531                goto out_release_all;
3532        }
3533
3534        copy_user_huge_page(new_page, old_page, address, vma,
3535                            pages_per_huge_page(h));
3536        __SetPageUptodate(new_page);
3537        set_page_huge_active(new_page);
3538
3539        mmun_start = address & huge_page_mask(h);
3540        mmun_end = mmun_start + huge_page_size(h);
3541        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3542
3543        /*
3544         * Retake the page table lock to check for racing updates
3545         * before the page tables are altered
3546         */
3547        spin_lock(ptl);
3548        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3549        if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3550                ClearPagePrivate(new_page);
3551
3552                /* Break COW */
3553                huge_ptep_clear_flush(vma, address, ptep);
3554                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3555                set_huge_pte_at(mm, address, ptep,
3556                                make_huge_pte(vma, new_page, 1));
3557                page_remove_rmap(old_page, true);
3558                hugepage_add_new_anon_rmap(new_page, vma, address);
3559                /* Make the old page be freed below */
3560                new_page = old_page;
3561        }
3562        spin_unlock(ptl);
3563        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3564out_release_all:
3565        restore_reserve_on_error(h, vma, address, new_page);
3566        put_page(new_page);
3567out_release_old:
3568        put_page(old_page);
3569
3570        spin_lock(ptl); /* Caller expects lock to be held */
3571        return ret;
3572}
3573
3574/* Return the pagecache page at a given address within a VMA */
3575static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3576                        struct vm_area_struct *vma, unsigned long address)
3577{
3578        struct address_space *mapping;
3579        pgoff_t idx;
3580
3581        mapping = vma->vm_file->f_mapping;
3582        idx = vma_hugecache_offset(h, vma, address);
3583
3584        return find_lock_page(mapping, idx);
3585}
3586
3587/*
3588 * Return whether there is a pagecache page to back the given address within the VMA.
3589 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3590 */
3591static bool hugetlbfs_pagecache_present(struct hstate *h,
3592                        struct vm_area_struct *vma, unsigned long address)
3593{
3594        struct address_space *mapping;
3595        pgoff_t idx;
3596        struct page *page;
3597
3598        mapping = vma->vm_file->f_mapping;
3599        idx = vma_hugecache_offset(h, vma, address);
3600
3601        page = find_get_page(mapping, idx);
3602        if (page)
3603                put_page(page);
3604        return page != NULL;
3605}
3606
3607int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3608                           pgoff_t idx)
3609{
3610        struct inode *inode = mapping->host;
3611        struct hstate *h = hstate_inode(inode);
3612        int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3613
3614        if (err)
3615                return err;
3616        ClearPagePrivate(page);
3617
3618        spin_lock(&inode->i_lock);
3619        inode->i_blocks += blocks_per_huge_page(h);
3620        spin_unlock(&inode->i_lock);
3621        return 0;
3622}
3623
3624static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3625                           struct address_space *mapping, pgoff_t idx,
3626                           unsigned long address, pte_t *ptep, unsigned int flags)
3627{
3628        struct hstate *h = hstate_vma(vma);
3629        int ret = VM_FAULT_SIGBUS;
3630        int anon_rmap = 0;
3631        unsigned long size;
3632        struct page *page;
3633        pte_t new_pte;
3634        spinlock_t *ptl;
3635
3636        /*
3637         * Currently, we are forced to kill the process in the event the
3638         * original mapper has unmapped pages from the child due to a failed
3639         * COW. Warn that such a situation has occurred as it may not be obvious
3640         */
3641        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3642                pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3643                           current->pid);
3644                return ret;
3645        }
3646
3647        /*
3648         * Use page lock to guard against racing truncation
3649         * before we get page_table_lock.
3650         */
3651retry:
3652        page = find_lock_page(mapping, idx);
3653        if (!page) {
3654                size = i_size_read(mapping->host) >> huge_page_shift(h);
3655                if (idx >= size)
3656                        goto out;
3657                page = alloc_huge_page(vma, address, 0);
3658                if (IS_ERR(page)) {
3659                        ret = PTR_ERR(page);
3660                        if (ret == -ENOMEM)
3661                                ret = VM_FAULT_OOM;
3662                        else
3663                                ret = VM_FAULT_SIGBUS;
3664                        goto out;
3665                }
3666                clear_huge_page(page, address, pages_per_huge_page(h));
3667                __SetPageUptodate(page);
3668                set_page_huge_active(page);
3669
3670                if (vma->vm_flags & VM_MAYSHARE) {
3671                        int err = huge_add_to_page_cache(page, mapping, idx);
3672                        if (err) {
3673                                put_page(page);
3674                                if (err == -EEXIST)
3675                                        goto retry;
3676                                goto out;
3677                        }
3678                } else {
3679                        lock_page(page);
3680                        if (unlikely(anon_vma_prepare(vma))) {
3681                                ret = VM_FAULT_OOM;
3682                                goto backout_unlocked;
3683                        }
3684                        anon_rmap = 1;
3685                }
3686        } else {
3687                /*
3688                 * If a memory error occurred between mmap() and fault, some
3689                 * processes may not have a hwpoisoned swap entry for the errored
3690                 * virtual address, so block the hugepage fault by checking PG_hwpoison.
3691                 */
3692                if (unlikely(PageHWPoison(page))) {
3693                        ret = VM_FAULT_HWPOISON |
3694                                VM_FAULT_SET_HINDEX(hstate_index(h));
3695                        goto backout_unlocked;
3696                }
3697        }
3698
3699        /*
3700         * If we are going to COW a private mapping later, we examine the
3701         * pending reservations for this page now. This will ensure that
3702         * any allocations necessary to record that reservation occur outside
3703         * the spinlock.
3704         */
3705        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3706                if (vma_needs_reservation(h, vma, address) < 0) {
3707                        ret = VM_FAULT_OOM;
3708                        goto backout_unlocked;
3709                }
3710                /* Just decrements count, does not deallocate */
3711                vma_end_reservation(h, vma, address);
3712        }
3713
3714        ptl = huge_pte_lockptr(h, mm, ptep);
3715        spin_lock(ptl);
3716        size = i_size_read(mapping->host) >> huge_page_shift(h);
3717        if (idx >= size)
3718                goto backout;
3719
3720        ret = 0;
3721        if (!huge_pte_none(huge_ptep_get(ptep)))
3722                goto backout;
3723
3724        if (anon_rmap) {
3725                ClearPagePrivate(page);
3726                hugepage_add_new_anon_rmap(page, vma, address);
3727        } else
3728                page_dup_rmap(page, true);
3729        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3730                                && (vma->vm_flags & VM_SHARED)));
3731        set_huge_pte_at(mm, address, ptep, new_pte);
3732
3733        hugetlb_count_add(pages_per_huge_page(h), mm);
3734        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3735                /* Optimization, do the COW without a second fault */
3736                ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3737        }
3738
3739        spin_unlock(ptl);
3740        unlock_page(page);
3741out:
3742        return ret;
3743
3744backout:
3745        spin_unlock(ptl);
3746backout_unlocked:
3747        unlock_page(page);
3748        restore_reserve_on_error(h, vma, address, page);
3749        put_page(page);
3750        goto out;
3751}
3752
3753#ifdef CONFIG_SMP
3754u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3755                            struct vm_area_struct *vma,
3756                            struct address_space *mapping,
3757                            pgoff_t idx, unsigned long address)
3758{
3759        unsigned long key[2];
3760        u32 hash;
3761
3762        if (vma->vm_flags & VM_SHARED) {
3763                key[0] = (unsigned long) mapping;
3764                key[1] = idx;
3765        } else {
3766                key[0] = (unsigned long) mm;
3767                key[1] = address >> huge_page_shift(h);
3768        }
3769
3770        hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3771
3772        return hash & (num_fault_mutexes - 1);
3773}
3774#else
3775/*
3776 * For uniprocessor systems we always use a single mutex, so just
3777 * return 0 and avoid the hashing overhead.
3778 */
3779u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3780                            struct vm_area_struct *vma,
3781                            struct address_space *mapping,
3782                            pgoff_t idx, unsigned long address)
3783{
3784        return 0;
3785}
3786#endif
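/*
 * Usage sketch (it mirrors hugetlb_fault() below; num_fault_mutexes is
 * assumed to be sized to a power of two in hugetlb_init(), so the mask
 * above is a cheap modulo):
 *
 *   hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *   mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *   ... instantiate or handle the fault on this logical page ...
 *   mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */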
3787
3788int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3789                        unsigned long address, unsigned int flags)
3790{
3791        pte_t *ptep, entry;
3792        spinlock_t *ptl;
3793        int ret;
3794        u32 hash;
3795        pgoff_t idx;
3796        struct page *page = NULL;
3797        struct page *pagecache_page = NULL;
3798        struct hstate *h = hstate_vma(vma);
3799        struct address_space *mapping;
3800        int need_wait_lock = 0;
3801
3802        address &= huge_page_mask(h);
3803
3804        ptep = huge_pte_offset(mm, address);
3805        if (ptep) {
3806                entry = huge_ptep_get(ptep);
3807                if (unlikely(is_hugetlb_entry_migration(entry))) {
3808                        migration_entry_wait_huge(vma, mm, ptep);
3809                        return 0;
3810                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3811                        return VM_FAULT_HWPOISON_LARGE |
3812                                VM_FAULT_SET_HINDEX(hstate_index(h));
3813        } else {
3814                ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3815                if (!ptep)
3816                        return VM_FAULT_OOM;
3817        }
3818
3819        mapping = vma->vm_file->f_mapping;
3820        idx = vma_hugecache_offset(h, vma, address);
3821
3822        /*
3823         * Serialize hugepage allocation and instantiation, so that we don't
3824         * get spurious allocation failures if two CPUs race to instantiate
3825         * the same page in the page cache.
3826         */
3827        hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3828        mutex_lock(&hugetlb_fault_mutex_table[hash]);
3829
3830        entry = huge_ptep_get(ptep);
3831        if (huge_pte_none(entry)) {
3832                ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3833                goto out_mutex;
3834        }
3835
3836        ret = 0;
3837
3838        /*
3839         * entry could be a migration/hwpoison entry at this point, so this
3840         * check prevents the kernel from going further below assuming that
3841         * we have an active hugepage in the page cache. This goto expects
3842         * the 2nd page fault, and the is_hugetlb_entry_(migration|hwpoisoned)
3843         * checks will properly handle it.
3844         */
3845        if (!pte_present(entry))
3846                goto out_mutex;
3847
3848        /*
3849         * If we are going to COW the mapping later, we examine the pending
3850         * reservations for this page now. This will ensure that any
3851         * allocations necessary to record that reservation occur outside the
3852         * spinlock. For private mappings, we also lookup the pagecache
3853         * page now as it is used to determine if a reservation has been
3854         * consumed.
3855         */
3856        if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3857                if (vma_needs_reservation(h, vma, address) < 0) {
3858                        ret = VM_FAULT_OOM;
3859                        goto out_mutex;
3860                }
3861                /* Just decrements count, does not deallocate */
3862                vma_end_reservation(h, vma, address);
3863
3864                if (!(vma->vm_flags & VM_MAYSHARE))
3865                        pagecache_page = hugetlbfs_pagecache_page(h,
3866                                                                vma, address);
3867        }
3868
3869        ptl = huge_pte_lock(h, mm, ptep);
3870
3871        /* Check for a racing update before calling hugetlb_cow */
3872        if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3873                goto out_ptl;
3874
3875        /*
3876         * hugetlb_cow() requires page locks of pte_page(entry) and
3877         * pagecache_page, so here we need take the former one
3878         * when page != pagecache_page or !pagecache_page.
3879         */
3880        page = pte_page(entry);
3881        if (page != pagecache_page)
3882                if (!trylock_page(page)) {
3883                        need_wait_lock = 1;
3884                        goto out_ptl;
3885                }
3886
3887        get_page(page);
3888
3889        if (flags & FAULT_FLAG_WRITE) {
3890                if (!huge_pte_write(entry)) {
3891                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
3892                                        pagecache_page, ptl);
3893                        goto out_put_page;
3894                }
3895                entry = huge_pte_mkdirty(entry);
3896        }
3897        entry = pte_mkyoung(entry);
3898        if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3899                                                flags & FAULT_FLAG_WRITE))
3900                update_mmu_cache(vma, address, ptep);
3901out_put_page:
3902        if (page != pagecache_page)
3903                unlock_page(page);
3904        put_page(page);
3905out_ptl:
3906        spin_unlock(ptl);
3907
3908        if (pagecache_page) {
3909                unlock_page(pagecache_page);
3910                put_page(pagecache_page);
3911        }
3912out_mutex:
3913        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3914        /*
3915         * Generally it's safe to hold a refcount while waiting on a page
3916         * lock. But here we only wait to defer the next page fault and avoid
3917         * a busy loop; the page is not touched between being unlocked and
3918         * this fault returning. So we are safe from accessing a freed page,
3919         * even if we wait here without taking a refcount.
3920         */
3921        if (need_wait_lock)
3922                wait_on_page_locked(page);
3923        return ret;
3924}
3925
3926long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3927                         struct page **pages, struct vm_area_struct **vmas,
3928                         unsigned long *position, unsigned long *nr_pages,
3929                         long i, unsigned int flags)
3930{
3931        unsigned long pfn_offset;
3932        unsigned long vaddr = *position;
3933        unsigned long remainder = *nr_pages;
3934        struct hstate *h = hstate_vma(vma);
3935
3936        while (vaddr < vma->vm_end && remainder) {
3937                pte_t *pte;
3938                spinlock_t *ptl = NULL;
3939                int absent;
3940                struct page *page;
3941
3942                /*
3943                 * If we have a pending SIGKILL, don't keep faulting pages and
3944                 * potentially allocating memory.
3945                 */
3946                if (unlikely(fatal_signal_pending(current))) {
3947                        remainder = 0;
3948                        break;
3949                }
3950
3951                /*
3952                 * Some archs (sparc64, sh*) have multiple pte_ts for
3953                 * each hugepage.  We have to make sure we get the
3954                 * first, for the page indexing below to work.
3955                 *
3956                 * Note that page table lock is not held when pte is null.
3957                 */
3958                pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3959                if (pte)
3960                        ptl = huge_pte_lock(h, mm, pte);
3961                absent = !pte || huge_pte_none(huge_ptep_get(pte));
3962
3963                /*
3964                 * When coredumping, it suits get_dump_page if we just return
3965                 * an error where there's an empty slot with no huge pagecache
3966                 * to back it.  This way, we avoid allocating a hugepage, and
3967                 * the sparse dumpfile avoids allocating disk blocks, but its
3968                 * huge holes still show up with zeroes where they need to be.
3969                 */
3970                if (absent && (flags & FOLL_DUMP) &&
3971                    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3972                        if (pte)
3973                                spin_unlock(ptl);
3974                        remainder = 0;
3975                        break;
3976                }
3977
3978                /*
3979                 * We need to call hugetlb_fault both for hugepages under
3980                 * migration (in which case hugetlb_fault waits for the
3981                 * migration) and for hwpoisoned hugepages (in which case we
3982                 * need to prevent the caller from accessing them). To do
3983                 * this we use is_swap_pte here instead of
3984                 * is_hugetlb_entry_migration and is_hugetlb_entry_hwpoisoned,
3985                 * because it covers both cases and because we can't follow
3986                 * valid pages directly from any kind of swap entry.
3987                 */
3988                if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3989                    ((flags & FOLL_WRITE) &&
3990                      !huge_pte_write(huge_ptep_get(pte)))) {
3991                        int ret;
3992
3993                        if (pte)
3994                                spin_unlock(ptl);
3995                        ret = hugetlb_fault(mm, vma, vaddr,
3996                                (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3997                        if (!(ret & VM_FAULT_ERROR))
3998                                continue;
3999
4000                        remainder = 0;
4001                        break;
4002                }
4003
4004                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4005                page = pte_page(huge_ptep_get(pte));
4006same_page:
4007                if (pages) {
4008                        pages[i] = mem_map_offset(page, pfn_offset);
4009                        get_page(pages[i]);
4010                }
4011
4012                if (vmas)
4013                        vmas[i] = vma;
4014
4015                vaddr += PAGE_SIZE;
4016                ++pfn_offset;
4017                --remainder;
4018                ++i;
4019                if (vaddr < vma->vm_end && remainder &&
4020                                pfn_offset < pages_per_huge_page(h)) {
4021                        /*
4022                         * We use pfn_offset to avoid touching the pageframes
4023                         * of this compound page.
4024                         */
4025                        goto same_page;
4026                }
4027                spin_unlock(ptl);
4028        }
4029        *nr_pages = remainder;
4030        *position = vaddr;
4031
4032        return i ? i : -EFAULT;
4033}
4034
4035#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4036/*
4037 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4038 * implement this.
4039 */
4040#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4041#endif
4042
4043unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4044                unsigned long address, unsigned long end, pgprot_t newprot)
4045{
4046        struct mm_struct *mm = vma->vm_mm;
4047        unsigned long start = address;
4048        pte_t *ptep;
4049        pte_t pte;
4050        struct hstate *h = hstate_vma(vma);
4051        unsigned long pages = 0;
4052
4053        BUG_ON(address >= end);
4054        flush_cache_range(vma, address, end);
4055
4056        mmu_notifier_invalidate_range_start(mm, start, end);
4057        i_mmap_lock_write(vma->vm_file->f_mapping);
4058        for (; address < end; address += huge_page_size(h)) {
4059                spinlock_t *ptl;
4060                ptep = huge_pte_offset(mm, address);
4061                if (!ptep)
4062                        continue;
4063                ptl = huge_pte_lock(h, mm, ptep);
4064                if (huge_pmd_unshare(mm, &address, ptep)) {
4065                        pages++;
4066                        spin_unlock(ptl);
4067                        continue;
4068                }
4069                pte = huge_ptep_get(ptep);
4070                if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4071                        spin_unlock(ptl);
4072                        continue;
4073                }
4074                if (unlikely(is_hugetlb_entry_migration(pte))) {
4075                        swp_entry_t entry = pte_to_swp_entry(pte);
4076
4077                        if (is_write_migration_entry(entry)) {
4078                                pte_t newpte;
4079
4080                                make_migration_entry_read(&entry);
4081                                newpte = swp_entry_to_pte(entry);
4082                                set_huge_pte_at(mm, address, ptep, newpte);
4083                                pages++;
4084                        }
4085                        spin_unlock(ptl);
4086                        continue;
4087                }
4088                if (!huge_pte_none(pte)) {
4089                        pte = huge_ptep_get_and_clear(mm, address, ptep);
4090                        pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4091                        pte = arch_make_huge_pte(pte, vma, NULL, 0);
4092                        set_huge_pte_at(mm, address, ptep, pte);
4093                        pages++;
4094                }
4095                spin_unlock(ptl);
4096        }
4097        /*
4098         * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4099         * may have cleared our pud entry and done put_page on the page table:
4100         * once we release i_mmap_rwsem, another task can do the final put_page
4101         * and that page table be reused and filled with junk.
4102         */
4103        flush_hugetlb_tlb_range(vma, start, end);
4104        mmu_notifier_invalidate_range(mm, start, end);
4105        i_mmap_unlock_write(vma->vm_file->f_mapping);
4106        mmu_notifier_invalidate_range_end(mm, start, end);
4107
4108        return pages << h->order;
4109}
4110
4111int hugetlb_reserve_pages(struct inode *inode,
4112                                        long from, long to,
4113                                        struct vm_area_struct *vma,
4114                                        vm_flags_t vm_flags)
4115{
4116        long ret, chg;
4117        struct hstate *h = hstate_inode(inode);
4118        struct hugepage_subpool *spool = subpool_inode(inode);
4119        struct resv_map *resv_map;
4120        long gbl_reserve;
4121
4122        /*
4123         * Only apply hugepage reservation if asked. At fault time, an
4124         * attempt will be made for VM_NORESERVE to allocate a page
4125         * without using reserves
4126         */
4127        if (vm_flags & VM_NORESERVE)
4128                return 0;
4129
4130        /*
4131         * Shared mappings base their reservation on the number of pages that
4132         * are already allocated on behalf of the file. Private mappings need
4133         * to reserve the full area even if read-only as mprotect() may be
4134         * called to make the mapping read-write. Assume !vma is a shm mapping
4135         */
4136        if (!vma || vma->vm_flags & VM_MAYSHARE) {
4137                resv_map = inode_resv_map(inode);
4138
4139                chg = region_chg(resv_map, from, to);
4140
4141        } else {
4142                resv_map = resv_map_alloc();
4143                if (!resv_map)
4144                        return -ENOMEM;
4145
4146                chg = to - from;
4147
4148                set_vma_resv_map(vma, resv_map);
4149                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4150        }
4151
4152        if (chg < 0) {
4153                ret = chg;
4154                goto out_err;
4155        }
4156
4157        /*
4158         * There must be enough pages in the subpool for the mapping. If
4159         * the subpool has a minimum size, there may be some global
4160         * reservations already in place (gbl_reserve).
4161         */
4162        gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4163        if (gbl_reserve < 0) {
4164                ret = -ENOSPC;
4165                goto out_err;
4166        }
4167
4168        /*
4169         * Check enough hugepages are available for the reservation.
4170         * Hand the pages back to the subpool if there are not
4171         */
4172        ret = hugetlb_acct_memory(h, gbl_reserve);
4173        if (ret < 0) {
4174                /* put back original number of pages, chg */
4175                (void)hugepage_subpool_put_pages(spool, chg);
4176                goto out_err;
4177        }
4178
4179        /*
4180         * Account for the reservations made. Shared mappings record regions
4181         * that have reservations as they are shared by multiple VMAs.
4182         * When the last VMA disappears, the region map says how much
4183         * the reservation was and the page cache tells how much of
4184         * the reservation was consumed. Private mappings are per-VMA and
4185         * only the consumed reservations are tracked. When the VMA
4186         * disappears, the original reservation is the VMA size and the
4187         * consumed reservations are stored in the map. Hence, nothing
4188         * else has to be done for private mappings here
4189         */
4190        if (!vma || vma->vm_flags & VM_MAYSHARE) {
4191                long add = region_add(resv_map, from, to);
4192
4193                if (unlikely(chg > add)) {
4194                        /*
4195                         * pages in this range were added to the reserve
4196                         * map between region_chg and region_add.  This
4197                         * indicates a race with alloc_huge_page.  Adjust
4198                         * the subpool and reserve counts modified above
4199                         * based on the difference.
4200                         */
4201                        long rsv_adjust;
4202
4203                        rsv_adjust = hugepage_subpool_put_pages(spool,
4204                                                                chg - add);
4205                        hugetlb_acct_memory(h, -rsv_adjust);
4206                }
4207        }
4208        return 0;
4209out_err:
4210        if (!vma || vma->vm_flags & VM_MAYSHARE)
4211                region_abort(resv_map, from, to);
4212        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4213                kref_put(&resv_map->refs, resv_map_release);
4214        return ret;
4215}
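/*
 * Note (assumption about callers outside this file): hugetlbfs is expected
 * to call hugetlb_reserve_pages() at mmap()/segment-setup time, roughly:
 *
 *   err = hugetlb_reserve_pages(inode, vma->vm_pgoff >> huge_page_order(h),
 *                               len >> huge_page_shift(h), vma,
 *                               vma->vm_flags);
 *
 * so that later faults can consume the reservation instead of failing.
 */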
4216
4217long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4218                                                                long freed)
4219{
4220        struct hstate *h = hstate_inode(inode);
4221        struct resv_map *resv_map = inode_resv_map(inode);
4222        long chg = 0;
4223        struct hugepage_subpool *spool = subpool_inode(inode);
4224        long gbl_reserve;
4225
4226        if (resv_map) {
4227                chg = region_del(resv_map, start, end);
4228                /*
4229                 * region_del() can fail in the rare case where a region
4230                 * must be split and another region descriptor can not be
4231                 * allocated.  If end == LONG_MAX, it will not fail.
4232                 */
4233                if (chg < 0)
4234                        return chg;
4235        }
4236
4237        spin_lock(&inode->i_lock);
4238        inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4239        spin_unlock(&inode->i_lock);
4240
4241        /*
4242         * If the subpool has a minimum size, the number of global
4243         * reservations to be released may be adjusted.
4244         */
4245        gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4246        hugetlb_acct_memory(h, -gbl_reserve);
4247
4248        return 0;
4249}
4250
4251#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4252static unsigned long page_table_shareable(struct vm_area_struct *svma,
4253                                struct vm_area_struct *vma,
4254                                unsigned long addr, pgoff_t idx)
4255{
4256        unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4257                                svma->vm_start;
4258        unsigned long sbase = saddr & PUD_MASK;
4259        unsigned long s_end = sbase + PUD_SIZE;
4260
4261        /* Allow segments to share if only one is marked locked */
4262        unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4263        unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4264
4265        /*
4266         * match the virtual addresses, permission and the alignment of the
4267         * page table page.
4268         */
4269        if (pmd_index(addr) != pmd_index(saddr) ||
4270            vm_flags != svm_flags ||
4271            sbase < svma->vm_start || svma->vm_end < s_end)
4272                return 0;
4273
4274        return saddr;
4275}
4276
4277static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4278{
4279        unsigned long base = addr & PUD_MASK;
4280        unsigned long end = base + PUD_SIZE;
4281
4282        /*
4283         * check on proper vm_flags and page table alignment
4284         */
4285        if (vma->vm_flags & VM_MAYSHARE &&
4286            vma->vm_start <= base && end <= vma->vm_end)
4287                return true;
4288        return false;
4289}
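/*
 * Worked example (illustrative; exact sizes are arch-specific): on x86-64
 * with 4KB base pages, PUD_SIZE is 1GB, so two processes can share a PMD
 * page table page only where both map the same hugetlbfs offset with
 * VM_MAYSHARE VMAs that each fully cover the same 1GB-aligned, 1GB-sized
 * range with matching flags; page_table_shareable() above encodes exactly
 * these checks.
 */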
4290
4291/*
4292 * Search for a shareable pmd page for hugetlb. In any case, call pmd_alloc()
4293 * and return the corresponding pte. While this is not necessary for the
4294 * !shared pmd case because we can allocate the pmd later as well, it makes the
4295 * code much cleaner. pmd allocation is essential for the shared case because
4296 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4297 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4298 * bad pmd for sharing.
4299 */
4300pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4301{
4302        struct vm_area_struct *vma = find_vma(mm, addr);
4303        struct address_space *mapping = vma->vm_file->f_mapping;
4304        pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4305                        vma->vm_pgoff;
4306        struct vm_area_struct *svma;
4307        unsigned long saddr;
4308        pte_t *spte = NULL;
4309        pte_t *pte;
4310        spinlock_t *ptl;
4311
4312        if (!vma_shareable(vma, addr))
4313                return (pte_t *)pmd_alloc(mm, pud, addr);
4314
4315        i_mmap_lock_write(mapping);
4316        vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4317                if (svma == vma)
4318                        continue;
4319
4320                saddr = page_table_shareable(svma, vma, addr, idx);
4321                if (saddr) {
4322                        spte = huge_pte_offset(svma->vm_mm, saddr);
4323                        if (spte) {
4324                                get_page(virt_to_page(spte));
4325                                break;
4326                        }
4327                }
4328        }
4329
4330        if (!spte)
4331                goto out;
4332
4333        ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4334        spin_lock(ptl);
4335        if (pud_none(*pud)) {
4336                pud_populate(mm, pud,
4337                                (pmd_t *)((unsigned long)spte & PAGE_MASK));
4338                mm_inc_nr_pmds(mm);
4339        } else {
4340                put_page(virt_to_page(spte));
4341        }
4342        spin_unlock(ptl);
4343out:
4344        pte = (pte_t *)pmd_alloc(mm, pud, addr);
4345        i_mmap_unlock_write(mapping);
4346        return pte;
4347}
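
/*
 * Illustrative sketch of the caller side (see huge_pte_alloc() below for
 * the generic variant): the sharing path is only tried for PMD-sized
 * pages while the pud is still empty, e.g.
 *
 *	if (want_pmd_share() && pud_none(*pud))
 *		pte = huge_pmd_share(mm, addr, pud);
 *	else
 *		pte = (pte_t *)pmd_alloc(mm, pud, addr);
 */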
4348
4349/*
4350 * Unmap a huge page backed by a shared pte (page table) page.
4351 *
4352 * The hugetlb pte page is refcounted at mapping time.  If the pte is shared,
4353 * as indicated by page_count > 1, unmapping is done by clearing the pud and
4354 * dropping a reference.  If the count == 1, the pte page is not shared.
4355 *
4356 * Called with the page table lock held.
4357 *
4358 * Returns: 1 - successfully unmapped a shared pte page
4359 *          0 - the underlying pte page is not shared, or it is the last user
4360 */
4361int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4362{
4363        pgd_t *pgd = pgd_offset(mm, *addr);
4364        pud_t *pud = pud_offset(pgd, *addr);
4365
4366        BUG_ON(page_count(virt_to_page(ptep)) == 0);
4367        if (page_count(virt_to_page(ptep)) == 1)
4368                return 0;
4369
4370        pud_clear(pud);
4371        put_page(virt_to_page(ptep));
4372        mm_dec_nr_pmds(mm);
4373        *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4374        return 1;
4375}
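
/*
 * Hedged sketch of a typical caller (names illustrative): page table
 * walkers that may tear down a shared pmd check the return value under
 * the page table lock and, on success, simply move on -- the *addr
 * adjustment above makes the caller's loop continue past the range the
 * shared page table covered.  Roughly:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	if (huge_pmd_unshare(mm, &address, ptep)) {
 *		spin_unlock(ptl);
 *		continue;
 *	}
 */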
4376#define want_pmd_share()        (1)
4377#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4378pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4379{
4380        return NULL;
4381}
4382
4383int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4384{
4385        return 0;
4386}
4387#define want_pmd_share()        (0)
4388#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4389
4390#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
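/*
 * Allocate the page table entries needed to map a huge page of size 'sz'
 * at 'addr'.  PUD_SIZE pages are mapped by the pud entry itself; for
 * PMD_SIZE pages a pmd is allocated, or an existing one is shared via
 * huge_pmd_share() when the architecture allows it and the pud is still
 * empty.
 */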
4391pte_t *huge_pte_alloc(struct mm_struct *mm,
4392                        unsigned long addr, unsigned long sz)
4393{
4394        pgd_t *pgd;
4395        pud_t *pud;
4396        pte_t *pte = NULL;
4397
4398        pgd = pgd_offset(mm, addr);
4399        pud = pud_alloc(mm, pgd, addr);
4400        if (pud) {
4401                if (sz == PUD_SIZE) {
4402                        pte = (pte_t *)pud;
4403                } else {
4404                        BUG_ON(sz != PMD_SIZE);
4405                        if (want_pmd_share() && pud_none(*pud))
4406                                pte = huge_pmd_share(mm, addr, pud);
4407                        else
4408                                pte = (pte_t *)pmd_alloc(mm, pud, addr);
4409                }
4410        }
4411        BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4412
4413        return pte;
4414}
4415
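/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the page tables for
 * 'addr' without allocating anything.  Returns the pud entry for a
 * PUD-sized huge mapping, the pmd pointer otherwise, or NULL when the
 * pgd or pud level is not present.
 */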
4416pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4417{
4418        pgd_t *pgd;
4419        pud_t *pud;
4420        pmd_t *pmd = NULL;
4421
4422        pgd = pgd_offset(mm, addr);
4423        if (pgd_present(*pgd)) {
4424                pud = pud_offset(pgd, addr);
4425                if (pud_present(*pud)) {
4426                        if (pud_huge(*pud))
4427                                return (pte_t *)pud;
4428                        pmd = pmd_offset(pud, addr);
4429                }
4430        }
4431        return (pte_t *) pmd;
4432}
4433
4434#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4435
4436/*
4437 * These functions can be overridden by the architecture if it needs its
4438 * own behavior.
4439 */
4440struct page * __weak
4441follow_huge_addr(struct mm_struct *mm, unsigned long address,
4442                              int write)
4443{
4444        return ERR_PTR(-EINVAL);
4445}
4446
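/*
 * follow_page_mask() helper for a PMD-mapped huge page: return the
 * subpage that 'address' falls in, taking a reference if FOLL_GET is
 * set.  A migration entry makes us wait and retry; a hwpoison entry
 * yields NULL.
 */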
4447struct page * __weak
4448follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4449                pmd_t *pmd, int flags)
4450{
4451        struct page *page = NULL;
4452        spinlock_t *ptl;
4453retry:
4454        ptl = pmd_lockptr(mm, pmd);
4455        spin_lock(ptl);
4456        /*
4457         * Make sure that the address range covered by this pmd is not
4458         * unmapped by other threads.
4459         */
4460        if (!pmd_huge(*pmd))
4461                goto out;
4462        if (pmd_present(*pmd)) {
4463                page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4464                if (flags & FOLL_GET)
4465                        get_page(page);
4466        } else {
4467                if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4468                        spin_unlock(ptl);
4469                        __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4470                        goto retry;
4471                }
4472                /*
4473                 * hwpoisoned entry is treated as no_page_table in
4474                 * follow_page_mask().
4475                 */
4476        }
4477out:
4478        spin_unlock(ptl);
4479        return page;
4480}
4481
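/*
 * PUD-level counterpart of follow_huge_pmd().  Taking a reference is not
 * supported here, so FOLL_GET callers get NULL; otherwise return the
 * subpage of the huge page that 'address' falls in.
 */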
4482struct page * __weak
4483follow_huge_pud(struct mm_struct *mm, unsigned long address,
4484                pud_t *pud, int flags)
4485{
4486        if (flags & FOLL_GET)
4487                return NULL;
4488
4489        return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4490}
4491
4492#ifdef CONFIG_MEMORY_FAILURE
4493
4494/*
4495 * Called from the memory-failure code to dequeue a hwpoisoned free hugepage.
4496 */
4497int dequeue_hwpoisoned_huge_page(struct page *hpage)
4498{
4499        struct hstate *h = page_hstate(hpage);
4500        int nid = page_to_nid(hpage);
4501        int ret = -EBUSY;
4502
4503        spin_lock(&hugetlb_lock);
4504        /*
4505         * Just checking !page_huge_active is not enough, because that could be
4506         * an isolated/hwpoisoned hugepage (which has a refcount > 0).
4507         */
4508        if (!page_huge_active(hpage) && !page_count(hpage)) {
4509                /*
4510                 * Hwpoisoned hugepage isn't linked to activelist or freelist,
4511                 * but dangling hpage->lru can trigger list-debug warnings
4512                 * (this happens when we call unpoison_memory() on it),
4513                 * so let it point to itself with list_del_init().
4514                 */
4515                list_del_init(&hpage->lru);
4516                set_page_refcounted(hpage);
4517                h->free_huge_pages--;
4518                h->free_huge_pages_node[nid]--;
4519                ret = 0;
4520        }
4521        spin_unlock(&hugetlb_lock);
4522        return ret;
4523}
4524#endif
4525
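/*
 * Isolate an in-use huge page, typically for migration: take a reference
 * and move it from its hstate's active list onto the caller's 'list'.
 * Fails if the page is not active or its refcount is already zero.
 */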
4526bool isolate_huge_page(struct page *page, struct list_head *list)
4527{
4528        bool ret = true;
4529
4530        VM_BUG_ON_PAGE(!PageHead(page), page);
4531        spin_lock(&hugetlb_lock);
4532        if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4533                ret = false;
4534                goto unlock;
4535        }
4536        clear_page_huge_active(page);
4537        list_move_tail(&page->lru, list);
4538unlock:
4539        spin_unlock(&hugetlb_lock);
4540        return ret;
4541}
4542
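/*
 * Undo isolate_huge_page(): mark the page active again, put it back on
 * its hstate's active list and drop the reference taken at isolation.
 */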
4543void putback_active_hugepage(struct page *page)
4544{
4545        VM_BUG_ON_PAGE(!PageHead(page), page);
4546        spin_lock(&hugetlb_lock);
4547        set_page_huge_active(page);
4548        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4549        spin_unlock(&hugetlb_lock);
4550        put_page(page);
4551}
4552