linux/mm/hugetlb.c
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
        bool free = (spool->count == 0) && (spool->used_hpages == 0);

        spin_unlock(&spool->lock);

        /*
         * If no pages are used, and no other handles to the subpool
         * remain, give up any reservations based on minimum size and
         * free the subpool.
         */
        if (free) {
                if (spool->min_hpages != -1)
                        hugetlb_acct_memory(spool->hstate,
                                                -spool->min_hpages);
                kfree(spool);
        }
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages)
{
        struct hugepage_subpool *spool;

        spool = kzalloc(sizeof(*spool), GFP_KERNEL);
        if (!spool)
                return NULL;

        spin_lock_init(&spool->lock);
        spool->count = 1;
        spool->max_hpages = max_hpages;
        spool->hstate = h;
        spool->min_hpages = min_hpages;

        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
                kfree(spool);
                return NULL;
        }
        spool->rsv_hpages = min_hpages;

        return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
        spin_lock(&spool->lock);
        BUG_ON(!spool->count);
        spool->count--;
        unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy
 * the request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                                      long delta)
{
        long ret = delta;

        if (!spool)
                return ret;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1) {          /* maximum size accounting */
                if ((spool->used_hpages + delta) <= spool->max_hpages)
                        spool->used_hpages += delta;
                else {
                        ret = -ENOMEM;
                        goto unlock_ret;
                }
        }

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->rsv_hpages) {
                if (delta > spool->rsv_hpages) {
                        /*
                         * Asking for more reserves than those already taken on
                         * behalf of subpool.  Return difference.
                         */
                        ret = delta - spool->rsv_hpages;
                        spool->rsv_hpages = 0;
                } else {
                        ret = 0;        /* reserves already accounted for */
                        spool->rsv_hpages -= delta;
                }
        }

unlock_ret:
        spin_unlock(&spool->lock);
        return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        long ret = delta;

        if (!spool)
                return delta;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1)            /* maximum size accounting */
                spool->used_hpages -= delta;

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
                if (spool->rsv_hpages + delta <= spool->min_hpages)
                        ret = 0;
                else
                        ret = spool->rsv_hpages + delta - spool->min_hpages;

                spool->rsv_hpages += delta;
                if (spool->rsv_hpages > spool->min_hpages)
                        spool->rsv_hpages = spool->min_hpages;
        }

        /*
         * If hugetlbfs_put_super couldn't free spool due to an outstanding
         * quota reference, free it now.
         */
        unlock_or_release_subpool(spool);

        return ret;
}
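
/*
 * Illustrative example (not part of the original source): suppose a
 * subpool was created with min_hpages = 2 and max_hpages = -1, so
 * rsv_hpages starts at 2 with two pages pre-accounted in the global pool
 * by hugepage_new_subpool().
 *
 *      hugepage_subpool_get_pages(spool, 1);   // returns 0: covered by the
 *                                              // subpool reserve, rsv_hpages -> 1
 *      hugepage_subpool_get_pages(spool, 3);   // returns 2: one page comes
 *                                              // from the reserve, two must
 *                                              // be added to the global pools
 *      hugepage_subpool_put_pages(spool, 1);   // returns 0 while used_hpages
 *                                              // is below min_hpages; the page
 *                                              // refills the subpool reserve
 *
 * The return value is always the adjustment the *global* counters need,
 * which is why callers such as hugetlb_fix_reserve_counts() below feed it
 * into hugetlb_acct_memory().
 */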

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
        return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
        return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg, *trg;
        long add = 0;

        spin_lock(&resv->lock);
        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /*
         * If no region exists which can be expanded to include the
         * specified range, the list must have been modified by an
         * interleaving call to region_del().  Pull a region descriptor
         * from the cache and use it for this range.
         */
        if (&rg->link == head || t < rg->from) {
                VM_BUG_ON(resv->region_cache_count <= 0);

                resv->region_cache_count--;
                nrg = list_first_entry(&resv->region_cache, struct file_region,
                                        link);
                list_del(&nrg->link);

                nrg->from = f;
                nrg->to = t;
                list_add(&nrg->link, rg->link.prev);

                add += t - f;
                goto out_locked;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /*
                 * If this area reaches higher, then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it.
                 */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        /*
                         * Decrement the return value by the deleted range.
                         * Another range will span this area so that by
                         * the end of the routine add will be >= zero.
                         */
                        add -= (rg->to - rg->from);
                        list_del(&rg->link);
                        kfree(rg);
                }
        }

        add += (nrg->from - f);         /* Added to beginning of region */
        nrg->from = f;
        add += t - nrg->to;             /* Added to end of region */
        nrg->to = t;

out_locked:
        resv->adds_in_progress--;
        spin_unlock(&resv->lock);
        VM_BUG_ON(add < 0);
        return add;
}
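
/*
 * Worked example (illustrative only): with existing regions [0, 2) and
 * [3, 5), calling region_add(resv, 1, 4) rounds f down to 0 (the
 * enclosing segment), absorbs [3, 5) by extending t to 5, and leaves a
 * single region [0, 5).  The return value is 1: of the five indices now
 * covered, four (0, 1, 3, 4) were already represented by the two old
 * regions.
 */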

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map cannot
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg = NULL;
        long chg = 0;

retry:
        spin_lock(&resv->lock);
retry_locked:
        resv->adds_in_progress++;

        /*
         * Check for sufficient descriptors in the cache to accommodate
         * the number of in progress add operations.
         */
        if (resv->adds_in_progress > resv->region_cache_count) {
                struct file_region *trg;

                VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
                /* Must drop lock to allocate a new descriptor. */
                resv->adds_in_progress--;
                spin_unlock(&resv->lock);

                trg = kmalloc(sizeof(*trg), GFP_KERNEL);
                if (!trg) {
                        kfree(nrg);
                        return -ENOMEM;
                }

                spin_lock(&resv->lock);
                list_add(&trg->link, &resv->region_cache);
                resv->region_cache_count++;
                goto retry_locked;
        }

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /*
         * If we are below the current region then a new region is required.
         * Subtle: allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation.
         */
        if (&rg->link == head || t < rg->from) {
                if (!nrg) {
                        resv->adds_in_progress--;
                        spin_unlock(&resv->lock);
                        nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                        if (!nrg)
                                return -ENOMEM;

                        nrg->from = f;
                        nrg->to   = f;
                        INIT_LIST_HEAD(&nrg->link);
                        goto retry;
                }

                list_add(&nrg->link, rg->link.prev);
                chg = t - f;
                goto out_nrg;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        goto out;

                /*
                 * We overlap with this area; if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation.
                 */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }

out:
        spin_unlock(&resv->lock);
        /* We already know we raced and no longer need the new region */
        kfree(nrg);
        return chg;
out_nrg:
        spin_unlock(&resv->lock);
        return chg;
}
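
/*
 * Illustrative pairing (a sketch, not from the original source): callers
 * reserve pages in two phases so that the second phase cannot fail:
 *
 *      chg = region_chg(resv, idx, idx + 1);   // may sleep, may return -ENOMEM
 *      if (chg < 0)
 *              return chg;
 *      // ...charge the subpool/global counters by chg...
 *      add = region_add(resv, idx, idx + 1);   // uses the cached/placeholder
 *                                              // descriptors, so it never fails
 *
 * If the charging step fails instead, region_abort(resv, idx, idx + 1)
 * drops the in-progress count that region_chg took.
 */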

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
        spin_lock(&resv->lock);
        VM_BUG_ON(!resv->region_cache_count);
        resv->adds_in_progress--;
        spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *trg;
        struct file_region *nrg = NULL;
        long del = 0;

retry:
        spin_lock(&resv->lock);
        list_for_each_entry_safe(rg, trg, head, link) {
                /*
                 * Skip regions before the range to be deleted.  file_region
                 * ranges are normally of the form [from, to).  However, there
                 * may be a "placeholder" entry in the map which is of the form
                 * (from, to) with from == to.  Check for placeholder entries
                 * at the beginning of the range to be deleted.
                 */
                if (rg->to <= f && (rg->to != rg->from || rg->to != f))
                        continue;

                if (rg->from >= t)
                        break;

                if (f > rg->from && t < rg->to) { /* Must split region */
                        /*
                         * Check for an entry in the cache before dropping
                         * lock and attempting allocation.
                         */
                        if (!nrg &&
                            resv->region_cache_count > resv->adds_in_progress) {
                                nrg = list_first_entry(&resv->region_cache,
                                                        struct file_region,
                                                        link);
                                list_del(&nrg->link);
                                resv->region_cache_count--;
                        }

                        if (!nrg) {
                                spin_unlock(&resv->lock);
                                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                                if (!nrg)
                                        return -ENOMEM;
                                goto retry;
                        }

                        del += t - f;

                        /* New entry for end of split region */
                        nrg->from = t;
                        nrg->to = rg->to;
                        INIT_LIST_HEAD(&nrg->link);

                        /* Original entry is trimmed */
                        rg->to = f;

                        list_add(&nrg->link, &rg->link);
                        nrg = NULL;
                        break;
                }

                if (f <= rg->from && t >= rg->to) { /* Remove entire region */
                        del += rg->to - rg->from;
                        list_del(&rg->link);
                        kfree(rg);
                        continue;
                }

                if (f <= rg->from) {    /* Trim beginning of region */
                        del += t - rg->from;
                        rg->from = t;
                } else {                /* Trim end of region */
                        del += rg->to - f;
                        rg->to = f;
                }
        }

        spin_unlock(&resv->lock);
        kfree(nrg);
        return del;
}
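
/*
 * Worked example (illustrative only): deleting [2, 4) from a map holding
 * the single region [0, 8) takes the "must split" path: the original
 * entry is trimmed to [0, 2), a descriptor is pulled from the cache (or
 * kmalloc'ed) for the tail [4, 8), and the return value is 4 - 2 = 2
 * pages removed from the map.
 */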

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
        struct hugepage_subpool *spool = subpool_inode(inode);
        long rsv_adjust;

        rsv_adjust = hugepage_subpool_get_pages(spool, 1);
        if (rsv_adjust) {
                struct hstate *h = hstate_inode(inode);

                hugetlb_acct_memory(h, 1);
        }
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg;
        long chg = 0;

        spin_lock(&resv->lock);
        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                long seg_from;
                long seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }
        spin_unlock(&resv->lock);

        return chg;
}
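
/*
 * Example (illustrative only): with regions [0, 2) and [5, 9) in the map,
 * region_count(resv, 1, 7) clamps each overlap to the query range and
 * sums (2 - 1) + (7 - 5) = 3 intersecting huge pages.
 */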

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}
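
/*
 * Worked example (illustrative only): for a 2 MB hstate
 * (huge_page_shift == 21, huge_page_order == 9), a vma starting at
 * 0x40000000 with vm_pgoff == 1024 (a 4 MB file offset in 4 KB units),
 * and address == 0x40400000:
 *
 *      (0x40400000 - 0x40000000) >> 21  ==  2   huge pages into the vma
 *      1024 >> 9                        ==  2   huge pages of file offset
 *
 * giving a pagecache index of 4, in huge-page-sized units.
 */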

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address)
{
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        if (vma->vm_ops && vma->vm_ops->pagesize)
                return vma->vm_ops->pagesize(vma);
        return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
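
/*
 * Sketch of the encoding (illustrative only): because a kmalloc'ed
 * resv_map is at least word-aligned, its two low pointer bits are free
 * to carry flags, e.g.:
 *
 *      unsigned long v = (unsigned long)map | HPAGE_RESV_OWNER;
 *      struct resv_map *m = (struct resv_map *)(v & ~HPAGE_RESV_MASK);
 *      bool owner = v & HPAGE_RESV_OWNER;
 *
 * This is what the vma_resv_map()/set_vma_resv_flags() helpers below do
 * with vma->vm_private_data for private mappings.
 */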

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
                                                        unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

        if (!resv_map || !rg) {
                kfree(resv_map);
                kfree(rg);
                return NULL;
        }

        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);

        resv_map->adds_in_progress = 0;

        INIT_LIST_HEAD(&resv_map->region_cache);
        list_add(&rg->link, &resv_map->region_cache);
        resv_map->region_cache_count = 1;

        return resv_map;
}

void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
        struct list_head *head = &resv_map->region_cache;
        struct file_region *rg, *trg;

        /* Clear out any active regions before we release the map. */
        region_del(resv_map, 0, LONG_MAX);

        /* ... and any entries left in the cache */
        list_for_each_entry_safe(rg, trg, head, link) {
                list_del(&rg->link);
                kfree(rg);
        }

        VM_BUG_ON(resv_map->adds_in_progress);

        kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
        return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (vma->vm_flags & VM_MAYSHARE) {
                struct address_space *mapping = vma->vm_file->f_mapping;
                struct inode *inode = mapping->host;

                return inode_resv_map(inode);

        } else {
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        }
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

        return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
        if (vma->vm_flags & VM_NORESERVE) {
                /*
                 * This address is already reserved by another process
                 * (chg == 0), so we should decrement the reserved count.
                 * Without decrementing, the reserve count remains after
                 * releasing the inode, because this allocated page will go
                 * into the page cache and is regarded as coming from the
                 * reserved pool in the releasing step.  Currently, we
                 * don't have any other solution to deal with this situation
                 * properly, so add this work-around here.
                 */
                if (vma->vm_flags & VM_MAYSHARE && chg == 0)
                        return true;
                else
                        return false;
        }

        /* Shared mappings always use reserves */
        if (vma->vm_flags & VM_MAYSHARE) {
                /*
                 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
                 * be a region map for all pages.  The only situation where
                 * there is no region map is if a hole was punched via
                 * fallocate.  In this case, there really are no reserves to
                 * use.  This situation is indicated if chg != 0.
                 */
                if (chg)
                        return false;
                else
                        return true;
        }

        /*
         * Only the process that called mmap() has reserves for
         * private mappings.
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                /*
                 * Like the shared case above, a hole punch or truncate
                 * could have been performed on the private mapping.
                 * Examine the value of chg to determine if reserves
                 * actually exist or were previously consumed.
                 * Very Subtle - The value of chg comes from a previous
                 * call to vma_needs_reserves().  The reserve map for
                 * private mappings has different (opposite) semantics
                 * than that of shared mappings.  vma_needs_reserves()
                 * has already taken this difference in semantics into
                 * account.  Therefore, the meaning of chg is the same
                 * as in the shared case above.  Code could easily be
                 * combined, but keeping it separate draws attention to
                 * subtle differences.
                 */
                if (chg)
                        return false;
                else
                        return true;
        }

        return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
        struct page *page;

        list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
                if (!PageHWPoison(page))
                        break;
        /*
         * If no free, non-poisoned hugepage was found on the list,
         * the allocation fails.
         */
        if (&h->hugepage_freelists[nid] == &page->lru)
                return NULL;
        list_move(&page->lru, &h->hugepage_activelist);
        set_page_refcounted(page);
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
        return page;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
                nodemask_t *nmask)
{
        unsigned int cpuset_mems_cookie;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
        int node = -1;

        zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();
        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
                struct page *page;

                if (!cpuset_zone_allowed(zone, gfp_mask))
                        continue;
                /*
                 * No need to ask again on the same node. The pool is node
                 * rather than zone aware.
                 */
                if (zone_to_nid(zone) == node)
                        continue;
                node = zone_to_nid(zone);

                page = dequeue_huge_page_node_exact(h, node);
                if (page)
                        return page;
        }
        if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;

        return NULL;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_migration_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve,
                                long chg)
{
        struct page *page;
        struct mempolicy *mpol;
        gfp_t gfp_mask;
        nodemask_t *nodemask;
        int nid;

        /*
         * A child process whose MAP_PRIVATE mappings were created by its
         * parent has no page reserves. This check ensures that reservations
         * are not "stolen". The child may still get SIGKILLed.
         */
        if (!vma_has_reserves(vma, chg) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

        gfp_mask = htlb_alloc_mask(h);
        nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
        page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
                SetPagePrivate(page);
                h->resv_huge_pages--;
        }

        mpol_cond_put(mpol);
        return page;

err:
        return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        nid = next_node_in(nid, *nodes_allowed);
        VM_BUG_ON(nid >= MAX_NUMNODES);

        return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        if (!node_isset(nid, *nodes_allowed))
                nid = next_node_allowed(nid, nodes_allowed);
        return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
                                        nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

        return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
        h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

        return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
        for (nr_nodes = nodes_weight(*mask);                            \
                nr_nodes > 0 &&                                         \
                ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
                nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
        for (nr_nodes = nodes_weight(*mask);                            \
                nr_nodes > 0 &&                                         \
                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
                nr_nodes--)
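
/*
 * Usage sketch (illustrative only): the macros make one pass over the
 * allowed nodes, starting at the hstate's saved position so that repeated
 * calls interleave across nodes:
 *
 *      int nr_nodes, node;
 *
 *      for_each_node_mask_to_alloc(h, nr_nodes, node,
 *                                  &node_states[N_MEMORY]) {
 *              if (try_to_allocate_on(node))   // hypothetical helper
 *                      break;
 *      }
 *
 * nr_nodes bounds the loop to one visit per allowed node, and node always
 * holds an allowed node id because the helpers skip disallowed ones.
 */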

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
                                        unsigned int order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        atomic_set(compound_mapcount_ptr(page), 0);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                clear_compound_head(p);
                set_page_refcounted(p);
        }

        set_compound_order(page, 0);
        __ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
        free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
                                unsigned long nr_pages, gfp_t gfp_mask)
{
        unsigned long end_pfn = start_pfn + nr_pages;
        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
                                  gfp_mask);
}

static bool pfn_range_valid_gigantic(struct zone *z,
                        unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long i, end_pfn = start_pfn + nr_pages;
        struct page *page;

        for (i = start_pfn; i < end_pfn; i++) {
                if (!pfn_valid(i))
                        return false;

                page = pfn_to_page(i);

                if (page_zone(page) != z)
                        return false;

                if (PageReserved(page))
                        return false;

                if (page_count(page) > 0)
                        return false;

                if (PageHuge(page))
                        return false;
        }

        return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
                        unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long last_pfn = start_pfn + nr_pages - 1;
        return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
                int nid, nodemask_t *nodemask)
{
        unsigned int order = huge_page_order(h);
        unsigned long nr_pages = 1 << order;
        unsigned long ret, pfn, flags;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;

        zonelist = node_zonelist(nid, gfp_mask);
        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
                spin_lock_irqsave(&zone->lock, flags);

                pfn = ALIGN(zone->zone_start_pfn, nr_pages);
                while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
                        if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
                                /*
                                 * We release the zone lock here because
                                 * alloc_contig_range() will also lock the zone
                                 * at some point. If there's an allocation
                                 * spinning on this lock, it may win the race
                                 * and cause alloc_contig_range() to fail...
                                 */
                                spin_unlock_irqrestore(&zone->lock, flags);
                                ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
                                if (!ret)
                                        return pfn_to_page(pfn);
                                spin_lock_irqsave(&zone->lock, flags);
                        }
                        pfn += nr_pages;
                }

                spin_unlock_irqrestore(&zone->lock, flags);
        }

        return NULL;
}
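
/*
 * Example of the scan arithmetic (illustrative only): for a 1 GB
 * gigantic page on x86-64 (order 18 with 4 KB base pages),
 * nr_pages == 1 << 18 == 0x40000 pfns.  A zone starting at pfn 0x10000
 * is first rounded up with ALIGN(0x10000, 0x40000) to pfn 0x40000, and
 * each failed candidate advances the scan by a full 0x40000 pfns, so
 * only naturally aligned 1 GB ranges are ever tried.
 */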

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static inline bool gigantic_page_supported(void) { return false; }
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
                int nid, nodemask_t *nodemask) { return NULL; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned int order) { }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
        int i;

        if (hstate_is_gigantic(h) && !gigantic_page_supported())
                return;

        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_private |
                                1 << PG_writeback);
        }
        VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
        set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
        set_page_refcounted(page);
        if (hstate_is_gigantic(h)) {
                destroy_compound_gigantic_page(page, huge_page_order(h));
                free_gigantic_page(page, huge_page_order(h));
        } else {
                __free_pages(page, huge_page_order(h));
        }
}

struct hstate *size_to_hstate(unsigned long size)
{
        struct hstate *h;

        for_each_hstate(h) {
                if (huge_page_size(h) == size)
                        return h;
        }
        return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
        SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
        ClearPagePrivate(&page[1]);
}

/*
 * Internal hugetlb specific page flag. Do not use outside of the hugetlb
 * code.
 */
static inline bool PageHugeTemporary(struct page *page)
{
        if (!PageHuge(page))
                return false;

        return (unsigned long)page[2].mapping == -1U;
}

static inline void SetPageHugeTemporary(struct page *page)
{
        page[2].mapping = (void *)-1U;
}

static inline void ClearPageHugeTemporary(struct page *page)
{
        page[2].mapping = NULL;
}

void free_huge_page(struct page *page)
{
        /*
         * Can't pass hstate in here because it is called from the
         * compound page destructor.
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
        struct hugepage_subpool *spool =
                (struct hugepage_subpool *)page_private(page);
        bool restore_reserve;

        set_page_private(page, 0);
        page->mapping = NULL;
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page), page);
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);

        /*
         * A return code of zero implies that the subpool will be under its
         * minimum size if the reservation is not restored after the page is
         * freed.  Therefore, force the restore_reserve operation.
         */
        if (hugepage_subpool_put_pages(spool, 1) == 0)
                restore_reserve = true;

        spin_lock(&hugetlb_lock);
        clear_page_huge_active(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        if (restore_reserve)
                h->resv_huge_pages++;

        if (PageHugeTemporary(page)) {
                list_del(&page->lru);
                ClearPageHugeTemporary(page);
                update_and_free_page(h, page);
        } else if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
                list_del(&page->lru);
                update_and_free_page(h, page);
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
        } else {
                arch_clear_hugepage_flags(page);
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
        spin_lock(&hugetlb_lock);
        set_hugetlb_cgroup(page, NULL);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
}

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __ClearPageReserved(page);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                /*
                 * For gigantic hugepages allocated through bootmem at
                 * boot, it's safer to be consistent with the not-gigantic
                 * hugepages and clear the PG_reserved bit from all tail pages
                 * too.  Otherwise drivers using get_user_pages() to access
                 * tail pages may get the reference counting wrong if they see
                 * PG_reserved set on a tail page (despite the head page not
                 * having PG_reserved set).  Enforcing this consistency between
                 * head and tail pages allows drivers to optimize away a check
                 * on the head page when they need to know if put_page() is
                 * needed after get_user_pages().
                 */
                __ClearPageReserved(p);
                set_page_count(p, 0);
                set_compound_head(p, page);
        }
        atomic_set(compound_mapcount_ptr(page), -1);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
        if (!PageCompound(page))
                return 0;

        page = compound_head(page);
        return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
        if (!PageHead(page_head))
                return 0;

        return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
        struct page *page_head = compound_head(page);
        pgoff_t index = page_index(page_head);
        unsigned long compound_idx;

        if (!PageHuge(page_head))
                return page_index(page);

        if (compound_order(page_head) >= MAX_ORDER)
                compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
        else
                compound_idx = page - page_head;

        return (index << compound_order(page_head)) + compound_idx;
}
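
/*
 * Worked example (illustrative only): for a base page at offset 3 within
 * a 2 MB huge page (order 9) whose head page has pagecache index 4, the
 * returned base-page index is (4 << 9) + 3 == 2051.  The pfn-based branch
 * exists because the tail struct pages of >= MAX_ORDER (gigantic)
 * compound pages may not be contiguous in the mem_map, so plain pointer
 * arithmetic on struct page is only valid for the smaller orders.
 */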
1374
1375static struct page *alloc_buddy_huge_page(struct hstate *h,
1376                gfp_t gfp_mask, int nid, nodemask_t *nmask)
1377{
1378        int order = huge_page_order(h);
1379        struct page *page;
1380
1381        gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1382        if (nid == NUMA_NO_NODE)
1383                nid = numa_mem_id();
1384        page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1385        if (page)
1386                __count_vm_event(HTLB_BUDDY_PGALLOC);
1387        else
1388                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1389
1390        return page;
1391}
1392
1393/*
1394 * Common helper to allocate a fresh hugetlb page. All specific allocators
1395 * should use this function to get new hugetlb pages
1396 */
1397static struct page *alloc_fresh_huge_page(struct hstate *h,
1398                gfp_t gfp_mask, int nid, nodemask_t *nmask)
1399{
1400        struct page *page;
1401
1402        if (hstate_is_gigantic(h))
1403                page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1404        else
1405                page = alloc_buddy_huge_page(h, gfp_mask,
1406                                nid, nmask);
1407        if (!page)
1408                return NULL;
1409
1410        if (hstate_is_gigantic(h))
1411                prep_compound_gigantic_page(page, huge_page_order(h));
1412        prep_new_huge_page(h, page, page_to_nid(page));
1413
1414        return page;
1415}
1416
1417/*
1418 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
1419 * manner.
1420 */
1421static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1422{
1423        struct page *page;
1424        int nr_nodes, node;
1425        gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1426
1427        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1428                page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1429                if (page)
1430                        break;
1431        }
1432
1433        if (!page)
1434                return 0;
1435
1436        put_page(page); /* free it into the hugepage allocator */
1437
1438        return 1;
1439}
1440
1441/*
1442 * Free huge page from pool from next node to free.
1443 * Attempt to keep persistent huge pages more or less
1444 * balanced over allowed nodes.
1445 * Called with hugetlb_lock locked.
1446 */
1447static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1448                                                         bool acct_surplus)
1449{
1450        int nr_nodes, node;
1451        int ret = 0;
1452
1453        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1454                /*
1455                 * If we're returning unused surplus pages, only examine
1456                 * nodes with surplus pages.
1457                 */
1458                if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1459                    !list_empty(&h->hugepage_freelists[node])) {
1460                        struct page *page =
1461                                list_entry(h->hugepage_freelists[node].next,
1462                                          struct page, lru);
1463                        list_del(&page->lru);
1464                        h->free_huge_pages--;
1465                        h->free_huge_pages_node[node]--;
1466                        if (acct_surplus) {
1467                                h->surplus_huge_pages--;
1468                                h->surplus_huge_pages_node[node]--;
1469                        }
1470                        update_and_free_page(h, page);
1471                        ret = 1;
1472                        break;
1473                }
1474        }
1475
1476        return ret;
1477}
1478
1479/*
1480 * Dissolve a given free hugepage into free buddy pages. This function does
1481 * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1482 * dissolution fails because the given page is not a free hugepage, or because
1483 * free hugepages are fully reserved.
1484 */
1485int dissolve_free_huge_page(struct page *page)
1486{
1487        int rc = -EBUSY;
1488
1489        spin_lock(&hugetlb_lock);
1490        if (PageHuge(page) && !page_count(page)) {
1491                struct page *head = compound_head(page);
1492                struct hstate *h = page_hstate(head);
1493                int nid = page_to_nid(head);
1494                if (h->free_huge_pages - h->resv_huge_pages == 0)
1495                        goto out;
1496                /*
1497                 * Move the PageHWPoison flag from the head page to the raw error
1498                 * page, which keeps all subpages except the error page reusable.
1499                 */
1500                if (PageHWPoison(head) && page != head) {
1501                        SetPageHWPoison(page);
1502                        ClearPageHWPoison(head);
1503                }
1504                list_del(&head->lru);
1505                h->free_huge_pages--;
1506                h->free_huge_pages_node[nid]--;
1507                h->max_huge_pages--;
1508                update_and_free_page(h, head);
1509                rc = 0;
1510        }
1511out:
1512        spin_unlock(&hugetlb_lock);
1513        return rc;
1514}
1515
1516/*
1517 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1518 * make specified memory blocks removable from the system.
1519 * Note that this will dissolve a free gigantic hugepage completely, if any
1520 * part of it lies within the given range.
1521 * Also note that if dissolve_free_huge_page() returns with an error, the
1522 * free hugepages dissolved before that error are not restored.
1523 */
1524int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1525{
1526        unsigned long pfn;
1527        struct page *page;
1528        int rc = 0;
1529
1530        if (!hugepages_supported())
1531                return rc;
1532
1533        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1534                page = pfn_to_page(pfn);
1535                if (PageHuge(page) && !page_count(page)) {
1536                        rc = dissolve_free_huge_page(page);
1537                        if (rc)
1538                                break;
1539                }
1540        }
1541
1542        return rc;
1543}
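
/*
 * Sketch of the intended use from the memory-hotplug offline path
 * (illustrative; the surrounding label name is hypothetical):
 *
 *	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
 *	if (ret)
 *		goto failed_removal;	// abort the offline of this range
 */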
1544
1545/*
1546 * Allocates a fresh surplus page from the page allocator.
1547 */
1548static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1549                int nid, nodemask_t *nmask)
1550{
1551        struct page *page = NULL;
1552
1553        if (hstate_is_gigantic(h))
1554                return NULL;
1555
1556        spin_lock(&hugetlb_lock);
1557        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1558                goto out_unlock;
1559        spin_unlock(&hugetlb_lock);
1560
1561        page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1562        if (!page)
1563                return NULL;
1564
1565        spin_lock(&hugetlb_lock);
1566        /*
1567         * We could have raced with a pool size change.
1568         * Double check and simply deallocate the new page
1569         * if we would end up overcommitting the surplus limit.
1570         * Abuse the temporary-page flag to work around the nasty
1571         * free_huge_page code flow.
1572         */
1573        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1574                SetPageHugeTemporary(page);
1575                put_page(page);
1576                page = NULL;
1577        } else {
1578                h->surplus_huge_pages++;
1579                h->surplus_huge_pages_node[page_to_nid(page)]++;
1580        }
1581
1582out_unlock:
1583        spin_unlock(&hugetlb_lock);
1584
1585        return page;
1586}
1587
1588static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1589                int nid, nodemask_t *nmask)
1590{
1591        struct page *page;
1592
1593        if (hstate_is_gigantic(h))
1594                return NULL;
1595
1596        page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1597        if (!page)
1598                return NULL;
1599
1600        /*
1601         * We do not account these pages as surplus because they are only
1602         * temporary and will be released properly on the last reference.
1603         */
1604        SetPageHugeTemporary(page);
1605
1606        return page;
1607}
1608
1609/*
1610 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1611 */
1612static
1613struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1614                struct vm_area_struct *vma, unsigned long addr)
1615{
1616        struct page *page;
1617        struct mempolicy *mpol;
1618        gfp_t gfp_mask = htlb_alloc_mask(h);
1619        int nid;
1620        nodemask_t *nodemask;
1621
1622        nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1623        page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1624        mpol_cond_put(mpol);
1625
1626        return page;
1627}
1628
1629/* page migration callback function */
1630struct page *alloc_huge_page_node(struct hstate *h, int nid)
1631{
1632        gfp_t gfp_mask = htlb_alloc_mask(h);
1633        struct page *page = NULL;
1634
1635        if (nid != NUMA_NO_NODE)
1636                gfp_mask |= __GFP_THISNODE;
1637
1638        spin_lock(&hugetlb_lock);
1639        if (h->free_huge_pages - h->resv_huge_pages > 0)
1640                page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1641        spin_unlock(&hugetlb_lock);
1642
1643        if (!page)
1644                page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1645
1646        return page;
1647}
1648
1649/* page migration callback function */
1650struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1651                nodemask_t *nmask)
1652{
1653        gfp_t gfp_mask = htlb_alloc_mask(h);
1654
1655        spin_lock(&hugetlb_lock);
1656        if (h->free_huge_pages - h->resv_huge_pages > 0) {
1657                struct page *page;
1658
1659                page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1660                if (page) {
1661                        spin_unlock(&hugetlb_lock);
1662                        return page;
1663                }
1664        }
1665        spin_unlock(&hugetlb_lock);
1666
1667        return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1668}
1669
1670/* mempolicy aware migration callback */
1671struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1672                unsigned long address)
1673{
1674        struct mempolicy *mpol;
1675        nodemask_t *nodemask;
1676        struct page *page;
1677        gfp_t gfp_mask;
1678        int node;
1679
1680        gfp_mask = htlb_alloc_mask(h);
1681        node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1682        page = alloc_huge_page_nodemask(h, node, nodemask);
1683        mpol_cond_put(mpol);
1684
1685        return page;
1686}
1687
1688/*
1689 * Increase the hugetlb pool such that it can accommodate a reservation
1690 * of size 'delta'.
1691 */
1692static int gather_surplus_pages(struct hstate *h, int delta)
1693{
1694        struct list_head surplus_list;
1695        struct page *page, *tmp;
1696        int ret, i;
1697        int needed, allocated;
1698        bool alloc_ok = true;
1699
1700        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1701        if (needed <= 0) {
1702                h->resv_huge_pages += delta;
1703                return 0;
1704        }
1705
1706        allocated = 0;
1707        INIT_LIST_HEAD(&surplus_list);
1708
1709        ret = -ENOMEM;
1710retry:
1711        spin_unlock(&hugetlb_lock);
1712        for (i = 0; i < needed; i++) {
1713                page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1714                                NUMA_NO_NODE, NULL);
1715                if (!page) {
1716                        alloc_ok = false;
1717                        break;
1718                }
1719                list_add(&page->lru, &surplus_list);
1720                cond_resched();
1721        }
1722        allocated += i;
1723
1724        /*
1725         * After retaking hugetlb_lock, we need to recalculate 'needed'
1726         * because either resv_huge_pages or free_huge_pages may have changed.
1727         */
1728        spin_lock(&hugetlb_lock);
1729        needed = (h->resv_huge_pages + delta) -
1730                        (h->free_huge_pages + allocated);
1731        if (needed > 0) {
1732                if (alloc_ok)
1733                        goto retry;
1734                /*
1735                 * We were not able to allocate enough pages to
1736                 * satisfy the entire reservation so we free what
1737                 * we've allocated so far.
1738                 */
1739                goto free;
1740        }
1741        /*
1742         * The surplus_list now contains _at_least_ the number of extra pages
1743         * needed to accommodate the reservation.  Add the appropriate number
1744         * of pages to the hugetlb pool and free the extras back to the buddy
1745         * allocator.  Commit the entire reservation here to prevent another
1746         * process from stealing the pages as they are added to the pool but
1747         * before they are reserved.
1748         */
1749        needed += allocated;
1750        h->resv_huge_pages += delta;
1751        ret = 0;
1752
1753        /* Free the needed pages to the hugetlb pool */
1754        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1755                if ((--needed) < 0)
1756                        break;
1757                /*
1758                 * This page is now managed by the hugetlb allocator and has
1759                 * no users -- drop the buddy allocator's reference.
1760                 */
1761                put_page_testzero(page);
1762                VM_BUG_ON_PAGE(page_count(page), page);
1763                enqueue_huge_page(h, page);
1764        }
1765free:
1766        spin_unlock(&hugetlb_lock);
1767
1768        /* Free unnecessary surplus pages to the buddy allocator */
1769        list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1770                put_page(page);
1771        spin_lock(&hugetlb_lock);
1772
1773        return ret;
1774}
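
/*
 * Worked example (illustrative numbers): with delta == 4,
 * resv_huge_pages == 10 and free_huge_pages == 12, the first pass
 * computes needed = (10 + 4) - 12 = 2 and allocates two surplus pages.
 * If another thread freed a page while the lock was dropped
 * (free_huge_pages == 13 on retake), the recomputation gives
 * needed = (10 + 4) - (13 + 2) = -1; after needed += allocated it is 1,
 * so one page from surplus_list joins the pool and the other is handed
 * back to the buddy allocator in the loop after the "free:" label.
 */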
1775
1776/*
1777 * This routine has two main purposes:
1778 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1779 *    in unused_resv_pages.  This corresponds to the prior adjustments made
1780 *    to the associated reservation map.
1781 * 2) Free any unused surplus pages that may have been allocated to satisfy
1782 *    the reservation.  As many as unused_resv_pages may be freed.
1783 *
1784 * Called with hugetlb_lock held.  However, the lock could be dropped (and
1785 * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1786 * we must make sure nobody else can claim pages we are in the process of
1787 * freeing.  Do this by ensuring resv_huge_pages is always greater than the
1788 * number of huge pages we plan to free when dropping the lock.
1789 */
1790static void return_unused_surplus_pages(struct hstate *h,
1791                                        unsigned long unused_resv_pages)
1792{
1793        unsigned long nr_pages;
1794
1795        /* Cannot return gigantic pages currently */
1796        if (hstate_is_gigantic(h))
1797                goto out;
1798
1799        /*
1800         * Part (or even all) of the reservation could have been backed
1801         * by pre-allocated pages. Only free surplus pages.
1802         */
1803        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1804
1805        /*
1806         * We want to release as many surplus pages as possible, spread
1807         * evenly across all nodes with memory. Iterate across these nodes
1808         * until we can no longer free unreserved surplus pages. This occurs
1809         * when the nodes with surplus pages have no free pages.
1810         * free_pool_huge_page() will balance the freed pages across the
1811         * on-line nodes with memory and will handle the hstate accounting.
1812         *
1813         * Note that we decrement resv_huge_pages as we free the pages.  If
1814         * we drop the lock, resv_huge_pages will still be sufficiently large
1815         * to cover subsequent pages we may free.
1816         */
1817        while (nr_pages--) {
1818                h->resv_huge_pages--;
1819                unused_resv_pages--;
1820                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1821                        goto out;
1822                cond_resched_lock(&hugetlb_lock);
1823        }
1824
1825out:
1826        /* Fully uncommit the reservation */
1827        h->resv_huge_pages -= unused_resv_pages;
1828}
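
/*
 * Worked example (illustrative numbers): with unused_resv_pages == 3
 * and surplus_huge_pages == 2, nr_pages = min(3, 2) = 2.  The loop
 * frees at most two surplus pages, decrementing resv_huge_pages and
 * unused_resv_pages in step, and the statement after "out:" uncommits
 * the remaining 3 - 2 = 1 reservation, for a total adjustment of 3.
 */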
1829
1830
1831/*
1832 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1833 * are used by the huge page allocation routines to manage reservations.
1834 *
1835 * vma_needs_reservation is called to determine if the huge page at addr
1836 * within the vma has an associated reservation.  If a reservation is
1837 * needed, the value 1 is returned.  The caller is then responsible for
1838 * managing the global reservation and subpool usage counts.  After
1839 * the huge page has been allocated, vma_commit_reservation is called
1840 * to add the page to the reservation map.  If the page allocation fails,
1841 * the reservation must be ended instead of committed.  vma_end_reservation
1842 * is called in such cases.
1843 *
1844 * In the normal case, vma_commit_reservation returns the same value
1845 * as the preceding vma_needs_reservation call.  The only time this
1846 * is not the case is if a reserve map was changed between calls.  It
1847 * is the responsibility of the caller to notice the difference and
1848 * take appropriate action.
1849 *
1850 * vma_add_reservation is used in error paths where a reservation must
1851 * be restored when a newly allocated huge page must be freed.  It is
1852 * to be called after calling vma_needs_reservation to determine if a
1853 * reservation exists.
1854 */
1855enum vma_resv_mode {
1856        VMA_NEEDS_RESV,
1857        VMA_COMMIT_RESV,
1858        VMA_END_RESV,
1859        VMA_ADD_RESV,
1860};
1861static long __vma_reservation_common(struct hstate *h,
1862                                struct vm_area_struct *vma, unsigned long addr,
1863                                enum vma_resv_mode mode)
1864{
1865        struct resv_map *resv;
1866        pgoff_t idx;
1867        long ret;
1868
1869        resv = vma_resv_map(vma);
1870        if (!resv)
1871                return 1;
1872
1873        idx = vma_hugecache_offset(h, vma, addr);
1874        switch (mode) {
1875        case VMA_NEEDS_RESV:
1876                ret = region_chg(resv, idx, idx + 1);
1877                break;
1878        case VMA_COMMIT_RESV:
1879                ret = region_add(resv, idx, idx + 1);
1880                break;
1881        case VMA_END_RESV:
1882                region_abort(resv, idx, idx + 1);
1883                ret = 0;
1884                break;
1885        case VMA_ADD_RESV:
1886                if (vma->vm_flags & VM_MAYSHARE)
1887                        ret = region_add(resv, idx, idx + 1);
1888                else {
1889                        region_abort(resv, idx, idx + 1);
1890                        ret = region_del(resv, idx, idx + 1);
1891                }
1892                break;
1893        default:
1894                BUG();
1895        }
1896
1897        if (vma->vm_flags & VM_MAYSHARE)
1898                return ret;
1899        else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1900                /*
1901                 * In most cases, reserves always exist for private mappings.
1902                 * However, the file associated with the mapping could have
1903                 * been hole punched or truncated after reserves were consumed,
1904                 * so a subsequent fault on such a range will not use reserves.
1905                 * Subtle - The reserve map for private mappings has the
1906                 * opposite meaning than that of shared mappings.  If NO
1907                 * entry is in the reserve map, it means a reservation exists.
1908                 * If an entry exists in the reserve map, it means the
1909                 * reservation has already been consumed.  As a result, the
1910                 * return value of this routine is the opposite of the
1911                 * value returned from reserve map manipulation routines above.
1912                 */
1913                if (ret)
1914                        return 0;
1915                else
1916                        return 1;
1917        }
1918        else
1919                return ret < 0 ? ret : 0;
1920}
1921
1922static long vma_needs_reservation(struct hstate *h,
1923                        struct vm_area_struct *vma, unsigned long addr)
1924{
1925        return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1926}
1927
1928static long vma_commit_reservation(struct hstate *h,
1929                        struct vm_area_struct *vma, unsigned long addr)
1930{
1931        return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1932}
1933
1934static void vma_end_reservation(struct hstate *h,
1935                        struct vm_area_struct *vma, unsigned long addr)
1936{
1937        (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1938}
1939
1940static long vma_add_reservation(struct hstate *h,
1941                        struct vm_area_struct *vma, unsigned long addr)
1942{
1943        return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1944}
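
/*
 * The expected calling protocol, in sketch form (error handling
 * abbreviated; see alloc_huge_page() below for the real sequence):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate a huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	vma_commit_reservation(h, vma, addr);
 */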
1945
1946/*
1947 * This routine is called to restore a reservation on error paths.  In the
1948 * specific error paths, a huge page was allocated (via alloc_huge_page)
1949 * and is about to be freed.  If a reservation for the page existed,
1950 * alloc_huge_page would have consumed the reservation and set PagePrivate
1951 * in the newly allocated page.  When the page is freed via free_huge_page,
1952 * the global reservation count will be incremented if PagePrivate is set.
1953 * However, free_huge_page cannot adjust the reserve map.  Adjust the
1954 * reserve map here to be consistent with global reserve count adjustments
1955 * to be made by free_huge_page.
1956 */
1957static void restore_reserve_on_error(struct hstate *h,
1958                        struct vm_area_struct *vma, unsigned long address,
1959                        struct page *page)
1960{
1961        if (unlikely(PagePrivate(page))) {
1962                long rc = vma_needs_reservation(h, vma, address);
1963
1964                if (unlikely(rc < 0)) {
1965                        /*
1966                         * Rare out of memory condition in reserve map
1967                         * manipulation.  Clear PagePrivate so that
1968                         * global reserve count will not be incremented
1969                         * by free_huge_page.  This will make it appear
1970                         * as though the reservation for this page was
1971                         * consumed.  This may prevent the task from
1972                         * faulting in the page at a later time.  This
1973                         * is better than inconsistent global huge page
1974                         * accounting of reserve counts.
1975                         */
1976                        ClearPagePrivate(page);
1977                } else if (rc) {
1978                        rc = vma_add_reservation(h, vma, address);
1979                        if (unlikely(rc < 0))
1980                                /*
1981                                 * See above comment about rare out of
1982                                 * memory condition.
1983                                 */
1984                                ClearPagePrivate(page);
1985                } else
1986                        vma_end_reservation(h, vma, address);
1987        }
1988}
1989
1990struct page *alloc_huge_page(struct vm_area_struct *vma,
1991                                    unsigned long addr, int avoid_reserve)
1992{
1993        struct hugepage_subpool *spool = subpool_vma(vma);
1994        struct hstate *h = hstate_vma(vma);
1995        struct page *page;
1996        long map_chg, map_commit;
1997        long gbl_chg;
1998        int ret, idx;
1999        struct hugetlb_cgroup *h_cg;
2000
2001        idx = hstate_index(h);
2002        /*
2003         * Examine the region/reserve map to determine if the process
2004         * has a reservation for the page to be allocated.  A return
2005         * code of zero indicates a reservation exists (no change).
2006         */
2007        map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2008        if (map_chg < 0)
2009                return ERR_PTR(-ENOMEM);
2010
2011        /*
2012         * Processes that did not create the mapping will have no
2013         * reserves as indicated by the region/reserve map. Check
2014         * that the allocation will not exceed the subpool limit.
2015         * Allocations for MAP_NORESERVE mappings also need to be
2016         * checked against any subpool limit.
2017         */
2018        if (map_chg || avoid_reserve) {
2019                gbl_chg = hugepage_subpool_get_pages(spool, 1);
2020                if (gbl_chg < 0) {
2021                        vma_end_reservation(h, vma, addr);
2022                        return ERR_PTR(-ENOSPC);
2023                }
2024
2025                /*
2026                 * Even though there was no reservation in the region/reserve
2027                 * map, there could be reservations associated with the
2028                 * subpool that can be used.  This would be indicated if the
2029                 * return value of hugepage_subpool_get_pages() is zero.
2030                 * However, if avoid_reserve is specified we still avoid even
2031                 * the subpool reservations.
2032                 */
2033                if (avoid_reserve)
2034                        gbl_chg = 1;
2035        }
2036
2037        ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2038        if (ret)
2039                goto out_subpool_put;
2040
2041        spin_lock(&hugetlb_lock);
2042        /*
2043         * gbl_chg is passed to indicate whether or not a page must be taken
2044         * from the global free pool (global change).  gbl_chg == 0 indicates
2045         * a reservation exists for the allocation.
2046         */
2047        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2048        if (!page) {
2049                spin_unlock(&hugetlb_lock);
2050                page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2051                if (!page)
2052                        goto out_uncharge_cgroup;
2053                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2054                        SetPagePrivate(page);
2055                        h->resv_huge_pages--;
2056                }
2057                spin_lock(&hugetlb_lock);
2058                list_move(&page->lru, &h->hugepage_activelist);
2059                /* Fall through */
2060        }
2061        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2062        spin_unlock(&hugetlb_lock);
2063
2064        set_page_private(page, (unsigned long)spool);
2065
2066        map_commit = vma_commit_reservation(h, vma, addr);
2067        if (unlikely(map_chg > map_commit)) {
2068                /*
2069                 * The page was added to the reservation map between
2070                 * vma_needs_reservation and vma_commit_reservation.
2071                 * This indicates a race with hugetlb_reserve_pages.
2072                 * Adjust for the subpool count incremented above AND
2073                 * in hugetlb_reserve_pages for the same page.  Also,
2074                 * the reservation count added in hugetlb_reserve_pages
2075                 * no longer applies.
2076                 */
2077                long rsv_adjust;
2078
2079                rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2080                hugetlb_acct_memory(h, -rsv_adjust);
2081        }
2082        return page;
2083
2084out_uncharge_cgroup:
2085        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2086out_subpool_put:
2087        if (map_chg || avoid_reserve)
2088                hugepage_subpool_put_pages(spool, 1);
2089        vma_end_reservation(h, vma, addr);
2090        return ERR_PTR(-ENOSPC);
2091}
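
/*
 * Typical caller pattern, in sketch form (a fault handler; the failure
 * point is hypothetical):
 *
 *	struct hstate *h = hstate_vma(vma);
 *	struct page *page = alloc_huge_page(vma, address, 0);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	if (some_later_step_failed) {	// hypothetical failure point
 *		restore_reserve_on_error(h, vma, address, page);
 *		put_page(page);
 *	}
 */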
2092
2093int alloc_bootmem_huge_page(struct hstate *h)
2094        __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2095int __alloc_bootmem_huge_page(struct hstate *h)
2096{
2097        struct huge_bootmem_page *m;
2098        int nr_nodes, node;
2099
2100        for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2101                void *addr;
2102
2103                addr = memblock_virt_alloc_try_nid_raw(
2104                                huge_page_size(h), huge_page_size(h),
2105                                0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2106                if (addr) {
2107                        /*
2108                         * Use the beginning of the huge page to store the
2109                         * huge_bootmem_page struct (until gather_bootmem
2110                         * puts them into the mem_map).
2111                         */
2112                        m = addr;
2113                        goto found;
2114                }
2115        }
2116        return 0;
2117
2118found:
2119        BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2120        /* Put them into a private list first because mem_map is not up yet */
2121        INIT_LIST_HEAD(&m->list);
2122        list_add(&m->list, &huge_boot_pages);
2123        m->hstate = h;
2124        return 1;
2125}
2126
2127static void __init prep_compound_huge_page(struct page *page,
2128                unsigned int order)
2129{
2130        if (unlikely(order > (MAX_ORDER - 1)))
2131                prep_compound_gigantic_page(page, order);
2132        else
2133                prep_compound_page(page, order);
2134}
2135
2136/* Put bootmem huge pages into the standard lists after mem_map is up */
2137static void __init gather_bootmem_prealloc(void)
2138{
2139        struct huge_bootmem_page *m;
2140
2141        list_for_each_entry(m, &huge_boot_pages, list) {
2142                struct page *page = virt_to_page(m);
2143                struct hstate *h = m->hstate;
2144
2145                WARN_ON(page_count(page) != 1);
2146                prep_compound_huge_page(page, h->order);
2147                WARN_ON(PageReserved(page));
2148                prep_new_huge_page(h, page, page_to_nid(page));
2149                put_page(page); /* free it into the hugepage allocator */
2150
2151                /*
2152                 * If we had gigantic hugepages allocated at boot time, we need
2153                 * to restore the 'stolen' pages to totalram_pages in order to
2154                 * fix confusing memory reports from free(1) and other
2155                 * side effects, like CommitLimit going negative.
2156                 */
2157                if (hstate_is_gigantic(h))
2158                        adjust_managed_page_count(page, 1 << h->order);
2159                cond_resched();
2160        }
2161}
2162
2163static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2164{
2165        unsigned long i;
2166
2167        for (i = 0; i < h->max_huge_pages; ++i) {
2168                if (hstate_is_gigantic(h)) {
2169                        if (!alloc_bootmem_huge_page(h))
2170                                break;
2171                } else if (!alloc_pool_huge_page(h,
2172                                         &node_states[N_MEMORY]))
2173                        break;
2174                cond_resched();
2175        }
2176        if (i < h->max_huge_pages) {
2177                char buf[32];
2178
2179                string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2180                pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2181                        h->max_huge_pages, buf, i);
2182                h->max_huge_pages = i;
2183        }
2184}
2185
2186static void __init hugetlb_init_hstates(void)
2187{
2188        struct hstate *h;
2189
2190        for_each_hstate(h) {
2191                if (minimum_order > huge_page_order(h))
2192                        minimum_order = huge_page_order(h);
2193
2194                /* oversized huge pages were initialized in early boot */
2195                if (!hstate_is_gigantic(h))
2196                        hugetlb_hstate_alloc_pages(h);
2197        }
2198        VM_BUG_ON(minimum_order == UINT_MAX);
2199}
2200
2201static void __init report_hugepages(void)
2202{
2203        struct hstate *h;
2204
2205        for_each_hstate(h) {
2206                char buf[32];
2207
2208                string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2209                pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2210                        buf, h->free_huge_pages);
2211        }
2212}
2213
2214#ifdef CONFIG_HIGHMEM
2215static void try_to_free_low(struct hstate *h, unsigned long count,
2216                                                nodemask_t *nodes_allowed)
2217{
2218        int i;
2219
2220        if (hstate_is_gigantic(h))
2221                return;
2222
2223        for_each_node_mask(i, *nodes_allowed) {
2224                struct page *page, *next;
2225                struct list_head *freel = &h->hugepage_freelists[i];
2226                list_for_each_entry_safe(page, next, freel, lru) {
2227                        if (count >= h->nr_huge_pages)
2228                                return;
2229                        if (PageHighMem(page))
2230                                continue;
2231                        list_del(&page->lru);
2232                        update_and_free_page(h, page);
2233                        h->free_huge_pages--;
2234                        h->free_huge_pages_node[page_to_nid(page)]--;
2235                }
2236        }
2237}
2238#else
2239static inline void try_to_free_low(struct hstate *h, unsigned long count,
2240                                                nodemask_t *nodes_allowed)
2241{
2242}
2243#endif
2244
2245/*
2246 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2247 * balanced by operating on them in a round-robin fashion.
2248 * Returns 1 if an adjustment was made.
2249 */
2250static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2251                                int delta)
2252{
2253        int nr_nodes, node;
2254
2255        VM_BUG_ON(delta != -1 && delta != 1);
2256
2257        if (delta < 0) {
2258                for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2259                        if (h->surplus_huge_pages_node[node])
2260                                goto found;
2261                }
2262        } else {
2263                for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2264                        if (h->surplus_huge_pages_node[node] <
2265                                        h->nr_huge_pages_node[node])
2266                                goto found;
2267                }
2268        }
2269        return 0;
2270
2271found:
2272        h->surplus_huge_pages += delta;
2273        h->surplus_huge_pages_node[node] += delta;
2274        return 1;
2275}
2276
2277#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2278static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2279                                                nodemask_t *nodes_allowed)
2280{
2281        unsigned long min_count, ret;
2282
2283        if (hstate_is_gigantic(h) && !gigantic_page_supported())
2284                return h->max_huge_pages;
2285
2286        /*
2287         * Increase the pool size
2288         * First take pages out of surplus state.  Then make up the
2289         * remaining difference by allocating fresh huge pages.
2290         *
2291         * We might race with alloc_surplus_huge_page() here and be unable
2292         * to convert a surplus huge page to a normal huge page. That is
2293         * not critical, though; it just means the overall size of the
2294         * pool might be one hugepage larger than it needs to be, but
2295         * within all the constraints specified by the sysctls.
2296         */
2297        spin_lock(&hugetlb_lock);
2298        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2299                if (!adjust_pool_surplus(h, nodes_allowed, -1))
2300                        break;
2301        }
2302
2303        while (count > persistent_huge_pages(h)) {
2304                /*
2305                 * If this allocation races such that we no longer need the
2306                 * page, free_huge_page will handle it by freeing the page
2307                 * and reducing the surplus.
2308                 */
2309                spin_unlock(&hugetlb_lock);
2310
2311                /* yield cpu to avoid soft lockup */
2312                cond_resched();
2313
2314                ret = alloc_pool_huge_page(h, nodes_allowed);
2315                spin_lock(&hugetlb_lock);
2316                if (!ret)
2317                        goto out;
2318
2319                /* Bail for signals. Probably ctrl-c from user */
2320                if (signal_pending(current))
2321                        goto out;
2322        }
2323
2324        /*
2325         * Decrease the pool size
2326         * First return free pages to the buddy allocator (being careful
2327         * to keep enough around to satisfy reservations).  Then place
2328         * pages into surplus state as needed so the pool will shrink
2329         * to the desired size as pages become free.
2330         *
2331         * By placing pages into the surplus state independent of the
2332         * overcommit value, we are allowing the surplus pool size to
2333         * exceed overcommit. There are few sane options here. Since
2334         * alloc_surplus_huge_page() is checking the global counter,
2335         * though, we'll note that we're not allowed to exceed surplus
2336         * and won't grow the pool anywhere else, at least not until one of
2337         * the sysctls is changed or the surplus pages go out of use.
2338         */
2339        min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2340        min_count = max(count, min_count);
2341        try_to_free_low(h, min_count, nodes_allowed);
2342        while (min_count < persistent_huge_pages(h)) {
2343                if (!free_pool_huge_page(h, nodes_allowed, 0))
2344                        break;
2345                cond_resched_lock(&hugetlb_lock);
2346        }
2347        while (count < persistent_huge_pages(h)) {
2348                if (!adjust_pool_surplus(h, nodes_allowed, 1))
2349                        break;
2350        }
2351out:
2352        ret = persistent_huge_pages(h);
2353        spin_unlock(&hugetlb_lock);
2354        return ret;
2355}
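
/*
 * Worked example for the shrink path (illustrative numbers): with
 * nr_huge_pages == 10, free_huge_pages == 4, resv_huge_pages == 3,
 * surplus_huge_pages == 0 and a request of count == 2,
 * min_count = 3 + 10 - 4 = 9, clamped to max(2, 9) = 9.  Only one page
 * can be freed outright (10 -> 9); the remaining difference down to
 * count is covered by marking pages surplus, so the pool keeps
 * shrinking as those surplus pages are eventually released.
 */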
2356
2357#define HSTATE_ATTR_RO(_name) \
2358        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2359
2360#define HSTATE_ATTR(_name) \
2361        static struct kobj_attribute _name##_attr = \
2362                __ATTR(_name, 0644, _name##_show, _name##_store)
2363
2364static struct kobject *hugepages_kobj;
2365static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2366
2367static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2368
2369static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2370{
2371        int i;
2372
2373        for (i = 0; i < HUGE_MAX_HSTATE; i++)
2374                if (hstate_kobjs[i] == kobj) {
2375                        if (nidp)
2376                                *nidp = NUMA_NO_NODE;
2377                        return &hstates[i];
2378                }
2379
2380        return kobj_to_node_hstate(kobj, nidp);
2381}
2382
2383static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2384                                        struct kobj_attribute *attr, char *buf)
2385{
2386        struct hstate *h;
2387        unsigned long nr_huge_pages;
2388        int nid;
2389
2390        h = kobj_to_hstate(kobj, &nid);
2391        if (nid == NUMA_NO_NODE)
2392                nr_huge_pages = h->nr_huge_pages;
2393        else
2394                nr_huge_pages = h->nr_huge_pages_node[nid];
2395
2396        return sprintf(buf, "%lu\n", nr_huge_pages);
2397}
2398
2399static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2400                                           struct hstate *h, int nid,
2401                                           unsigned long count, size_t len)
2402{
2403        int err;
2404        NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2405
2406        if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2407                err = -EINVAL;
2408                goto out;
2409        }
2410
2411        if (nid == NUMA_NO_NODE) {
2412                /*
2413                 * global hstate attribute
2414                 */
2415                if (!(obey_mempolicy &&
2416                                init_nodemask_of_mempolicy(nodes_allowed))) {
2417                        NODEMASK_FREE(nodes_allowed);
2418                        nodes_allowed = &node_states[N_MEMORY];
2419                }
2420        } else if (nodes_allowed) {
2421                /*
2422                 * per node hstate attribute: adjust count to global,
2423                 * but restrict alloc/free to the specified node.
2424                 */
2425                count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2426                init_nodemask_of_node(nodes_allowed, nid);
2427        } else
2428                nodes_allowed = &node_states[N_MEMORY];
2429
2430        h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2431
2432        if (nodes_allowed != &node_states[N_MEMORY])
2433                NODEMASK_FREE(nodes_allowed);
2434
2435        return len;
2436out:
2437        NODEMASK_FREE(nodes_allowed);
2438        return err;
2439}
2440
2441static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2442                                         struct kobject *kobj, const char *buf,
2443                                         size_t len)
2444{
2445        struct hstate *h;
2446        unsigned long count;
2447        int nid;
2448        int err;
2449
2450        err = kstrtoul(buf, 10, &count);
2451        if (err)
2452                return err;
2453
2454        h = kobj_to_hstate(kobj, &nid);
2455        return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2456}
2457
2458static ssize_t nr_hugepages_show(struct kobject *kobj,
2459                                       struct kobj_attribute *attr, char *buf)
2460{
2461        return nr_hugepages_show_common(kobj, attr, buf);
2462}
2463
2464static ssize_t nr_hugepages_store(struct kobject *kobj,
2465               struct kobj_attribute *attr, const char *buf, size_t len)
2466{
2467        return nr_hugepages_store_common(false, kobj, buf, len);
2468}
2469HSTATE_ATTR(nr_hugepages);
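
/*
 * For reference, HSTATE_ATTR(nr_hugepages) expands (per the macro
 * defined above) to:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 */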
2470
2471#ifdef CONFIG_NUMA
2472
2473/*
2474 * hstate attribute for optionally mempolicy-based constraint on persistent
2475 * huge page alloc/free.
2476 */
2477static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2478                                       struct kobj_attribute *attr, char *buf)
2479{
2480        return nr_hugepages_show_common(kobj, attr, buf);
2481}
2482
2483static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2484               struct kobj_attribute *attr, const char *buf, size_t len)
2485{
2486        return nr_hugepages_store_common(true, kobj, buf, len);
2487}
2488HSTATE_ATTR(nr_hugepages_mempolicy);
2489#endif
2490
2491
2492static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2493                                        struct kobj_attribute *attr, char *buf)
2494{
2495        struct hstate *h = kobj_to_hstate(kobj, NULL);
2496        return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2497}
2498
2499static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2500                struct kobj_attribute *attr, const char *buf, size_t count)
2501{
2502        int err;
2503        unsigned long input;
2504        struct hstate *h = kobj_to_hstate(kobj, NULL);
2505
2506        if (hstate_is_gigantic(h))
2507                return -EINVAL;
2508
2509        err = kstrtoul(buf, 10, &input);
2510        if (err)
2511                return err;
2512
2513        spin_lock(&hugetlb_lock);
2514        h->nr_overcommit_huge_pages = input;
2515        spin_unlock(&hugetlb_lock);
2516
2517        return count;
2518}
2519HSTATE_ATTR(nr_overcommit_hugepages);
2520
2521static ssize_t free_hugepages_show(struct kobject *kobj,
2522                                        struct kobj_attribute *attr, char *buf)
2523{
2524        struct hstate *h;
2525        unsigned long free_huge_pages;
2526        int nid;
2527
2528        h = kobj_to_hstate(kobj, &nid);
2529        if (nid == NUMA_NO_NODE)
2530                free_huge_pages = h->free_huge_pages;
2531        else
2532                free_huge_pages = h->free_huge_pages_node[nid];
2533
2534        return sprintf(buf, "%lu\n", free_huge_pages);
2535}
2536HSTATE_ATTR_RO(free_hugepages);
2537
2538static ssize_t resv_hugepages_show(struct kobject *kobj,
2539                                        struct kobj_attribute *attr, char *buf)
2540{
2541        struct hstate *h = kobj_to_hstate(kobj, NULL);
2542        return sprintf(buf, "%lu\n", h->resv_huge_pages);
2543}
2544HSTATE_ATTR_RO(resv_hugepages);
2545
2546static ssize_t surplus_hugepages_show(struct kobject *kobj,
2547                                        struct kobj_attribute *attr, char *buf)
2548{
2549        struct hstate *h;
2550        unsigned long surplus_huge_pages;
2551        int nid;
2552
2553        h = kobj_to_hstate(kobj, &nid);
2554        if (nid == NUMA_NO_NODE)
2555                surplus_huge_pages = h->surplus_huge_pages;
2556        else
2557                surplus_huge_pages = h->surplus_huge_pages_node[nid];
2558
2559        return sprintf(buf, "%lu\n", surplus_huge_pages);
2560}
2561HSTATE_ATTR_RO(surplus_hugepages);
2562
2563static struct attribute *hstate_attrs[] = {
2564        &nr_hugepages_attr.attr,
2565        &nr_overcommit_hugepages_attr.attr,
2566        &free_hugepages_attr.attr,
2567        &resv_hugepages_attr.attr,
2568        &surplus_hugepages_attr.attr,
2569#ifdef CONFIG_NUMA
2570        &nr_hugepages_mempolicy_attr.attr,
2571#endif
2572        NULL,
2573};
2574
2575static const struct attribute_group hstate_attr_group = {
2576        .attrs = hstate_attrs,
2577};
2578
2579static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2580                                    struct kobject **hstate_kobjs,
2581                                    const struct attribute_group *hstate_attr_group)
2582{
2583        int retval;
2584        int hi = hstate_index(h);
2585
2586        hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2587        if (!hstate_kobjs[hi])
2588                return -ENOMEM;
2589
2590        retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2591        if (retval)
2592                kobject_put(hstate_kobjs[hi]);
2593
2594        return retval;
2595}
2596
2597static void __init hugetlb_sysfs_init(void)
2598{
2599        struct hstate *h;
2600        int err;
2601
2602        hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2603        if (!hugepages_kobj)
2604                return;
2605
2606        for_each_hstate(h) {
2607                err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2608                                         hstate_kobjs, &hstate_attr_group);
2609                if (err)
2610                        pr_err("Hugetlb: Unable to add hstate %s", h->name);
2611        }
2612}
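
/*
 * The resulting layout under mm_kobj (illustrative, for a 2 MB hstate
 * whose name is built in hugetlb_add_hstate() below; CONFIG_NUMA adds
 * nr_hugepages_mempolicy as well):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */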
2613
2614#ifdef CONFIG_NUMA
2615
2616/*
2617 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2618 * with node devices in node_devices[] using a parallel array.  The index
2619 * into either array (node device or node_hstate) equals the node id.
2620 * This is here to avoid any static dependency of the node device driver, in
2621 * the base kernel, on the hugetlb module.
2622 */
2623struct node_hstate {
2624        struct kobject          *hugepages_kobj;
2625        struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2626};
2627static struct node_hstate node_hstates[MAX_NUMNODES];
2628
2629/*
2630 * A subset of global hstate attributes for node devices
2631 */
2632static struct attribute *per_node_hstate_attrs[] = {
2633        &nr_hugepages_attr.attr,
2634        &free_hugepages_attr.attr,
2635        &surplus_hugepages_attr.attr,
2636        NULL,
2637};
2638
2639static const struct attribute_group per_node_hstate_attr_group = {
2640        .attrs = per_node_hstate_attrs,
2641};
2642
2643/*
2644 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2645 * Returns node id via non-NULL nidp.
2646 */
2647static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2648{
2649        int nid;
2650
2651        for (nid = 0; nid < nr_node_ids; nid++) {
2652                struct node_hstate *nhs = &node_hstates[nid];
2653                int i;
2654                for (i = 0; i < HUGE_MAX_HSTATE; i++)
2655                        if (nhs->hstate_kobjs[i] == kobj) {
2656                                if (nidp)
2657                                        *nidp = nid;
2658                                return &hstates[i];
2659                        }
2660        }
2661
2662        BUG();
2663        return NULL;
2664}
2665
2666/*
2667 * Unregister hstate attributes from a single node device.
2668 * No-op if no hstate attributes attached.
2669 */
2670static void hugetlb_unregister_node(struct node *node)
2671{
2672        struct hstate *h;
2673        struct node_hstate *nhs = &node_hstates[node->dev.id];
2674
2675        if (!nhs->hugepages_kobj)
2676                return;         /* no hstate attributes */
2677
2678        for_each_hstate(h) {
2679                int idx = hstate_index(h);
2680                if (nhs->hstate_kobjs[idx]) {
2681                        kobject_put(nhs->hstate_kobjs[idx]);
2682                        nhs->hstate_kobjs[idx] = NULL;
2683                }
2684        }
2685
2686        kobject_put(nhs->hugepages_kobj);
2687        nhs->hugepages_kobj = NULL;
2688}
2689
2690
2691/*
2692 * Register hstate attributes for a single node device.
2693 * No-op if attributes already registered.
2694 */
2695static void hugetlb_register_node(struct node *node)
2696{
2697        struct hstate *h;
2698        struct node_hstate *nhs = &node_hstates[node->dev.id];
2699        int err;
2700
2701        if (nhs->hugepages_kobj)
2702                return;         /* already allocated */
2703
2704        nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2705                                                        &node->dev.kobj);
2706        if (!nhs->hugepages_kobj)
2707                return;
2708
2709        for_each_hstate(h) {
2710                err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2711                                                nhs->hstate_kobjs,
2712                                                &per_node_hstate_attr_group);
2713                if (err) {
2714                        pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2715                                h->name, node->dev.id);
2716                        hugetlb_unregister_node(node);
2717                        break;
2718                }
2719        }
2720}
2721
2722/*
2723 * hugetlb init time:  register hstate attributes for all registered node
2724 * devices of nodes that have memory.  All on-line nodes should have
2725 * registered their associated device by this time.
2726 */
2727static void __init hugetlb_register_all_nodes(void)
2728{
2729        int nid;
2730
2731        for_each_node_state(nid, N_MEMORY) {
2732                struct node *node = node_devices[nid];
2733                if (node->dev.id == nid)
2734                        hugetlb_register_node(node);
2735        }
2736
2737        /*
2738         * Let the node device driver know we're here so it can
2739         * [un]register hstate attributes on node hotplug.
2740         */
2741        register_hugetlbfs_with_node(hugetlb_register_node,
2742                                     hugetlb_unregister_node);
2743}
2744#else   /* !CONFIG_NUMA */
2745
2746static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2747{
2748        BUG();
2749        if (nidp)
2750                *nidp = -1;
2751        return NULL;
2752}
2753
2754static void hugetlb_register_all_nodes(void) { }
2755
2756#endif
2757
2758static int __init hugetlb_init(void)
2759{
2760        int i;
2761
2762        if (!hugepages_supported())
2763                return 0;
2764
2765        if (!size_to_hstate(default_hstate_size)) {
2766                if (default_hstate_size != 0) {
2767                        pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2768                               default_hstate_size, HPAGE_SIZE);
2769                }
2770
2771                default_hstate_size = HPAGE_SIZE;
2772                if (!size_to_hstate(default_hstate_size))
2773                        hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2774        }
2775        default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2776        if (default_hstate_max_huge_pages) {
2777                if (!default_hstate.max_huge_pages)
2778                        default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2779        }
2780
2781        hugetlb_init_hstates();
2782        gather_bootmem_prealloc();
2783        report_hugepages();
2784
2785        hugetlb_sysfs_init();
2786        hugetlb_register_all_nodes();
2787        hugetlb_cgroup_file_init();
2788
2789#ifdef CONFIG_SMP
2790        num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2791#else
2792        num_fault_mutexes = 1;
2793#endif
2794        hugetlb_fault_mutex_table =
2795                kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2796                              GFP_KERNEL);
2797        BUG_ON(!hugetlb_fault_mutex_table);
2798
2799        for (i = 0; i < num_fault_mutexes; i++)
2800                mutex_init(&hugetlb_fault_mutex_table[i]);
2801        return 0;
2802}
2803subsys_initcall(hugetlb_init);
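
/*
 * Sketch of how the fault mutex table is used (the hash helper lives
 * elsewhere in this file; its argument list is abbreviated here):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(...);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...handle the fault on this logical page...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */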
2804
2805/* Should be called when an unsupported hugepagesz=... option is parsed */
2806void __init hugetlb_bad_size(void)
2807{
2808        parsed_valid_hugepagesz = false;
2809}
2810
2811void __init hugetlb_add_hstate(unsigned int order)
2812{
2813        struct hstate *h;
2814        unsigned long i;
2815
2816        if (size_to_hstate(PAGE_SIZE << order)) {
2817                pr_warn("hugepagesz= specified twice, ignoring\n");
2818                return;
2819        }
2820        BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2821        BUG_ON(order == 0);
2822        h = &hstates[hugetlb_max_hstate++];
2823        h->order = order;
2824        h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2825        h->nr_huge_pages = 0;
2826        h->free_huge_pages = 0;
2827        for (i = 0; i < MAX_NUMNODES; ++i)
2828                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2829        INIT_LIST_HEAD(&h->hugepage_activelist);
2830        h->next_nid_to_alloc = first_memory_node;
2831        h->next_nid_to_free = first_memory_node;
2832        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2833                                        huge_page_size(h)/1024);
2834
2835        parsed_hstate = h;
2836}
2837
2838static int __init hugetlb_nrpages_setup(char *s)
2839{
2840        unsigned long *mhp;
2841        static unsigned long *last_mhp;
2842
2843        if (!parsed_valid_hugepagesz) {
2844                pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n",
2845                        s);
2846                parsed_valid_hugepagesz = true;
2847                return 1;
2848        }
2849        /*
2850         * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2851         * so this hugepages= parameter goes to the "default hstate".
2852         */
2853        else if (!hugetlb_max_hstate)
2854                mhp = &default_hstate_max_huge_pages;
2855        else
2856                mhp = &parsed_hstate->max_huge_pages;
2857
2858        if (mhp == last_mhp) {
2859                pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2860                return 1;
2861        }
2862
2863        if (sscanf(s, "%lu", mhp) <= 0)
2864                *mhp = 0;
2865
2866        /*
2867         * Global state is always initialized later in hugetlb_init.
2868         * But we need to allocate the gigantic hstates (order >= MAX_ORDER)
2869         * here, early, while the bootmem allocator is still usable.
2870         */
2871        if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2872                hugetlb_hstate_alloc_pages(parsed_hstate);
2873
2874        last_mhp = mhp;
2875
2876        return 1;
2877}
2878__setup("hugepages=", hugetlb_nrpages_setup);
2879
2880static int __init hugetlb_default_setup(char *s)
2881{
2882        default_hstate_size = memparse(s, &s);
2883        return 1;
2884}
2885__setup("default_hugepagesz=", hugetlb_default_setup);
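
/*
 * Example boot command line combining these options (illustrative):
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 *
 * Each hugepages= value applies to the hugepagesz= that precedes it,
 * per the parsing rules in hugetlb_nrpages_setup() above.
 */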
2886
2887static unsigned int cpuset_mems_nr(unsigned int *array)
2888{
2889        int node;
2890        unsigned int nr = 0;
2891
2892        for_each_node_mask(node, cpuset_current_mems_allowed)
2893                nr += array[node];
2894
2895        return nr;
2896}
2897
2898#ifdef CONFIG_SYSCTL
2899static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2900                         struct ctl_table *table, int write,
2901                         void __user *buffer, size_t *length, loff_t *ppos)
2902{
2903        struct hstate *h = &default_hstate;
2904        unsigned long tmp = h->max_huge_pages;
2905        int ret;
2906
2907        if (!hugepages_supported())
2908                return -EOPNOTSUPP;
2909
2910        table->data = &tmp;
2911        table->maxlen = sizeof(unsigned long);
2912        ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2913        if (ret)
2914                goto out;
2915
2916        if (write)
2917                ret = __nr_hugepages_store_common(obey_mempolicy, h,
2918                                                  NUMA_NO_NODE, tmp, *length);
2919out:
2920        return ret;
2921}
2922
2923int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2924                          void __user *buffer, size_t *length, loff_t *ppos)
2925{
2926
2927        return hugetlb_sysctl_handler_common(false, table, write,
2928                                                        buffer, length, ppos);
2929}
2930
2931#ifdef CONFIG_NUMA
2932int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2933                          void __user *buffer, size_t *length, loff_t *ppos)
2934{
2935        return hugetlb_sysctl_handler_common(true, table, write,
2936                                                        buffer, length, ppos);
2937}
2938#endif /* CONFIG_NUMA */
2939
2940int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2941                        void __user *buffer,
2942                        size_t *length, loff_t *ppos)
2943{
2944        struct hstate *h = &default_hstate;
2945        unsigned long tmp;
2946        int ret;
2947
2948        if (!hugepages_supported())
2949                return -EOPNOTSUPP;
2950
2951        tmp = h->nr_overcommit_huge_pages;
2952
2953        if (write && hstate_is_gigantic(h))
2954                return -EINVAL;
2955
2956        table->data = &tmp;
2957        table->maxlen = sizeof(unsigned long);
2958        ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2959        if (ret)
2960                goto out;
2961
2962        if (write) {
2963                spin_lock(&hugetlb_lock);
2964                h->nr_overcommit_huge_pages = tmp;
2965                spin_unlock(&hugetlb_lock);
2966        }
2967out:
2968        return ret;
2969}
2970
2971#endif /* CONFIG_SYSCTL */
2972
2973void hugetlb_report_meminfo(struct seq_file *m)
2974{
2975        struct hstate *h;
2976        unsigned long total = 0;
2977
2978        if (!hugepages_supported())
2979                return;
2980
2981        for_each_hstate(h) {
2982                unsigned long count = h->nr_huge_pages;
2983
2984                total += (PAGE_SIZE << huge_page_order(h)) * count;
2985
2986                if (h == &default_hstate)
2987                        seq_printf(m,
2988                                   "HugePages_Total:   %5lu\n"
2989                                   "HugePages_Free:    %5lu\n"
2990                                   "HugePages_Rsvd:    %5lu\n"
2991                                   "HugePages_Surp:    %5lu\n"
2992                                   "Hugepagesize:   %8lu kB\n",
2993                                   count,
2994                                   h->free_huge_pages,
2995                                   h->resv_huge_pages,
2996                                   h->surplus_huge_pages,
2997                                   (PAGE_SIZE << huge_page_order(h)) / 1024);
2998        }
2999
3000        seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
3001}
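/*
 * For illustration only (values made up): with a single 2 MB default
 * hstate holding 512 free pages, the function above would emit:
 *
 *     HugePages_Total:     512
 *     HugePages_Free:      512
 *     HugePages_Rsvd:        0
 *     HugePages_Surp:        0
 *     Hugepagesize:       2048 kB
 *     Hugetlb:         1048576 kB
 */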
3002
3003int hugetlb_report_node_meminfo(int nid, char *buf)
3004{
3005        struct hstate *h = &default_hstate;
3006        if (!hugepages_supported())
3007                return 0;
3008        return sprintf(buf,
3009                "Node %d HugePages_Total: %5u\n"
3010                "Node %d HugePages_Free:  %5u\n"
3011                "Node %d HugePages_Surp:  %5u\n",
3012                nid, h->nr_huge_pages_node[nid],
3013                nid, h->free_huge_pages_node[nid],
3014                nid, h->surplus_huge_pages_node[nid]);
3015}
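/*
 * Illustrative per-node output (values made up), as exposed through
 * /sys/devices/system/node/node<N>/meminfo:
 *
 *     Node 0 HugePages_Total:   256
 *     Node 0 HugePages_Free:    200
 *     Node 0 HugePages_Surp:      0
 */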
3016
3017void hugetlb_show_meminfo(void)
3018{
3019        struct hstate *h;
3020        int nid;
3021
3022        if (!hugepages_supported())
3023                return;
3024
3025        for_each_node_state(nid, N_MEMORY)
3026                for_each_hstate(h)
3027                        pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3028                                nid,
3029                                h->nr_huge_pages_node[nid],
3030                                h->free_huge_pages_node[nid],
3031                                h->surplus_huge_pages_node[nid],
3032                                1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3033}
3034
3035void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3036{
3037        seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3038                   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3039}
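/*
 * Illustrative output (value made up): the line above appears in
 * /proc/<pid>/status, e.g.
 *
 *     HugetlbPages:       4096 kB
 */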
3040
3041/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3042unsigned long hugetlb_total_pages(void)
3043{
3044        struct hstate *h;
3045        unsigned long nr_total_pages = 0;
3046
3047        for_each_hstate(h)
3048                nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3049        return nr_total_pages;
3050}
3051
3052static int hugetlb_acct_memory(struct hstate *h, long delta)
3053{
3054        int ret = -ENOMEM;
3055
3056        spin_lock(&hugetlb_lock);
3057        /*
3058         * When cpuset is configured, it breaks strict hugetlb page
3059         * reservation because the accounting is done on a global variable.
3060         * Such a reservation is meaningless in the presence of cpusets: it
3061         * is never checked against the page availability of the task's
3062         * current cpuset, so the application can still be OOM-killed by
3063         * the kernel when its cpuset runs out of free hugetlb pages.
3064         * Enforcing strict accounting per cpuset is close to impossible
3065         * (or too ugly) because cpusets are fluid: tasks and memory nodes
3066         * can be moved between cpusets at any time.
3067         *
3068         * Changing the semantics of shared hugetlb mappings under cpusets
3069         * is undesirable. To preserve some of the semantics, we fall back
3070         * to checking against the current free page availability, as a
3071         * best effort that hopefully minimizes the impact of the semantic
3072         * change cpusets introduce.
3073         */
3074        if (delta > 0) {
3075                if (gather_surplus_pages(h, delta) < 0)
3076                        goto out;
3077
3078                if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3079                        return_unused_surplus_pages(h, delta);
3080                        goto out;
3081                }
3082        }
3083
3084        ret = 0;
3085        if (delta < 0)
3086                return_unused_surplus_pages(h, (unsigned long) -delta);
3087
3088out:
3089        spin_unlock(&hugetlb_lock);
3090        return ret;
3091}
3092
3093static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3094{
3095        struct resv_map *resv = vma_resv_map(vma);
3096
3097        /*
3098         * This new VMA should share its sibling's reservation map if present.
3099         * The VMA will only ever have a valid reservation map pointer where
3100         * it is being copied for another still existing VMA.  As that VMA
3101         * has a reference to the reservation map it cannot disappear until
3102         * after this open call completes.  It is therefore safe to take a
3103         * new reference here without additional locking.
3104         */
3105        if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3106                kref_get(&resv->refs);
3107}
3108
3109static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3110{
3111        struct hstate *h = hstate_vma(vma);
3112        struct resv_map *resv = vma_resv_map(vma);
3113        struct hugepage_subpool *spool = subpool_vma(vma);
3114        unsigned long reserve, start, end;
3115        long gbl_reserve;
3116
3117        if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3118                return;
3119
3120        start = vma_hugecache_offset(h, vma, vma->vm_start);
3121        end = vma_hugecache_offset(h, vma, vma->vm_end);
3122
3123        reserve = (end - start) - region_count(resv, start, end);
3124
3125        kref_put(&resv->refs, resv_map_release);
3126
3127        if (reserve) {
3128                /*
3129                 * Decrement reserve counts.  The global reserve count may be
3130                 * adjusted if the subpool has a minimum size.
3131                 */
3132                gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3133                hugetlb_acct_memory(h, -gbl_reserve);
3134        }
3135}
3136
3137static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3138{
3139        if (addr & ~(huge_page_mask(hstate_vma(vma))))
3140                return -EINVAL;
3141        return 0;
3142}
3143
3144static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3145{
3146        struct hstate *hstate = hstate_vma(vma);
3147
3148        return 1UL << huge_page_shift(hstate);
3149}
3150
3151/*
3152 * We cannot handle pagefaults against hugetlb pages at all.  They cause
3153 * handle_mm_fault() to try to instantiate regular-sized pages in the
3154 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3155 * this far.
3156 */
3157static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3158{
3159        BUG();
3160        return 0;
3161}
3162
3163/*
3164 * When a new function is introduced to vm_operations_struct and added
3165 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3166 * This is because, under the System V memory model, mappings created via
3167 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3168 * and their original vm_ops are overwritten with shm_vm_ops.
3169 */
3170const struct vm_operations_struct hugetlb_vm_ops = {
3171        .fault = hugetlb_vm_op_fault,
3172        .open = hugetlb_vm_op_open,
3173        .close = hugetlb_vm_op_close,
3174        .split = hugetlb_vm_op_split,
3175        .pagesize = hugetlb_vm_op_pagesize,
3176};
3177
3178static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3179                                int writable)
3180{
3181        pte_t entry;
3182
3183        if (writable) {
3184                entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3185                                         vma->vm_page_prot)));
3186        } else {
3187                entry = huge_pte_wrprotect(mk_huge_pte(page,
3188                                           vma->vm_page_prot));
3189        }
3190        entry = pte_mkyoung(entry);
3191        entry = pte_mkhuge(entry);
3192        entry = arch_make_huge_pte(entry, vma, page, writable);
3193
3194        return entry;
3195}
3196
3197static void set_huge_ptep_writable(struct vm_area_struct *vma,
3198                                   unsigned long address, pte_t *ptep)
3199{
3200        pte_t entry;
3201
3202        entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3203        if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3204                update_mmu_cache(vma, address, ptep);
3205}
3206
3207bool is_hugetlb_entry_migration(pte_t pte)
3208{
3209        swp_entry_t swp;
3210
3211        if (huge_pte_none(pte) || pte_present(pte))
3212                return false;
3213        swp = pte_to_swp_entry(pte);
3214        if (non_swap_entry(swp) && is_migration_entry(swp))
3215                return true;
3216        else
3217                return false;
3218}
3219
3220static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3221{
3222        swp_entry_t swp;
3223
3224        if (huge_pte_none(pte) || pte_present(pte))
3225                return 0;
3226        swp = pte_to_swp_entry(pte);
3227        if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3228                return 1;
3229        else
3230                return 0;
3231}
3232
3233int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3234                            struct vm_area_struct *vma)
3235{
3236        pte_t *src_pte, *dst_pte, entry;
3237        struct page *ptepage;
3238        unsigned long addr;
3239        int cow;
3240        struct hstate *h = hstate_vma(vma);
3241        unsigned long sz = huge_page_size(h);
3242        unsigned long mmun_start;       /* For mmu_notifiers */
3243        unsigned long mmun_end;         /* For mmu_notifiers */
3244        int ret = 0;
3245
3246        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3247
3248        mmun_start = vma->vm_start;
3249        mmun_end = vma->vm_end;
3250        if (cow)
3251                mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3252
3253        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3254                spinlock_t *src_ptl, *dst_ptl;
3255                src_pte = huge_pte_offset(src, addr, sz);
3256                if (!src_pte)
3257                        continue;
3258                dst_pte = huge_pte_alloc(dst, addr, sz);
3259                if (!dst_pte) {
3260                        ret = -ENOMEM;
3261                        break;
3262                }
3263
3264                /* If the pagetables are shared don't copy or take references */
3265                if (dst_pte == src_pte)
3266                        continue;
3267
3268                dst_ptl = huge_pte_lock(h, dst, dst_pte);
3269                src_ptl = huge_pte_lockptr(h, src, src_pte);
3270                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3271                entry = huge_ptep_get(src_pte);
3272                if (huge_pte_none(entry)) { /* skip none entry */
3273                        ;
3274                } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3275                                    is_hugetlb_entry_hwpoisoned(entry))) {
3276                        swp_entry_t swp_entry = pte_to_swp_entry(entry);
3277
3278                        if (is_write_migration_entry(swp_entry) && cow) {
3279                                /*
3280                                 * COW mappings require pages in both
3281                                 * parent and child to be set read-only.
3282                                 */
3283                                make_migration_entry_read(&swp_entry);
3284                                entry = swp_entry_to_pte(swp_entry);
3285                                set_huge_swap_pte_at(src, addr, src_pte,
3286                                                     entry, sz);
3287                        }
3288                        set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3289                } else {
3290                        if (cow) {
3291                                /*
3292                                 * No need to notify as we are downgrading page
3293                                 * table protection, not changing it to point
3294                                 * to a new page.
3295                                 *
3296                                 * See Documentation/vm/mmu_notifier.rst
3297                                 */
3298                                huge_ptep_set_wrprotect(src, addr, src_pte);
3299                        }
3300                        entry = huge_ptep_get(src_pte);
3301                        ptepage = pte_page(entry);
3302                        get_page(ptepage);
3303                        page_dup_rmap(ptepage, true);
3304                        set_huge_pte_at(dst, addr, dst_pte, entry);
3305                        hugetlb_count_add(pages_per_huge_page(h), dst);
3306                }
3307                spin_unlock(src_ptl);
3308                spin_unlock(dst_ptl);
3309        }
3310
3311        if (cow)
3312                mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3313
3314        return ret;
3315}
3316
3317void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3318                            unsigned long start, unsigned long end,
3319                            struct page *ref_page)
3320{
3321        struct mm_struct *mm = vma->vm_mm;
3322        unsigned long address;
3323        pte_t *ptep;
3324        pte_t pte;
3325        spinlock_t *ptl;
3326        struct page *page;
3327        struct hstate *h = hstate_vma(vma);
3328        unsigned long sz = huge_page_size(h);
3329        unsigned long mmun_start = start;       /* For mmu_notifiers */
3330        unsigned long mmun_end   = end;         /* For mmu_notifiers */
3331
3332        WARN_ON(!is_vm_hugetlb_page(vma));
3333        BUG_ON(start & ~huge_page_mask(h));
3334        BUG_ON(end & ~huge_page_mask(h));
3335
3336        /*
3337         * This is a hugetlb vma; all of its pte entries should point
3338         * to a huge page.
3339         */
3340        tlb_remove_check_page_size_change(tlb, sz);
3341        tlb_start_vma(tlb, vma);
3342
3343        /*
3344         * If sharing is possible, alert mmu notifiers of the worst case.
3345         */
3346        adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
3347        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3348        address = start;
3349        for (; address < end; address += sz) {
3350                ptep = huge_pte_offset(mm, address, sz);
3351                if (!ptep)
3352                        continue;
3353
3354                ptl = huge_pte_lock(h, mm, ptep);
3355                if (huge_pmd_unshare(mm, &address, ptep)) {
3356                        spin_unlock(ptl);
3357                        /*
3358                         * We just unmapped a page of PMDs by clearing a PUD.
3359                         * The caller's TLB flush range should cover this area.
3360                         */
3361                        continue;
3362                }
3363
3364                pte = huge_ptep_get(ptep);
3365                if (huge_pte_none(pte)) {
3366                        spin_unlock(ptl);
3367                        continue;
3368                }
3369
3370                /*
3371                 * A migrating or HWPoisoned hugepage is already unmapped
3372                 * and its refcount dropped, so just clear the pte here.
3373                 */
3374                if (unlikely(!pte_present(pte))) {
3375                        huge_pte_clear(mm, address, ptep, sz);
3376                        spin_unlock(ptl);
3377                        continue;
3378                }
3379
3380                page = pte_page(pte);
3381                /*
3382                 * If a reference page is supplied, it is because a specific
3383                 * page is being unmapped, not a range. Ensure the page we
3384                 * are about to unmap is the actual page of interest.
3385                 */
3386                if (ref_page) {
3387                        if (page != ref_page) {
3388                                spin_unlock(ptl);
3389                                continue;
3390                        }
3391                        /*
3392                         * Mark the VMA as having unmapped its page so that
3393                         * future faults in this VMA will fail rather than
3394                         * looking like data was lost
3395                         */
3396                        set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3397                }
3398
3399                pte = huge_ptep_get_and_clear(mm, address, ptep);
3400                tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3401                if (huge_pte_dirty(pte))
3402                        set_page_dirty(page);
3403
3404                hugetlb_count_sub(pages_per_huge_page(h), mm);
3405                page_remove_rmap(page, true);
3406
3407                spin_unlock(ptl);
3408                tlb_remove_page_size(tlb, page, huge_page_size(h));
3409                /*
3410                 * Bail out after unmapping reference page if supplied
3411                 */
3412                if (ref_page)
3413                        break;
3414        }
3415        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3416        tlb_end_vma(tlb, vma);
3417}
3418
3419void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3420                          struct vm_area_struct *vma, unsigned long start,
3421                          unsigned long end, struct page *ref_page)
3422{
3423        __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3424
3425        /*
3426         * Clear this flag so that x86's huge_pmd_share page_table_shareable
3427         * test will fail on a vma being torn down, and not grab a page table
3428         * on its way out.  We're lucky that the flag has such an appropriate
3429         * name, and can in fact be safely cleared here. We could clear it
3430         * before the __unmap_hugepage_range above, but all that's necessary
3431         * is to clear it before releasing the i_mmap_rwsem. This works
3432         * because in the context this is called, the VMA is about to be
3433         * destroyed and the i_mmap_rwsem is held.
3434         */
3435        vma->vm_flags &= ~VM_MAYSHARE;
3436}
3437
3438void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3439                          unsigned long end, struct page *ref_page)
3440{
3441        struct mm_struct *mm;
3442        struct mmu_gather tlb;
3443        unsigned long tlb_start = start;
3444        unsigned long tlb_end = end;
3445
3446        /*
3447         * If shared PMDs were possibly used within this vma range, adjust
3448         * start/end for worst case tlb flushing.
3449         * Note that we can not be sure if PMDs are shared until we try to
3450         * unmap pages.  However, we want to make sure TLB flushing covers
3451         * the largest possible range.
3452         */
3453        adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3454
3455        mm = vma->vm_mm;
3456
3457        tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3458        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3459        tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3460}
3461
3462/*
3463 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3464 * mapping it owns the reserve page for. The intention is to unmap the page
3465 * from other VMAs and let the children be SIGKILLed if they are faulting the
3466 * same region.
3467 */
3468static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3469                              struct page *page, unsigned long address)
3470{
3471        struct hstate *h = hstate_vma(vma);
3472        struct vm_area_struct *iter_vma;
3473        struct address_space *mapping;
3474        pgoff_t pgoff;
3475
3476        /*
3477         * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3478         * from page cache lookup which is in HPAGE_SIZE units.
3479         */
3480        address = address & huge_page_mask(h);
3481        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3482                        vma->vm_pgoff;
3483        mapping = vma->vm_file->f_mapping;
3484
3485        /*
3486         * Take the mapping lock for the duration of the table walk. Since
3487         * this mapping is shared between all the VMAs,
3488         * __unmap_hugepage_range() is called with the lock already held.
3489         */
3490        i_mmap_lock_write(mapping);
3491        vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3492                /* Do not unmap the current VMA */
3493                if (iter_vma == vma)
3494                        continue;
3495
3496                /*
3497                 * Shared VMAs have their own reserves and do not affect
3498                 * MAP_PRIVATE accounting but it is possible that a shared
3499                 * VMA is using the same page so check and skip such VMAs.
3500                 */
3501                if (iter_vma->vm_flags & VM_MAYSHARE)
3502                        continue;
3503
3504                /*
3505                 * Unmap the page from other VMAs without their own reserves.
3506                 * They get marked to be SIGKILLed if they fault in these
3507                 * areas. This is because a future no-page fault on this VMA
3508                 * could insert a zeroed page instead of the data that existed
3509                 * at the time of fork. This would look like data corruption.
3510                 */
3511                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3512                        unmap_hugepage_range(iter_vma, address,
3513                                             address + huge_page_size(h), page);
3514        }
3515        i_mmap_unlock_write(mapping);
3516}
3517
3518/*
3519 * hugetlb_cow() should be called with the page lock of the original hugepage held.
3520 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3521 * cannot race with other handlers or page migration.
3522 * Keep the pte_same checks anyway to make transition from the mutex easier.
3523 */
3524static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3525                       unsigned long address, pte_t *ptep,
3526                       struct page *pagecache_page, spinlock_t *ptl)
3527{
3528        pte_t pte;
3529        struct hstate *h = hstate_vma(vma);
3530        struct page *old_page, *new_page;
3531        int outside_reserve = 0;
3532        vm_fault_t ret = 0;
3533        unsigned long mmun_start;       /* For mmu_notifiers */
3534        unsigned long mmun_end;         /* For mmu_notifiers */
3535        unsigned long haddr = address & huge_page_mask(h);
3536
3537        pte = huge_ptep_get(ptep);
3538        old_page = pte_page(pte);
3539
3540retry_avoidcopy:
3541        /* If no-one else is actually using this page, avoid the copy
3542         * and just make the page writable */
3543        if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3544                page_move_anon_rmap(old_page, vma);
3545                set_huge_ptep_writable(vma, haddr, ptep);
3546                return 0;
3547        }
3548
3549        /*
3550         * If the process that created a MAP_PRIVATE mapping is about to
3551         * perform a COW due to a shared page count, attempt to satisfy
3552         * the allocation without using the existing reserves. The pagecache
3553         * page is used to determine if the reserve at this address was
3554         * consumed or not. If reserves were used, a partial faulted mapping
3555         * at the time of fork() could consume its reserves on COW instead
3556         * of the full address range.
3557         */
3558        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3559                        old_page != pagecache_page)
3560                outside_reserve = 1;
3561
3562        get_page(old_page);
3563
3564        /*
3565         * Drop page table lock as buddy allocator may be called. It will
3566         * be acquired again before returning to the caller, as expected.
3567         */
3568        spin_unlock(ptl);
3569        new_page = alloc_huge_page(vma, haddr, outside_reserve);
3570
3571        if (IS_ERR(new_page)) {
3572                /*
3573                 * If a process owning a MAP_PRIVATE mapping fails to COW,
3574                 * it is due to references held by a child and an insufficient
3575                 * huge page pool. To guarantee the original mapper's
3576                 * reliability, unmap the page from child processes. The child
3577                 * may get SIGKILLed if it later faults.
3578                 */
3579                if (outside_reserve) {
3580                        put_page(old_page);
3581                        BUG_ON(huge_pte_none(pte));
3582                        unmap_ref_private(mm, vma, old_page, haddr);
3583                        BUG_ON(huge_pte_none(pte));
3584                        spin_lock(ptl);
3585                        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3586                        if (likely(ptep &&
3587                                   pte_same(huge_ptep_get(ptep), pte)))
3588                                goto retry_avoidcopy;
3589                        /*
3590                         * A race occurred while re-acquiring the page
3591                         * table lock, and our job is done.
3592                         */
3593                        return 0;
3594                }
3595
3596                ret = vmf_error(PTR_ERR(new_page));
3597                goto out_release_old;
3598        }
3599
3600        /*
3601         * When the original hugepage is a shared one, it does not have
3602         * an anon_vma prepared.
3603         */
3604        if (unlikely(anon_vma_prepare(vma))) {
3605                ret = VM_FAULT_OOM;
3606                goto out_release_all;
3607        }
3608
3609        copy_user_huge_page(new_page, old_page, address, vma,
3610                            pages_per_huge_page(h));
3611        __SetPageUptodate(new_page);
3612        set_page_huge_active(new_page);
3613
3614        mmun_start = haddr;
3615        mmun_end = mmun_start + huge_page_size(h);
3616        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3617
3618        /*
3619         * Retake the page table lock to check for racing updates
3620         * before the page tables are altered
3621         */
3622        spin_lock(ptl);
3623        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3624        if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3625                ClearPagePrivate(new_page);
3626
3627                /* Break COW */
3628                huge_ptep_clear_flush(vma, haddr, ptep);
3629                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3630                set_huge_pte_at(mm, haddr, ptep,
3631                                make_huge_pte(vma, new_page, 1));
3632                page_remove_rmap(old_page, true);
3633                hugepage_add_new_anon_rmap(new_page, vma, haddr);
3634                /* Make the old page be freed below */
3635                new_page = old_page;
3636        }
3637        spin_unlock(ptl);
3638        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3639out_release_all:
3640        restore_reserve_on_error(h, vma, haddr, new_page);
3641        put_page(new_page);
3642out_release_old:
3643        put_page(old_page);
3644
3645        spin_lock(ptl); /* Caller expects lock to be held */
3646        return ret;
3647}
3648
3649/* Return the pagecache page at a given address within a VMA */
3650static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3651                        struct vm_area_struct *vma, unsigned long address)
3652{
3653        struct address_space *mapping;
3654        pgoff_t idx;
3655
3656        mapping = vma->vm_file->f_mapping;
3657        idx = vma_hugecache_offset(h, vma, address);
3658
3659        return find_lock_page(mapping, idx);
3660}
3661
3662/*
3663 * Return whether there is a pagecache page backing the given address in the
3664 * VMA. Caller follow_hugetlb_page() holds page_table_lock, so we cannot lock_page.
3665 */
3666static bool hugetlbfs_pagecache_present(struct hstate *h,
3667                        struct vm_area_struct *vma, unsigned long address)
3668{
3669        struct address_space *mapping;
3670        pgoff_t idx;
3671        struct page *page;
3672
3673        mapping = vma->vm_file->f_mapping;
3674        idx = vma_hugecache_offset(h, vma, address);
3675
3676        page = find_get_page(mapping, idx);
3677        if (page)
3678                put_page(page);
3679        return page != NULL;
3680}
3681
3682int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3683                           pgoff_t idx)
3684{
3685        struct inode *inode = mapping->host;
3686        struct hstate *h = hstate_inode(inode);
3687        int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3688
3689        if (err)
3690                return err;
3691        ClearPagePrivate(page);
3692
3693        spin_lock(&inode->i_lock);
3694        inode->i_blocks += blocks_per_huge_page(h);
3695        spin_unlock(&inode->i_lock);
3696        return 0;
3697}
3698
3699static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3700                        struct vm_area_struct *vma,
3701                        struct address_space *mapping, pgoff_t idx,
3702                        unsigned long address, pte_t *ptep, unsigned int flags)
3703{
3704        struct hstate *h = hstate_vma(vma);
3705        vm_fault_t ret = VM_FAULT_SIGBUS;
3706        int anon_rmap = 0;
3707        unsigned long size;
3708        struct page *page;
3709        pte_t new_pte;
3710        spinlock_t *ptl;
3711        unsigned long haddr = address & huge_page_mask(h);
3712
3713        /*
3714         * Currently, we are forced to kill the process in the event the
3715         * original mapper has unmapped pages from the child due to a failed
3716         * COW. Warn that such a situation has occurred as it may not be obvious.
3717         */
3718        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3719                pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3720                           current->pid);
3721                return ret;
3722        }
3723
3724        /*
3725         * Use page lock to guard against racing truncation
3726         * before we get page_table_lock.
3727         */
3728retry:
3729        page = find_lock_page(mapping, idx);
3730        if (!page) {
3731                size = i_size_read(mapping->host) >> huge_page_shift(h);
3732                if (idx >= size)
3733                        goto out;
3734
3735                /*
3736                 * Check for page in userfault range
3737                 */
3738                if (userfaultfd_missing(vma)) {
3739                        u32 hash;
3740                        struct vm_fault vmf = {
3741                                .vma = vma,
3742                                .address = haddr,
3743                                .flags = flags,
3744                                /*
3745                                 * Hard to debug if it ends up being
3746                                 * used by a callee that assumes
3747                                 * something about the other
3748                                 * uninitialized fields... same as in
3749                                 * memory.c
3750                                 */
3751                        };
3752
3753                        /*
3754                         * hugetlb_fault_mutex must be dropped before
3755                         * handling userfault.  Reacquire after handling
3756                         * fault to make calling code simpler.
3757                         */
3758                        hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3759                                                        idx, haddr);
3760                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3761                        ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3762                        mutex_lock(&hugetlb_fault_mutex_table[hash]);
3763                        goto out;
3764                }
3765
3766                page = alloc_huge_page(vma, haddr, 0);
3767                if (IS_ERR(page)) {
3768                        ret = vmf_error(PTR_ERR(page));
3769                        goto out;
3770                }
3771                clear_huge_page(page, address, pages_per_huge_page(h));
3772                __SetPageUptodate(page);
3773                set_page_huge_active(page);
3774
3775                if (vma->vm_flags & VM_MAYSHARE) {
3776                        int err = huge_add_to_page_cache(page, mapping, idx);
3777                        if (err) {
3778                                put_page(page);
3779                                if (err == -EEXIST)
3780                                        goto retry;
3781                                goto out;
3782                        }
3783                } else {
3784                        lock_page(page);
3785                        if (unlikely(anon_vma_prepare(vma))) {
3786                                ret = VM_FAULT_OOM;
3787                                goto backout_unlocked;
3788                        }
3789                        anon_rmap = 1;
3790                }
3791        } else {
3792                /*
3793                 * If a memory error occurs between mmap() and fault, a process
3794                 * may not have a hwpoisoned swap entry for the errored virtual
3795                 * address, so block the hugepage fault with a PG_hwpoison check.
3796                 */
3797                if (unlikely(PageHWPoison(page))) {
3798                        ret = VM_FAULT_HWPOISON |
3799                                VM_FAULT_SET_HINDEX(hstate_index(h));
3800                        goto backout_unlocked;
3801                }
3802        }
3803
3804        /*
3805         * If we are going to COW a private mapping later, we examine the
3806         * pending reservations for this page now. This will ensure that
3807         * any allocations necessary to record that reservation occur outside
3808         * the spinlock.
3809         */
3810        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3811                if (vma_needs_reservation(h, vma, haddr) < 0) {
3812                        ret = VM_FAULT_OOM;
3813                        goto backout_unlocked;
3814                }
3815                /* Just decrements count, does not deallocate */
3816                vma_end_reservation(h, vma, haddr);
3817        }
3818
3819        ptl = huge_pte_lock(h, mm, ptep);
3820        size = i_size_read(mapping->host) >> huge_page_shift(h);
3821        if (idx >= size)
3822                goto backout;
3823
3824        ret = 0;
3825        if (!huge_pte_none(huge_ptep_get(ptep)))
3826                goto backout;
3827
3828        if (anon_rmap) {
3829                ClearPagePrivate(page);
3830                hugepage_add_new_anon_rmap(page, vma, haddr);
3831        } else
3832                page_dup_rmap(page, true);
3833        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3834                                && (vma->vm_flags & VM_SHARED)));
3835        set_huge_pte_at(mm, haddr, ptep, new_pte);
3836
3837        hugetlb_count_add(pages_per_huge_page(h), mm);
3838        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3839                /* Optimization, do the COW without a second fault */
3840                ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3841        }
3842
3843        spin_unlock(ptl);
3844        unlock_page(page);
3845out:
3846        return ret;
3847
3848backout:
3849        spin_unlock(ptl);
3850backout_unlocked:
3851        unlock_page(page);
3852        restore_reserve_on_error(h, vma, haddr, page);
3853        put_page(page);
3854        goto out;
3855}
3856
3857#ifdef CONFIG_SMP
3858u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3859                            struct vm_area_struct *vma,
3860                            struct address_space *mapping,
3861                            pgoff_t idx, unsigned long address)
3862{
3863        unsigned long key[2];
3864        u32 hash;
3865
3866        if (vma->vm_flags & VM_SHARED) {
3867                key[0] = (unsigned long) mapping;
3868                key[1] = idx;
3869        } else {
3870                key[0] = (unsigned long) mm;
3871                key[1] = address >> huge_page_shift(h);
3872        }
3873
3874        hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3875
3876        return hash & (num_fault_mutexes - 1);
3877}
3878#else
3879/*
3880 * For uniprocessor systems we always use a single mutex, so just
3881 * return 0 and avoid the hashing overhead.
3882 */
3883u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3884                            struct vm_area_struct *vma,
3885                            struct address_space *mapping,
3886                            pgoff_t idx, unsigned long address)
3887{
3888        return 0;
3889}
3890#endif
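/*
 * Usage sketch, mirroring hugetlb_fault() below: the hash selects one of
 * num_fault_mutexes mutexes so that faults on the same logical page are
 * serialized while faults on different pages can proceed in parallel:
 *
 *     hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
 *     mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *     ... look up or instantiate the huge page ...
 *     mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */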
3891
3892vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3893                        unsigned long address, unsigned int flags)
3894{
3895        pte_t *ptep, entry;
3896        spinlock_t *ptl;
3897        vm_fault_t ret;
3898        u32 hash;
3899        pgoff_t idx;
3900        struct page *page = NULL;
3901        struct page *pagecache_page = NULL;
3902        struct hstate *h = hstate_vma(vma);
3903        struct address_space *mapping;
3904        int need_wait_lock = 0;
3905        unsigned long haddr = address & huge_page_mask(h);
3906
3907        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3908        if (ptep) {
3909                entry = huge_ptep_get(ptep);
3910                if (unlikely(is_hugetlb_entry_migration(entry))) {
3911                        migration_entry_wait_huge(vma, mm, ptep);
3912                        return 0;
3913                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3914                        return VM_FAULT_HWPOISON_LARGE |
3915                                VM_FAULT_SET_HINDEX(hstate_index(h));
3916        } else {
3917                ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3918                if (!ptep)
3919                        return VM_FAULT_OOM;
3920        }
3921
3922        mapping = vma->vm_file->f_mapping;
3923        idx = vma_hugecache_offset(h, vma, haddr);
3924
3925        /*
3926         * Serialize hugepage allocation and instantiation, so that we don't
3927         * get spurious allocation failures if two CPUs race to instantiate
3928         * the same page in the page cache.
3929         */
3930        hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
3931        mutex_lock(&hugetlb_fault_mutex_table[hash]);
3932
3933        entry = huge_ptep_get(ptep);
3934        if (huge_pte_none(entry)) {
3935                ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3936                goto out_mutex;
3937        }
3938
3939        ret = 0;
3940
3941        /*
3942         * entry could be a migration/hwpoison entry at this point, so this
3943         * check prevents the kernel from proceeding below assuming that we
3944         * have an active hugepage in the pagecache. This goto expects a second
3945         * page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3946         * checks will handle it properly.
3947         */
3948        if (!pte_present(entry))
3949                goto out_mutex;
3950
3951        /*
3952         * If we are going to COW the mapping later, we examine the pending
3953         * reservations for this page now. This will ensure that any
3954         * allocations necessary to record that reservation occur outside the
3955         * spinlock. For private mappings, we also lookup the pagecache
3956         * page now as it is used to determine if a reservation has been
3957         * consumed.
3958         */
3959        if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3960                if (vma_needs_reservation(h, vma, haddr) < 0) {
3961                        ret = VM_FAULT_OOM;
3962                        goto out_mutex;
3963                }
3964                /* Just decrements count, does not deallocate */
3965                vma_end_reservation(h, vma, haddr);
3966
3967                if (!(vma->vm_flags & VM_MAYSHARE))
3968                        pagecache_page = hugetlbfs_pagecache_page(h,
3969                                                                vma, haddr);
3970        }
3971
3972        ptl = huge_pte_lock(h, mm, ptep);
3973
3974        /* Check for a racing update before calling hugetlb_cow */
3975        if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3976                goto out_ptl;
3977
3978        /*
3979         * hugetlb_cow() requires page locks of pte_page(entry) and
3980         * pagecache_page, so here we need take the former one
3981         * when page != pagecache_page or !pagecache_page.
3982         */
3983        page = pte_page(entry);
3984        if (page != pagecache_page)
3985                if (!trylock_page(page)) {
3986                        need_wait_lock = 1;
3987                        goto out_ptl;
3988                }
3989
3990        get_page(page);
3991
3992        if (flags & FAULT_FLAG_WRITE) {
3993                if (!huge_pte_write(entry)) {
3994                        ret = hugetlb_cow(mm, vma, address, ptep,
3995                                          pagecache_page, ptl);
3996                        goto out_put_page;
3997                }
3998                entry = huge_pte_mkdirty(entry);
3999        }
4000        entry = pte_mkyoung(entry);
4001        if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4002                                                flags & FAULT_FLAG_WRITE))
4003                update_mmu_cache(vma, haddr, ptep);
4004out_put_page:
4005        if (page != pagecache_page)
4006                unlock_page(page);
4007        put_page(page);
4008out_ptl:
4009        spin_unlock(ptl);
4010
4011        if (pagecache_page) {
4012                unlock_page(pagecache_page);
4013                put_page(pagecache_page);
4014        }
4015out_mutex:
4016        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4017        /*
4018         * Generally it is safe to hold a refcount while waiting for the page
4019         * lock. But here we only wait to defer the next page fault and avoid a
4020         * busy loop; the page is not used after it is unlocked and before we
4021         * return from the current page fault. So we are safe from accessing a
4022         * freed page, even though we wait here without taking a refcount.
4023         */
4024        if (need_wait_lock)
4025                wait_on_page_locked(page);
4026        return ret;
4027}
4028
4029/*
4030 * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4031 * modifications for huge pages.
4032 */
4033int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4034                            pte_t *dst_pte,
4035                            struct vm_area_struct *dst_vma,
4036                            unsigned long dst_addr,
4037                            unsigned long src_addr,
4038                            struct page **pagep)
4039{
4040        struct address_space *mapping;
4041        pgoff_t idx;
4042        unsigned long size;
4043        int vm_shared = dst_vma->vm_flags & VM_SHARED;
4044        struct hstate *h = hstate_vma(dst_vma);
4045        pte_t _dst_pte;
4046        spinlock_t *ptl;
4047        int ret;
4048        struct page *page;
4049
4050        if (!*pagep) {
4051                ret = -ENOMEM;
4052                page = alloc_huge_page(dst_vma, dst_addr, 0);
4053                if (IS_ERR(page))
4054                        goto out;
4055
4056                ret = copy_huge_page_from_user(page,
4057                                                (const void __user *) src_addr,
4058                                                pages_per_huge_page(h), false);
4059
4060                /* fallback to copy_from_user outside mmap_sem */
4061                if (unlikely(ret)) {
4062                        ret = -EFAULT;
4063                        *pagep = page;
4064                        /* don't free the page */
4065                        goto out;
4066                }
4067        } else {
4068                page = *pagep;
4069                *pagep = NULL;
4070        }
4071
4072        /*
4073         * The memory barrier inside __SetPageUptodate makes sure that
4074         * preceding stores to the page contents become visible before
4075         * the set_pte_at() write.
4076         */
4077        __SetPageUptodate(page);
4078        set_page_huge_active(page);
4079
4080        mapping = dst_vma->vm_file->f_mapping;
4081        idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4082
4083        /*
4084         * If shared, add to page cache
4085         */
4086        if (vm_shared) {
4087                size = i_size_read(mapping->host) >> huge_page_shift(h);
4088                ret = -EFAULT;
4089                if (idx >= size)
4090                        goto out_release_nounlock;
4091
4092                /*
4093                 * Serialization between remove_inode_hugepages() and
4094                 * huge_add_to_page_cache() below happens through the
4095                 * hugetlb_fault_mutex_table, which must be held here by
4096                 * the caller.
4097                 */
4098                ret = huge_add_to_page_cache(page, mapping, idx);
4099                if (ret)
4100                        goto out_release_nounlock;
4101        }
4102
4103        ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4104        spin_lock(ptl);
4105
4106        /*
4107         * Recheck the i_size after holding PT lock to make sure not
4108         * to leave any page mapped (as page_mapped()) beyond the end
4109         * of the i_size (remove_inode_hugepages() is strict about
4110         * enforcing that). If we bail out here, we'll also leave a
4111         * page in the radix tree in the vm_shared case beyond the end
4112         * of the i_size, but remove_inode_hugepages() will take care
4113         * of it as soon as we drop the hugetlb_fault_mutex_table.
4114         */
4115        size = i_size_read(mapping->host) >> huge_page_shift(h);
4116        ret = -EFAULT;
4117        if (idx >= size)
4118                goto out_release_unlock;
4119
4120        ret = -EEXIST;
4121        if (!huge_pte_none(huge_ptep_get(dst_pte)))
4122                goto out_release_unlock;
4123
4124        if (vm_shared) {
4125                page_dup_rmap(page, true);
4126        } else {
4127                ClearPagePrivate(page);
4128                hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4129        }
4130
4131        _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4132        if (dst_vma->vm_flags & VM_WRITE)
4133                _dst_pte = huge_pte_mkdirty(_dst_pte);
4134        _dst_pte = pte_mkyoung(_dst_pte);
4135
4136        set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4137
4138        (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4139                                        dst_vma->vm_flags & VM_WRITE);
4140        hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4141
4142        /* No need to invalidate - it was non-present before */
4143        update_mmu_cache(dst_vma, dst_addr, dst_pte);
4144
4145        spin_unlock(ptl);
4146        if (vm_shared)
4147                unlock_page(page);
4148        ret = 0;
4149out:
4150        return ret;
4151out_release_unlock:
4152        spin_unlock(ptl);
4153        if (vm_shared)
4154                unlock_page(page);
4155out_release_nounlock:
4156        put_page(page);
4157        goto out;
4158}
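/*
 * Hedged userspace sketch (not part of this file): a monitor thread that
 * resolves a missing fault on a registered hugetlbfs range with
 * UFFDIO_COPY ends up in hugetlb_mcopy_atomic_pte() above. dst and len
 * must be aligned to the huge page size:
 *
 *     struct uffdio_copy copy = {
 *             .dst  = fault_addr & ~(huge_sz - 1), // huge-page aligned
 *             .src  = (unsigned long) src_buf,     // huge_sz bytes of data
 *             .len  = huge_sz,
 *             .mode = 0,
 *     };
 *     if (ioctl(uffd, UFFDIO_COPY, &copy))
 *             perror("UFFDIO_COPY");
 *
 * where uffd comes from the userfaultfd(2) syscall and the mapping was
 * registered with UFFDIO_REGISTER_MODE_MISSING.
 */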
4159
4160long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4161                         struct page **pages, struct vm_area_struct **vmas,
4162                         unsigned long *position, unsigned long *nr_pages,
4163                         long i, unsigned int flags, int *nonblocking)
4164{
4165        unsigned long pfn_offset;
4166        unsigned long vaddr = *position;
4167        unsigned long remainder = *nr_pages;
4168        struct hstate *h = hstate_vma(vma);
4169        int err = -EFAULT;
4170
4171        while (vaddr < vma->vm_end && remainder) {
4172                pte_t *pte;
4173                spinlock_t *ptl = NULL;
4174                int absent;
4175                struct page *page;
4176
4177                /*
4178                 * If we have a pending SIGKILL, don't keep faulting pages and
4179                 * potentially allocating memory.
4180                 */
4181                if (unlikely(fatal_signal_pending(current))) {
4182                        remainder = 0;
4183                        break;
4184                }
4185
4186                /*
4187                 * Some archs (sparc64, sh*) have multiple pte_t entries
4188                 * for each hugepage.  We have to make sure we get the
4189                 * first, for the page indexing below to work.
4190                 *
4191                 * Note that page table lock is not held when pte is null.
4192                 */
4193                pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4194                                      huge_page_size(h));
4195                if (pte)
4196                        ptl = huge_pte_lock(h, mm, pte);
4197                absent = !pte || huge_pte_none(huge_ptep_get(pte));
4198
4199                /*
4200                 * When coredumping, it suits get_dump_page if we just return
4201                 * an error where there's an empty slot with no huge pagecache
4202                 * to back it.  This way, we avoid allocating a hugepage, and
4203                 * the sparse dumpfile avoids allocating disk blocks, but its
4204                 * huge holes still show up with zeroes where they need to be.
4205                 */
4206                if (absent && (flags & FOLL_DUMP) &&
4207                    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4208                        if (pte)
4209                                spin_unlock(ptl);
4210                        remainder = 0;
4211                        break;
4212                }
4213
4214                /*
4215                 * We need to call hugetlb_fault for both hugepages under
4216                 * migration (in which case hugetlb_fault waits for the
4217                 * migration) and hwpoisoned hugepages (in which case we must
4218                 * prevent the caller from accessing them). To do this, we use
4219                 * is_swap_pte here instead of is_hugetlb_entry_migration and
4220                 * is_hugetlb_entry_hwpoisoned, because it covers both cases
4221                 * and because we cannot follow correct pages directly from
4222                 * any kind of swap entry.
4223                 */
4224                if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4225                    ((flags & FOLL_WRITE) &&
4226                      !huge_pte_write(huge_ptep_get(pte)))) {
4227                        vm_fault_t ret;
4228                        unsigned int fault_flags = 0;
4229
4230                        if (pte)
4231                                spin_unlock(ptl);
4232                        if (flags & FOLL_WRITE)
4233                                fault_flags |= FAULT_FLAG_WRITE;
4234                        if (nonblocking)
4235                                fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4236                        if (flags & FOLL_NOWAIT)
4237                                fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4238                                        FAULT_FLAG_RETRY_NOWAIT;
4239                        if (flags & FOLL_TRIED) {
4240                                VM_WARN_ON_ONCE(fault_flags &
4241                                                FAULT_FLAG_ALLOW_RETRY);
4242                                fault_flags |= FAULT_FLAG_TRIED;
4243                        }
4244                        ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4245                        if (ret & VM_FAULT_ERROR) {
4246                                err = vm_fault_to_errno(ret, flags);
4247                                remainder = 0;
4248                                break;
4249                        }
4250                        if (ret & VM_FAULT_RETRY) {
4251                                if (nonblocking)
4252                                        *nonblocking = 0;
4253                                *nr_pages = 0;
4254                                /*
4255                                 * VM_FAULT_RETRY must not return an
4256                                 * error, it will return zero
4257                                 * instead.
4258                                 *
4259                                 * No need to update "position" as the
4260                                 * caller will not check it after
4261                                 * *nr_pages is set to 0.
4262                                 */
4263                                return i;
4264                        }
4265                        continue;
4266                }
4267
4268                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4269                page = pte_page(huge_ptep_get(pte));
4270same_page:
4271                if (pages) {
4272                        pages[i] = mem_map_offset(page, pfn_offset);
4273                        get_page(pages[i]);
4274                }
4275
4276                if (vmas)
4277                        vmas[i] = vma;
4278
4279                vaddr += PAGE_SIZE;
4280                ++pfn_offset;
4281                --remainder;
4282                ++i;
4283                if (vaddr < vma->vm_end && remainder &&
4284                                pfn_offset < pages_per_huge_page(h)) {
4285                        /*
4286                         * We use pfn_offset to avoid touching the pageframes
4287                         * of this compound page.
4288                         */
4289                        goto same_page;
4290                }
4291                spin_unlock(ptl);
4292        }
4293        *nr_pages = remainder;
4294        /*
4295         * setting position is actually required only if remainder is
4296         * not zero, but it's faster not to add an "if (remainder)"
4297         * branch.
4298         */
	*position = vaddr;

	return i ? i : err;
}
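
/*
 * Note on the return convention above: follow_hugetlb_page() reports
 * partial progress.  When any pages were processed, their count (i) is
 * returned and any error from the last iteration is swallowed; an errno
 * comes back only when no progress at all was made.
 */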

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif
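
/*
 * Illustrative sketch only (no particular architecture): an arch that
 * needs its own behaviour would define the guard and the helper in its
 * asm/hugetlb.h, along the lines of:
 *
 *	#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
 *	static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
 *						   unsigned long start,
 *						   unsigned long end)
 *	{
 *		... arch-specific invalidation, e.g. walking the range
 *		    with the hugepage stride instead of PAGE_SIZE ...
 *	}
 */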

unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0;
	unsigned long f_start = start;
	unsigned long f_end = end;
	bool shared_pmd = false;

	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end.  Set f_start/f_end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);

	BUG_ON(address >= end);
	flush_cache_range(vma, f_start, f_end);

	mmu_notifier_invalidate_range_start(mm, f_start, f_end);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (; address < end; address += huge_page_size(h)) {
		spinlock_t *ptl;
		ptep = huge_pte_offset(mm, address, huge_page_size(h));
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			pages++;
			spin_unlock(ptl);
			shared_pmd = true;
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			spin_unlock(ptl);
			continue;
		}
		if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;

				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_huge_swap_pte_at(mm, address, ptep,
						     newpte, huge_page_size(h));
				pages++;
			}
			spin_unlock(ptl);
			continue;
		}
		if (!huge_pte_none(pte)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
			pte = arch_make_huge_pte(pte, vma, NULL, 0);
			set_huge_pte_at(mm, address, ptep, pte);
			pages++;
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and the page table can then be reused and filled with junk.  If we
	 * actually did unshare a page of pmds, flush the range corresponding
	 * to the pud.
	 */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, f_start, f_end);
	else
		flush_hugetlb_tlb_range(vma, start, end);
	/*
	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
	 * page table protection, not changing it to point to a new page.
	 *
	 * See Documentation/vm/mmu_notifier.rst
	 */
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	mmu_notifier_invalidate_range_end(mm, f_start, f_end);

	return pages << h->order;
}
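
/*
 * Worked example of the return value above, assuming 2MB hugepages
 * (h->order == 9): if the loop changed protection on 4 hugepage
 * entries, pages == 4 and the function returns 4 << 9 == 2048, i.e.
 * the count expressed in base (4KB) pages.
 */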

int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	long gbl_reserve;

	/* This should never happen */
	if (from > to) {
		VM_WARN(1, "%s called with a negative range\n", __func__);
		return -EINVAL;
	}
	/*
	 * Only apply hugepage reservation if asked.  At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves.
	 */
	if (vm_flags & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file.  Private mappings need
	 * to reserve the full area even if read-only, as mprotect() may be
	 * called to make the mapping read-write.  Assume !vma is a shm
	 * mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to);

	} else {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0) {
		ret = chg;
		goto out_err;
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0) {
		ret = -ENOSPC;
		goto out_err;
	}

	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not.
	 */
	ret = hugetlb_acct_memory(h, gbl_reserve);
	if (ret < 0) {
		/* put back original number of pages, chg */
		(void)hugepage_subpool_put_pages(spool, chg);
		goto out_err;
	}

	/*
	 * Account for the reservations made.  Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how large
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed.  Private mappings are per-VMA and
	 * only the consumed reservations are tracked.  When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map.  Hence, nothing
	 * else has to be done for private mappings here.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		long add = region_add(resv_map, from, to);

		if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add.  This
			 * indicates a race with alloc_huge_page.  Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		}
	}
	return 0;
out_err:
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		/* Don't call region_abort if region_chg failed */
		if (chg >= 0)
			region_abort(resv_map, from, to);
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_put(&resv_map->refs, resv_map_release);
	return ret;
}
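
/*
 * Usage sketch (simplified from the hugetlbfs mmap path; details vary
 * by kernel version): the caller converts byte offsets into hugepage
 * indices before reserving, roughly:
 *
 *	struct hstate *h = hstate_inode(inode);
 *
 *	if (hugetlb_reserve_pages(inode,
 *				  vma->vm_pgoff >> huge_page_order(h),
 *				  len >> huge_page_shift(h), vma,
 *				  vma->vm_flags))
 *		return -ENOMEM;		(reservation failed, refuse mmap)
 */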

long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}
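
/*
 * Worked example of the accounting above: a hole punched over n
 * reserved hugepages of which f had actually been faulted in and freed
 * yields chg == n from region_del() and freed == f from the caller, so
 * chg - freed unconsumed reservations are returned to the subpool and,
 * via gbl_reserve, to the global pool.
 */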

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}
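
/*
 * Worked example of the arithmetic above (addresses are illustrative,
 * assuming 4KB base pages): with svma->vm_start == 0x7f0000200000,
 * svma->vm_pgoff == 0, and the faulting VMA touching file index
 * idx == 0x400, we get saddr == (0x400 << PAGE_SHIFT) + 0x7f0000200000
 * == 0x7f0000600000, i.e. where svma maps the same file offset.
 * sbase/s_end round that to the enclosing PUD_SIZE region, and sharing
 * is refused unless svma covers the whole region with matching flags
 * and pmd alignment.
 */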

static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
		return true;
	return false;
}

/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long check_addr = *start;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
		unsigned long a_start = check_addr & PUD_MASK;
		unsigned long a_end = a_start + PUD_SIZE;

		/*
		 * If sharing is possible, adjust start/end if necessary.
		 */
		if (range_in_vma(vma, a_start, a_end)) {
			if (a_start < *start)
				*start = a_start;
			if (a_end > *end)
				*end = a_end;
		}
	}
}
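
/*
 * Worked example (x86-64, PUD_SIZE == 1GB): a request to adjust
 * [0x40200000, 0x40400000) lands in the PUD region
 * [0x40000000, 0x80000000).  If the VMA spans that whole region, *start
 * and *end are widened to those bounds so the caller flushes and
 * invalidates everything a shared PMD page could map.
 */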

/*
 * Search for a shareable pmd page for hugetlb.  In any case, calls
 * pmd_alloc() and returns the corresponding pte.  While this is not
 * necessary for the !shared pmd case because we can allocate the pmd
 * later as well, it makes the code much cleaner.  pmd allocation is
 * essential for the shared case because pud has to be populated inside
 * the same i_mmap_rwsem section - otherwise racing tasks could either
 * miss the sharing (see huge_pte_offset) or select a bad pmd for
 * sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;
	spinlock_t *ptl;

	if (!vma_shareable(vma, addr))
		return (pte_t *)pmd_alloc(mm, pud, addr);

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr,
					       vma_mmu_pagesize(svma));
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
	}
	spin_unlock(ptl);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_write(mapping);
	return pte;
}

/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping.  If pte is
 * shared, as indicated by page_count > 1, unmap is achieved by clearing
 * the pud and decrementing the ref count.  If count == 1, the pte page
 * is not shared.
 *
 * Called with the page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *          0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	p4d_t *p4d = p4d_offset(pgd, *addr);
	pud_t *pud = pud_offset(p4d, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
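
/*
 * Note on the *addr update above: callers iterate in huge_page_size()
 * steps and advance addr after a successful unshare.  Rewinding addr to
 * the end of the shared PUD range minus one hugepage makes the caller's
 * next "addr += huge_page_size(h)" land exactly on the first address
 * after the range the unshare just tore down, so the rest of the
 * unshared range is not rescanned entry by entry.
 */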
#define want_pmd_share()	(1)
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}
#define want_pmd_share()	(0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));

	return pte;
}
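
/*
 * Size-to-level mapping in huge_pte_alloc() above: sz == PUD_SIZE means
 * a gigantic page, so the pud entry itself is returned as the "pte";
 * sz == PMD_SIZE means a normal hugepage, where the pmd is either
 * shared via huge_pmd_share() or allocated privately.
 */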

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table or swap entry (PUD or PMD) for
 * address @addr, or NULL if a p*d_none() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz != PUD_SIZE && pud_none(*pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, addr);
	if (sz != PMD_SIZE && pmd_none(*pmd))
		return NULL;
	/* hugepage or swap? */
	if (pmd_huge(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	return NULL;
}
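
/*
 * Usage note: because huge_pte_offset() may return a non-present
 * (migration or hwpoison swap) entry rather than NULL, callers such as
 * the follow_hugetlb_page() loop earlier in this file re-check the
 * result with huge_ptep_get() and is_swap_pte()/pte_present() before
 * treating it as a mapped page.
 */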

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/*
 * These functions are overridable if your architecture needs its own
 * behavior.
 */
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

struct page * __weak
follow_huge_pd(struct vm_area_struct *vma,
	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
{
	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
	return NULL;
}

struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t pte;
retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	/*
	 * Make sure that the address range covered by this pmd is not
	 * unmapped by other threads.
	 */
	if (!pmd_huge(*pmd))
		goto out;
	pte = huge_ptep_get((pte_t *)pmd);
	if (pte_present(pte)) {
		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(pte)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
			goto retry;
		}
		/*
		 * hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}

struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

struct page * __weak
follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}

bool isolate_huge_page(struct page *page, struct list_head *list)
{
	bool ret = true;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
		ret = false;
		goto unlock;
	}
	clear_page_huge_active(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock(&hugetlb_lock);
	return ret;
}

void putback_active_hugepage(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	set_page_huge_active(page);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	put_page(page);
}

void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
{
	struct hstate *h = page_hstate(oldpage);

	hugetlb_cgroup_migrate(oldpage, newpage);
	set_page_owner_migrate_reason(newpage, reason);

	/*
	 * Transfer the temporary state of the new huge page.  This is
	 * the reverse of other transitions because the new page is
	 * going to be final while the old one will be freed, so the old
	 * page takes over the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well, otherwise the global surplus count will not match
	 * the per-node counts.
	 */
	if (PageHugeTemporary(newpage)) {
		int old_nid = page_to_nid(oldpage);
		int new_nid = page_to_nid(newpage);

		SetPageHugeTemporary(oldpage);
		ClearPageHugeTemporary(newpage);

		spin_lock(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock(&hugetlb_lock);
	}
}
