linux/mm/gup.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
        struct dev_pagemap *pgmap;
        unsigned int page_mask;
};

static void hpage_pincount_add(struct page *page, int refs)
{
        VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
        VM_BUG_ON_PAGE(page != compound_head(page), page);

        atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
        VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
        VM_BUG_ON_PAGE(page != compound_head(page), page);

        atomic_sub(refs, compound_pincount_ptr(page));
}

/* Equivalent to calling put_page() @refs times. */
static void put_page_refs(struct page *page, int refs)
{
#ifdef CONFIG_DEBUG_VM
        if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
                return;
#endif

        /*
         * Calling put_page() for each ref is unnecessarily slow. Only the last
         * ref needs a put_page().
         */
        if (refs > 1)
                page_ref_sub(page, refs - 1);
        put_page(page);
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
        struct page *head = compound_head(page);

        if (WARN_ON_ONCE(page_ref_count(head) < 0))
                return NULL;
        if (unlikely(!page_cache_add_speculative(head, refs)))
                return NULL;

        /*
         * At this point we have a stable reference to the head page; but it
         * could be that between the compound_head() lookup and the refcount
         * increment, the compound page was split, in which case we'd end up
         * holding a reference on a page that has nothing to do with the page
         * we were given anymore.
         * So now that the head page is stable, recheck that the pages still
         * belong together.
         */
        if (unlikely(compound_head(page) != head)) {
                put_page_refs(head, refs);
                return NULL;
        }

        return head;
}

/**
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * Even though the name includes "compound_head", this function is still
 * appropriate for callers that have a non-compound @page to get.
 *
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the page's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 106 * "grab" names in this file mean, "look at flags to decide whether to use
 107 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: page's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on compound pages that are > two pages long: page's refcount will
 *    be incremented by @refs, and page[2].hpage_pinned_refcount will be
 *    incremented by @refs * GUP_PIN_COUNTING_BIAS.
 *
 *    FOLL_PIN on normal pages, or compound pages that are two pages long:
 *    page's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
struct page *try_grab_compound_head(struct page *page,
                                    int refs, unsigned int flags)
{
        if (flags & FOLL_GET)
                return try_get_compound_head(page, refs);
        else if (flags & FOLL_PIN) {
                /*
                 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
                 * right zone, so fail and let the caller fall back to the slow
                 * path.
                 */
                if (unlikely((flags & FOLL_LONGTERM) &&
                             !is_pinnable_page(page)))
                        return NULL;

                /*
                 * CAUTION: Don't use compound_head() on the page before this
                 * point, the result won't be stable.
                 */
                page = try_get_compound_head(page, refs);
                if (!page)
                        return NULL;

                /*
                 * When pinning a compound page of order > 1 (which is what
                 * hpage_pincount_available() checks for), use an exact count to
                 * track it, via hpage_pincount_add/_sub().
                 *
                 * However, be sure to *also* increment the normal page refcount
                 * field at least once, so that the page really is pinned.
                 * That's why the refcount from the earlier
                 * try_get_compound_head() is left intact.
                 */
                if (hpage_pincount_available(page))
                        hpage_pincount_add(page, refs);
                else
                        page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));

                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
                                    refs);

                return page;
        }

        WARN_ON_ONCE(1);
        return NULL;
}

static void put_compound_head(struct page *page, int refs, unsigned int flags)
{
        if (flags & FOLL_PIN) {
                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
                                    refs);

                if (hpage_pincount_available(page))
                        hpage_pincount_sub(page, refs);
                else
                        refs *= GUP_PIN_COUNTING_BIAS;
        }

        put_page_refs(page, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 195 * "grab" names in this file mean, "look at flags to decide whether to use
 196 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * @page:    pointer to page to be grabbed
 * @flags:   gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases: please see the try_grab_compound_head() documentation, with
 * "refs=1".
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
        if (!(flags & (FOLL_GET | FOLL_PIN)))
                return true;

        return try_grab_compound_head(page, 1, flags);
}
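
/*
 * A minimal usage sketch, not part of gup.c proper; the example_ name is
 * hypothetical. A FOLL_GET grab is undone with put_page(), whereas a
 * FOLL_PIN grab must be undone with unpin_user_page() (defined below).
 */
static void __maybe_unused example_grab_and_release(struct page *page)
{
        if (!try_grab_page(page, FOLL_GET))
                return;

        /* ... read or write the page contents here ... */

        put_page(page);
}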

/**
 * unpin_user_page() - release a dma-pinned page
 * @page:            pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
        put_compound_head(compound_head(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
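
/*
 * A hedged sketch of a single-page FOLL_PIN life cycle, assuming a readable
 * user page at @uaddr; the example_ name is hypothetical. pin_user_pages_fast()
 * goes through the same FOLL_PIN accounting as try_grab_compound_head(), so
 * the page must be released with unpin_user_page(), never bare put_page().
 */
static int __maybe_unused example_pin_cycle(unsigned long uaddr)
{
        struct page *page;
        int ret;

        ret = pin_user_pages_fast(uaddr, 1, 0, &page);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* While pinned, page_maybe_dma_pinned() should report true. */
        WARN_ON_ONCE(!page_maybe_dma_pinned(page));

        unpin_user_page(page);
        return 0;
}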

static inline void compound_range_next(unsigned long i, unsigned long npages,
                                       struct page **list, struct page **head,
                                       unsigned int *ntails)
{
        struct page *next, *page;
        unsigned int nr = 1;

        if (i >= npages)
                return;

        next = *list + i;
        page = compound_head(next);
        if (PageCompound(page) && compound_order(page) >= 1)
                nr = min_t(unsigned int,
                           page + compound_nr(page) - next, npages - i);

        *head = page;
        *ntails = nr;
}

#define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
        for (__i = 0, \
             compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
             __i < __npages; __i += __ntails, \
             compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))

static inline void compound_next(unsigned long i, unsigned long npages,
                                 struct page **list, struct page **head,
                                 unsigned int *ntails)
{
        struct page *page;
        unsigned int nr;

        if (i >= npages)
                return;

        page = compound_head(list[i]);
        for (nr = i + 1; nr < npages; nr++) {
                if (compound_head(list[nr]) != page)
                        break;
        }

        *head = page;
        *ntails = nr - i;
}

#define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
        for (__i = 0, \
             compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
             __i < __npages; __i += __ntails, \
             compound_next(__i, __npages, __list, &(__head), &(__ntails)))
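
/*
 * An illustrative sketch, not a real caller: walk a pages[] array in
 * head-page batches, exactly as the unpin helpers below do. The
 * example_count_heads name is hypothetical.
 */
static unsigned long __maybe_unused example_count_heads(struct page **pages,
                                                        unsigned long npages)
{
        unsigned long i, nheads = 0;
        struct page *head;
        unsigned int ntails;

        /* Each iteration covers ntails consecutive entries sharing one head. */
        for_each_compound_head(i, pages, npages, head, ntails)
                nheads++;

        return nheads;
}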

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                                 bool make_dirty)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        if (!make_dirty) {
                unpin_user_pages(pages, npages);
                return;
        }

        for_each_compound_head(index, pages, npages, head, ntails) {
                /*
                 * Checking PageDirty at this point may race with
                 * clear_page_dirty_for_io(), but that's OK. Two key
                 * cases:
                 *
                 * 1) This code sees the page as already dirty, so it
                 * skips the call to set_page_dirty(). That could happen
                 * because clear_page_dirty_for_io() called
                 * page_mkclean(), followed by set_page_dirty().
                 * However, now the page is going to get written back,
                 * which meets the original intention of setting it
                 * dirty, so all is well: clear_page_dirty_for_io() goes
                 * on to call TestClearPageDirty(), and write the page
                 * back.
                 *
                 * 2) This code sees the page as clean, so it calls
                 * set_page_dirty(). The page stays dirty, despite being
                 * written back, so it gets written back again in the
                 * next writeback cycle. This is harmless.
                 */
                if (!PageDirty(head))
                        set_page_dirty_lock(head);
                put_compound_head(head, ntails, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
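
/*
 * A hedged end-to-end sketch, as an RDMA-style driver might do it; the
 * example_ function, buffer address and lengths are hypothetical. Pages the
 * device wrote into are released and marked dirty in one call, which uses
 * the batched compound-head walk above.
 */
static long __maybe_unused example_dma_write_cycle(unsigned long uaddr,
                                                   struct page **pages,
                                                   int npages)
{
        int nr;

        nr = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
        if (nr <= 0)
                return nr;

        /* ... program the device to DMA into these pages ... */

        unpin_user_pages_dirty_lock(pages, nr, true);
        return nr;
}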

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 354 * "gup-pinned page range" refers to a range of pages that has had one of the
 355 * pin_user_pages() variants called on that page.
 356 *
 357 * For the page ranges defined by [page .. page+npages], make that range (or
 358 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 359 * page range was previously listed as clean.
 360 *
 361 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 362 * required, then the caller should a) verify that this is really correct,
 363 * because _lock() is usually required, and b) hand code it:
 364 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
                                      bool make_dirty)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        for_each_compound_range(index, &page, npages, head, ntails) {
                if (make_dirty && !PageDirty(head))
                        set_page_dirty_lock(head);
                put_compound_head(head, ntails, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        /*
         * If this WARN_ON() fires, then the system *might* be leaking pages (by
         * leaving them pinned), but probably not. More likely, gup/pup returned
         * a hard -ERRNO error to the caller, who erroneously passed it here.
         */
        if (WARN_ON(IS_ERR_VALUE(npages)))
                return;

        for_each_compound_head(index, pages, npages, head, ntails)
                put_compound_head(head, ntails, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_pages);
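
/*
 * A small sketch of the caller-side contract the WARN_ON() above guards:
 * gup/pup may return a negative errno, which must never be forwarded here
 * as a page count. The example_ name is hypothetical.
 */
static void __maybe_unused example_unpin_result(struct page **pages, long nr)
{
        if (nr > 0)
                unpin_user_pages(pages, nr);
}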

/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle.  Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
        if (!test_bit(MMF_HAS_PINNED, mm_flags))
                set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
                unsigned int flags)
{
        /*
         * When core dumping an enormous anonymous area that nobody
         * has touched so far, we don't want to allocate unnecessary pages or
         * page tables.  Return error instead of NULL to skip handle_mm_fault,
         * then get_dump_page() will return NULL to leave a hole in the dump.
         * But we can only make this optimization where a hole would surely
         * be zero-filled if handle_mm_fault() actually did handle it.
         */
        if ((flags & FOLL_DUMP) &&
                        (vma_is_anonymous(vma) || !vma->vm_ops->fault))
                return ERR_PTR(-EFAULT);
        return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
                pte_t *pte, unsigned int flags)
{
        /* No page to get reference */
        if (flags & FOLL_GET)
                return -EFAULT;

        if (flags & FOLL_TOUCH) {
                pte_t entry = *pte;

                if (flags & FOLL_WRITE)
                        entry = pte_mkdirty(entry);
                entry = pte_mkyoung(entry);

                if (!pte_same(*pte, entry)) {
                        set_pte_at(vma->vm_mm, address, pte, entry);
                        update_mmu_cache(vma, address, pte);
                }
        }

        /* Proper page table entry exists, but no corresponding struct page */
        return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
        return pte_write(pte) ||
                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags,
                struct dev_pagemap **pgmap)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
        spinlock_t *ptl;
        pte_t *ptep, pte;
        int ret;

        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
                return ERR_PTR(-EINVAL);
retry:
        if (unlikely(pmd_bad(*pmd)))
                return no_page_table(vma, flags);

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!pte_present(pte)) {
                swp_entry_t entry;
                /*
                 * KSM's break_ksm() relies upon recognizing a ksm page
                 * even while it is being migrated, so for that case we
                 * need migration_entry_wait().
                 */
                if (likely(!(flags & FOLL_MIGRATION)))
                        goto no_page;
                if (pte_none(pte))
                        goto no_page;
                entry = pte_to_swp_entry(pte);
                if (!is_migration_entry(entry))
                        goto no_page;
                pte_unmap_unlock(ptep, ptl);
                migration_entry_wait(mm, pmd, address);
                goto retry;
        }
        if ((flags & FOLL_NUMA) && pte_protnone(pte))
                goto no_page;
        if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
                pte_unmap_unlock(ptep, ptl);
                return NULL;
        }

        page = vm_normal_page(vma, address, pte);
        if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
                /*
                 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
                 * case since they are only valid while holding the pgmap
                 * reference.
                 */
                *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
                if (*pgmap)
                        page = pte_page(pte);
                else
                        goto no_page;
        } else if (unlikely(!page)) {
                if (flags & FOLL_DUMP) {
                        /* Avoid special (like zero) pages in core dumps */
                        page = ERR_PTR(-EFAULT);
                        goto out;
                }

                if (is_zero_pfn(pte_pfn(pte))) {
                        page = pte_page(pte);
                } else {
                        ret = follow_pfn_pte(vma, address, ptep, flags);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }

        /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
        if (unlikely(!try_grab_page(page, flags))) {
                page = ERR_PTR(-ENOMEM);
                goto out;
        }
        /*
         * We need to make the page accessible if and only if we are going
         * to access its content (the FOLL_PIN case).  Please see
         * Documentation/core-api/pin_user_pages.rst for details.
         */
        if (flags & FOLL_PIN) {
                ret = arch_make_page_accessible(page);
                if (ret) {
                        unpin_user_page(page);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
                /*
                 * pte_mkyoung() would be more correct here, but atomic care
                 * is needed to avoid losing the dirty bit: it is easier to use
                 * mark_page_accessed().
                 */
                mark_page_accessed(page);
        }
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                /* Do not mlock pte-mapped THP */
                if (PageTransCompound(page))
                        goto out;

                /*
                 * The preliminary mapping check is mainly to avoid the
                 * pointless overhead of lock_page on the ZERO_PAGE
                 * which might bounce very badly if there is contention.
                 *
                 * If the page is already locked, we don't need to
                 * handle it now - vmscan will handle it later if and
                 * when it attempts to reclaim the page.
                 */
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();  /* push cached pages to LRU */
                        /*
                         * Because we lock page here, and migration is
                         * blocked by the pte's page reference, and we
                         * know the page is still mapped, we don't even
                         * need to check for file-cache page truncation.
                         */
                        mlock_vma_page(page);
                        unlock_page(page);
                }
        }
out:
        pte_unmap_unlock(ptep, ptl);
        return page;
no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return NULL;
        return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                                    unsigned long address, pud_t *pudp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pmd_t *pmd, pmdval;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pmd = pmd_offset(pudp, address);
        /*
         * The READ_ONCE() will stabilize the pmdval in a register or
         * on the stack so that it will stop changing under the code.
         */
        pmdval = READ_ONCE(*pmd);
        if (pmd_none(pmdval))
                return no_page_table(vma, flags);
        if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
                page = follow_huge_pmd(mm, address, pmd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pmd_val(pmdval)), flags,
                                      PMD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
retry:
        if (!pmd_present(pmdval)) {
                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);
                VM_BUG_ON(thp_migration_supported() &&
                                  !is_pmd_migration_entry(pmdval));
                if (is_pmd_migration_entry(pmdval))
                        pmd_migration_entry_wait(mm, pmd);
                pmdval = READ_ONCE(*pmd);
                /*
                 * MADV_DONTNEED may convert the pmd to null because
                 * mmap_lock is held in read mode
                 */
                if (pmd_none(pmdval))
                        return no_page_table(vma, flags);
                goto retry;
        }
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
        }
        if (likely(!pmd_trans_huge(pmdval)))
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

        if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
                return no_page_table(vma, flags);

retry_locked:
        ptl = pmd_lock(mm, pmd);
        if (unlikely(pmd_none(*pmd))) {
                spin_unlock(ptl);
                return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_present(*pmd))) {
                spin_unlock(ptl);
                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);
                pmd_migration_entry_wait(mm, pmd);
                goto retry_locked;
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        if (flags & FOLL_SPLIT_PMD) {
                int ret;
                page = pmd_page(*pmd);
                if (is_huge_zero_page(page)) {
                        spin_unlock(ptl);
                        ret = 0;
                        split_huge_pmd(vma, pmd, address);
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
                        spin_unlock(ptl);
                        split_huge_pmd(vma, pmd, address);
                        ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
                }

                return ret ? ERR_PTR(ret) :
                        follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        page = follow_trans_huge_pmd(vma, address, pmd, flags);
        spin_unlock(ptl);
        ctx->page_mask = HPAGE_PMD_NR - 1;
        return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
                                    unsigned long address, p4d_t *p4dp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pud_t *pud;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pud = pud_offset(p4dp, address);
        if (pud_none(*pud))
                return no_page_table(vma, flags);
        if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
                page = follow_huge_pud(mm, address, pud, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pud_val(*pud)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pud_val(*pud)), flags,
                                      PUD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (pud_devmap(*pud)) {
                ptl = pud_lock(mm, pud);
                page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
        }
        if (unlikely(pud_bad(*pud)))
                return no_page_table(vma, flags);

        return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
                                    unsigned long address, pgd_t *pgdp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        p4d_t *p4d;
        struct page *page;

        p4d = p4d_offset(pgdp, address);
        if (p4d_none(*p4d))
                return no_page_table(vma, flags);
        BUILD_BUG_ON(p4d_huge(*p4d));
        if (unlikely(p4d_bad(*p4d)))
                return no_page_table(vma, flags);

        if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(p4d_val(*p4d)), flags,
                                      P4D_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int flags,
                              struct follow_page_context *ctx)
{
        pgd_t *pgd;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        ctx->page_mask = 0;

        /* make this handle hugepd */
        page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
        if (!IS_ERR(page)) {
                WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
                return page;
        }

        pgd = pgd_offset(mm, address);

        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);

        if (pgd_huge(*pgd)) {
                page = follow_huge_pgd(mm, address, pgd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pgd_val(*pgd)), flags,
                                      PGDIR_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }

        return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int foll_flags)
{
        struct follow_page_context ctx = { NULL };
        struct page *page;

        if (vma_is_secretmem(vma))
                return NULL;

        page = follow_page_mask(vma, address, foll_flags, &ctx);
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return page;
}
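
/*
 * An illustrative sketch of a follow_page() lookup, in the style of internal
 * users such as KSM; example_peek_page is hypothetical. The caller must hold
 * mmap_lock, and with FOLL_GET the returned reference is the caller's to drop.
 */
static struct page *__maybe_unused example_peek_page(struct vm_area_struct *vma,
                                                     unsigned long addr)
{
        struct page *page;

        page = follow_page(vma, addr, FOLL_GET);
        if (IS_ERR_OR_NULL(page))
                return NULL;

        /* ... inspect the page, then put_page() when done ... */
        return page;
}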

static int get_gate_page(struct mm_struct *mm, unsigned long address,
                unsigned int gup_flags, struct vm_area_struct **vma,
                struct page **page)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret = -EFAULT;

        /* user gate pages are read-only */
        if (gup_flags & FOLL_WRITE)
                return -EFAULT;
        if (address > TASK_SIZE)
                pgd = pgd_offset_k(address);
        else
                pgd = pgd_offset_gate(mm, address);
        if (pgd_none(*pgd))
                return -EFAULT;
        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d))
                return -EFAULT;
        pud = pud_offset(p4d, address);
        if (pud_none(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return -EFAULT;
        VM_BUG_ON(pmd_trans_huge(*pmd));
        pte = pte_offset_map(pmd, address);
        if (pte_none(*pte))
                goto unmap;
        *vma = get_gate_vma(mm);
        if (!page)
                goto out;
        *page = vm_normal_page(*vma, address, *pte);
        if (!*page) {
                if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
                        goto unmap;
                *page = pte_page(*pte);
        }
        if (unlikely(!try_grab_page(*page, gup_flags))) {
                ret = -ENOMEM;
                goto unmap;
        }
out:
        ret = 0;
unmap:
        pte_unmap(pte);
        return ret;
}

/*
 * mmap_lock must be held on entry.  If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags, int *locked)
{
        unsigned int fault_flags = 0;
        vm_fault_t ret;

        /* mlock all present pages, but do not fault in new pages */
        if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
                return -ENOENT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (*flags & FOLL_REMOTE)
                fault_flags |= FAULT_FLAG_REMOTE;
        if (locked)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (*flags & FOLL_NOWAIT)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
        if (*flags & FOLL_TRIED) {
                /*
                 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
                 * can co-exist
                 */
                fault_flags |= FAULT_FLAG_TRIED;
        }

        ret = handle_mm_fault(vma, address, fault_flags, NULL);
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, *flags);

                if (err)
                        return err;
                BUG();
        }

        if (ret & VM_FAULT_RETRY) {
                if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                        *locked = 0;
                return -EBUSY;
        }

        /*
         * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
         * necessary, even if maybe_mkwrite decided not to set pte_write. We
         * can thus safely do subsequent page lookups as if they were reads.
         * But only do so when looping for pte_write is futile: in some cases
         * userspace may also be wanting to write to the gotten user page,
         * which a read fault here might prevent (a readonly page might get
         * reCOWed by userspace write).
         */
        if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
                *flags |= FOLL_COW;
        return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
        vm_flags_t vm_flags = vma->vm_flags;
        int write = (gup_flags & FOLL_WRITE);
        int foreign = (gup_flags & FOLL_REMOTE);

        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;

        if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
                return -EFAULT;

        if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
                return -EOPNOTSUPP;

        if (vma_is_secretmem(vma))
                return -EFAULT;

        if (write) {
                if (!(vm_flags & VM_WRITE)) {
                        if (!(gup_flags & FOLL_FORCE))
                                return -EFAULT;
                        /*
                         * We used to let the write,force case do COW in a
                         * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
                         * set a breakpoint in a read-only mapping of an
                         * executable, without corrupting the file (yet only
                         * when that file had been opened for writing!).
                         * Anon pages in shared mappings are surprising: now
                         * just reject it.
                         */
                        if (!is_cow_mapping(vm_flags))
                                return -EFAULT;
                }
        } else if (!(vm_flags & VM_READ)) {
                if (!(gup_flags & FOLL_FORCE))
                        return -EFAULT;
                /*
                 * Is there actually any vma we can reach here which does not
                 * have VM_MAYREAD set?
                 */
                if (!(vm_flags & VM_MAYREAD))
                        return -EFAULT;
        }
        /*
         * gups are always data accesses, not instruction
         * fetches, so execute=false here
         */
        if (!arch_vma_access_permitted(vma, write, false, foreign))
                return -EFAULT;
        return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:         mm_struct of target mm
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @gup_flags:  flags modifying pin behaviour
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long. Or NULL, if caller
 *              only intends to ensure the pages are faulted in.
 * @vmas:       array of pointers to vmas corresponding to each page.
 *              Or NULL if the caller does not require them.
 * @locked:     whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However, it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
 * released by an up_read().  That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_lock for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
{
        long ret = 0, i = 0;
        struct vm_area_struct *vma = NULL;
        struct follow_page_context ctx = { NULL };

        if (!nr_pages)
                return 0;

        start = untagged_addr(start);

        VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

        /*
         * If FOLL_FORCE is set then do not force a full fault as the hinting
         * fault information is unrelated to the reference behaviour of a task
         * using the address space
         */
        if (!(gup_flags & FOLL_FORCE))
                gup_flags |= FOLL_NUMA;

        do {
                struct page *page;
                unsigned int foll_flags = gup_flags;
                unsigned int page_increm;

                /* first iteration or cross vma bound */
                if (!vma || start >= vma->vm_end) {
                        vma = find_extend_vma(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
                                ret = get_gate_page(mm, start & PAGE_MASK,
                                                gup_flags, &vma,
                                                pages ? &pages[i] : NULL);
                                if (ret)
                                        goto out;
                                ctx.page_mask = 0;
                                goto next_page;
                        }

                        if (!vma) {
                                ret = -EFAULT;
                                goto out;
                        }
                        ret = check_vma_flags(vma, gup_flags);
                        if (ret)
                                goto out;

                        if (is_vm_hugetlb_page(vma)) {
                                i = follow_hugetlb_page(mm, vma, pages, vmas,
                                                &start, &nr_pages, i,
                                                gup_flags, locked);
                                if (locked && *locked == 0) {
                                        /*
                                         * We've got a VM_FAULT_RETRY
                                         * and we've lost mmap_lock.
                                         * We must stop here.
                                         */
                                        BUG_ON(gup_flags & FOLL_NOWAIT);
                                        goto out;
                                }
                                continue;
                        }
                }
retry:
                /*
                 * If we have a pending SIGKILL, don't keep faulting pages and
                 * potentially allocating memory.
                 */
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
                cond_resched();

                page = follow_page_mask(vma, start, foll_flags, &ctx);
                if (!page) {
                        ret = faultin_page(vma, start, &foll_flags, locked);
                        switch (ret) {
                        case 0:
                                goto retry;
                        case -EBUSY:
                                ret = 0;
                                fallthrough;
                        case -EFAULT:
                        case -ENOMEM:
                        case -EHWPOISON:
                                goto out;
                        case -ENOENT:
                                goto next_page;
                        }
                        BUG();
                } else if (PTR_ERR(page) == -EEXIST) {
                        /*
                         * Proper page table entry exists, but no corresponding
                         * struct page.
                         */
                        goto next_page;
                } else if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
                if (pages) {
                        pages[i] = page;
                        flush_anon_page(vma, page, start);
                        flush_dcache_page(page);
                        ctx.page_mask = 0;
                }
next_page:
                if (vmas) {
                        vmas[i] = vma;
                        ctx.page_mask = 0;
                }
                page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
                if (page_increm > nr_pages)
                        page_increm = nr_pages;
                i += page_increm;
                start += page_increm * PAGE_SIZE;
                nr_pages -= page_increm;
        } while (nr_pages);
out:
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return i ? i : ret;
}
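
/*
 * A hedged sketch of the raw __get_user_pages() contract with
 * @locked == NULL, in which case the mmap_lock is never dropped;
 * example_gup_peek is hypothetical. Real callers normally use the
 * higher-level get_user_pages*()/pin_user_pages*() APIs instead.
 */
static long __maybe_unused example_gup_peek(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long nr_pages,
                                            struct page **pages)
{
        long nr, i;

        mmap_read_lock(mm);
        nr = __get_user_pages(mm, start, nr_pages, FOLL_GET, pages, NULL,
                              NULL);
        mmap_read_unlock(mm);

        /* Partial success is possible: only the first nr entries are valid. */
        for (i = 0; i < nr; i++)
                put_page(pages[i]);

        return nr;
}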

static bool vma_permits_fault(struct vm_area_struct *vma,
                              unsigned int fault_flags)
{
        bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
        bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
        vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

        if (!(vm_flags & vma->vm_flags))
                return false;

        /*
         * The architecture might have a hardware protection
         * mechanism other than read/write that can deny access.
         *
         * gup always represents data access, not instruction
         * fetches, so execute=false here:
         */
        if (!arch_vma_access_permitted(vma, write, false, foreign))
                return false;

        return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:         mm_struct of target mm
 * @address:    user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:   did we unlock the mmap_lock while retrying, maybe NULL if caller
 *              does not allow retry. If NULL, the caller must guarantee
 *              that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it does not
 * have the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
 */
int fixup_user_fault(struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags,
                     bool *unlocked)
{
        struct vm_area_struct *vma;
        vm_fault_t ret;

        address = untagged_addr(address);

        if (unlocked)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                return -EFAULT;

        if (!vma_permits_fault(vma, fault_flags))
                return -EFAULT;

        if ((fault_flags & FAULT_FLAG_KILLABLE) &&
            fatal_signal_pending(current))
                return -EINTR;

        ret = handle_mm_fault(vma, address, fault_flags, NULL);
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, 0);

                if (err)
                        return err;
                BUG();
        }

        if (ret & VM_FAULT_RETRY) {
                mmap_read_lock(mm);
                *unlocked = true;
                fault_flags |= FAULT_FLAG_TRIED;
                goto retry;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
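
/*
 * A hedged sketch of the futex-style pattern described above: an access
 * under pagefault_disable() failed with -EFAULT, so resolve the fault and
 * let the caller retry. example_resolve_write_fault is hypothetical.
 */
static int __maybe_unused example_resolve_write_fault(struct mm_struct *mm,
                                                      unsigned long uaddr)
{
        bool unlocked = false;
        int ret;

        mmap_read_lock(mm);
        ret = fixup_user_fault(mm, uaddr, FAULT_FLAG_WRITE, &unlocked);
        mmap_read_unlock(mm);

        /*
         * If @unlocked became true, the lock was dropped and retaken, so
         * any earlier lookups done under it must be repeated.
         */
        return ret;
}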

/*
 * Please note that this function, unlike __get_user_pages, will not
 * return 0 for nr_pages > 0 without FOLL_NOWAIT.
1306 */
1307static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1308                                                unsigned long start,
1309                                                unsigned long nr_pages,
1310                                                struct page **pages,
1311                                                struct vm_area_struct **vmas,
1312                                                int *locked,
1313                                                unsigned int flags)
1314{
1315        long ret, pages_done;
1316        bool lock_dropped;
1317
1318        if (locked) {
1319                /* if VM_FAULT_RETRY can be returned, vmas become invalid */
1320                BUG_ON(vmas);
1321                /* check caller initialized locked */
1322                BUG_ON(*locked != 1);
1323        }
1324
1325        if (flags & FOLL_PIN)
1326                mm_set_has_pinned_flag(&mm->flags);
1327
1328        /*
1329         * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1330         * is to set FOLL_GET if the caller wants pages[] filled in (but has
1331         * carelessly failed to specify FOLL_GET), so keep doing that, but only
1332         * for FOLL_GET, not for the newer FOLL_PIN.
1333         *
1334         * FOLL_PIN always expects pages to be non-null, but no need to assert
1335         * that here, as any failures will be obvious enough.
1336         */
1337        if (pages && !(flags & FOLL_PIN))
1338                flags |= FOLL_GET;
1339
1340        pages_done = 0;
1341        lock_dropped = false;
1342        for (;;) {
1343                ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1344                                       vmas, locked);
1345                if (!locked)
1346                        /* VM_FAULT_RETRY couldn't trigger, bypass */
1347                        return ret;
1348
1349                /* VM_FAULT_RETRY cannot return errors */
1350                if (!*locked) {
1351                        BUG_ON(ret < 0);
1352                        BUG_ON(ret >= nr_pages);
1353                }
1354
1355                if (ret > 0) {
1356                        nr_pages -= ret;
1357                        pages_done += ret;
1358                        if (!nr_pages)
1359                                break;
1360                }
1361                if (*locked) {
1362                        /*
1363                         * VM_FAULT_RETRY didn't trigger or it was a
1364                         * FOLL_NOWAIT.
1365                         */
1366                        if (!pages_done)
1367                                pages_done = ret;
1368                        break;
1369                }
1370                /*
1371                 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1372                 * For the prefault case (!pages) we only update counts.
1373                 */
1374                if (likely(pages))
1375                        pages += ret;
1376                start += ret << PAGE_SHIFT;
1377                lock_dropped = true;
1378
1379retry:
1380                /*
1381                 * Repeat on the address that fired VM_FAULT_RETRY
1382                 * with both FAULT_FLAG_ALLOW_RETRY and
1383                 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
1384                 * by fatal signals, so we need to check it before we
1385                 * start trying again, otherwise it can loop forever.
1386                 */
1387
1388                if (fatal_signal_pending(current)) {
1389                        if (!pages_done)
1390                                pages_done = -EINTR;
1391                        break;
1392                }
1393
1394                ret = mmap_read_lock_killable(mm);
1395                if (ret) {
1396                        BUG_ON(ret > 0);
1397                        if (!pages_done)
1398                                pages_done = ret;
1399                        break;
1400                }
1401
1402                *locked = 1;
1403                ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1404                                       pages, NULL, locked);
1405                if (!*locked) {
1406                        /* Continue to retry until we succeed */
1407                        BUG_ON(ret != 0);
1408                        goto retry;
1409                }
1410                if (ret != 1) {
1411                        BUG_ON(ret > 1);
1412                        if (!pages_done)
1413                                pages_done = ret;
1414                        break;
1415                }
1416                nr_pages--;
1417                pages_done++;
1418                if (!nr_pages)
1419                        break;
1420                if (likely(pages))
1421                        pages++;
1422                start += PAGE_SIZE;
1423        }
1424        if (lock_dropped && *locked) {
1425                /*
1426                 * We must let the caller know we temporarily dropped the lock
1427                 * and so the critical section protected by it was lost.
1428                 */
1429                mmap_read_unlock(mm);
1430                *locked = 0;
1431        }
1432        return pages_done;
1433}
1434
1435/**
1436 * populate_vma_page_range() -  populate a range of pages in the vma.
1437 * @vma:   target vma
1438 * @start: start address
1439 * @end:   end address
1440 * @locked: whether the mmap_lock is still held
1441 *
1442 * This takes care of mlocking the pages too if VM_LOCKED is set.
1443 *
1444 * Return either number of pages pinned in the vma, or a negative error
1445 * code on error.
1446 *
1447 * vma->vm_mm->mmap_lock must be held.
1448 *
1449 * If @locked is NULL, it may be held for read or write and will
1450 * be unperturbed.
1451 *
1452 * If @locked is non-NULL, it must be held for read only and may be
1453 * released.  If it's released, *@locked will be set to 0.
1454 */
1455long populate_vma_page_range(struct vm_area_struct *vma,
1456                unsigned long start, unsigned long end, int *locked)
1457{
1458        struct mm_struct *mm = vma->vm_mm;
1459        unsigned long nr_pages = (end - start) / PAGE_SIZE;
1460        int gup_flags;
1461
1462        VM_BUG_ON(!PAGE_ALIGNED(start));
1463        VM_BUG_ON(!PAGE_ALIGNED(end));
1464        VM_BUG_ON_VMA(start < vma->vm_start, vma);
1465        VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1466        mmap_assert_locked(mm);
1467
1468        gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1469        if (vma->vm_flags & VM_LOCKONFAULT)
1470                gup_flags &= ~FOLL_POPULATE;
1471        /*
1472         * We want to touch writable mappings with a write fault in order
1473         * to break COW, except for shared mappings because these don't COW
1474         * and we would not want to dirty them for nothing.
1475         */
1476        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1477                gup_flags |= FOLL_WRITE;
1478
1479        /*
1480         * We want mlock to succeed for regions that have any permissions
1481         * other than PROT_NONE.
1482         */
1483        if (vma_is_accessible(vma))
1484                gup_flags |= FOLL_FORCE;
1485
1486        /*
1487         * We made sure addr is within a VMA, so the following will
1488         * not result in a stack expansion that recurses back here.
1489         */
1490        return __get_user_pages(mm, start, nr_pages, gup_flags,
1491                                NULL, NULL, locked);
1492}
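
/*
 * A minimal usage sketch (hypothetical caller, not part of gup.c): populate
 * one page-aligned range known to lie inside a single VMA, honouring the
 * "@locked may be cleared" contract documented above. All names other than
 * populate_vma_page_range() and the mmap_lock helpers are assumptions.
 *
 *      int locked = 1;
 *      long ret = -EFAULT;
 *
 *      mmap_read_lock(mm);
 *      vma = find_vma(mm, start);
 *      if (vma && start >= vma->vm_start && end <= vma->vm_end)
 *              ret = populate_vma_page_range(vma, start, end, &locked);
 *      if (locked)
 *              mmap_read_unlock(mm);
 */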
1493
1494/*
1495 * faultin_vma_page_range() - populate (prefault) page tables inside the
1496 *                            given VMA range readable/writable
1497 *
1498 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1499 *
1500 * @vma: target vma
1501 * @start: start address
1502 * @end: end address
1503 * @write: whether to prefault readable or writable
1504 * @locked: whether the mmap_lock is still held
1505 *
1506 * Returns either number of processed pages in the vma, or a negative error
1507 * code on error (see __get_user_pages()).
1508 *
1509 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1510 * covered by the VMA.
1511 *
1512 * If @locked is NULL, it may be held for read or write and will be unperturbed.
1513 *
1514 * If @locked is non-NULL, it must be held for read only and may be released.  If
1515 * it's released, *@locked will be set to 0.
1516 */
1517long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
1518                            unsigned long end, bool write, int *locked)
1519{
1520        struct mm_struct *mm = vma->vm_mm;
1521        unsigned long nr_pages = (end - start) / PAGE_SIZE;
1522        int gup_flags;
1523
1524        VM_BUG_ON(!PAGE_ALIGNED(start));
1525        VM_BUG_ON(!PAGE_ALIGNED(end));
1526        VM_BUG_ON_VMA(start < vma->vm_start, vma);
1527        VM_BUG_ON_VMA(end > vma->vm_end, vma);
1528        mmap_assert_locked(mm);
1529
1530        /*
1531         * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1532         *             the page dirty with FOLL_WRITE -- which doesn't make a
1533         *             difference with !FOLL_FORCE, because the page is writable
1534         *             in the page table.
1535         * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1536         *                a poisoned page.
1537         * FOLL_POPULATE: Always populate memory with VM_LOCKONFAULT.
1538         * !FOLL_FORCE: Require proper access permissions.
1539         */
1540        gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_HWPOISON;
1541        if (write)
1542                gup_flags |= FOLL_WRITE;
1543
1544        /*
1545         * We want to report -EINVAL instead of -EFAULT for any permission
1546         * problems or incompatible mappings.
1547         */
1548        if (check_vma_flags(vma, gup_flags))
1549                return -EINVAL;
1550
1551        return __get_user_pages(mm, start, nr_pages, gup_flags,
1552                                NULL, NULL, locked);
1553}
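
/*
 * A usage sketch (hypothetical caller, not part of gup.c): prefaulting a
 * range one VMA at a time, in the style of an madvise()-like caller. The
 * loop structure and the revalidation after a dropped lock are the caller's
 * responsibility; everything except faultin_vma_page_range() itself is an
 * assumption here.
 *
 *      int locked = 1;
 *      long pages;
 *
 *      tmp_end = min(end, vma->vm_end);
 *      pages = faultin_vma_page_range(vma, start, tmp_end, write, &locked);
 *      if (!locked) {
 *              mmap_read_lock(mm);
 *              locked = 1;
 *              ... re-lookup and revalidate @vma before continuing ...
 *      }
 */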
1554
1555/*
1556 * __mm_populate - populate and/or mlock pages within a range of address space.
1557 *
1558 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1559 * flags. VMAs must be already marked with the desired vm_flags, and
1560 * mmap_lock must not be held.
1561 */
1562int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1563{
1564        struct mm_struct *mm = current->mm;
1565        unsigned long end, nstart, nend;
1566        struct vm_area_struct *vma = NULL;
1567        int locked = 0;
1568        long ret = 0;
1569
1570        end = start + len;
1571
1572        for (nstart = start; nstart < end; nstart = nend) {
1573                /*
1574                 * We want to fault in pages for the [nstart; end) address range.
1575                 * Find first corresponding VMA.
1576                 */
1577                if (!locked) {
1578                        locked = 1;
1579                        mmap_read_lock(mm);
1580                        vma = find_vma(mm, nstart);
1581                } else if (nstart >= vma->vm_end)
1582                        vma = vma->vm_next;
1583                if (!vma || vma->vm_start >= end)
1584                        break;
1585                /*
1586                 * Set [nstart; nend) to the intersection of the desired address
1587                 * range with the first VMA. Also, skip undesirable VMA types.
1588                 */
1589                nend = min(end, vma->vm_end);
1590                if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1591                        continue;
1592                if (nstart < vma->vm_start)
1593                        nstart = vma->vm_start;
1594                /*
1595                 * Now fault in a range of pages. populate_vma_page_range()
1596                 * double checks the vma flags, so that it won't mlock pages
1597                 * if the vma was already munlocked.
1598                 */
1599                ret = populate_vma_page_range(vma, nstart, nend, &locked);
1600                if (ret < 0) {
1601                        if (ignore_errors) {
1602                                ret = 0;
1603                                continue;       /* continue at next VMA */
1604                        }
1605                        break;
1606                }
1607                nend = nstart + ret * PAGE_SIZE;
1608                ret = 0;
1609        }
1610        if (locked)
1611                mmap_read_unlock(mm);
1612        return ret;     /* 0 or negative error code */
1613}
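
/*
 * For reference, the usual mlock()/MAP_POPULATE entry point is a thin inline
 * wrapper around __mm_populate() that deliberately ignores errors. A sketch
 * of it, as found (modulo details) in include/linux/mm.h:
 *
 *      static inline void mm_populate(unsigned long addr, unsigned long len)
 *      {
 *              (void) __mm_populate(addr, len, 1);     - errors are ignored
 *      }
 */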
1614#else /* CONFIG_MMU */
1615static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
1616                unsigned long nr_pages, struct page **pages,
1617                struct vm_area_struct **vmas, int *locked,
1618                unsigned int foll_flags)
1619{
1620        struct vm_area_struct *vma;
1621        unsigned long vm_flags;
1622        long i;
1623
1624        /* calculate required read or write permissions.
1625         * If FOLL_FORCE is set, we only require the "MAY" flags.
1626         */
1627        vm_flags  = (foll_flags & FOLL_WRITE) ?
1628                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1629        vm_flags &= (foll_flags & FOLL_FORCE) ?
1630                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1631
1632        for (i = 0; i < nr_pages; i++) {
1633                vma = find_vma(mm, start);
1634                if (!vma)
1635                        goto finish_or_fault;
1636
1637                /* protect what we can, including chardevs */
1638                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1639                    !(vm_flags & vma->vm_flags))
1640                        goto finish_or_fault;
1641
1642                if (pages) {
1643                        pages[i] = virt_to_page(start);
1644                        if (pages[i])
1645                                get_page(pages[i]);
1646                }
1647                if (vmas)
1648                        vmas[i] = vma;
1649                start = (start + PAGE_SIZE) & PAGE_MASK;
1650        }
1651
1652        return i;
1653
1654finish_or_fault:
1655        return i ? : -EFAULT;
1656}
1657#endif /* !CONFIG_MMU */
1658
1659/**
1660 * get_dump_page() - pin user page in memory while writing it to core dump
1661 * @addr: user address
1662 *
1663 * Returns struct page pointer of user page pinned for dump,
1664 * to be freed afterwards by put_page().
1665 *
1666 * Returns NULL on any kind of failure - a hole must then be inserted into
1667 * the corefile, to preserve alignment with its headers; and also returns
1668 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1669 * allowing a hole to be left in the corefile to save disk space.
1670 *
1671 * Called without mmap_lock (takes and releases the mmap_lock by itself).
1672 */
1673#ifdef CONFIG_ELF_CORE
1674struct page *get_dump_page(unsigned long addr)
1675{
1676        struct mm_struct *mm = current->mm;
1677        struct page *page;
1678        int locked = 1;
1679        int ret;
1680
1681        if (mmap_read_lock_killable(mm))
1682                return NULL;
1683        ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1684                                      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1685        if (locked)
1686                mmap_read_unlock(mm);
1687        return (ret == 1) ? page : NULL;
1688}
1689#endif /* CONFIG_ELF_CORE */
1690
1691#ifdef CONFIG_MIGRATION
1692/*
1693 * Check whether all pages are pinnable; if so, return the number of pages. If
1694 * some pages are not pinnable, migrate them and unpin all of the pages. Return
1695 * zero if pages were migrated, or if some pages could not be isolated.
1696 * Return a negative error code if migration fails.
1697 */
1698static long check_and_migrate_movable_pages(unsigned long nr_pages,
1699                                            struct page **pages,
1700                                            unsigned int gup_flags)
1701{
1702        unsigned long i;
1703        unsigned long isolation_error_count = 0;
1704        bool drain_allow = true;
1705        LIST_HEAD(movable_page_list);
1706        long ret = 0;
1707        struct page *prev_head = NULL;
1708        struct page *head;
1709        struct migration_target_control mtc = {
1710                .nid = NUMA_NO_NODE,
1711                .gfp_mask = GFP_USER | __GFP_NOWARN,
1712        };
1713
1714        for (i = 0; i < nr_pages; i++) {
1715                head = compound_head(pages[i]);
1716                if (head == prev_head)
1717                        continue;
1718                prev_head = head;
1719                /*
1720                 * Since we are going to pin these entries, try to migrate
1721                 * any page that is not pinnable out of the way first.
1722                 */
1723                if (!is_pinnable_page(head)) {
1724                        if (PageHuge(head)) {
1725                                if (!isolate_huge_page(head, &movable_page_list))
1726                                        isolation_error_count++;
1727                        } else {
1728                                if (!PageLRU(head) && drain_allow) {
1729                                        lru_add_drain_all();
1730                                        drain_allow = false;
1731                                }
1732
1733                                if (isolate_lru_page(head)) {
1734                                        isolation_error_count++;
1735                                        continue;
1736                                }
1737                                list_add_tail(&head->lru, &movable_page_list);
1738                                mod_node_page_state(page_pgdat(head),
1739                                                    NR_ISOLATED_ANON +
1740                                                    page_is_file_lru(head),
1741                                                    thp_nr_pages(head));
1742                        }
1743                }
1744        }
1745
1746        /*
1747         * If the list is empty and there were no isolation errors, then all
1748         * pages are in the correct zone and no migration is needed.
1749         */
1750        if (list_empty(&movable_page_list) && !isolation_error_count)
1751                return nr_pages;
1752
1753        if (gup_flags & FOLL_PIN) {
1754                unpin_user_pages(pages, nr_pages);
1755        } else {
1756                for (i = 0; i < nr_pages; i++)
1757                        put_page(pages[i]);
1758        }
1759        if (!list_empty(&movable_page_list)) {
1760                ret = migrate_pages(&movable_page_list, alloc_migration_target,
1761                                    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1762                                    MR_LONGTERM_PIN, NULL);
1763                if (ret && !list_empty(&movable_page_list))
1764                        putback_movable_pages(&movable_page_list);
1765        }
1766
1767        return ret > 0 ? -ENOMEM : ret;
1768}
1769#else
1770static long check_and_migrate_movable_pages(unsigned long nr_pages,
1771                                            struct page **pages,
1772                                            unsigned int gup_flags)
1773{
1774        return nr_pages;
1775}
1776#endif /* CONFIG_MIGRATION */
1777
1778/*
1779 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1780 * allows us to process the FOLL_LONGTERM flag.
1781 */
1782static long __gup_longterm_locked(struct mm_struct *mm,
1783                                  unsigned long start,
1784                                  unsigned long nr_pages,
1785                                  struct page **pages,
1786                                  struct vm_area_struct **vmas,
1787                                  unsigned int gup_flags)
1788{
1789        unsigned int flags;
1790        long rc;
1791
1792        if (!(gup_flags & FOLL_LONGTERM))
1793                return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1794                                               NULL, gup_flags);
1795        flags = memalloc_pin_save();
1796        do {
1797                rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1798                                             NULL, gup_flags);
1799                if (rc <= 0)
1800                        break;
1801                rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1802        } while (!rc);
1803        memalloc_pin_restore(flags);
1804
1805        return rc;
1806}
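
/*
 * A hedged example of the long-term pinning pattern this wrapper enables, as
 * a driver might use it via pin_user_pages(). The buffer address, page count
 * and the DMA step are hypothetical; the flag combination and the unpin
 * pairing follow Documentation/core-api/pin_user_pages.rst.
 *
 *      struct page *pages[16];
 *      long pinned;
 *
 *      mmap_read_lock(current->mm);
 *      pinned = pin_user_pages(user_addr, 16,
 *                              FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
 *      mmap_read_unlock(current->mm);
 *      if (pinned < 0)
 *              return pinned;
 *      ... set up long-lived DMA to the pinned pages ...
 *      unpin_user_pages(pages, pinned);
 */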
1807
1808static bool is_valid_gup_flags(unsigned int gup_flags)
1809{
1810        /*
1811         * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1812         * never directly by the caller, so enforce that with an assertion:
1813         */
1814        if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1815                return false;
1816        /*
1817         * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
1818         * that is that FOLL_LONGTERM is a more specific, more restrictive
1819         * case of FOLL_PIN.
1820         */
1821        if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1822                return false;
1823
1824        return true;
1825}
1826
1827#ifdef CONFIG_MMU
1828static long __get_user_pages_remote(struct mm_struct *mm,
1829                                    unsigned long start, unsigned long nr_pages,
1830                                    unsigned int gup_flags, struct page **pages,
1831                                    struct vm_area_struct **vmas, int *locked)
1832{
1833        /*
1834         * Parts of FOLL_LONGTERM behavior are incompatible with
1835         * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1836         * vmas. However, this only comes up if locked is set, and there are
1837         * callers that do request FOLL_LONGTERM, but do not set locked. So,
1838         * allow what we can.
1839         */
1840        if (gup_flags & FOLL_LONGTERM) {
1841                if (WARN_ON_ONCE(locked))
1842                        return -EINVAL;
1843                /*
1844                 * This will check the vmas (even if our vmas arg is NULL)
1845                 * and return -ENOTSUPP if DAX isn't allowed in this case:
1846                 */
1847                return __gup_longterm_locked(mm, start, nr_pages, pages,
1848                                             vmas, gup_flags | FOLL_TOUCH |
1849                                             FOLL_REMOTE);
1850        }
1851
1852        return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1853                                       locked,
1854                                       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1855}
1856
1857/**
1858 * get_user_pages_remote() - pin user pages in memory
1859 * @mm:         mm_struct of target mm
1860 * @start:      starting user address
1861 * @nr_pages:   number of pages from start to pin
1862 * @gup_flags:  flags modifying lookup behaviour
1863 * @pages:      array that receives pointers to the pages pinned.
1864 *              Should be at least nr_pages long. Or NULL, if caller
1865 *              only intends to ensure the pages are faulted in.
1866 * @vmas:       array of pointers to vmas corresponding to each page.
1867 *              Or NULL if the caller does not require them.
1868 * @locked:     pointer to lock flag indicating whether lock is held and
1869 *              subsequently whether VM_FAULT_RETRY functionality can be
1870 *              utilised. Lock must initially be held.
1871 *
1872 * Returns either number of pages pinned (which may be less than the
1873 * number requested), or an error. Details about the return value:
1874 *
1875 * -- If nr_pages is 0, returns 0.
1876 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1877 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1878 *    pages pinned. Again, this may be less than nr_pages.
1879 *
1880 * The caller is responsible for releasing returned @pages, via put_page().
1881 *
1882 * @vmas are valid only as long as mmap_lock is held.
1883 *
1884 * Must be called with mmap_lock held for read or write.
1885 *
1886 * get_user_pages_remote walks a process's page tables and takes a reference
1887 * to each struct page that each user address corresponds to at a given
1888 * instant. That is, it takes the page that would be accessed if a user
1889 * thread accesses the given user virtual address at that instant.
1890 *
1891 * This does not guarantee that the page exists in the user mappings when
1892 * get_user_pages_remote returns, and there may even be a completely different
1893 * page there in some cases (e.g. if mmapped pagecache has been invalidated
1894 * and subsequently re-faulted). However, it does guarantee that the page
1895 * won't be freed completely. And most callers simply care that the page
1896 * contains data that was valid *at some point in time*. Typically, an IO
1897 * or similar operation cannot guarantee anything stronger anyway because
1898 * locks can't be held over the syscall boundary.
1899 *
1900 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1901 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1902 * be called after the page is finished with, and before put_page is called.
1903 *
1904 * get_user_pages_remote is typically used for fewer-copy IO operations,
1905 * to get a handle on the memory by some means other than accesses
1906 * via the user virtual addresses. The pages may be submitted for
1907 * DMA to devices or accessed via their kernel linear mapping (via the
1908 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
1909 *
1910 * See also get_user_pages_fast, for performance critical applications.
1911 *
1912 * get_user_pages_remote should be phased out in favor of
1913 * get_user_pages_locked|unlocked or get_user_pages_fast. New code
1914 * should avoid using get_user_pages_remote because it cannot pass
1915 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1916 */
1917long get_user_pages_remote(struct mm_struct *mm,
1918                unsigned long start, unsigned long nr_pages,
1919                unsigned int gup_flags, struct page **pages,
1920                struct vm_area_struct **vmas, int *locked)
1921{
1922        if (!is_valid_gup_flags(gup_flags))
1923                return -EINVAL;
1924
1925        return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
1926                                       pages, vmas, locked);
1927}
1928EXPORT_SYMBOL(get_user_pages_remote);
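
/*
 * A usage sketch (hypothetical caller, not part of gup.c): reading one page
 * of another process's memory, roughly in the style of access_remote_vm().
 * The kmap step and the copy are illustrative; the locking and the
 * put_page() pairing follow the documentation above. FOLL_GET is implied
 * because @pages is non-NULL.
 *
 *      struct page *page;
 *      int locked = 1;
 *      void *maddr;
 *      long ret;
 *
 *      mmap_read_lock(mm);
 *      ret = get_user_pages_remote(mm, addr, 1, 0, &page, NULL, &locked);
 *      if (locked)
 *              mmap_read_unlock(mm);
 *      if (ret != 1)
 *              return -EFAULT;
 *      maddr = kmap(page);
 *      ... copy from maddr + offset_in_page(addr) ...
 *      kunmap(page);
 *      put_page(page);
 */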
1929
1930#else /* CONFIG_MMU */
1931long get_user_pages_remote(struct mm_struct *mm,
1932                           unsigned long start, unsigned long nr_pages,
1933                           unsigned int gup_flags, struct page **pages,
1934                           struct vm_area_struct **vmas, int *locked)
1935{
1936        return 0;
1937}
1938
1939static long __get_user_pages_remote(struct mm_struct *mm,
1940                                    unsigned long start, unsigned long nr_pages,
1941                                    unsigned int gup_flags, struct page **pages,
1942                                    struct vm_area_struct **vmas, int *locked)
1943{
1944        return 0;
1945}
1946#endif /* !CONFIG_MMU */
1947
1948/**
1949 * get_user_pages() - pin user pages in memory
1950 * @start:      starting user address
1951 * @nr_pages:   number of pages from start to pin
1952 * @gup_flags:  flags modifying lookup behaviour
1953 * @pages:      array that receives pointers to the pages pinned.
1954 *              Should be at least nr_pages long. Or NULL, if caller
1955 *              only intends to ensure the pages are faulted in.
1956 * @vmas:       array of pointers to vmas corresponding to each page.
1957 *              Or NULL if the caller does not require them.
1958 *
1959 * This is the same as get_user_pages_remote(), just with a less-flexible
1960 * calling convention where we assume that the mm being operated on belongs to
1961 * the current task, and doesn't allow passing of a locked parameter.  We also
1962 * obviously don't pass FOLL_REMOTE in here.
1963 */
1964long get_user_pages(unsigned long start, unsigned long nr_pages,
1965                unsigned int gup_flags, struct page **pages,
1966                struct vm_area_struct **vmas)
1967{
1968        if (!is_valid_gup_flags(gup_flags))
1969                return -EINVAL;
1970
1971        return __gup_longterm_locked(current->mm, start, nr_pages,
1972                                     pages, vmas, gup_flags | FOLL_TOUCH);
1973}
1974EXPORT_SYMBOL(get_user_pages);
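
/*
 * A sketch of the dirty/release discipline described above for writers
 * (hypothetical caller; only get_user_pages(), set_page_dirty_lock() and
 * put_page() are the real interfaces being illustrated):
 *
 *      long i, got;
 *
 *      mmap_read_lock(current->mm);
 *      got = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
 *      mmap_read_unlock(current->mm);
 *      for (i = 0; i < got; i++) {
 *              ... write into pages[i] via its kernel mapping ...
 *              set_page_dirty_lock(pages[i]);
 *              put_page(pages[i]);
 *      }
 */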
1975
1976/**
1977 * get_user_pages_locked() - variant of get_user_pages()
1978 *
1979 * @start:      starting user address
1980 * @nr_pages:   number of pages from start to pin
1981 * @gup_flags:  flags modifying lookup behaviour
1982 * @pages:      array that receives pointers to the pages pinned.
1983 *              Should be at least nr_pages long. Or NULL, if caller
1984 *              only intends to ensure the pages are faulted in.
1985 * @locked:     pointer to lock flag indicating whether lock is held and
1986 *              subsequently whether VM_FAULT_RETRY functionality can be
1987 *              utilised. Lock must initially be held.
1988 *
1989 * It is suitable to replace the form:
1990 *
1991 *      mmap_read_lock(mm);
1992 *      do_something()
1993 *      get_user_pages(..., pages, NULL);
1994 *      mmap_read_unlock(mm);
1995 *
1996 *  to:
1997 *
1998 *      int locked = 1;
1999 *      mmap_read_lock(mm);
2000 *      do_something()
2001 *      get_user_pages_locked(..., pages, &locked);
2002 *      if (locked)
2003 *          mmap_read_unlock(mm);
2004 *
2005 * We can leverage the VM_FAULT_RETRY functionality in the page fault
2006 * paths better by using either get_user_pages_locked() or
2007 * get_user_pages_unlocked().
2008 *
2009 */
2010long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
2011                           unsigned int gup_flags, struct page **pages,
2012                           int *locked)
2013{
2014        /*
2015         * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2016         * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2017         * vmas.  As there are no users of this flag in this call we simply
2018         * disallow this option for now.
2019         */
2020        if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2021                return -EINVAL;
2022        /*
2023         * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2024         * never directly by the caller, so enforce that:
2025         */
2026        if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2027                return -EINVAL;
2028
2029        return __get_user_pages_locked(current->mm, start, nr_pages,
2030                                       pages, NULL, locked,
2031                                       gup_flags | FOLL_TOUCH);
2032}
2033EXPORT_SYMBOL(get_user_pages_locked);
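
/*
 * Spelling out the replacement form above with declarations, as a minimal,
 * self-contained sketch; do_something() is a stand-in for caller work that
 * needs the lock:
 *
 *      int locked = 1;
 *      long ret;
 *
 *      mmap_read_lock(current->mm);
 *      do_something();
 *      ret = get_user_pages_locked(start, nr_pages, gup_flags, pages,
 *                                  &locked);
 *      if (locked)
 *              mmap_read_unlock(current->mm);
 */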
2034
2035/*
2036 * get_user_pages_unlocked() is suitable to replace the form:
2037 *
2038 *      mmap_read_lock(mm);
2039 *      get_user_pages(..., pages, NULL);
2040 *      mmap_read_unlock(mm);
2041 *
2042 *  with:
2043 *
2044 *      get_user_pages_unlocked(..., pages);
2045 *
2046 * It is functionally equivalent to get_user_pages_fast(), so
2047 * get_user_pages_fast() should be used instead if specific gup_flags
2048 * (e.g. FOLL_FORCE) are not required.
2049 */
2050long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2051                             struct page **pages, unsigned int gup_flags)
2052{
2053        struct mm_struct *mm = current->mm;
2054        int locked = 1;
2055        long ret;
2056
2057        /*
2058         * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2059         * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2060         * vmas.  As there are no users of this flag in this call we simply
2061         * disallow this option for now.
2062         */
2063        if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2064                return -EINVAL;
2065
2066        mmap_read_lock(mm);
2067        ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
2068                                      &locked, gup_flags | FOLL_TOUCH);
2069        if (locked)
2070                mmap_read_unlock(mm);
2071        return ret;
2072}
2073EXPORT_SYMBOL(get_user_pages_unlocked);
2074
2075/*
2076 * Fast GUP
2077 *
2078 * get_user_pages_fast attempts to pin user pages by walking the page
2079 * tables directly and avoids taking locks. Thus the walker needs to be
2080 * protected from page table pages being freed from under it, and should
2081 * block any THP splits.
2082 *
2083 * One way to achieve this is to have the walker disable interrupts, and
2084 * rely on IPIs from the TLB flushing code blocking before the page table
2085 * pages are freed. This is unsuitable for architectures that do not need
2086 * to broadcast an IPI when invalidating TLBs.
2087 *
2088 * Another way to achieve this is to batch up the pages that hold page tables
2089 * belonging to more than one mm_user, then schedule an RCU callback to free
2090 * those pages. Disabling interrupts allows the fast_gup walker to block both
2091 * the RCU callback and any IPI that we broadcast for splitting THPs
2092 * (which is a relatively rare event). The code below adopts this strategy.
2093 *
2094 * Before activating this code, please be aware that the following assumptions
2095 * are currently made:
2096 *
2097 *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled and tlb_remove_table() is used to
2098 *  free pages containing page tables, or TLB flushing requires IPI broadcast.
2099 *
2100 *  *) ptes can be read atomically by the architecture.
2101 *
2102 *  *) access_ok is sufficient to validate userspace address ranges.
2103 *
2104 * The last two assumptions can be relaxed by the addition of helper functions.
2105 *
2106 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2107 */
2108#ifdef CONFIG_HAVE_FAST_GUP
2109
2110static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2111                                            unsigned int flags,
2112                                            struct page **pages)
2113{
2114        while ((*nr) - nr_start) {
2115                struct page *page = pages[--(*nr)];
2116
2117                ClearPageReferenced(page);
2118                if (flags & FOLL_PIN)
2119                        unpin_user_page(page);
2120                else
2121                        put_page(page);
2122        }
2123}
2124
2125#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2126static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2127                         unsigned int flags, struct page **pages, int *nr)
2128{
2129        struct dev_pagemap *pgmap = NULL;
2130        int nr_start = *nr, ret = 0;
2131        pte_t *ptep, *ptem;
2132
2133        ptem = ptep = pte_offset_map(&pmd, addr);
2134        do {
2135                pte_t pte = ptep_get_lockless(ptep);
2136                struct page *head, *page;
2137
2138                /*
2139                 * Similar to the PMD case below, NUMA hinting must take slow
2140                 * path using the pte_protnone check.
2141                 */
2142                if (pte_protnone(pte))
2143                        goto pte_unmap;
2144
2145                if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2146                        goto pte_unmap;
2147
2148                if (pte_devmap(pte)) {
2149                        if (unlikely(flags & FOLL_LONGTERM))
2150                                goto pte_unmap;
2151
2152                        pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2153                        if (unlikely(!pgmap)) {
2154                                undo_dev_pagemap(nr, nr_start, flags, pages);
2155                                goto pte_unmap;
2156                        }
2157                } else if (pte_special(pte))
2158                        goto pte_unmap;
2159
2160                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2161                page = pte_page(pte);
2162
2163                head = try_grab_compound_head(page, 1, flags);
2164                if (!head)
2165                        goto pte_unmap;
2166
2167                if (unlikely(page_is_secretmem(page))) {
2168                        put_compound_head(head, 1, flags);
2169                        goto pte_unmap;
2170                }
2171
2172                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2173                        put_compound_head(head, 1, flags);
2174                        goto pte_unmap;
2175                }
2176
2177                VM_BUG_ON_PAGE(compound_head(page) != head, page);
2178
2179                /*
2180                 * We need to make the page accessible if and only if we are
2181                 * going to access its content (the FOLL_PIN case).  Please
2182                 * see Documentation/core-api/pin_user_pages.rst for
2183                 * details.
2184                 */
2185                if (flags & FOLL_PIN) {
2186                        ret = arch_make_page_accessible(page);
2187                        if (ret) {
2188                                unpin_user_page(page);
2189                                goto pte_unmap;
2190                        }
2191                }
2192                SetPageReferenced(page);
2193                pages[*nr] = page;
2194                (*nr)++;
2195
2196        } while (ptep++, addr += PAGE_SIZE, addr != end);
2197
2198        ret = 1;
2199
2200pte_unmap:
2201        if (pgmap)
2202                put_dev_pagemap(pgmap);
2203        pte_unmap(ptem);
2204        return ret;
2205}
2206#else
2207
2208/*
2209 * If we can't determine whether or not a pte is special, then fail immediately
2210 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2211 * to be special.
2212 *
2213 * For a futex to be placed on a THP tail page, get_futex_key requires a
2214 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2215 * useful to have gup_huge_pmd even if we can't operate on ptes.
2216 */
2217static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2218                         unsigned int flags, struct page **pages, int *nr)
2219{
2220        return 0;
2221}
2222#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2223
2224#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2225static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2226                             unsigned long end, unsigned int flags,
2227                             struct page **pages, int *nr)
2228{
2229        int nr_start = *nr;
2230        struct dev_pagemap *pgmap = NULL;
2231        int ret = 1;
2232
2233        do {
2234                struct page *page = pfn_to_page(pfn);
2235
2236                pgmap = get_dev_pagemap(pfn, pgmap);
2237                if (unlikely(!pgmap)) {
2238                        undo_dev_pagemap(nr, nr_start, flags, pages);
2239                        ret = 0;
2240                        break;
2241                }
2242                SetPageReferenced(page);
2243                pages[*nr] = page;
2244                if (unlikely(!try_grab_page(page, flags))) {
2245                        undo_dev_pagemap(nr, nr_start, flags, pages);
2246                        ret = 0;
2247                        break;
2248                }
2249                (*nr)++;
2250                pfn++;
2251        } while (addr += PAGE_SIZE, addr != end);
2252
2253        put_dev_pagemap(pgmap);
2254        return ret;
2255}
2256
2257static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2258                                 unsigned long end, unsigned int flags,
2259                                 struct page **pages, int *nr)
2260{
2261        unsigned long fault_pfn;
2262        int nr_start = *nr;
2263
2264        fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2265        if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2266                return 0;
2267
2268        if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2269                undo_dev_pagemap(nr, nr_start, flags, pages);
2270                return 0;
2271        }
2272        return 1;
2273}
2274
2275static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2276                                 unsigned long end, unsigned int flags,
2277                                 struct page **pages, int *nr)
2278{
2279        unsigned long fault_pfn;
2280        int nr_start = *nr;
2281
2282        fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2283        if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2284                return 0;
2285
2286        if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2287                undo_dev_pagemap(nr, nr_start, flags, pages);
2288                return 0;
2289        }
2290        return 1;
2291}
2292#else
2293static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2294                                 unsigned long end, unsigned int flags,
2295                                 struct page **pages, int *nr)
2296{
2297        BUILD_BUG();
2298        return 0;
2299}
2300
2301static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2302                                 unsigned long end, unsigned int flags,
2303                                 struct page **pages, int *nr)
2304{
2305        BUILD_BUG();
2306        return 0;
2307}
2308#endif
2309
2310static int record_subpages(struct page *page, unsigned long addr,
2311                           unsigned long end, struct page **pages)
2312{
2313        int nr;
2314
2315        for (nr = 0; addr != end; addr += PAGE_SIZE)
2316                pages[nr++] = page++;
2317
2318        return nr;
2319}
2320
2321#ifdef CONFIG_ARCH_HAS_HUGEPD
2322static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2323                                      unsigned long sz)
2324{
2325        unsigned long __boundary = (addr + sz) & ~(sz-1);
2326        return (__boundary - 1 < end - 1) ? __boundary : end;
2327}
2328
2329static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2330                       unsigned long end, unsigned int flags,
2331                       struct page **pages, int *nr)
2332{
2333        unsigned long pte_end;
2334        struct page *head, *page;
2335        pte_t pte;
2336        int refs;
2337
2338        pte_end = (addr + sz) & ~(sz-1);
2339        if (pte_end < end)
2340                end = pte_end;
2341
2342        pte = huge_ptep_get(ptep);
2343
2344        if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2345                return 0;
2346
2347        /* hugepages are never "special" */
2348        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2349
2350        head = pte_page(pte);
2351        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
2352        refs = record_subpages(page, addr, end, pages + *nr);
2353
2354        head = try_grab_compound_head(head, refs, flags);
2355        if (!head)
2356                return 0;
2357
2358        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2359                put_compound_head(head, refs, flags);
2360                return 0;
2361        }
2362
2363        *nr += refs;
2364        SetPageReferenced(head);
2365        return 1;
2366}
2367
2368static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2369                unsigned int pdshift, unsigned long end, unsigned int flags,
2370                struct page **pages, int *nr)
2371{
2372        pte_t *ptep;
2373        unsigned long sz = 1UL << hugepd_shift(hugepd);
2374        unsigned long next;
2375
2376        ptep = hugepte_offset(hugepd, addr, pdshift);
2377        do {
2378                next = hugepte_addr_end(addr, end, sz);
2379                if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2380                        return 0;
2381        } while (ptep++, addr = next, addr != end);
2382
2383        return 1;
2384}
2385#else
2386static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2387                unsigned int pdshift, unsigned long end, unsigned int flags,
2388                struct page **pages, int *nr)
2389{
2390        return 0;
2391}
2392#endif /* CONFIG_ARCH_HAS_HUGEPD */
2393
2394static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2395                        unsigned long end, unsigned int flags,
2396                        struct page **pages, int *nr)
2397{
2398        struct page *head, *page;
2399        int refs;
2400
2401        if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2402                return 0;
2403
2404        if (pmd_devmap(orig)) {
2405                if (unlikely(flags & FOLL_LONGTERM))
2406                        return 0;
2407                return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2408                                             pages, nr);
2409        }
2410
2411        page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2412        refs = record_subpages(page, addr, end, pages + *nr);
2413
2414        head = try_grab_compound_head(pmd_page(orig), refs, flags);
2415        if (!head)
2416                return 0;
2417
2418        if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2419                put_compound_head(head, refs, flags);
2420                return 0;
2421        }
2422
2423        *nr += refs;
2424        SetPageReferenced(head);
2425        return 1;
2426}
2427
2428static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2429                        unsigned long end, unsigned int flags,
2430                        struct page **pages, int *nr)
2431{
2432        struct page *head, *page;
2433        int refs;
2434
2435        if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2436                return 0;
2437
2438        if (pud_devmap(orig)) {
2439                if (unlikely(flags & FOLL_LONGTERM))
2440                        return 0;
2441                return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2442                                             pages, nr);
2443        }
2444
2445        page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2446        refs = record_subpages(page, addr, end, pages + *nr);
2447
2448        head = try_grab_compound_head(pud_page(orig), refs, flags);
2449        if (!head)
2450                return 0;
2451
2452        if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2453                put_compound_head(head, refs, flags);
2454                return 0;
2455        }
2456
2457        *nr += refs;
2458        SetPageReferenced(head);
2459        return 1;
2460}
2461
2462static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2463                        unsigned long end, unsigned int flags,
2464                        struct page **pages, int *nr)
2465{
2466        int refs;
2467        struct page *head, *page;
2468
2469        if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2470                return 0;
2471
2472        BUILD_BUG_ON(pgd_devmap(orig));
2473
2474        page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2475        refs = record_subpages(page, addr, end, pages + *nr);
2476
2477        head = try_grab_compound_head(pgd_page(orig), refs, flags);
2478        if (!head)
2479                return 0;
2480
2481        if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2482                put_compound_head(head, refs, flags);
2483                return 0;
2484        }
2485
2486        *nr += refs;
2487        SetPageReferenced(head);
2488        return 1;
2489}
2490
2491static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2492                unsigned int flags, struct page **pages, int *nr)
2493{
2494        unsigned long next;
2495        pmd_t *pmdp;
2496
2497        pmdp = pmd_offset_lockless(pudp, pud, addr);
2498        do {
2499                pmd_t pmd = READ_ONCE(*pmdp);
2500
2501                next = pmd_addr_end(addr, end);
2502                if (!pmd_present(pmd))
2503                        return 0;
2504
2505                if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2506                             pmd_devmap(pmd))) {
2507                        /*
2508                         * NUMA hinting faults need to be handled in the GUP
2509                         * slowpath for accounting purposes and so that they
2510                         * can be serialised against THP migration.
2511                         */
2512                        if (pmd_protnone(pmd))
2513                                return 0;
2514
2515                        if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2516                                pages, nr))
2517                                return 0;
2518
2519                } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2520                        /*
2521                         * Some architectures use a different format for
2522                         * hugetlbfs pmds than for THP pmds.
2523                         */
2524                        if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2525                                         PMD_SHIFT, next, flags, pages, nr))
2526                                return 0;
2527                } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2528                        return 0;
2529        } while (pmdp++, addr = next, addr != end);
2530
2531        return 1;
2532}
2533
2534static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2535                         unsigned int flags, struct page **pages, int *nr)
2536{
2537        unsigned long next;
2538        pud_t *pudp;
2539
2540        pudp = pud_offset_lockless(p4dp, p4d, addr);
2541        do {
2542                pud_t pud = READ_ONCE(*pudp);
2543
2544                next = pud_addr_end(addr, end);
2545                if (unlikely(!pud_present(pud)))
2546                        return 0;
2547                if (unlikely(pud_huge(pud))) {
2548                        if (!gup_huge_pud(pud, pudp, addr, next, flags,
2549                                          pages, nr))
2550                                return 0;
2551                } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2552                        if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2553                                         PUD_SHIFT, next, flags, pages, nr))
2554                                return 0;
2555                } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2556                        return 0;
2557        } while (pudp++, addr = next, addr != end);
2558
2559        return 1;
2560}
2561
2562static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2563                         unsigned int flags, struct page **pages, int *nr)
2564{
2565        unsigned long next;
2566        p4d_t *p4dp;
2567
2568        p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2569        do {
2570                p4d_t p4d = READ_ONCE(*p4dp);
2571
2572                next = p4d_addr_end(addr, end);
2573                if (p4d_none(p4d))
2574                        return 0;
2575                BUILD_BUG_ON(p4d_huge(p4d));
2576                if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2577                        if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2578                                         P4D_SHIFT, next, flags, pages, nr))
2579                                return 0;
2580                } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2581                        return 0;
2582        } while (p4dp++, addr = next, addr != end);
2583
2584        return 1;
2585}
2586
2587static void gup_pgd_range(unsigned long addr, unsigned long end,
2588                unsigned int flags, struct page **pages, int *nr)
2589{
2590        unsigned long next;
2591        pgd_t *pgdp;
2592
2593        pgdp = pgd_offset(current->mm, addr);
2594        do {
2595                pgd_t pgd = READ_ONCE(*pgdp);
2596
2597                next = pgd_addr_end(addr, end);
2598                if (pgd_none(pgd))
2599                        return;
2600                if (unlikely(pgd_huge(pgd))) {
2601                        if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2602                                          pages, nr))
2603                                return;
2604                } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2605                        if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2606                                         PGDIR_SHIFT, next, flags, pages, nr))
2607                                return;
2608                } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2609                        return;
2610        } while (pgdp++, addr = next, addr != end);
2611}
2612#else
2613static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2614                unsigned int flags, struct page **pages, int *nr)
2615{
2616}
2617#endif /* CONFIG_HAVE_FAST_GUP */
2618
2619#ifndef gup_fast_permitted
2620/*
2621 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2622 * we need to fall back to the slow version:
2623 */
2624static bool gup_fast_permitted(unsigned long start, unsigned long end)
2625{
2626        return true;
2627}
2628#endif
2629
2630static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2631                                   unsigned int gup_flags, struct page **pages)
2632{
2633        int ret;
2634
2635        /*
2636         * FIXME: FOLL_LONGTERM does not work with
2637         * get_user_pages_unlocked() (see comments in that function)
2638         */
2639        if (gup_flags & FOLL_LONGTERM) {
2640                mmap_read_lock(current->mm);
2641                ret = __gup_longterm_locked(current->mm,
2642                                            start, nr_pages,
2643                                            pages, NULL, gup_flags);
2644                mmap_read_unlock(current->mm);
2645        } else {
2646                ret = get_user_pages_unlocked(start, nr_pages,
2647                                              pages, gup_flags);
2648        }
2649
2650        return ret;
2651}
2652
2653static unsigned long lockless_pages_from_mm(unsigned long start,
2654                                            unsigned long end,
2655                                            unsigned int gup_flags,
2656                                            struct page **pages)
2657{
2658        unsigned long flags;
2659        int nr_pinned = 0;
2660        unsigned seq;
2661
2662        if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2663            !gup_fast_permitted(start, end))
2664                return 0;
2665
2666        if (gup_flags & FOLL_PIN) {
2667                seq = raw_read_seqcount(&current->mm->write_protect_seq);
2668                if (seq & 1)
2669                        return 0;
2670        }
2671
2672        /*
2673         * Disable interrupts. The nested form is used in order to allow full,
2674         * general purpose use of this routine.
2675         *
2676         * With interrupts disabled, we block page table pages from being freed
2677         * from under us. See struct mmu_table_batch comments in
2678         * include/asm-generic/tlb.h for more details.
2679         *
2680         * We do not adopt an rcu_read_lock() here, as we also want to block IPIs
2681         * that come from THP splitting.
2682         */
2683        local_irq_save(flags);
2684        gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2685        local_irq_restore(flags);
2686
2687        /*
2688         * When pinning pages for DMA there could be a concurrent write protect
2689         * from fork() via copy_page_range(); in this case, always fail fast GUP.
2690         */
2691        if (gup_flags & FOLL_PIN) {
2692                if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2693                        unpin_user_pages(pages, nr_pinned);
2694                        return 0;
2695                }
2696        }
2697        return nr_pinned;
2698}
2699
2700static int internal_get_user_pages_fast(unsigned long start,
2701                                        unsigned long nr_pages,
2702                                        unsigned int gup_flags,
2703                                        struct page **pages)
2704{
2705        unsigned long len, end;
2706        unsigned long nr_pinned;
2707        int ret;
2708
2709        if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2710                                       FOLL_FORCE | FOLL_PIN | FOLL_GET |
2711                                       FOLL_FAST_ONLY)))
2712                return -EINVAL;
2713
2714        if (gup_flags & FOLL_PIN)
2715                mm_set_has_pinned_flag(&current->mm->flags);
2716
2717        if (!(gup_flags & FOLL_FAST_ONLY))
2718                might_lock_read(&current->mm->mmap_lock);
2719
2720        start = untagged_addr(start) & PAGE_MASK;
2721        len = nr_pages << PAGE_SHIFT;
2722        if (check_add_overflow(start, len, &end))
2723                return 0;
2724        if (unlikely(!access_ok((void __user *)start, len)))
2725                return -EFAULT;
2726
2727        nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2728        if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2729                return nr_pinned;
2730
2731        /* Slow path: try to get the remaining pages with get_user_pages */
2732        start += nr_pinned << PAGE_SHIFT;
2733        pages += nr_pinned;
2734        ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2735                                      pages);
2736        if (ret < 0) {
2737                /*
2738                 * The caller has to unpin the pages we already pinned, so
2739                 * returning -errno is not an option.
2740                 */
2741                if (nr_pinned)
2742                        return nr_pinned;
2743                return ret;
2744        }
2745        return ret + nr_pinned;
2746}
2747
2748/**
2749 * get_user_pages_fast_only() - pin user pages in memory
2750 * @start:      starting user address
2751 * @nr_pages:   number of pages from start to pin
2752 * @gup_flags:  flags modifying pin behaviour
2753 * @pages:      array that receives pointers to the pages pinned.
2754 *              Should be at least nr_pages long.
2755 *
2756 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2757 * the regular GUP.
2758 * Note a difference with get_user_pages_fast: this always returns the
2759 * number of pages pinned, or 0 if no pages were pinned.
2760 *
2761 * If the architecture does not support this function, it simply returns
2762 * with no pages pinned.
2763 *
2764 * Careful, careful! COW breaking can go either way, so a non-write
2765 * access can get ambiguous page results. If you call this function without
2766 * 'write' set, you'd better be sure that you're ok with that ambiguity.
2767 */
2768int get_user_pages_fast_only(unsigned long start, int nr_pages,
2769                             unsigned int gup_flags, struct page **pages)
2770{
2771        int nr_pinned;
2772        /*
2773         * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2774         * because gup fast is always a "pin with a +1 page refcount" request.
2775         *
2776         * FOLL_FAST_ONLY is required in order to match the API description of
2777         * this routine: no fall back to regular ("slow") GUP.
2778         */
2779        gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2780
2781        nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2782                                                 pages);
2783
2784        /*
2785         * As specified in the API description above, this routine is not
2786         * allowed to return negative values. However, the common core
2787         * routine internal_get_user_pages_fast() *can* return -errno.
2788         * Therefore, correct for that here:
2789         */
2790        if (nr_pinned < 0)
2791                nr_pinned = 0;
2792
2793        return nr_pinned;
2794}
2795EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
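
/*
 * Usage sketch (hypothetical caller, not from this file): because this
 * variant never sleeps and never takes mmap_lock, it can be tried from
 * atomic context before falling back to a sleeping path elsewhere.
 *
 *	static bool try_fast_one_page(unsigned long addr, struct page **page)
 *	{
 *		// Returns 1 on success, 0 otherwise -- never -errno.
 *		return get_user_pages_fast_only(addr, 1, FOLL_WRITE, page) == 1;
 *	}
 */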
2796
2797/**
2798 * get_user_pages_fast() - pin user pages in memory
2799 * @start:      starting user address
2800 * @nr_pages:   number of pages from start to pin
2801 * @gup_flags:  flags modifying pin behaviour
2802 * @pages:      array that receives pointers to the pages pinned.
2803 *              Should be at least nr_pages long.
2804 *
2805 * Attempt to pin user pages in memory without taking mm->mmap_lock.
2806 * If not successful, it will fall back to taking the lock and
2807 * calling get_user_pages().
2808 *
2809 * Returns the number of pages pinned. This may be fewer than the number requested.
2810 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2811 * -errno.
2812 */
2813int get_user_pages_fast(unsigned long start, int nr_pages,
2814                        unsigned int gup_flags, struct page **pages)
2815{
2816        if (!is_valid_gup_flags(gup_flags))
2817                return -EINVAL;
2818
2819        /*
2820         * The caller may or may not have explicitly set FOLL_GET; either way is
2821         * OK. However, internally (within mm/gup.c), gup fast variants must set
2822         * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2823         * request.
2824         */
2825        gup_flags |= FOLL_GET;
2826        return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2827}
2828EXPORT_SYMBOL_GPL(get_user_pages_fast);
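
/*
 * Usage sketch (hypothetical caller): the FOLL_GET references taken here
 * are plain page references, so they are dropped with put_page(), not
 * unpin_user_page().
 *
 *	struct page *pages[16];
 *	int i, got;
 *
 *	got = get_user_pages_fast(uaddr, 16, FOLL_WRITE, pages);
 *	if (got < 0)
 *		return got;
 *	... access the got pages ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */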
2829
2830/**
2831 * pin_user_pages_fast() - pin user pages in memory without taking locks
2832 *
2833 * @start:      starting user address
2834 * @nr_pages:   number of pages from start to pin
2835 * @gup_flags:  flags modifying pin behaviour
2836 * @pages:      array that receives pointers to the pages pinned.
2837 *              Should be at least nr_pages long.
2838 *
2839 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2840 * get_user_pages_fast() for documentation on the function arguments, because
2841 * the arguments here are identical.
2842 *
2843 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2844 * see Documentation/core-api/pin_user_pages.rst for further details.
2845 */
2846int pin_user_pages_fast(unsigned long start, int nr_pages,
2847                        unsigned int gup_flags, struct page **pages)
2848{
2849        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2850        if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2851                return -EINVAL;
2852
2853        gup_flags |= FOLL_PIN;
2854        return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2855}
2856EXPORT_SYMBOL_GPL(pin_user_pages_fast);
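
/*
 * Usage sketch (hypothetical DMA caller): FOLL_PIN references must be
 * dropped via the unpin_user_page*() family; unpin_user_pages_dirty_lock()
 * additionally marks the pages dirty when the device wrote to them.
 *
 *	pinned = pin_user_pages_fast(uaddr, 16, FOLL_WRITE | FOLL_LONGTERM,
 *				     pages);
 *	if (pinned < 0)
 *		return pinned;
 *	... program the device to DMA into the pinned pages ...
 *	unpin_user_pages_dirty_lock(pages, pinned, true);
 */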
2857
2858/*
2859 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2860 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
2861 *
2862 * The API rules are the same, too: no negative values may be returned.
2863 */
2864int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2865                             unsigned int gup_flags, struct page **pages)
2866{
2867        int nr_pinned;
2868
2869        /*
2870         * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2871         * rules require returning 0, rather than -errno:
2872         */
2873        if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2874                return 0;
2875        /*
2876         * FOLL_FAST_ONLY is required in order to match the API description of
2877         * this routine: no fall back to regular ("slow") GUP.
2878         */
2879        gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2880        nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2881                                                 pages);
2882        /*
2883         * This routine is not allowed to return negative values. However,
2884         * internal_get_user_pages_fast() *can* return -errno. Therefore,
2885         * correct for that here:
2886         */
2887        if (nr_pinned < 0)
2888                nr_pinned = 0;
2889
2890        return nr_pinned;
2891}
2892EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
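
/*
 * Usage sketch (hypothetical caller): like the FOLL_GET variant above,
 * this can pin fewer pages than requested, so a caller needing
 * all-or-nothing semantics has to roll back partial results itself.
 *
 *	pinned = pin_user_pages_fast_only(addr, want, FOLL_WRITE, pages);
 *	if (pinned < want) {
 *		unpin_user_pages(pages, pinned);
 *		... fall back to a sleeping path, e.g. pin_user_pages() ...
 *	}
 */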
2893
2894/**
2895 * pin_user_pages_remote() - pin pages of a remote process
2896 *
2897 * @mm:         mm_struct of target mm
2898 * @start:      starting user address
2899 * @nr_pages:   number of pages from start to pin
2900 * @gup_flags:  flags modifying lookup behaviour
2901 * @pages:      array that receives pointers to the pages pinned.
2902 *              Should be at least nr_pages long. Or NULL, if caller
2903 *              only intends to ensure the pages are faulted in.
2904 * @vmas:       array of pointers to vmas corresponding to each page.
2905 *              Or NULL if the caller does not require them.
2906 * @locked:     pointer to lock flag indicating whether lock is held and
2907 *              subsequently whether VM_FAULT_RETRY functionality can be
2908 *              utilised. Lock must initially be held.
2909 *
2910 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2911 * get_user_pages_remote() for documentation on the function arguments, because
2912 * the arguments here are identical.
2913 *
2914 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2915 * see Documentation/core-api/pin_user_pages.rst for details.
2916 */
2917long pin_user_pages_remote(struct mm_struct *mm,
2918                           unsigned long start, unsigned long nr_pages,
2919                           unsigned int gup_flags, struct page **pages,
2920                           struct vm_area_struct **vmas, int *locked)
2921{
2922        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2923        if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2924                return -EINVAL;
2925
2926        gup_flags |= FOLL_PIN;
2927        return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
2928                                       pages, vmas, locked);
2929}
2930EXPORT_SYMBOL(pin_user_pages_remote);
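
/*
 * Usage sketch (hypothetical caller): per the @locked rules above, the
 * caller takes mmap_lock first and must re-check @locked afterwards,
 * because the lock may have been dropped to wait out a retried fault.
 *
 *	int locked = 1;
 *	long pinned;
 *
 *	mmap_read_lock(mm);
 *	pinned = pin_user_pages_remote(mm, addr, nr, FOLL_WRITE, pages,
 *				       NULL, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 */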
2931
2932/**
2933 * pin_user_pages() - pin user pages in memory for use by other devices
2934 *
2935 * @start:      starting user address
2936 * @nr_pages:   number of pages from start to pin
2937 * @gup_flags:  flags modifying lookup behaviour
2938 * @pages:      array that receives pointers to the pages pinned.
2939 *              Should be at least nr_pages long. Or NULL, if caller
2940 *              only intends to ensure the pages are faulted in.
2941 * @vmas:       array of pointers to vmas corresponding to each page.
2942 *              Or NULL if the caller does not require them.
2943 *
2944 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
2945 * FOLL_PIN is set.
2946 *
2947 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2948 * see Documentation/core-api/pin_user_pages.rst for details.
2949 */
2950long pin_user_pages(unsigned long start, unsigned long nr_pages,
2951                    unsigned int gup_flags, struct page **pages,
2952                    struct vm_area_struct **vmas)
2953{
2954        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2955        if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2956                return -EINVAL;
2957
2958        gup_flags |= FOLL_PIN;
2959        return __gup_longterm_locked(current->mm, start, nr_pages,
2960                                     pages, vmas, gup_flags);
2961}
2962EXPORT_SYMBOL(pin_user_pages);
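
/*
 * Usage sketch (hypothetical caller): as with get_user_pages(), the
 * caller is expected to hold mmap_lock for read across the call.
 *
 *	mmap_read_lock(current->mm);
 *	pinned = pin_user_pages(addr, nr, FOLL_WRITE | FOLL_LONGTERM,
 *				pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (pinned < 0)
 *		return pinned;
 *	... use the pages ...
 *	unpin_user_pages(pages, pinned);
 */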
2963
2964/*
2965 * pin_user_pages_unlocked() is the FOLL_PIN variant of
2966 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
2967 * FOLL_PIN and rejects FOLL_GET.
2968 */
2969long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2970                             struct page **pages, unsigned int gup_flags)
2971{
2972        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2973        if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2974                return -EINVAL;
2975
2976        gup_flags |= FOLL_PIN;
2977        return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
2978}
2979EXPORT_SYMBOL(pin_user_pages_unlocked);
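
/*
 * Usage sketch (hypothetical caller): no mmap_lock is held on entry; the
 * helper takes and drops it internally, making this the convenient form
 * when the caller holds no other mm state across the call.
 *
 *	pinned = pin_user_pages_unlocked(addr, nr, pages, FOLL_WRITE);
 */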
2980
2981/*
2982 * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
2983 * Behavior is the same, except that this one sets FOLL_PIN and rejects
2984 * FOLL_GET.
2985 */
2986long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
2987                           unsigned int gup_flags, struct page **pages,
2988                           int *locked)
2989{
2990        /*
2991         * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2992         * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2993         * vmas.  As there are no users of this flag in this call we simply
2994         * disallow this option for now.
2995         */
2996        if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2997                return -EINVAL;
2998
2999        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3000        if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3001                return -EINVAL;
3002
3003        gup_flags |= FOLL_PIN;
3004        return __get_user_pages_locked(current->mm, start, nr_pages,
3005                                       pages, NULL, locked,
3006                                       gup_flags | FOLL_TOUCH);
3007}
3008EXPORT_SYMBOL(pin_user_pages_locked);
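
/*
 * Usage sketch (hypothetical caller): same pattern as the remote variant,
 * but implicitly on current->mm; @locked tells the caller whether it
 * still owns mmap_lock on return.
 *
 *	int locked = 1;
 *
 *	mmap_read_lock(current->mm);
 *	pinned = pin_user_pages_locked(addr, nr, FOLL_WRITE, pages, &locked);
 *	if (locked)
 *		mmap_read_unlock(current->mm);
 */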
3009