linux/mm/gup.c
   1#include <linux/kernel.h>
   2#include <linux/errno.h>
   3#include <linux/err.h>
   4#include <linux/spinlock.h>
   5
   6#include <linux/mm.h>
   7#include <linux/pagemap.h>
   8#include <linux/rmap.h>
   9#include <linux/swap.h>
  10#include <linux/swapops.h>
  11
  12#include <linux/sched.h>
  13#include <linux/rwsem.h>
  14#include <linux/hugetlb.h>
  15#include <asm/pgtable.h>
  16
  17#include "internal.h"
  18
  19static struct page *no_page_table(struct vm_area_struct *vma,
  20                unsigned int flags)
  21{
  22        /*
  23         * When core dumping an enormous anonymous area that nobody
  24         * has touched so far, we don't want to allocate unnecessary pages or
  25         * page tables.  Return error instead of NULL to skip handle_mm_fault,
  26         * then get_dump_page() will return NULL to leave a hole in the dump.
  27         * But we can only make this optimization where a hole would surely
  28         * be zero-filled if handle_mm_fault() actually did handle it.
  29         */
  30        if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
  31                return ERR_PTR(-EFAULT);
  32        return NULL;
  33}
  34
  35static struct page *follow_page_pte(struct vm_area_struct *vma,
  36                unsigned long address, pmd_t *pmd, unsigned int flags)
  37{
  38        struct mm_struct *mm = vma->vm_mm;
  39        struct page *page;
  40        spinlock_t *ptl;
  41        pte_t *ptep, pte;
  42
  43retry:
  44        if (unlikely(pmd_bad(*pmd)))
  45                return no_page_table(vma, flags);
  46
  47        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
  48        pte = *ptep;
  49        if (!pte_present(pte)) {
  50                swp_entry_t entry;
  51                /*
  52                 * KSM's break_ksm() relies upon recognizing a ksm page
  53                 * even while it is being migrated, so for that case we
  54                 * need migration_entry_wait().
  55                 */
  56                if (likely(!(flags & FOLL_MIGRATION)))
  57                        goto no_page;
  58                if (pte_none(pte))
  59                        goto no_page;
  60                entry = pte_to_swp_entry(pte);
  61                if (!is_migration_entry(entry))
  62                        goto no_page;
  63                pte_unmap_unlock(ptep, ptl);
  64                migration_entry_wait(mm, pmd, address);
  65                goto retry;
  66        }
  67        if ((flags & FOLL_NUMA) && pte_protnone(pte))
  68                goto no_page;
  69        if ((flags & FOLL_WRITE) && !pte_write(pte)) {
  70                pte_unmap_unlock(ptep, ptl);
  71                return NULL;
  72        }
  73
  74        page = vm_normal_page(vma, address, pte);
  75        if (unlikely(!page)) {
  76                if ((flags & FOLL_DUMP) ||
  77                    !is_zero_pfn(pte_pfn(pte)))
  78                        goto bad_page;
  79                page = pte_page(pte);
  80        }
  81
  82        if (flags & FOLL_GET)
  83                get_page_foll(page);
  84        if (flags & FOLL_TOUCH) {
  85                if ((flags & FOLL_WRITE) &&
  86                    !pte_dirty(pte) && !PageDirty(page))
  87                        set_page_dirty(page);
  88                /*
  89                 * pte_mkyoung() would be more correct here, but atomic care
  90                 * is needed to avoid losing the dirty bit: it is easier to use
  91                 * mark_page_accessed().
  92                 */
  93                mark_page_accessed(page);
  94        }
  95        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
  96                /*
  97                 * The preliminary mapping check is mainly to avoid the
  98                 * pointless overhead of lock_page on the ZERO_PAGE
  99                 * which might bounce very badly if there is contention.
 100                 *
 101                 * If the page is already locked, we don't need to
 102                 * handle it now - vmscan will handle it later if and
 103                 * when it attempts to reclaim the page.
 104                 */
 105                if (page->mapping && trylock_page(page)) {
 106                        lru_add_drain();  /* push cached pages to LRU */
 107                        /*
 108                         * Because we lock page here, and migration is
 109                         * blocked by the pte's page reference, and we
 110                         * know the page is still mapped, we don't even
 111                         * need to check for file-cache page truncation.
 112                         */
 113                        mlock_vma_page(page);
 114                        unlock_page(page);
 115                }
 116        }
 117        pte_unmap_unlock(ptep, ptl);
 118        return page;
 119bad_page:
 120        pte_unmap_unlock(ptep, ptl);
 121        return ERR_PTR(-EFAULT);
 122
 123no_page:
 124        pte_unmap_unlock(ptep, ptl);
 125        if (!pte_none(pte))
 126                return NULL;
 127        return no_page_table(vma, flags);
 128}
 129
 130/**
 131 * follow_page_mask - look up a page descriptor from a user-virtual address
 132 * @vma: vm_area_struct mapping @address
 133 * @address: virtual address to look up
 134 * @flags: flags modifying lookup behaviour
 135 * @page_mask: on output, *page_mask is set according to the size of the page
 136 *
 137 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 138 *
 139 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 140 * an error pointer if there is a mapping to something not represented
 141 * by a page descriptor (see also vm_normal_page()).
 142 */
 143struct page *follow_page_mask(struct vm_area_struct *vma,
 144                              unsigned long address, unsigned int flags,
 145                              unsigned int *page_mask)
 146{
 147        pgd_t *pgd;
 148        pud_t *pud;
 149        pmd_t *pmd;
 150        spinlock_t *ptl;
 151        struct page *page;
 152        struct mm_struct *mm = vma->vm_mm;
 153
 154        *page_mask = 0;
 155
 156        page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
 157        if (!IS_ERR(page)) {
 158                BUG_ON(flags & FOLL_GET);
 159                return page;
 160        }
 161
 162        pgd = pgd_offset(mm, address);
 163        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 164                return no_page_table(vma, flags);
 165
 166        pud = pud_offset(pgd, address);
 167        if (pud_none(*pud))
 168                return no_page_table(vma, flags);
 169        if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
 170                page = follow_huge_pud(mm, address, pud, flags);
 171                if (page)
 172                        return page;
 173                return no_page_table(vma, flags);
 174        }
 175        if (unlikely(pud_bad(*pud)))
 176                return no_page_table(vma, flags);
 177
 178        pmd = pmd_offset(pud, address);
 179        if (pmd_none(*pmd))
 180                return no_page_table(vma, flags);
 181        if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
 182                page = follow_huge_pmd(mm, address, pmd, flags);
 183                if (page)
 184                        return page;
 185                return no_page_table(vma, flags);
 186        }
 187        if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 188                return no_page_table(vma, flags);
 189        if (pmd_trans_huge(*pmd)) {
 190                if (flags & FOLL_SPLIT) {
 191                        split_huge_page_pmd(vma, address, pmd);
 192                        return follow_page_pte(vma, address, pmd, flags);
 193                }
 194                ptl = pmd_lock(mm, pmd);
 195                if (likely(pmd_trans_huge(*pmd))) {
 196                        if (unlikely(pmd_trans_splitting(*pmd))) {
 197                                spin_unlock(ptl);
 198                                wait_split_huge_page(vma->anon_vma, pmd);
 199                        } else {
 200                                page = follow_trans_huge_pmd(vma, address,
 201                                                             pmd, flags);
 202                                spin_unlock(ptl);
 203                                *page_mask = HPAGE_PMD_NR - 1;
 204                                return page;
 205                        }
 206                } else
 207                        spin_unlock(ptl);
 208        }
 209        return follow_page_pte(vma, address, pmd, flags);
 210}
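
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * how a caller can consume the *page_mask output.  For a THP mapping the
 * function above sets *page_mask to HPAGE_PMD_NR - 1, so the number of
 * base pages covered from @address to the end of that huge page is
 *
 *	page_increm = 1 + (~(address >> PAGE_SHIFT) & page_mask);
 *
 * which is exactly the stepping __get_user_pages() applies further down
 * in this file.
 */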
 211
 212static int get_gate_page(struct mm_struct *mm, unsigned long address,
 213                unsigned int gup_flags, struct vm_area_struct **vma,
 214                struct page **page)
 215{
 216        pgd_t *pgd;
 217        pud_t *pud;
 218        pmd_t *pmd;
 219        pte_t *pte;
 220        int ret = -EFAULT;
 221
 222        /* user gate pages are read-only */
 223        if (gup_flags & FOLL_WRITE)
 224                return -EFAULT;
 225        if (address > TASK_SIZE)
 226                pgd = pgd_offset_k(address);
 227        else
 228                pgd = pgd_offset_gate(mm, address);
 229        BUG_ON(pgd_none(*pgd));
 230        pud = pud_offset(pgd, address);
 231        BUG_ON(pud_none(*pud));
 232        pmd = pmd_offset(pud, address);
 233        if (pmd_none(*pmd))
 234                return -EFAULT;
 235        VM_BUG_ON(pmd_trans_huge(*pmd));
 236        pte = pte_offset_map(pmd, address);
 237        if (pte_none(*pte))
 238                goto unmap;
 239        *vma = get_gate_vma(mm);
 240        if (!page)
 241                goto out;
 242        *page = vm_normal_page(*vma, address, *pte);
 243        if (!*page) {
 244                if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
 245                        goto unmap;
 246                *page = pte_page(*pte);
 247        }
 248        get_page(*page);
 249out:
 250        ret = 0;
 251unmap:
 252        pte_unmap(pte);
 253        return ret;
 254}
 255
 256/*
 257 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 258 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 259 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 260 */
 261static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 262                unsigned long address, unsigned int *flags, int *nonblocking)
 263{
 264        struct mm_struct *mm = vma->vm_mm;
 265        unsigned int fault_flags = 0;
 266        int ret;
 267
 268        /* For mlock, just skip the stack guard page. */
 269        if ((*flags & FOLL_MLOCK) &&
 270                        (stack_guard_page_start(vma, address) ||
 271                         stack_guard_page_end(vma, address + PAGE_SIZE)))
 272                return -ENOENT;
 273        if (*flags & FOLL_WRITE)
 274                fault_flags |= FAULT_FLAG_WRITE;
 275        if (nonblocking)
 276                fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 277        if (*flags & FOLL_NOWAIT)
 278                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
 279        if (*flags & FOLL_TRIED) {
 280                VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
 281                fault_flags |= FAULT_FLAG_TRIED;
 282        }
 283
 284        ret = handle_mm_fault(mm, vma, address, fault_flags);
 285        if (ret & VM_FAULT_ERROR) {
 286                if (ret & VM_FAULT_OOM)
 287                        return -ENOMEM;
 288                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 289                        return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
 290                if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 291                        return -EFAULT;
 292                BUG();
 293        }
 294
 295        if (tsk) {
 296                if (ret & VM_FAULT_MAJOR)
 297                        tsk->maj_flt++;
 298                else
 299                        tsk->min_flt++;
 300        }
 301
 302        if (ret & VM_FAULT_RETRY) {
 303                if (nonblocking)
 304                        *nonblocking = 0;
 305                return -EBUSY;
 306        }
 307
 308        /*
 309         * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
 310         * necessary, even if maybe_mkwrite decided not to set pte_write. We
 311         * can thus safely do subsequent page lookups as if they were reads.
 312         * But only do so when looping for pte_write is futile: in some cases
 313         * userspace may also be wanting to write to the gotten user page,
 314         * which a read fault here might prevent (a readonly page might get
 315         * reCOWed by userspace write).
 316         */
 317        if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
 318                *flags &= ~FOLL_WRITE;
 319        return 0;
 320}
 321
 322static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 323{
 324        vm_flags_t vm_flags = vma->vm_flags;
 325
 326        if (vm_flags & (VM_IO | VM_PFNMAP))
 327                return -EFAULT;
 328
 329        if (gup_flags & FOLL_WRITE) {
 330                if (!(vm_flags & VM_WRITE)) {
 331                        if (!(gup_flags & FOLL_FORCE))
 332                                return -EFAULT;
 333                        /*
 334                         * We used to let the write,force case do COW in a
 335                         * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
 336                         * set a breakpoint in a read-only mapping of an
 337                         * executable, without corrupting the file (yet only
 338                         * when that file had been opened for writing!).
 339                         * Anon pages in shared mappings are surprising: now
 340                         * just reject it.
 341                         */
 342                        if (!is_cow_mapping(vm_flags)) {
 343                                WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
 344                                return -EFAULT;
 345                        }
 346                }
 347        } else if (!(vm_flags & VM_READ)) {
 348                if (!(gup_flags & FOLL_FORCE))
 349                        return -EFAULT;
 350                /*
 351                 * Is there actually any vma we can reach here which does not
 352                 * have VM_MAYREAD set?
 353                 */
 354                if (!(vm_flags & VM_MAYREAD))
 355                        return -EFAULT;
 356        }
 357        return 0;
 358}
 359
 360/**
 361 * __get_user_pages() - pin user pages in memory
 362 * @tsk:        task_struct of target task
 363 * @mm:         mm_struct of target mm
 364 * @start:      starting user address
 365 * @nr_pages:   number of pages from start to pin
 366 * @gup_flags:  flags modifying pin behaviour
 367 * @pages:      array that receives pointers to the pages pinned.
 368 *              Should be at least nr_pages long. Or NULL, if caller
 369 *              only intends to ensure the pages are faulted in.
 370 * @vmas:       array of pointers to vmas corresponding to each page.
 371 *              Or NULL if the caller does not require them.
 372 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 373 *
 374 * Returns number of pages pinned. This may be fewer than the number
 375 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 376 * were pinned, returns -errno. Each page returned must be released
 377 * with a put_page() call when it is finished with. vmas will only
 378 * remain valid while mmap_sem is held.
 379 *
 380 * Must be called with mmap_sem held.  It may be released.  See below.
 381 *
 382 * __get_user_pages walks a process's page tables and takes a reference to
 383 * each struct page that each user address corresponds to at a given
 384 * instant. That is, it takes the page that would be accessed if a user
 385 * thread accesses the given user virtual address at that instant.
 386 *
 387 * This does not guarantee that the page exists in the user mappings when
 388 * __get_user_pages returns, and there may even be a completely different
  389 * page there in some cases (e.g. if mmapped pagecache has been invalidated
  390 * and subsequently re-faulted). However it does guarantee that the page
  391 * won't be freed completely. Most callers simply care that the page
 392 * contains data that was valid *at some point in time*. Typically, an IO
 393 * or similar operation cannot guarantee anything stronger anyway because
 394 * locks can't be held over the syscall boundary.
 395 *
 396 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 397 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 398 * appropriate) must be called after the page is finished with, and
 399 * before put_page is called.
 400 *
 401 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 402 * or mmap_sem contention, and if waiting is needed to pin all pages,
 403 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 404 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 405 * this case.
 406 *
 407 * A caller using such a combination of @nonblocking and @gup_flags
 408 * must therefore hold the mmap_sem for reading only, and recognize
 409 * when it's been released.  Otherwise, it must be held for either
 410 * reading or writing and will not be released.
 411 *
 412 * In most cases, get_user_pages or get_user_pages_fast should be used
 413 * instead of __get_user_pages. __get_user_pages should be used only if
 414 * you need some special @gup_flags.
 415 */
 416long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 417                unsigned long start, unsigned long nr_pages,
 418                unsigned int gup_flags, struct page **pages,
 419                struct vm_area_struct **vmas, int *nonblocking)
 420{
 421        long i = 0;
 422        unsigned int page_mask;
 423        struct vm_area_struct *vma = NULL;
 424
 425        if (!nr_pages)
 426                return 0;
 427
 428        VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 429
 430        /*
 431         * If FOLL_FORCE is set then do not force a full fault as the hinting
 432         * fault information is unrelated to the reference behaviour of a task
 433         * using the address space
 434         */
 435        if (!(gup_flags & FOLL_FORCE))
 436                gup_flags |= FOLL_NUMA;
 437
 438        do {
 439                struct page *page;
 440                unsigned int foll_flags = gup_flags;
 441                unsigned int page_increm;
 442
  443                /* first iteration or crossing a vma boundary */
 444                if (!vma || start >= vma->vm_end) {
 445                        vma = find_extend_vma(mm, start);
 446                        if (!vma && in_gate_area(mm, start)) {
 447                                int ret;
 448                                ret = get_gate_page(mm, start & PAGE_MASK,
 449                                                gup_flags, &vma,
 450                                                pages ? &pages[i] : NULL);
 451                                if (ret)
 452                                        return i ? : ret;
 453                                page_mask = 0;
 454                                goto next_page;
 455                        }
 456
 457                        if (!vma || check_vma_flags(vma, gup_flags))
 458                                return i ? : -EFAULT;
 459                        if (is_vm_hugetlb_page(vma)) {
 460                                i = follow_hugetlb_page(mm, vma, pages, vmas,
 461                                                &start, &nr_pages, i,
 462                                                gup_flags);
 463                                continue;
 464                        }
 465                }
 466retry:
 467                /*
 468                 * If we have a pending SIGKILL, don't keep faulting pages and
 469                 * potentially allocating memory.
 470                 */
 471                if (unlikely(fatal_signal_pending(current)))
 472                        return i ? i : -ERESTARTSYS;
 473                cond_resched();
 474                page = follow_page_mask(vma, start, foll_flags, &page_mask);
 475                if (!page) {
 476                        int ret;
 477                        ret = faultin_page(tsk, vma, start, &foll_flags,
 478                                        nonblocking);
 479                        switch (ret) {
 480                        case 0:
 481                                goto retry;
 482                        case -EFAULT:
 483                        case -ENOMEM:
 484                        case -EHWPOISON:
 485                                return i ? i : ret;
 486                        case -EBUSY:
 487                                return i;
 488                        case -ENOENT:
 489                                goto next_page;
 490                        }
 491                        BUG();
 492                }
 493                if (IS_ERR(page))
 494                        return i ? i : PTR_ERR(page);
 495                if (pages) {
 496                        pages[i] = page;
 497                        flush_anon_page(vma, page, start);
 498                        flush_dcache_page(page);
 499                        page_mask = 0;
 500                }
 501next_page:
 502                if (vmas) {
 503                        vmas[i] = vma;
 504                        page_mask = 0;
 505                }
 506                page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
 507                if (page_increm > nr_pages)
 508                        page_increm = nr_pages;
 509                i += page_increm;
 510                start += page_increm * PAGE_SIZE;
 511                nr_pages -= page_increm;
 512        } while (nr_pages);
 513        return i;
 514}
 515EXPORT_SYMBOL(__get_user_pages);
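
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * one way a caller might use the @nonblocking protocol documented above.
 * The helper name and the choice of FOLL_GET as the only gup flag are
 * assumptions made for the example; the point is that when *nonblocking
 * comes back 0 the mmap_sem has already been released via up_read() and
 * must not be dropped again:
 *
 *	static long example_pin_may_drop_sem(struct task_struct *tsk,
 *					     struct mm_struct *mm,
 *					     unsigned long start,
 *					     unsigned long nr_pages,
 *					     struct page **pages)
 *	{
 *		int nonblocking = 1;
 *		long ret;
 *
 *		down_read(&mm->mmap_sem);
 *		ret = __get_user_pages(tsk, mm, start, nr_pages, FOLL_GET,
 *				       pages, NULL, &nonblocking);
 *		if (nonblocking)
 *			up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 */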
 516
 517/*
 518 * fixup_user_fault() - manually resolve a user page fault
 519 * @tsk:        the task_struct to use for page fault accounting, or
 520 *              NULL if faults are not to be recorded.
 521 * @mm:         mm_struct of target mm
 522 * @address:    user address
 523 * @fault_flags:flags to pass down to handle_mm_fault()
 524 *
  525 * This is meant to be called in the specific scenario where, for locking
  526 * reasons, we try to access user memory in atomic context (within a
  527 * pagefault_disable() section), that access fails with -EFAULT, and we want
  528 * to resolve the user fault before trying again.
 529 *
 530 * Typically this is meant to be used by the futex code.
 531 *
 532 * The main difference with get_user_pages() is that this function will
 533 * unconditionally call handle_mm_fault() which will in turn perform all the
 534 * necessary SW fixup of the dirty and young bits in the PTE, while
  535 * get_user_pages() only guarantees to update these in the struct page.
 536 *
 537 * This is important for some architectures where those bits also gate the
 538 * access permission to the page because they are maintained in software.  On
 539 * such architectures, gup() will not be enough to make a subsequent access
 540 * succeed.
 541 *
 542 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 543 */
 544int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 545                     unsigned long address, unsigned int fault_flags)
 546{
 547        struct vm_area_struct *vma;
 548        vm_flags_t vm_flags;
 549        int ret;
 550
 551        vma = find_extend_vma(mm, address);
 552        if (!vma || address < vma->vm_start)
 553                return -EFAULT;
 554
 555        vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
 556        if (!(vm_flags & vma->vm_flags))
 557                return -EFAULT;
 558
 559        ret = handle_mm_fault(mm, vma, address, fault_flags);
 560        if (ret & VM_FAULT_ERROR) {
 561                if (ret & VM_FAULT_OOM)
 562                        return -ENOMEM;
 563                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 564                        return -EHWPOISON;
 565                if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 566                        return -EFAULT;
 567                BUG();
 568        }
 569        if (tsk) {
 570                if (ret & VM_FAULT_MAJOR)
 571                        tsk->maj_flt++;
 572                else
 573                        tsk->min_flt++;
 574        }
 575        return 0;
 576}
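
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * the futex-style usage mentioned above, modelled on how the futex code
 * resolves a fault it hit inside a pagefault_disable() section.  The helper
 * name is made up for the example; mmap_sem is taken for reading around the
 * call, matching the filemap_fault()-like locking noted above:
 *
 *	static int example_fault_in_writeable(unsigned long uaddr)
 *	{
 *		struct mm_struct *mm = current->mm;
 *		int ret;
 *
 *		down_read(&mm->mmap_sem);
 *		ret = fixup_user_fault(current, mm, uaddr, FAULT_FLAG_WRITE);
 *		up_read(&mm->mmap_sem);
 *
 *		return ret;
 *	}
 */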
 577
 578static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 579                                                struct mm_struct *mm,
 580                                                unsigned long start,
 581                                                unsigned long nr_pages,
 582                                                int write, int force,
 583                                                struct page **pages,
 584                                                struct vm_area_struct **vmas,
 585                                                int *locked, bool notify_drop,
 586                                                unsigned int flags)
 587{
 588        long ret, pages_done;
 589        bool lock_dropped;
 590
 591        if (locked) {
 592                /* if VM_FAULT_RETRY can be returned, vmas become invalid */
 593                BUG_ON(vmas);
 594                /* check caller initialized locked */
 595                BUG_ON(*locked != 1);
 596        }
 597
 598        if (pages)
 599                flags |= FOLL_GET;
 600        if (write)
 601                flags |= FOLL_WRITE;
 602        if (force)
 603                flags |= FOLL_FORCE;
 604
 605        pages_done = 0;
 606        lock_dropped = false;
 607        for (;;) {
 608                ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
 609                                       vmas, locked);
 610                if (!locked)
 611                        /* VM_FAULT_RETRY couldn't trigger, bypass */
 612                        return ret;
 613
 614                /* VM_FAULT_RETRY cannot return errors */
 615                if (!*locked) {
 616                        BUG_ON(ret < 0);
 617                        BUG_ON(ret >= nr_pages);
 618                }
 619
 620                if (!pages)
 621                        /* If it's a prefault don't insist harder */
 622                        return ret;
 623
 624                if (ret > 0) {
 625                        nr_pages -= ret;
 626                        pages_done += ret;
 627                        if (!nr_pages)
 628                                break;
 629                }
 630                if (*locked) {
 631                        /* VM_FAULT_RETRY didn't trigger */
 632                        if (!pages_done)
 633                                pages_done = ret;
 634                        break;
 635                }
 636                /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
 637                pages += ret;
 638                start += ret << PAGE_SHIFT;
 639
 640                /*
 641                 * Repeat on the address that fired VM_FAULT_RETRY
 642                 * without FAULT_FLAG_ALLOW_RETRY but with
 643                 * FAULT_FLAG_TRIED.
 644                 */
 645                *locked = 1;
 646                lock_dropped = true;
 647                down_read(&mm->mmap_sem);
 648                ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
 649                                       pages, NULL, NULL);
 650                if (ret != 1) {
 651                        BUG_ON(ret > 1);
 652                        if (!pages_done)
 653                                pages_done = ret;
 654                        break;
 655                }
 656                nr_pages--;
 657                pages_done++;
 658                if (!nr_pages)
 659                        break;
 660                pages++;
 661                start += PAGE_SIZE;
 662        }
 663        if (notify_drop && lock_dropped && *locked) {
 664                /*
 665                 * We must let the caller know we temporarily dropped the lock
 666                 * and so the critical section protected by it was lost.
 667                 */
 668                up_read(&mm->mmap_sem);
 669                *locked = 0;
 670        }
 671        return pages_done;
 672}
 673
 674/*
 675 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 676 * paths better by using either get_user_pages_locked() or
 677 * get_user_pages_unlocked().
 678 *
 679 * get_user_pages_locked() is suitable to replace the form:
 680 *
 681 *      down_read(&mm->mmap_sem);
 682 *      do_something()
 683 *      get_user_pages(tsk, mm, ..., pages, NULL);
 684 *      up_read(&mm->mmap_sem);
 685 *
 686 *  to:
 687 *
 688 *      int locked = 1;
 689 *      down_read(&mm->mmap_sem);
 690 *      do_something()
 691 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 692 *      if (locked)
 693 *          up_read(&mm->mmap_sem);
 694 */
 695long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
 696                           unsigned long start, unsigned long nr_pages,
 697                           int write, int force, struct page **pages,
 698                           int *locked)
 699{
 700        return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
 701                                       pages, NULL, locked, true, FOLL_TOUCH);
 702}
 703EXPORT_SYMBOL(get_user_pages_locked);
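
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * a complete caller following the conversion shown above.  The helper name
 * and the read-only, non-forced arguments are assumptions made for the
 * example; each page returned still has to be released with put_page():
 *
 *	static long example_pin_for_read(unsigned long start,
 *					 unsigned long nr_pages,
 *					 struct page **pages)
 *	{
 *		struct mm_struct *mm = current->mm;
 *		int locked = 1;
 *		long ret;
 *
 *		down_read(&mm->mmap_sem);
 *		ret = get_user_pages_locked(current, mm, start, nr_pages,
 *					    0, 0, pages, &locked);
 *		if (locked)
 *			up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 */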
 704
 705/*
  706 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows
  707 * additional gup_flags to be passed as the last parameter (like FOLL_HWPOISON).
 708 *
 709 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 710 * caller if required (just like with __get_user_pages). "FOLL_GET",
 711 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 712 * according to the parameters "pages", "write", "force"
 713 * respectively.
 714 */
 715__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 716                                               unsigned long start, unsigned long nr_pages,
 717                                               int write, int force, struct page **pages,
 718                                               unsigned int gup_flags)
 719{
 720        long ret;
 721        int locked = 1;
 722        down_read(&mm->mmap_sem);
 723        ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
 724                                      pages, NULL, &locked, false, gup_flags);
 725        if (locked)
 726                up_read(&mm->mmap_sem);
 727        return ret;
 728}
 729EXPORT_SYMBOL(__get_user_pages_unlocked);
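
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * because FOLL_TOUCH is not implied here, a caller that also wants poisoned
 * pages reported as -EHWPOISON passes both flags explicitly, e.g.:
 *
 *	npinned = __get_user_pages_unlocked(current, current->mm, addr, 1,
 *					    write, 0, &page,
 *					    FOLL_TOUCH | FOLL_HWPOISON);
 *
 * where "npinned", "addr", "write" and "page" are assumed locals of the
 * caller.
 */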
 730
 731/*
 732 * get_user_pages_unlocked() is suitable to replace the form:
 733 *
 734 *      down_read(&mm->mmap_sem);
 735 *      get_user_pages(tsk, mm, ..., pages, NULL);
 736 *      up_read(&mm->mmap_sem);
 737 *
 738 *  with:
 739 *
 740 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 741 *
 742 * It is functionally equivalent to get_user_pages_fast so
  743 * get_user_pages_fast should be used instead when the two parameters
  744 * "tsk" and "mm" are respectively equal to current and current->mm,
  745 * and "force" is 0 (get_user_pages_fast does not take a
  746 * "force" parameter).
 747 */
 748long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 749                             unsigned long start, unsigned long nr_pages,
 750                             int write, int force, struct page **pages)
 751{
 752        return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
 753                                         force, pages, FOLL_TOUCH);
 754}
 755EXPORT_SYMBOL(get_user_pages_unlocked);
 756
 757/*
 758 * get_user_pages() - pin user pages in memory
 759 * @tsk:        the task_struct to use for page fault accounting, or
 760 *              NULL if faults are not to be recorded.
 761 * @mm:         mm_struct of target mm
 762 * @start:      starting user address
 763 * @nr_pages:   number of pages from start to pin
 764 * @write:      whether pages will be written to by the caller
 765 * @force:      whether to force access even when user mapping is currently
 766 *              protected (but never forces write access to shared mapping).
 767 * @pages:      array that receives pointers to the pages pinned.
 768 *              Should be at least nr_pages long. Or NULL, if caller
 769 *              only intends to ensure the pages are faulted in.
 770 * @vmas:       array of pointers to vmas corresponding to each page.
 771 *              Or NULL if the caller does not require them.
 772 *
 773 * Returns number of pages pinned. This may be fewer than the number
 774 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 775 * were pinned, returns -errno. Each page returned must be released
 776 * with a put_page() call when it is finished with. vmas will only
 777 * remain valid while mmap_sem is held.
 778 *
 779 * Must be called with mmap_sem held for read or write.
 780 *
 781 * get_user_pages walks a process's page tables and takes a reference to
 782 * each struct page that each user address corresponds to at a given
 783 * instant. That is, it takes the page that would be accessed if a user
 784 * thread accesses the given user virtual address at that instant.
 785 *
 786 * This does not guarantee that the page exists in the user mappings when
 787 * get_user_pages returns, and there may even be a completely different
  788 * page there in some cases (e.g. if mmapped pagecache has been invalidated
  789 * and subsequently re-faulted). However it does guarantee that the page
  790 * won't be freed completely. Most callers simply care that the page
 791 * contains data that was valid *at some point in time*. Typically, an IO
 792 * or similar operation cannot guarantee anything stronger anyway because
 793 * locks can't be held over the syscall boundary.
 794 *
 795 * If write=0, the page must not be written to. If the page is written to,
 796 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 797 * after the page is finished with, and before put_page is called.
 798 *
 799 * get_user_pages is typically used for fewer-copy IO operations, to get a
 800 * handle on the memory by some means other than accesses via the user virtual
 801 * addresses. The pages may be submitted for DMA to devices or accessed via
 802 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 803 * use the correct cache flushing APIs.
 804 *
 805 * See also get_user_pages_fast, for performance critical applications.
 806 *
 807 * get_user_pages should be phased out in favor of
 808 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 809 * should use get_user_pages because it cannot pass
 810 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 811 */
 812long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 813                unsigned long start, unsigned long nr_pages, int write,
 814                int force, struct page **pages, struct vm_area_struct **vmas)
 815{
 816        return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
 817                                       pages, vmas, NULL, false, FOLL_TOUCH);
 818}
 819EXPORT_SYMBOL(get_user_pages);
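
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * the pin / use / dirty / release sequence described above, for a caller
 * that writes to the pages through the kernel mapping.  The helper name is
 * made up and zeroing whole pages merely stands in for real work; note
 * set_page_dirty_lock() before put_page(), as required when write != 0:
 *
 *	static long example_zero_user_pages(unsigned long start,
 *					    unsigned long nr_pages,
 *					    struct page **pages)
 *	{
 *		struct mm_struct *mm = current->mm;
 *		long got, i;
 *
 *		down_read(&mm->mmap_sem);
 *		got = get_user_pages(current, mm, start, nr_pages,
 *				     1, 0, pages, NULL);
 *		up_read(&mm->mmap_sem);
 *
 *		for (i = 0; i < got; i++) {
 *			void *kaddr = kmap(pages[i]);
 *
 *			memset(kaddr, 0, PAGE_SIZE);
 *			kunmap(pages[i]);
 *			set_page_dirty_lock(pages[i]);
 *			put_page(pages[i]);
 *		}
 *		return got;
 *	}
 */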
 820
 821/**
 822 * get_dump_page() - pin user page in memory while writing it to core dump
 823 * @addr: user address
 824 *
 825 * Returns struct page pointer of user page pinned for dump,
 826 * to be freed afterwards by page_cache_release() or put_page().
 827 *
 828 * Returns NULL on any kind of failure - a hole must then be inserted into
 829 * the corefile, to preserve alignment with its headers; and also returns
 830 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 831 * allowing a hole to be left in the corefile to save diskspace.
 832 *
 833 * Called without mmap_sem, but after all other threads have been killed.
 834 */
 835#ifdef CONFIG_ELF_CORE
 836struct page *get_dump_page(unsigned long addr)
 837{
 838        struct vm_area_struct *vma;
 839        struct page *page;
 840
 841        if (__get_user_pages(current, current->mm, addr, 1,
 842                             FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
 843                             NULL) < 1)
 844                return NULL;
 845        flush_cache_page(vma, addr, page_to_pfn(page));
 846        return page;
 847}
 848#endif /* CONFIG_ELF_CORE */
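
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * the shape of the core-dump loop implied above.  A NULL return means "emit
 * a hole"; "cprm", "dump_emit_page" and "dump_skip_page" are hypothetical
 * stand-ins for the binfmt writer state and helpers:
 *
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			dump_emit_page(cprm, page);
 *			page_cache_release(page);
 *		} else {
 *			dump_skip_page(cprm);
 *		}
 *	}
 */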
 849
 850/*
 851 * Generic RCU Fast GUP
 852 *
 853 * get_user_pages_fast attempts to pin user pages by walking the page
 854 * tables directly and avoids taking locks. Thus the walker needs to be
 855 * protected from page table pages being freed from under it, and should
 856 * block any THP splits.
 857 *
 858 * One way to achieve this is to have the walker disable interrupts, and
 859 * rely on IPIs from the TLB flushing code blocking before the page table
 860 * pages are freed. This is unsuitable for architectures that do not need
 861 * to broadcast an IPI when invalidating TLBs.
 862 *
  863 * Another way to achieve this is to batch up the pages containing page tables
  864 * belonging to more than one mm_user, then use an rcu_sched callback to free those
 865 * pages. Disabling interrupts will allow the fast_gup walker to both block
 866 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 867 * (which is a relatively rare event). The code below adopts this strategy.
 868 *
 869 * Before activating this code, please be aware that the following assumptions
 870 * are currently made:
 871 *
 872 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 873 *      pages containing page tables.
 874 *
 875 *  *) THP splits will broadcast an IPI, this can be achieved by overriding
 876 *      pmdp_splitting_flush.
 877 *
 878 *  *) ptes can be read atomically by the architecture.
 879 *
 880 *  *) access_ok is sufficient to validate userspace address ranges.
 881 *
 882 * The last two assumptions can be relaxed by the addition of helper functions.
 883 *
 884 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 885 */
 886#ifdef CONFIG_HAVE_GENERIC_RCU_GUP
 887
 888#ifdef __HAVE_ARCH_PTE_SPECIAL
 889static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 890                         int write, struct page **pages, int *nr)
 891{
 892        pte_t *ptep, *ptem;
 893        int ret = 0;
 894
 895        ptem = ptep = pte_offset_map(&pmd, addr);
 896        do {
 897                /*
 898                 * In the line below we are assuming that the pte can be read
 899                 * atomically. If this is not the case for your architecture,
 900                 * please wrap this in a helper function!
 901                 *
 902                 * for an example see gup_get_pte in arch/x86/mm/gup.c
 903                 */
 904                pte_t pte = ACCESS_ONCE(*ptep);
 905                struct page *page;
 906
 907                /*
 908                 * Similar to the PMD case below, NUMA hinting must take slow
 909                 * path using the pte_protnone check.
 910                 */
 911                if (!pte_present(pte) || pte_special(pte) ||
 912                        pte_protnone(pte) || (write && !pte_write(pte)))
 913                        goto pte_unmap;
 914
 915                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 916                page = pte_page(pte);
 917
 918                if (!page_cache_get_speculative(page))
 919                        goto pte_unmap;
 920
 921                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
 922                        put_page(page);
 923                        goto pte_unmap;
 924                }
 925
 926                pages[*nr] = page;
 927                (*nr)++;
 928
 929        } while (ptep++, addr += PAGE_SIZE, addr != end);
 930
 931        ret = 1;
 932
 933pte_unmap:
 934        pte_unmap(ptem);
 935        return ret;
 936}
 937#else
 938
 939/*
 940 * If we can't determine whether or not a pte is special, then fail immediately
 941 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 942 * to be special.
 943 *
 944 * For a futex to be placed on a THP tail page, get_futex_key requires a
 945 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 946 * useful to have gup_huge_pmd even if we can't operate on ptes.
 947 */
 948static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 949                         int write, struct page **pages, int *nr)
 950{
 951        return 0;
 952}
 953#endif /* __HAVE_ARCH_PTE_SPECIAL */
 954
 955static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 956                unsigned long end, int write, struct page **pages, int *nr)
 957{
 958        struct page *head, *page, *tail;
 959        int refs;
 960
 961        if (write && !pmd_write(orig))
 962                return 0;
 963
 964        refs = 0;
 965        head = pmd_page(orig);
 966        page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 967        tail = page;
 968        do {
 969                VM_BUG_ON_PAGE(compound_head(page) != head, page);
 970                pages[*nr] = page;
 971                (*nr)++;
 972                page++;
 973                refs++;
 974        } while (addr += PAGE_SIZE, addr != end);
 975
 976        if (!page_cache_add_speculative(head, refs)) {
 977                *nr -= refs;
 978                return 0;
 979        }
 980
 981        if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
 982                *nr -= refs;
 983                while (refs--)
 984                        put_page(head);
 985                return 0;
 986        }
 987
 988        /*
 989         * Any tail pages need their mapcount reference taken before we
 990         * return. (This allows the THP code to bump their ref count when
 991         * they are split into base pages).
 992         */
 993        while (refs--) {
 994                if (PageTail(tail))
 995                        get_huge_page_tail(tail);
 996                tail++;
 997        }
 998
 999        return 1;
1000}
1001
1002static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
1003                unsigned long end, int write, struct page **pages, int *nr)
1004{
1005        struct page *head, *page, *tail;
1006        int refs;
1007
1008        if (write && !pud_write(orig))
1009                return 0;
1010
1011        refs = 0;
1012        head = pud_page(orig);
1013        page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1014        tail = page;
1015        do {
1016                VM_BUG_ON_PAGE(compound_head(page) != head, page);
1017                pages[*nr] = page;
1018                (*nr)++;
1019                page++;
1020                refs++;
1021        } while (addr += PAGE_SIZE, addr != end);
1022
1023        if (!page_cache_add_speculative(head, refs)) {
1024                *nr -= refs;
1025                return 0;
1026        }
1027
1028        if (unlikely(pud_val(orig) != pud_val(*pudp))) {
1029                *nr -= refs;
1030                while (refs--)
1031                        put_page(head);
1032                return 0;
1033        }
1034
1035        while (refs--) {
1036                if (PageTail(tail))
1037                        get_huge_page_tail(tail);
1038                tail++;
1039        }
1040
1041        return 1;
1042}
1043
1044static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
1045                        unsigned long end, int write,
1046                        struct page **pages, int *nr)
1047{
1048        int refs;
1049        struct page *head, *page, *tail;
1050
1051        if (write && !pgd_write(orig))
1052                return 0;
1053
1054        refs = 0;
1055        head = pgd_page(orig);
1056        page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
1057        tail = page;
1058        do {
1059                VM_BUG_ON_PAGE(compound_head(page) != head, page);
1060                pages[*nr] = page;
1061                (*nr)++;
1062                page++;
1063                refs++;
1064        } while (addr += PAGE_SIZE, addr != end);
1065
1066        if (!page_cache_add_speculative(head, refs)) {
1067                *nr -= refs;
1068                return 0;
1069        }
1070
1071        if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
1072                *nr -= refs;
1073                while (refs--)
1074                        put_page(head);
1075                return 0;
1076        }
1077
1078        while (refs--) {
1079                if (PageTail(tail))
1080                        get_huge_page_tail(tail);
1081                tail++;
1082        }
1083
1084        return 1;
1085}
1086
1087static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1088                int write, struct page **pages, int *nr)
1089{
1090        unsigned long next;
1091        pmd_t *pmdp;
1092
1093        pmdp = pmd_offset(&pud, addr);
1094        do {
1095                pmd_t pmd = READ_ONCE(*pmdp);
1096
1097                next = pmd_addr_end(addr, end);
1098                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
1099                        return 0;
1100
1101                if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
1102                        /*
1103                         * NUMA hinting faults need to be handled in the GUP
1104                         * slowpath for accounting purposes and so that they
1105                         * can be serialised against THP migration.
1106                         */
1107                        if (pmd_protnone(pmd))
1108                                return 0;
1109
1110                        if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
1111                                pages, nr))
1112                                return 0;
1113
1114                } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
1115                        /*
1116                         * architecture have different format for hugetlbfs
 1117                         * architectures can use a different format for the
 1118                         * hugetlbfs pmd than for the THP pmd
1119                        if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
1120                                         PMD_SHIFT, next, write, pages, nr))
1121                                return 0;
1122                } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
1123                                return 0;
1124        } while (pmdp++, addr = next, addr != end);
1125
1126        return 1;
1127}
1128
1129static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
1130                         int write, struct page **pages, int *nr)
1131{
1132        unsigned long next;
1133        pud_t *pudp;
1134
1135        pudp = pud_offset(&pgd, addr);
1136        do {
1137                pud_t pud = READ_ONCE(*pudp);
1138
1139                next = pud_addr_end(addr, end);
1140                if (pud_none(pud))
1141                        return 0;
1142                if (unlikely(pud_huge(pud))) {
1143                        if (!gup_huge_pud(pud, pudp, addr, next, write,
1144                                          pages, nr))
1145                                return 0;
1146                } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
1147                        if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
1148                                         PUD_SHIFT, next, write, pages, nr))
1149                                return 0;
1150                } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
1151                        return 0;
1152        } while (pudp++, addr = next, addr != end);
1153
1154        return 1;
1155}
1156
1157/*
1158 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
1159 * the regular GUP. It will only return non-negative values.
1160 */
1161int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1162                          struct page **pages)
1163{
1164        struct mm_struct *mm = current->mm;
1165        unsigned long addr, len, end;
1166        unsigned long next, flags;
1167        pgd_t *pgdp;
1168        int nr = 0;
1169
1170        start &= PAGE_MASK;
1171        addr = start;
1172        len = (unsigned long) nr_pages << PAGE_SHIFT;
1173        end = start + len;
1174
1175        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1176                                        start, len)))
1177                return 0;
1178
1179        /*
1180         * Disable interrupts.  We use the nested form as we can already have
1181         * interrupts disabled by get_futex_key.
1182         *
1183         * With interrupts disabled, we block page table pages from being
1184         * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
1185         * for more details.
1186         *
1187         * We do not adopt an rcu_read_lock(.) here as we also want to
1188         * block IPIs that come from THPs splitting.
1189         */
1190
1191        local_irq_save(flags);
1192        pgdp = pgd_offset(mm, addr);
1193        do {
1194                pgd_t pgd = ACCESS_ONCE(*pgdp);
1195
1196                next = pgd_addr_end(addr, end);
1197                if (pgd_none(pgd))
1198                        break;
1199                if (unlikely(pgd_huge(pgd))) {
1200                        if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
1201                                          pages, &nr))
1202                                break;
1203                } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1204                        if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1205                                         PGDIR_SHIFT, next, write, pages, &nr))
1206                                break;
1207                } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
1208                        break;
1209        } while (pgdp++, addr = next, addr != end);
1210        local_irq_restore(flags);
1211
1212        return nr;
1213}
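
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * the kind of single-page pin from atomic context this IRQ-safe variant is
 * meant for, in the spirit of the get_futex_key() use mentioned above.
 * "uaddr" is an assumed user address local to the caller, and the caller is
 * expected to fall back to the slow path if no page was pinned:
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(uaddr & PAGE_MASK, 1, 1, &page) != 1)
 *		return -EFAULT;
 *	...
 *	put_page(page);
 */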
1214
1215/**
1216 * get_user_pages_fast() - pin user pages in memory
1217 * @start:      starting user address
1218 * @nr_pages:   number of pages from start to pin
1219 * @write:      whether pages will be written to
1220 * @pages:      array that receives pointers to the pages pinned.
1221 *              Should be at least nr_pages long.
1222 *
1223 * Attempt to pin user pages in memory without taking mm->mmap_sem.
1224 * If not successful, it will fall back to taking the lock and
1225 * calling get_user_pages().
1226 *
1227 * Returns number of pages pinned. This may be fewer than the number
1228 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1229 * were pinned, returns -errno.
1230 */
1231int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1232                        struct page **pages)
1233{
1234        struct mm_struct *mm = current->mm;
1235        int nr, ret;
1236
1237        start &= PAGE_MASK;
1238        nr = __get_user_pages_fast(start, nr_pages, write, pages);
1239        ret = nr;
1240
1241        if (nr < nr_pages) {
1242                /* Try to get the remaining pages with get_user_pages */
1243                start += nr << PAGE_SHIFT;
1244                pages += nr;
1245
1246                ret = get_user_pages_unlocked(current, mm, start,
1247                                              nr_pages - nr, write, 0, pages);
1248
1249                /* Have to be a bit careful with return values */
1250                if (nr > 0) {
1251                        if (ret < 0)
1252                                ret = nr;
1253                        else
1254                                ret += nr;
1255                }
1256        }
1257
1258        return ret;
1259}
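
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * pinning a user buffer via the fast path and releasing it afterwards.  The
 * helper name is made up; as with get_user_pages(), pages that were written
 * to must be dirtied before they are released:
 *
 *	static int example_pin_user_buffer(unsigned long start, int nr_pages,
 *					   struct page **pages)
 *	{
 *		int got, i;
 *
 *		got = get_user_pages_fast(start, nr_pages, 1, pages);
 *		if (got <= 0)
 *			return got ? got : -EFAULT;
 *
 *		... use the pages, e.g. map them for DMA ...
 *
 *		for (i = 0; i < got; i++) {
 *			set_page_dirty_lock(pages[i]);
 *			put_page(pages[i]);
 *		}
 *		return got;
 *	}
 */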
1260
1261#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
1262