linux/fs/userfaultfd.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  fs/userfaultfd.c
   4 *
   5 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
   6 *  Copyright (C) 2008-2009 Red Hat, Inc.
   7 *  Copyright (C) 2015  Red Hat, Inc.
   8 *
   9 *  Some part derived from fs/eventfd.c (anon inode setup) and
  10 *  mm/ksm.c (mm hashing).
  11 */
  12
  13#include <linux/list.h>
  14#include <linux/hashtable.h>
  15#include <linux/sched/signal.h>
  16#include <linux/sched/mm.h>
  17#include <linux/mm.h>
  18#include <linux/mmu_notifier.h>
  19#include <linux/poll.h>
  20#include <linux/slab.h>
  21#include <linux/seq_file.h>
  22#include <linux/file.h>
  23#include <linux/bug.h>
  24#include <linux/anon_inodes.h>
  25#include <linux/syscalls.h>
  26#include <linux/userfaultfd_k.h>
  27#include <linux/mempolicy.h>
  28#include <linux/ioctl.h>
  29#include <linux/security.h>
  30#include <linux/hugetlb.h>
  31
  32int sysctl_unprivileged_userfaultfd __read_mostly;
  33
  34static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
  35
  36enum userfaultfd_state {
  37        UFFD_STATE_WAIT_API,
  38        UFFD_STATE_RUNNING,
  39};
  40
  41/*
  42 * Start with fault_pending_wqh and fault_wqh so they're more likely
  43 * to be in the same cacheline.
  44 *
  45 * Locking order:
  46 *      fd_wqh.lock
  47 *              fault_pending_wqh.lock
  48 *                      fault_wqh.lock
  49 *              event_wqh.lock
  50 *
  51 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
  52 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
  53 * also taken in IRQ context.
  54 */
  55struct userfaultfd_ctx {
  56        /* waitqueue head for the pending (i.e. not read) userfaults */
  57        wait_queue_head_t fault_pending_wqh;
  58        /* waitqueue head for the userfaults */
  59        wait_queue_head_t fault_wqh;
  60        /* waitqueue head for the pseudo fd to wakeup poll/read */
  61        wait_queue_head_t fd_wqh;
  62        /* waitqueue head for events */
  63        wait_queue_head_t event_wqh;
  64        /* a refile sequence protected by fault_pending_wqh lock */
  65        seqcount_spinlock_t refile_seq;
  66        /* pseudo fd refcounting */
  67        refcount_t refcount;
  68        /* userfaultfd syscall flags */
  69        unsigned int flags;
  70        /* features requested from the userspace */
  71        unsigned int features;
  72        /* state machine */
  73        enum userfaultfd_state state;
  74        /* released */
  75        bool released;
  76        /* memory mappings are changing because of non-cooperative event */
  77        bool mmap_changing;
   78        /* mm with one or more vmas attached to this userfaultfd_ctx */
  79        struct mm_struct *mm;
  80};
  81
  82struct userfaultfd_fork_ctx {
  83        struct userfaultfd_ctx *orig;
  84        struct userfaultfd_ctx *new;
  85        struct list_head list;
  86};
  87
  88struct userfaultfd_unmap_ctx {
  89        struct userfaultfd_ctx *ctx;
  90        unsigned long start;
  91        unsigned long end;
  92        struct list_head list;
  93};
  94
  95struct userfaultfd_wait_queue {
  96        struct uffd_msg msg;
  97        wait_queue_entry_t wq;
  98        struct userfaultfd_ctx *ctx;
  99        bool waken;
 100};
 101
 102struct userfaultfd_wake_range {
 103        unsigned long start;
 104        unsigned long len;
 105};
 106
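     /*
      * Wake function installed on every userfaultfd_wait_queue entry: wake
      * the blocked faulting task only if its fault address falls inside the
      * wake range passed as the key (len == 0 wakes regardless of address),
      * and autoremove the entry from the waitqueue on a successful wakeup.
      */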
 107static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
 108                                     int wake_flags, void *key)
 109{
 110        struct userfaultfd_wake_range *range = key;
 111        int ret;
 112        struct userfaultfd_wait_queue *uwq;
 113        unsigned long start, len;
 114
 115        uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
 116        ret = 0;
 117        /* len == 0 means wake all */
 118        start = range->start;
 119        len = range->len;
 120        if (len && (start > uwq->msg.arg.pagefault.address ||
 121                    start + len <= uwq->msg.arg.pagefault.address))
 122                goto out;
 123        WRITE_ONCE(uwq->waken, true);
 124        /*
 125         * The Program-Order guarantees provided by the scheduler
 126         * ensure uwq->waken is visible before the task is woken.
 127         */
 128        ret = wake_up_state(wq->private, mode);
 129        if (ret) {
 130                /*
 131                 * Wake only once, autoremove behavior.
 132                 *
 133                 * After the effect of list_del_init is visible to the other
 134                 * CPUs, the waitqueue may disappear from under us, see the
 135                 * !list_empty_careful() in handle_userfault().
 136                 *
 137                 * try_to_wake_up() has an implicit smp_mb(), and the
 138                 * wq->private is read before calling the extern function
  139         * "wake_up_state" (which in turn calls try_to_wake_up).
 140                 */
 141                list_del_init(&wq->entry);
 142        }
 143out:
 144        return ret;
 145}
 146
 147/**
 148 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 149 * context.
 150 * @ctx: [in] Pointer to the userfaultfd context.
 151 */
 152static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
 153{
 154        refcount_inc(&ctx->refcount);
 155}
 156
 157/**
 158 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 159 * context.
 160 * @ctx: [in] Pointer to userfaultfd context.
 161 *
 162 * The userfaultfd context reference must have been previously acquired either
 163 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 164 */
 165static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
 166{
 167        if (refcount_dec_and_test(&ctx->refcount)) {
 168                VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
 169                VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
 170                VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
 171                VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
 172                VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
 173                VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
 174                VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
 175                VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
 176                mmdrop(ctx->mm);
 177                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
 178        }
 179}
 180
 181static inline void msg_init(struct uffd_msg *msg)
 182{
 183        BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
 184        /*
  185         * Must use memset to zero out the padding or kernel data is
 186         * leaked to userland.
 187         */
 188        memset(msg, 0, sizeof(struct uffd_msg));
 189}
 190
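     /*
      * Build the UFFD_EVENT_PAGEFAULT message reported to userland: the
      * faulting address, the WRITE/WP/MINOR flags derived from the fault
      * flags and the registration reason, and optionally the thread id of
      * the faulting task.
      */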
 191static inline struct uffd_msg userfault_msg(unsigned long address,
 192                                            unsigned int flags,
 193                                            unsigned long reason,
 194                                            unsigned int features)
 195{
 196        struct uffd_msg msg;
 197        msg_init(&msg);
 198        msg.event = UFFD_EVENT_PAGEFAULT;
 199        msg.arg.pagefault.address = address;
 200        /*
 201         * These flags indicate why the userfault occurred:
 202         * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
 203         * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
 204         * - Neither of these flags being set indicates a MISSING fault.
 205         *
 206         * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
 207         * fault. Otherwise, it was a read fault.
 208         */
 209        if (flags & FAULT_FLAG_WRITE)
 210                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
 211        if (reason & VM_UFFD_WP)
 212                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
 213        if (reason & VM_UFFD_MINOR)
 214                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
 215        if (features & UFFD_FEATURE_THREAD_ID)
 216                msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
 217        return msg;
 218}
 219
 220#ifdef CONFIG_HUGETLB_PAGE
 221/*
 222 * Same functionality as userfaultfd_must_wait below with modifications for
 223 * hugepmd ranges.
 224 */
 225static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 226                                         struct vm_area_struct *vma,
 227                                         unsigned long address,
 228                                         unsigned long flags,
 229                                         unsigned long reason)
 230{
 231        struct mm_struct *mm = ctx->mm;
 232        pte_t *ptep, pte;
 233        bool ret = true;
 234
 235        mmap_assert_locked(mm);
 236
 237        ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
 238
 239        if (!ptep)
 240                goto out;
 241
 242        ret = false;
 243        pte = huge_ptep_get(ptep);
 244
 245        /*
 246         * Lockless access: we're in a wait_event so it's ok if it
 247         * changes under us.
 248         */
 249        if (huge_pte_none(pte))
 250                ret = true;
 251        if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
 252                ret = true;
 253out:
 254        return ret;
 255}
 256#else
 257static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 258                                         struct vm_area_struct *vma,
 259                                         unsigned long address,
 260                                         unsigned long flags,
 261                                         unsigned long reason)
 262{
 263        return false;   /* should never get here */
 264}
 265#endif /* CONFIG_HUGETLB_PAGE */
 266
 267/*
  268 * Verify the pagetables are still not ok after having registered into
 269 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 270 * userfault that has already been resolved, if userfaultfd_read and
 271 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 272 * threads.
 273 */
 274static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 275                                         unsigned long address,
 276                                         unsigned long flags,
 277                                         unsigned long reason)
 278{
 279        struct mm_struct *mm = ctx->mm;
 280        pgd_t *pgd;
 281        p4d_t *p4d;
 282        pud_t *pud;
 283        pmd_t *pmd, _pmd;
 284        pte_t *pte;
 285        bool ret = true;
 286
 287        mmap_assert_locked(mm);
 288
 289        pgd = pgd_offset(mm, address);
 290        if (!pgd_present(*pgd))
 291                goto out;
 292        p4d = p4d_offset(pgd, address);
 293        if (!p4d_present(*p4d))
 294                goto out;
 295        pud = pud_offset(p4d, address);
 296        if (!pud_present(*pud))
 297                goto out;
 298        pmd = pmd_offset(pud, address);
 299        /*
 300         * READ_ONCE must function as a barrier with narrower scope
 301         * and it must be equivalent to:
 302         *      _pmd = *pmd; barrier();
 303         *
 304         * This is to deal with the instability (as in
 305         * pmd_trans_unstable) of the pmd.
 306         */
 307        _pmd = READ_ONCE(*pmd);
 308        if (pmd_none(_pmd))
 309                goto out;
 310
 311        ret = false;
 312        if (!pmd_present(_pmd))
 313                goto out;
 314
 315        if (pmd_trans_huge(_pmd)) {
 316                if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
 317                        ret = true;
 318                goto out;
 319        }
 320
 321        /*
 322         * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
 323         * and use the standard pte_offset_map() instead of parsing _pmd.
 324         */
 325        pte = pte_offset_map(pmd, address);
 326        /*
 327         * Lockless access: we're in a wait_event so it's ok if it
 328         * changes under us.
 329         */
 330        if (pte_none(*pte))
 331                ret = true;
 332        if (!pte_write(*pte) && (reason & VM_UFFD_WP))
 333                ret = true;
 334        pte_unmap(pte);
 335
 336out:
 337        return ret;
 338}
 339
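     /*
      * Map the fault flags to the task state to sleep in while waiting for
      * userland to resolve the fault: interruptible for
      * FAULT_FLAG_INTERRUPTIBLE, killable for FAULT_FLAG_KILLABLE,
      * uninterruptible otherwise.
      */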
 340static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
 341{
 342        if (flags & FAULT_FLAG_INTERRUPTIBLE)
 343                return TASK_INTERRUPTIBLE;
 344
 345        if (flags & FAULT_FLAG_KILLABLE)
 346                return TASK_KILLABLE;
 347
 348        return TASK_UNINTERRUPTIBLE;
 349}
 350
 351/*
 352 * The locking rules involved in returning VM_FAULT_RETRY depending on
 353 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 354 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 355 * recommendation in __lock_page_or_retry is not an understatement.
 356 *
 357 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 358 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 359 * not set.
 360 *
 361 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 362 * set, VM_FAULT_RETRY can still be returned if and only if there are
 363 * fatal_signal_pending()s, and the mmap_lock must be released before
 364 * returning it.
 365 */
 366vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 367{
 368        struct mm_struct *mm = vmf->vma->vm_mm;
 369        struct userfaultfd_ctx *ctx;
 370        struct userfaultfd_wait_queue uwq;
 371        vm_fault_t ret = VM_FAULT_SIGBUS;
 372        bool must_wait;
 373        unsigned int blocking_state;
 374
 375        /*
 376         * We don't do userfault handling for the final child pid update.
 377         *
 378         * We also don't do userfault handling during
 379         * coredumping. hugetlbfs has the special
 380         * follow_hugetlb_page() to skip missing pages in the
 381         * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
 382         * the no_page_table() helper in follow_page_mask(), but the
 383         * shmem_vm_ops->fault method is invoked even during
 384         * coredumping without mmap_lock and it ends up here.
 385         */
 386        if (current->flags & (PF_EXITING|PF_DUMPCORE))
 387                goto out;
 388
 389        /*
 390         * Coredumping runs without mmap_lock so we can only check that
 391         * the mmap_lock is held, if PF_DUMPCORE was not set.
 392         */
 393        mmap_assert_locked(mm);
 394
 395        ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 396        if (!ctx)
 397                goto out;
 398
 399        BUG_ON(ctx->mm != mm);
 400
 401        /* Any unrecognized flag is a bug. */
 402        VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
 403        /* 0 or > 1 flags set is a bug; we expect exactly 1. */
 404        VM_BUG_ON(!reason || (reason & (reason - 1)));
 405
 406        if (ctx->features & UFFD_FEATURE_SIGBUS)
 407                goto out;
 408        if ((vmf->flags & FAULT_FLAG_USER) == 0 &&
 409            ctx->flags & UFFD_USER_MODE_ONLY) {
 410                printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
 411                        "sysctl knob to 1 if kernel faults must be handled "
 412                        "without obtaining CAP_SYS_PTRACE capability\n");
 413                goto out;
 414        }
 415
 416        /*
  417         * If it's already released don't get it. This avoids looping
 418         * in __get_user_pages if userfaultfd_release waits on the
 419         * caller of handle_userfault to release the mmap_lock.
 420         */
 421        if (unlikely(READ_ONCE(ctx->released))) {
 422                /*
 423                 * Don't return VM_FAULT_SIGBUS in this case, so a non
 424                 * cooperative manager can close the uffd after the
  425                 * last UFFDIO_COPY, without risking triggering an
 426                 * involuntary SIGBUS if the process was starting the
 427                 * userfaultfd while the userfaultfd was still armed
 428                 * (but after the last UFFDIO_COPY). If the uffd
 429                 * wasn't already closed when the userfault reached
 430                 * this point, that would normally be solved by
 431                 * userfaultfd_must_wait returning 'false'.
 432                 *
 433                 * If we were to return VM_FAULT_SIGBUS here, the non
 434                 * cooperative manager would be instead forced to
 435                 * always call UFFDIO_UNREGISTER before it can safely
 436                 * close the uffd.
 437                 */
 438                ret = VM_FAULT_NOPAGE;
 439                goto out;
 440        }
 441
 442        /*
 443         * Check that we can return VM_FAULT_RETRY.
 444         *
 445         * NOTE: it should become possible to return VM_FAULT_RETRY
 446         * even if FAULT_FLAG_TRIED is set without leading to gup()
 447         * -EBUSY failures, if the userfaultfd is to be extended for
 448         * VM_UFFD_WP tracking and we intend to arm the userfault
 449         * without first stopping userland access to the memory. For
 450         * VM_UFFD_MISSING userfaults this is enough for now.
 451         */
 452        if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
 453                /*
 454                 * Validate the invariant that nowait must allow retry
 455                 * to be sure not to return SIGBUS erroneously on
 456                 * nowait invocations.
 457                 */
 458                BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
 459#ifdef CONFIG_DEBUG_VM
 460                if (printk_ratelimit()) {
 461                        printk(KERN_WARNING
 462                               "FAULT_FLAG_ALLOW_RETRY missing %x\n",
 463                               vmf->flags);
 464                        dump_stack();
 465                }
 466#endif
 467                goto out;
 468        }
 469
 470        /*
 471         * Handle nowait, not much to do other than tell it to retry
 472         * and wait.
 473         */
 474        ret = VM_FAULT_RETRY;
 475        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
 476                goto out;
 477
 478        /* take the reference before dropping the mmap_lock */
 479        userfaultfd_ctx_get(ctx);
 480
 481        init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
 482        uwq.wq.private = current;
 483        uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
 484                        ctx->features);
 485        uwq.ctx = ctx;
 486        uwq.waken = false;
 487
 488        blocking_state = userfaultfd_get_blocking_state(vmf->flags);
 489
 490        spin_lock_irq(&ctx->fault_pending_wqh.lock);
 491        /*
 492         * After the __add_wait_queue the uwq is visible to userland
 493         * through poll/read().
 494         */
 495        __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
 496        /*
 497         * The smp_mb() after __set_current_state prevents the reads
  498         * following the spin_unlock from happening before the list_add in
 499         * __add_wait_queue.
 500         */
 501        set_current_state(blocking_state);
 502        spin_unlock_irq(&ctx->fault_pending_wqh.lock);
 503
 504        if (!is_vm_hugetlb_page(vmf->vma))
 505                must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
 506                                                  reason);
 507        else
 508                must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
 509                                                       vmf->address,
 510                                                       vmf->flags, reason);
 511        mmap_read_unlock(mm);
 512
 513        if (likely(must_wait && !READ_ONCE(ctx->released))) {
 514                wake_up_poll(&ctx->fd_wqh, EPOLLIN);
 515                schedule();
 516        }
 517
 518        __set_current_state(TASK_RUNNING);
 519
 520        /*
 521         * Here we race with the list_del; list_add in
 522         * userfaultfd_ctx_read(), however because we don't ever run
 523         * list_del_init() to refile across the two lists, the prev
 524         * and next pointers will never point to self. list_add also
  525         * would never let either of the two pointers point to
  526         * self. So list_empty_careful won't risk seeing both pointers
 527         * pointing to self at any time during the list refile. The
 528         * only case where list_del_init() is called is the full
 529         * removal in the wake function and there we don't re-list_add
 530         * and it's fine not to block on the spinlock. The uwq on this
 531         * kernel stack can be released after the list_del_init.
 532         */
 533        if (!list_empty_careful(&uwq.wq.entry)) {
 534                spin_lock_irq(&ctx->fault_pending_wqh.lock);
 535                /*
 536                 * No need of list_del_init(), the uwq on the stack
 537                 * will be freed shortly anyway.
 538                 */
 539                list_del(&uwq.wq.entry);
 540                spin_unlock_irq(&ctx->fault_pending_wqh.lock);
 541        }
 542
 543        /*
 544         * ctx may go away after this if the userfault pseudo fd is
 545         * already released.
 546         */
 547        userfaultfd_ctx_put(ctx);
 548
 549out:
 550        return ret;
 551}
 552
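     /*
      * Queue a non-cooperative event (fork, remap, remove, unmap) on
      * event_wqh and sleep until the uffd reader acknowledges it by clearing
      * msg.event in userfaultfd_event_complete(), or until the context is
      * released or a fatal signal arrives. Drops the context reference taken
      * by the caller and clears mmap_changing before returning.
      */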
 553static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 554                                              struct userfaultfd_wait_queue *ewq)
 555{
 556        struct userfaultfd_ctx *release_new_ctx;
 557
 558        if (WARN_ON_ONCE(current->flags & PF_EXITING))
 559                goto out;
 560
 561        ewq->ctx = ctx;
 562        init_waitqueue_entry(&ewq->wq, current);
 563        release_new_ctx = NULL;
 564
 565        spin_lock_irq(&ctx->event_wqh.lock);
 566        /*
 567         * After the __add_wait_queue the uwq is visible to userland
 568         * through poll/read().
 569         */
 570        __add_wait_queue(&ctx->event_wqh, &ewq->wq);
 571        for (;;) {
 572                set_current_state(TASK_KILLABLE);
 573                if (ewq->msg.event == 0)
 574                        break;
 575                if (READ_ONCE(ctx->released) ||
 576                    fatal_signal_pending(current)) {
 577                        /*
 578                         * &ewq->wq may be queued in fork_event, but
 579                         * __remove_wait_queue ignores the head
 580                         * parameter. It would be a problem if it
 581                         * didn't.
 582                         */
 583                        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
 584                        if (ewq->msg.event == UFFD_EVENT_FORK) {
 585                                struct userfaultfd_ctx *new;
 586
 587                                new = (struct userfaultfd_ctx *)
 588                                        (unsigned long)
 589                                        ewq->msg.arg.reserved.reserved1;
 590                                release_new_ctx = new;
 591                        }
 592                        break;
 593                }
 594
 595                spin_unlock_irq(&ctx->event_wqh.lock);
 596
 597                wake_up_poll(&ctx->fd_wqh, EPOLLIN);
 598                schedule();
 599
 600                spin_lock_irq(&ctx->event_wqh.lock);
 601        }
 602        __set_current_state(TASK_RUNNING);
 603        spin_unlock_irq(&ctx->event_wqh.lock);
 604
 605        if (release_new_ctx) {
 606                struct vm_area_struct *vma;
 607                struct mm_struct *mm = release_new_ctx->mm;
 608
 609                /* the various vma->vm_userfaultfd_ctx still points to it */
 610                mmap_write_lock(mm);
 611                for (vma = mm->mmap; vma; vma = vma->vm_next)
 612                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
 613                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 614                                vma->vm_flags &= ~__VM_UFFD_FLAGS;
 615                        }
 616                mmap_write_unlock(mm);
 617
 618                userfaultfd_ctx_put(release_new_ctx);
 619        }
 620
 621        /*
 622         * ctx may go away after this if the userfault pseudo fd is
 623         * already released.
 624         */
 625out:
 626        WRITE_ONCE(ctx->mmap_changing, false);
 627        userfaultfd_ctx_put(ctx);
 628}
 629
 630static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
 631                                       struct userfaultfd_wait_queue *ewq)
 632{
 633        ewq->msg.event = 0;
 634        wake_up_locked(&ctx->event_wqh);
 635        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
 636}
 637
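     /*
      * Called during fork for each copied vma. Without
      * UFFD_FEATURE_EVENT_FORK the registration simply doesn't propagate to
      * the child. With it, a new context is created for the child mm (or
      * reused if the same parent context was already seen) and queued on
      * @fcs so that dup_userfaultfd_complete() can report UFFD_EVENT_FORK
      * once fork() is done.
      */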
 638int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 639{
 640        struct userfaultfd_ctx *ctx = NULL, *octx;
 641        struct userfaultfd_fork_ctx *fctx;
 642
 643        octx = vma->vm_userfaultfd_ctx.ctx;
 644        if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
 645                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 646                vma->vm_flags &= ~__VM_UFFD_FLAGS;
 647                return 0;
 648        }
 649
 650        list_for_each_entry(fctx, fcs, list)
 651                if (fctx->orig == octx) {
 652                        ctx = fctx->new;
 653                        break;
 654                }
 655
 656        if (!ctx) {
 657                fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
 658                if (!fctx)
 659                        return -ENOMEM;
 660
 661                ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
 662                if (!ctx) {
 663                        kfree(fctx);
 664                        return -ENOMEM;
 665                }
 666
 667                refcount_set(&ctx->refcount, 1);
 668                ctx->flags = octx->flags;
 669                ctx->state = UFFD_STATE_RUNNING;
 670                ctx->features = octx->features;
 671                ctx->released = false;
 672                ctx->mmap_changing = false;
 673                ctx->mm = vma->vm_mm;
 674                mmgrab(ctx->mm);
 675
 676                userfaultfd_ctx_get(octx);
 677                WRITE_ONCE(octx->mmap_changing, true);
 678                fctx->orig = octx;
 679                fctx->new = ctx;
 680                list_add_tail(&fctx->list, fcs);
 681        }
 682
 683        vma->vm_userfaultfd_ctx.ctx = ctx;
 684        return 0;
 685}
 686
 687static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
 688{
 689        struct userfaultfd_ctx *ctx = fctx->orig;
 690        struct userfaultfd_wait_queue ewq;
 691
 692        msg_init(&ewq.msg);
 693
 694        ewq.msg.event = UFFD_EVENT_FORK;
 695        ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
 696
 697        userfaultfd_event_wait_completion(ctx, &ewq);
 698}
 699
 700void dup_userfaultfd_complete(struct list_head *fcs)
 701{
 702        struct userfaultfd_fork_ctx *fctx, *n;
 703
 704        list_for_each_entry_safe(fctx, n, fcs, list) {
 705                dup_fctx(fctx);
 706                list_del(&fctx->list);
 707                kfree(fctx);
 708        }
 709}
 710
 711void mremap_userfaultfd_prep(struct vm_area_struct *vma,
 712                             struct vm_userfaultfd_ctx *vm_ctx)
 713{
 714        struct userfaultfd_ctx *ctx;
 715
 716        ctx = vma->vm_userfaultfd_ctx.ctx;
 717
 718        if (!ctx)
 719                return;
 720
 721        if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
 722                vm_ctx->ctx = ctx;
 723                userfaultfd_ctx_get(ctx);
 724                WRITE_ONCE(ctx->mmap_changing, true);
 725        } else {
 726                /* Drop uffd context if remap feature not enabled */
 727                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 728                vma->vm_flags &= ~__VM_UFFD_FLAGS;
 729        }
 730}
 731
 732void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
 733                                 unsigned long from, unsigned long to,
 734                                 unsigned long len)
 735{
 736        struct userfaultfd_ctx *ctx = vm_ctx->ctx;
 737        struct userfaultfd_wait_queue ewq;
 738
 739        if (!ctx)
 740                return;
 741
 742        if (to & ~PAGE_MASK) {
 743                userfaultfd_ctx_put(ctx);
 744                return;
 745        }
 746
 747        msg_init(&ewq.msg);
 748
 749        ewq.msg.event = UFFD_EVENT_REMAP;
 750        ewq.msg.arg.remap.from = from;
 751        ewq.msg.arg.remap.to = to;
 752        ewq.msg.arg.remap.len = len;
 753
 754        userfaultfd_event_wait_completion(ctx, &ewq);
 755}
 756
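     /*
      * Called with the mmap_lock held for reading when a registered range is
      * about to be removed (e.g. by madvise). If the vma has a context with
      * UFFD_FEATURE_EVENT_REMOVE, drop the mmap_lock, deliver
      * UFFD_EVENT_REMOVE and return false so the caller knows the lock was
      * released; otherwise return true with the lock still held.
      */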
 757bool userfaultfd_remove(struct vm_area_struct *vma,
 758                        unsigned long start, unsigned long end)
 759{
 760        struct mm_struct *mm = vma->vm_mm;
 761        struct userfaultfd_ctx *ctx;
 762        struct userfaultfd_wait_queue ewq;
 763
 764        ctx = vma->vm_userfaultfd_ctx.ctx;
 765        if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
 766                return true;
 767
 768        userfaultfd_ctx_get(ctx);
 769        WRITE_ONCE(ctx->mmap_changing, true);
 770        mmap_read_unlock(mm);
 771
 772        msg_init(&ewq.msg);
 773
 774        ewq.msg.event = UFFD_EVENT_REMOVE;
 775        ewq.msg.arg.remove.start = start;
 776        ewq.msg.arg.remove.end = end;
 777
 778        userfaultfd_event_wait_completion(ctx, &ewq);
 779
 780        return false;
 781}
 782
 783static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
 784                          unsigned long start, unsigned long end)
 785{
 786        struct userfaultfd_unmap_ctx *unmap_ctx;
 787
 788        list_for_each_entry(unmap_ctx, unmaps, list)
 789                if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
 790                    unmap_ctx->end == end)
 791                        return true;
 792
 793        return false;
 794}
 795
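     /*
      * Before a range is unmapped, record one userfaultfd_unmap_ctx per
      * interested context (UFFD_FEATURE_EVENT_UNMAP) on @unmaps and mark
      * mmap_changing; userfaultfd_unmap_complete() later delivers the
      * corresponding UFFD_EVENT_UNMAP messages.
      */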
 796int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 797                           unsigned long start, unsigned long end,
 798                           struct list_head *unmaps)
 799{
 800        for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
 801                struct userfaultfd_unmap_ctx *unmap_ctx;
 802                struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
 803
 804                if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
 805                    has_unmap_ctx(ctx, unmaps, start, end))
 806                        continue;
 807
 808                unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
 809                if (!unmap_ctx)
 810                        return -ENOMEM;
 811
 812                userfaultfd_ctx_get(ctx);
 813                WRITE_ONCE(ctx->mmap_changing, true);
 814                unmap_ctx->ctx = ctx;
 815                unmap_ctx->start = start;
 816                unmap_ctx->end = end;
 817                list_add_tail(&unmap_ctx->list, unmaps);
 818        }
 819
 820        return 0;
 821}
 822
 823void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
 824{
 825        struct userfaultfd_unmap_ctx *ctx, *n;
 826        struct userfaultfd_wait_queue ewq;
 827
 828        list_for_each_entry_safe(ctx, n, uf, list) {
 829                msg_init(&ewq.msg);
 830
 831                ewq.msg.event = UFFD_EVENT_UNMAP;
 832                ewq.msg.arg.remove.start = ctx->start;
 833                ewq.msg.arg.remove.end = ctx->end;
 834
 835                userfaultfd_event_wait_completion(ctx->ctx, &ewq);
 836
 837                list_del(&ctx->list);
 838                kfree(ctx);
 839        }
 840}
 841
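     /*
      * ->release() of the userfaultfd file: mark the context released, strip
      * the uffd flags and context pointer from every vma still registered
      * with it, then wake all pending and queued waiters so no task stays
      * blocked on a dead uffd, and finally signal EPOLLHUP on fd_wqh.
      */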
 842static int userfaultfd_release(struct inode *inode, struct file *file)
 843{
 844        struct userfaultfd_ctx *ctx = file->private_data;
 845        struct mm_struct *mm = ctx->mm;
 846        struct vm_area_struct *vma, *prev;
 847        /* len == 0 means wake all */
 848        struct userfaultfd_wake_range range = { .len = 0, };
 849        unsigned long new_flags;
 850
 851        WRITE_ONCE(ctx->released, true);
 852
 853        if (!mmget_not_zero(mm))
 854                goto wakeup;
 855
 856        /*
 857         * Flush page faults out of all CPUs. NOTE: all page faults
 858         * must be retried without returning VM_FAULT_SIGBUS if
  859         * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
 860         * changes while handle_userfault released the mmap_lock. So
 861         * it's critical that released is set to true (above), before
 862         * taking the mmap_lock for writing.
 863         */
 864        mmap_write_lock(mm);
 865        prev = NULL;
 866        for (vma = mm->mmap; vma; vma = vma->vm_next) {
 867                cond_resched();
 868                BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
 869                       !!(vma->vm_flags & __VM_UFFD_FLAGS));
 870                if (vma->vm_userfaultfd_ctx.ctx != ctx) {
 871                        prev = vma;
 872                        continue;
 873                }
 874                new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
 875                prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
 876                                 new_flags, vma->anon_vma,
 877                                 vma->vm_file, vma->vm_pgoff,
 878                                 vma_policy(vma),
 879                                 NULL_VM_UFFD_CTX);
 880                if (prev)
 881                        vma = prev;
 882                else
 883                        prev = vma;
 884                vma->vm_flags = new_flags;
 885                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 886        }
 887        mmap_write_unlock(mm);
 888        mmput(mm);
 889wakeup:
 890        /*
 891         * After no new page faults can wait on this fault_*wqh, flush
 892         * the last page faults that may have been already waiting on
 893         * the fault_*wqh.
 894         */
 895        spin_lock_irq(&ctx->fault_pending_wqh.lock);
 896        __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
 897        __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
 898        spin_unlock_irq(&ctx->fault_pending_wqh.lock);
 899
 900        /* Flush pending events that may still wait on event_wqh */
 901        wake_up_all(&ctx->event_wqh);
 902
 903        wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
 904        userfaultfd_ctx_put(ctx);
 905        return 0;
 906}
 907
  908/* fault_pending_wqh.lock must be held by the caller */
 909static inline struct userfaultfd_wait_queue *find_userfault_in(
 910                wait_queue_head_t *wqh)
 911{
 912        wait_queue_entry_t *wq;
 913        struct userfaultfd_wait_queue *uwq;
 914
 915        lockdep_assert_held(&wqh->lock);
 916
 917        uwq = NULL;
 918        if (!waitqueue_active(wqh))
 919                goto out;
 920        /* walk in reverse to provide FIFO behavior to read userfaults */
 921        wq = list_last_entry(&wqh->head, typeof(*wq), entry);
 922        uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
 923out:
 924        return uwq;
 925}
 926
 927static inline struct userfaultfd_wait_queue *find_userfault(
 928                struct userfaultfd_ctx *ctx)
 929{
 930        return find_userfault_in(&ctx->fault_pending_wqh);
 931}
 932
 933static inline struct userfaultfd_wait_queue *find_userfault_evt(
 934                struct userfaultfd_ctx *ctx)
 935{
 936        return find_userfault_in(&ctx->event_wqh);
 937}
 938
 939static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
 940{
 941        struct userfaultfd_ctx *ctx = file->private_data;
 942        __poll_t ret;
 943
 944        poll_wait(file, &ctx->fd_wqh, wait);
 945
 946        switch (ctx->state) {
 947        case UFFD_STATE_WAIT_API:
 948                return EPOLLERR;
 949        case UFFD_STATE_RUNNING:
 950                /*
 951                 * poll() never guarantees that read won't block.
  952                 * userfaults can be woken before they're read().
 953                 */
 954                if (unlikely(!(file->f_flags & O_NONBLOCK)))
 955                        return EPOLLERR;
 956                /*
  957                 * Lockless access to see if there are pending faults.
  958                 * __pollwait's last action is the add_wait_queue, but
  959                 * the spin_unlock would allow the waitqueue_active to
  960                 * pass above the actual list_add inside the
  961                 * add_wait_queue critical section. So use a full
 962                 * memory barrier to serialize the list_add write of
 963                 * add_wait_queue() with the waitqueue_active read
 964                 * below.
 965                 */
 966                ret = 0;
 967                smp_mb();
 968                if (waitqueue_active(&ctx->fault_pending_wqh))
 969                        ret = EPOLLIN;
 970                else if (waitqueue_active(&ctx->event_wqh))
 971                        ret = EPOLLIN;
 972
 973                return ret;
 974        default:
 975                WARN_ON_ONCE(1);
 976                return EPOLLERR;
 977        }
 978}
 979
 980static const struct file_operations userfaultfd_fops;
 981
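     /*
      * Install a new file descriptor for the userfaultfd context created at
      * fork time and report it to userland in the fork message. Called with
      * no waitqueue lock held, as installing the fd can sleep.
      */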
 982static int resolve_userfault_fork(struct userfaultfd_ctx *new,
 983                                  struct inode *inode,
 984                                  struct uffd_msg *msg)
 985{
 986        int fd;
 987
 988        fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
 989                        O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
 990        if (fd < 0)
 991                return fd;
 992
 993        msg->arg.reserved.reserved1 = 0;
 994        msg->arg.fork.ufd = fd;
 995        return 0;
 996}
 997
 998static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 999                                    struct uffd_msg *msg, struct inode *inode)
1000{
1001        ssize_t ret;
1002        DECLARE_WAITQUEUE(wait, current);
1003        struct userfaultfd_wait_queue *uwq;
1004        /*
1005         * Handling fork event requires sleeping operations, so
1006         * we drop the event_wqh lock, then do these ops, then
1007         * lock it back and wake up the waiter. While the lock is
1008         * dropped the ewq may go away so we keep track of it
1009         * carefully.
1010         */
1011        LIST_HEAD(fork_event);
1012        struct userfaultfd_ctx *fork_nctx = NULL;
1013
1014        /* always take the fd_wqh lock before the fault_pending_wqh lock */
1015        spin_lock_irq(&ctx->fd_wqh.lock);
1016        __add_wait_queue(&ctx->fd_wqh, &wait);
1017        for (;;) {
1018                set_current_state(TASK_INTERRUPTIBLE);
1019                spin_lock(&ctx->fault_pending_wqh.lock);
1020                uwq = find_userfault(ctx);
1021                if (uwq) {
1022                        /*
1023                         * Use a seqcount to repeat the lockless check
1024                         * in wake_userfault() to avoid missing
1025                         * wakeups because during the refile both
1026                         * waitqueue could become empty if this is the
1027                         * only userfault.
1028                         */
1029                        write_seqcount_begin(&ctx->refile_seq);
1030
1031                        /*
1032                         * The fault_pending_wqh.lock prevents the uwq
 1033                         * from disappearing from under us.
1034                         *
1035                         * Refile this userfault from
1036                         * fault_pending_wqh to fault_wqh, it's not
1037                         * pending anymore after we read it.
1038                         *
1039                         * Use list_del() by hand (as
1040                         * userfaultfd_wake_function also uses
1041                         * list_del_init() by hand) to be sure nobody
1042                         * changes __remove_wait_queue() to use
1043                         * list_del_init() in turn breaking the
1044                         * !list_empty_careful() check in
1045                         * handle_userfault(). The uwq->wq.head list
1046                         * must never be empty at any time during the
1047                         * refile, or the waitqueue could disappear
1048                         * from under us. The "wait_queue_head_t"
1049                         * parameter of __remove_wait_queue() is unused
1050                         * anyway.
1051                         */
1052                        list_del(&uwq->wq.entry);
1053                        add_wait_queue(&ctx->fault_wqh, &uwq->wq);
1054
1055                        write_seqcount_end(&ctx->refile_seq);
1056
1057                        /* careful to always initialize msg if ret == 0 */
1058                        *msg = uwq->msg;
1059                        spin_unlock(&ctx->fault_pending_wqh.lock);
1060                        ret = 0;
1061                        break;
1062                }
1063                spin_unlock(&ctx->fault_pending_wqh.lock);
1064
1065                spin_lock(&ctx->event_wqh.lock);
1066                uwq = find_userfault_evt(ctx);
1067                if (uwq) {
1068                        *msg = uwq->msg;
1069
1070                        if (uwq->msg.event == UFFD_EVENT_FORK) {
1071                                fork_nctx = (struct userfaultfd_ctx *)
1072                                        (unsigned long)
1073                                        uwq->msg.arg.reserved.reserved1;
1074                                list_move(&uwq->wq.entry, &fork_event);
1075                                /*
1076                                 * fork_nctx can be freed as soon as
1077                                 * we drop the lock, unless we take a
1078                                 * reference on it.
1079                                 */
1080                                userfaultfd_ctx_get(fork_nctx);
1081                                spin_unlock(&ctx->event_wqh.lock);
1082                                ret = 0;
1083                                break;
1084                        }
1085
1086                        userfaultfd_event_complete(ctx, uwq);
1087                        spin_unlock(&ctx->event_wqh.lock);
1088                        ret = 0;
1089                        break;
1090                }
1091                spin_unlock(&ctx->event_wqh.lock);
1092
1093                if (signal_pending(current)) {
1094                        ret = -ERESTARTSYS;
1095                        break;
1096                }
1097                if (no_wait) {
1098                        ret = -EAGAIN;
1099                        break;
1100                }
1101                spin_unlock_irq(&ctx->fd_wqh.lock);
1102                schedule();
1103                spin_lock_irq(&ctx->fd_wqh.lock);
1104        }
1105        __remove_wait_queue(&ctx->fd_wqh, &wait);
1106        __set_current_state(TASK_RUNNING);
1107        spin_unlock_irq(&ctx->fd_wqh.lock);
1108
1109        if (!ret && msg->event == UFFD_EVENT_FORK) {
1110                ret = resolve_userfault_fork(fork_nctx, inode, msg);
1111                spin_lock_irq(&ctx->event_wqh.lock);
1112                if (!list_empty(&fork_event)) {
1113                        /*
1114                         * The fork thread didn't abort, so we can
1115                         * drop the temporary refcount.
1116                         */
1117                        userfaultfd_ctx_put(fork_nctx);
1118
1119                        uwq = list_first_entry(&fork_event,
1120                                               typeof(*uwq),
1121                                               wq.entry);
1122                        /*
1123                         * If fork_event list wasn't empty and in turn
1124                         * the event wasn't already released by fork
1125                         * (the event is allocated on fork kernel
1126                         * stack), put the event back to its place in
 1127                         * the event_wqh. fork_event head will be freed
1128                         * as soon as we return so the event cannot
1129                         * stay queued there no matter the current
1130                         * "ret" value.
1131                         */
1132                        list_del(&uwq->wq.entry);
1133                        __add_wait_queue(&ctx->event_wqh, &uwq->wq);
1134
1135                        /*
1136                         * Leave the event in the waitqueue and report
1137                         * error to userland if we failed to resolve
1138                         * the userfault fork.
1139                         */
1140                        if (likely(!ret))
1141                                userfaultfd_event_complete(ctx, uwq);
1142                } else {
1143                        /*
1144                         * Here the fork thread aborted and the
1145                         * refcount from the fork thread on fork_nctx
1146                         * has already been released. We still hold
1147                         * the reference we took before releasing the
1148                         * lock above. If resolve_userfault_fork
 1149                         * failed we have to drop it because the
 1150                         * fork_nctx has to be freed in that case. If
1151                         * it succeeded we'll hold it because the new
1152                         * uffd references it.
1153                         */
1154                        if (ret)
1155                                userfaultfd_ctx_put(fork_nctx);
1156                }
1157                spin_unlock_irq(&ctx->event_wqh.lock);
1158        }
1159
1160        return ret;
1161}
1162
1163static ssize_t userfaultfd_read(struct file *file, char __user *buf,
1164                                size_t count, loff_t *ppos)
1165{
1166        struct userfaultfd_ctx *ctx = file->private_data;
1167        ssize_t _ret, ret = 0;
1168        struct uffd_msg msg;
1169        int no_wait = file->f_flags & O_NONBLOCK;
1170        struct inode *inode = file_inode(file);
1171
1172        if (ctx->state == UFFD_STATE_WAIT_API)
1173                return -EINVAL;
1174
1175        for (;;) {
1176                if (count < sizeof(msg))
1177                        return ret ? ret : -EINVAL;
1178                _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
1179                if (_ret < 0)
1180                        return ret ? ret : _ret;
1181                if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
1182                        return ret ? ret : -EFAULT;
1183                ret += sizeof(msg);
1184                buf += sizeof(msg);
1185                count -= sizeof(msg);
1186                /*
 1187                 * Allow reading more than one fault at a time, but only
 1188                 * block if waiting for the very first one.
1189                 */
1190                no_wait = O_NONBLOCK;
1191        }
1192}
1193
1194static void __wake_userfault(struct userfaultfd_ctx *ctx,
1195                             struct userfaultfd_wake_range *range)
1196{
1197        spin_lock_irq(&ctx->fault_pending_wqh.lock);
1198        /* wake all in the range and autoremove */
1199        if (waitqueue_active(&ctx->fault_pending_wqh))
1200                __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
1201                                     range);
1202        if (waitqueue_active(&ctx->fault_wqh))
1203                __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1204        spin_unlock_irq(&ctx->fault_pending_wqh.lock);
1205}
1206
1207static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
1208                                           struct userfaultfd_wake_range *range)
1209{
1210        unsigned seq;
1211        bool need_wakeup;
1212
1213        /*
1214         * To be sure waitqueue_active() is not reordered by the CPU
1215         * before the pagetable update, use an explicit SMP memory
1216         * barrier here. PT lock release or mmap_read_unlock(mm) still
1217         * have release semantics that can allow the
1218         * waitqueue_active() to be reordered before the pte update.
1219         */
1220        smp_mb();
1221
1222        /*
 1223         * Use waitqueue_active because the address space is changed
 1224         * atomically very frequently even when there are no
 1225         * userfaults yet. So we take the spinlock only when we're
 1226         * sure we have userfaults to wake.
1227         */
1228        do {
1229                seq = read_seqcount_begin(&ctx->refile_seq);
1230                need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
1231                        waitqueue_active(&ctx->fault_wqh);
1232                cond_resched();
1233        } while (read_seqcount_retry(&ctx->refile_seq, seq));
1234        if (need_wakeup)
1235                __wake_userfault(ctx, range);
1236}
1237
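     /*
      * Sanity-check a userland supplied range: start and len must be page
      * aligned, len must be non zero, start must not be below mmap_min_addr
      * and the range must fit below the task size without wrapping.
      */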
1238static __always_inline int validate_range(struct mm_struct *mm,
1239                                          __u64 start, __u64 len)
1240{
1241        __u64 task_size = mm->task_size;
1242
1243        if (start & ~PAGE_MASK)
1244                return -EINVAL;
1245        if (len & ~PAGE_MASK)
1246                return -EINVAL;
1247        if (!len)
1248                return -EINVAL;
1249        if (start < mmap_min_addr)
1250                return -EINVAL;
1251        if (start >= task_size)
1252                return -EINVAL;
1253        if (len > task_size - start)
1254                return -EINVAL;
1255        return 0;
1256}
1257
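     /*
      * Only anonymous, hugetlbfs and shmem vmas can be registered. The
      * write-protect mode additionally excludes hugetlbfs and shmem (see the
      * FIXME below), while the minor-fault mode is only meaningful on
      * hugetlbfs and shmem.
      */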
1258static inline bool vma_can_userfault(struct vm_area_struct *vma,
1259                                     unsigned long vm_flags)
1260{
1261        /* FIXME: add WP support to hugetlbfs and shmem */
1262        if (vm_flags & VM_UFFD_WP) {
1263                if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma))
1264                        return false;
1265        }
1266
1267        if (vm_flags & VM_UFFD_MINOR) {
1268                if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma)))
1269                        return false;
1270        }
1271
1272        return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
1273               vma_is_shmem(vma);
1274}
1275
1276static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1277                                unsigned long arg)
1278{
1279        struct mm_struct *mm = ctx->mm;
1280        struct vm_area_struct *vma, *prev, *cur;
1281        int ret;
1282        struct uffdio_register uffdio_register;
1283        struct uffdio_register __user *user_uffdio_register;
1284        unsigned long vm_flags, new_flags;
1285        bool found;
1286        bool basic_ioctls;
1287        unsigned long start, end, vma_end;
1288
1289        user_uffdio_register = (struct uffdio_register __user *) arg;
1290
1291        ret = -EFAULT;
1292        if (copy_from_user(&uffdio_register, user_uffdio_register,
1293                           sizeof(uffdio_register)-sizeof(__u64)))
1294                goto out;
1295
1296        ret = -EINVAL;
1297        if (!uffdio_register.mode)
1298                goto out;
1299        if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
1300                goto out;
1301        vm_flags = 0;
1302        if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
1303                vm_flags |= VM_UFFD_MISSING;
1304        if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
1305#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1306                goto out;
1307#endif
1308                vm_flags |= VM_UFFD_WP;
1309        }
1310        if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
1311#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1312                goto out;
1313#endif
1314                vm_flags |= VM_UFFD_MINOR;
1315        }
1316
1317        ret = validate_range(mm, uffdio_register.range.start,
1318                             uffdio_register.range.len);
1319        if (ret)
1320                goto out;
1321
1322        start = uffdio_register.range.start;
1323        end = start + uffdio_register.range.len;
1324
1325        ret = -ENOMEM;
1326        if (!mmget_not_zero(mm))
1327                goto out;
1328
1329        mmap_write_lock(mm);
1330        vma = find_vma_prev(mm, start, &prev);
1331        if (!vma)
1332                goto out_unlock;
1333
1334        /* check that there's at least one vma in the range */
1335        ret = -EINVAL;
1336        if (vma->vm_start >= end)
1337                goto out_unlock;
1338
1339        /*
1340         * If the first vma contains huge pages, make sure start address
1341         * is aligned to huge page size.
1342         */
1343        if (is_vm_hugetlb_page(vma)) {
1344                unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1345
1346                if (start & (vma_hpagesize - 1))
1347                        goto out_unlock;
1348        }
1349
1350        /*
 1351         * Search for incompatible vmas.
1352         */
1353        found = false;
1354        basic_ioctls = false;
1355        for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
1356                cond_resched();
1357
1358                BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1359                       !!(cur->vm_flags & __VM_UFFD_FLAGS));
1360
 1361                /* check for incompatible vmas */
1362                ret = -EINVAL;
1363                if (!vma_can_userfault(cur, vm_flags))
1364                        goto out_unlock;
1365
1366                /*
1367                 * UFFDIO_COPY will fill file holes even without
1368                 * PROT_WRITE. This check enforces that if this is a
1369                 * MAP_SHARED, the process has write permission to the backing
1370                 * file. If VM_MAYWRITE is set it also enforces that on a
1371                 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
1372                 * F_WRITE_SEAL can be taken until the vma is destroyed.
1373                 */
1374                ret = -EPERM;
1375                if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
1376                        goto out_unlock;
1377
1378                /*
 1379                 * If this vma contains the ending address and is backed by
 1380                 * huge pages, check the alignment of the end address.
1381                 */
1382                if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1383                    end > cur->vm_start) {
1384                        unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1385
1386                        ret = -EINVAL;
1387
1388                        if (end & (vma_hpagesize - 1))
1389                                goto out_unlock;
1390                }
1391                if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
1392                        goto out_unlock;
1393
1394                /*
1395                 * Check that this vma isn't already owned by a
1396                 * different userfaultfd. We can't allow more than one
1397                 * userfaultfd to own a single vma simultaneously or we
1398                 * wouldn't know which one to deliver the userfaults to.
1399                 */
1400                ret = -EBUSY;
1401                if (cur->vm_userfaultfd_ctx.ctx &&
1402                    cur->vm_userfaultfd_ctx.ctx != ctx)
1403                        goto out_unlock;
1404
1405                /*
1406                 * Note vmas containing huge pages
1407                 */
1408                if (is_vm_hugetlb_page(cur))
1409                        basic_ioctls = true;
1410
1411                found = true;
1412        }
1413        BUG_ON(!found);
1414
1415        if (vma->vm_start < start)
1416                prev = vma;
1417
1418        ret = 0;
1419        do {
1420                cond_resched();
1421
1422                BUG_ON(!vma_can_userfault(vma, vm_flags));
1423                BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1424                       vma->vm_userfaultfd_ctx.ctx != ctx);
1425                WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1426
1427                /*
1428                 * Nothing to do: this vma is already registered into this
1429                 * userfaultfd and with the right tracking mode too.
1430                 */
1431                if (vma->vm_userfaultfd_ctx.ctx == ctx &&
1432                    (vma->vm_flags & vm_flags) == vm_flags)
1433                        goto skip;
1434
1435                if (vma->vm_start > start)
1436                        start = vma->vm_start;
1437                vma_end = min(end, vma->vm_end);
1438
1439                new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
1440                prev = vma_merge(mm, prev, start, vma_end, new_flags,
1441                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
1442                                 vma_policy(vma),
1443                                 ((struct vm_userfaultfd_ctx){ ctx }));
1444                if (prev) {
1445                        vma = prev;
1446                        goto next;
1447                }
1448                if (vma->vm_start < start) {
1449                        ret = split_vma(mm, vma, start, 1);
1450                        if (ret)
1451                                break;
1452                }
1453                if (vma->vm_end > end) {
1454                        ret = split_vma(mm, vma, end, 0);
1455                        if (ret)
1456                                break;
1457                }
1458        next:
1459                /*
1460                 * In the vma_merge() successful mprotect-like case 8:
1461                 * the next vma was merged into the current one and
1462                 * the current one has not been updated yet.
1463                 */
1464                vma->vm_flags = new_flags;
1465                vma->vm_userfaultfd_ctx.ctx = ctx;
1466
1467                if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
1468                        hugetlb_unshare_all_pmds(vma);
1469
1470        skip:
1471                prev = vma;
1472                start = vma->vm_end;
1473                vma = vma->vm_next;
1474        } while (vma && vma->vm_start < end);
1475out_unlock:
1476        mmap_write_unlock(mm);
1477        mmput(mm);
1478        if (!ret) {
1479                __u64 ioctls_out;
1480
1481                ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
1482                    UFFD_API_RANGE_IOCTLS;
1483
1484                /*
1485                 * Advertise the WP ioctl only if WP mode was
1486                 * requested and all checks on the range passed.
1487                 */
1488                if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
1489                        ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
1490
1491                /* CONTINUE ioctl is only supported for MINOR ranges. */
1492                if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
1493                        ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
1494
1495                /*
1496                 * Now that we have scanned all the vmas, we can tell
1497                 * userland which ioctl methods are guaranteed to
1498                 * succeed on this range.
1499                 */
1500                if (put_user(ioctls_out, &user_uffdio_register->ioctls))
1501                        ret = -EFAULT;
1502        }
1503out:
1504        return ret;
1505}
1506
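
/*
 * Example (editor's sketch, not part of the upstream file): a minimal
 * userspace registration against the handler above, using only uapi
 * names from <linux/userfaultfd.h> plus err(3) from <err.h>. "uffd",
 * "addr" and "len" are illustrative placeholders; the range must pass
 * validate_range() (page aligned, non-zero, within the address space).
 *
 *      struct uffdio_register reg = {
 *              .range = { .start = (unsigned long)addr, .len = len },
 *              .mode  = UFFDIO_REGISTER_MODE_MISSING,
 *      };
 *
 *      if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *              err(1, "UFFDIO_REGISTER");
 *      if (!(reg.ioctls & ((__u64)1 << _UFFDIO_COPY)))
 *              errx(1, "UFFDIO_COPY not guaranteed on this range");
 *
 * On return, reg.ioctls carries the ioctls_out bitmask computed above,
 * so userland can probe for _UFFDIO_WRITEPROTECT or _UFFDIO_CONTINUE
 * the same way before relying on them.
 */
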
1507static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1508                                  unsigned long arg)
1509{
1510        struct mm_struct *mm = ctx->mm;
1511        struct vm_area_struct *vma, *prev, *cur;
1512        int ret;
1513        struct uffdio_range uffdio_unregister;
1514        unsigned long new_flags;
1515        bool found;
1516        unsigned long start, end, vma_end;
1517        const void __user *buf = (void __user *)arg;
1518
1519        ret = -EFAULT;
1520        if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
1521                goto out;
1522
1523        ret = validate_range(mm, uffdio_unregister.start,
1524                             uffdio_unregister.len);
1525        if (ret)
1526                goto out;
1527
1528        start = uffdio_unregister.start;
1529        end = start + uffdio_unregister.len;
1530
1531        ret = -ENOMEM;
1532        if (!mmget_not_zero(mm))
1533                goto out;
1534
1535        mmap_write_lock(mm);
1536        vma = find_vma_prev(mm, start, &prev);
1537        if (!vma)
1538                goto out_unlock;
1539
1540        /* check that there's at least one vma in the range */
1541        ret = -EINVAL;
1542        if (vma->vm_start >= end)
1543                goto out_unlock;
1544
1545        /*
1546         * If the first vma contains huge pages, make sure the start
1547         * address is aligned to the huge page size.
1548         */
1549        if (is_vm_hugetlb_page(vma)) {
1550                unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1551
1552                if (start & (vma_hpagesize - 1))
1553                        goto out_unlock;
1554        }
1555
1556        /*
1557         * Search for incompatible vmas.
1558         */
1559        found = false;
1560        ret = -EINVAL;
1561        for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
1562                cond_resched();
1563
1564                BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1565                       !!(cur->vm_flags & __VM_UFFD_FLAGS));
1566
1567                /*
1568                 * Reject incompatible vmas. This is not strictly
1569                 * required here, as incompatible vmas cannot have a
1570                 * userfaultfd_ctx registered on them, but being
1571                 * stricter makes it easier to notice
1572                 * unregistration errors.
1573                 */
1574                if (!vma_can_userfault(cur, cur->vm_flags))
1575                        goto out_unlock;
1576
1577                found = true;
1578        }
1579        BUG_ON(!found);
1580
1581        if (vma->vm_start < start)
1582                prev = vma;
1583
1584        ret = 0;
1585        do {
1586                cond_resched();
1587
1588                BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
1589
1590                /*
1591                 * Nothing to do: this vma is not registered with any
1592                 * userfaultfd, so there is nothing to unregister.
1593                 */
1594                if (!vma->vm_userfaultfd_ctx.ctx)
1595                        goto skip;
1596
1597                WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1598
1599                if (vma->vm_start > start)
1600                        start = vma->vm_start;
1601                vma_end = min(end, vma->vm_end);
1602
1603                if (userfaultfd_missing(vma)) {
1604                        /*
1605                         * Wake any concurrent pending userfaults while
1606                         * we unregister, so they will not hang
1607                         * permanently and userland does not have to
1608                         * call UFFDIO_WAKE explicitly.
1609                         */
1610                        struct userfaultfd_wake_range range;
1611                        range.start = start;
1612                        range.len = vma_end - start;
1613                        wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
1614                }
1615
1616                new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
1617                prev = vma_merge(mm, prev, start, vma_end, new_flags,
1618                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
1619                                 vma_policy(vma),
1620                                 NULL_VM_UFFD_CTX);
1621                if (prev) {
1622                        vma = prev;
1623                        goto next;
1624                }
1625                if (vma->vm_start < start) {
1626                        ret = split_vma(mm, vma, start, 1);
1627                        if (ret)
1628                                break;
1629                }
1630                if (vma->vm_end > end) {
1631                        ret = split_vma(mm, vma, end, 0);
1632                        if (ret)
1633                                break;
1634                }
1635        next:
1636                /*
1637                 * In the vma_merge() successful mprotect-like case 8:
1638                 * the next vma was merged into the current one and
1639                 * the current one has not been updated yet.
1640                 */
1641                vma->vm_flags = new_flags;
1642                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1643
1644        skip:
1645                prev = vma;
1646                start = vma->vm_end;
1647                vma = vma->vm_next;
1648        } while (vma && vma->vm_start < end);
1649out_unlock:
1650        mmap_write_unlock(mm);
1651        mmput(mm);
1652out:
1653        return ret;
1654}
1655
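
/*
 * Example (editor's sketch): unregistering takes a plain uffdio_range,
 * checked by the same validate_range() as above. Placeholders as in
 * the earlier registration sketch.
 *
 *      struct uffdio_range rng = {
 *              .start = (unsigned long)addr,
 *              .len   = len,
 *      };
 *
 *      if (ioctl(uffd, UFFDIO_UNREGISTER, &rng) == -1)
 *              err(1, "UFFDIO_UNREGISTER");
 *
 * Note the wake_userfault() call above: faults still pending on a
 * missing-mode range are woken during unregistration, so userland does
 * not need a separate UFFDIO_WAKE.
 */
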
1656/*
1657 * userfaultfd_wake may be used in combination with the
1658 * UFFDIO_*_MODE_DONTWAKE flags to wake up userfaults in batches.
1659 */
1660static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
1661                            unsigned long arg)
1662{
1663        int ret;
1664        struct uffdio_range uffdio_wake;
1665        struct userfaultfd_wake_range range;
1666        const void __user *buf = (void __user *)arg;
1667
1668        ret = -EFAULT;
1669        if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
1670                goto out;
1671
1672        ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
1673        if (ret)
1674                goto out;
1675
1676        range.start = uffdio_wake.start;
1677        range.len = uffdio_wake.len;
1678
1679        /*
1680         * len == 0 means wake all, and we don't want to wake all here,
1681         * so double-check that the length is non-zero.
1682         */
1683        VM_BUG_ON(!range.len);
1684
1685        wake_userfault(ctx, &range);
1686        ret = 0;
1687
1688out:
1689        return ret;
1690}
1691
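
/*
 * Example (editor's sketch): batched wakeups. After resolving several
 * pages with UFFDIO_COPY_MODE_DONTWAKE, one UFFDIO_WAKE over the whole
 * range wakes all the blocked faulters at once. "addr", "nr_pages" and
 * "page_size" are placeholders.
 *
 *      struct uffdio_range rng = {
 *              .start = (unsigned long)addr,
 *              .len   = nr_pages * page_size,
 *      };
 *
 *      if (ioctl(uffd, UFFDIO_WAKE, &rng) == -1)
 *              err(1, "UFFDIO_WAKE");
 */
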
1692static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1693                            unsigned long arg)
1694{
1695        __s64 ret;
1696        struct uffdio_copy uffdio_copy;
1697        struct uffdio_copy __user *user_uffdio_copy;
1698        struct userfaultfd_wake_range range;
1699
1700        user_uffdio_copy = (struct uffdio_copy __user *) arg;
1701
1702        ret = -EAGAIN;
1703        if (READ_ONCE(ctx->mmap_changing))
1704                goto out;
1705
1706        ret = -EFAULT;
1707        if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1708                           /* don't copy the last field, "copy" */
1709                           sizeof(uffdio_copy)-sizeof(__s64)))
1710                goto out;
1711
1712        ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1713        if (ret)
1714                goto out;
1715        /*
1716         * Double-check for wraparound just in case. copy_from_user()
1717         * will later verify that uffdio_copy.src + uffdio_copy.len
1718         * fits in the userland range.
1719         */
1720        ret = -EINVAL;
1721        if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
1722                goto out;
1723        if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
1724                goto out;
1725        if (mmget_not_zero(ctx->mm)) {
1726                ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
1727                                   uffdio_copy.len, &ctx->mmap_changing,
1728                                   uffdio_copy.mode);
1729                mmput(ctx->mm);
1730        } else {
1731                return -ESRCH;
1732        }
1733        if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1734                return -EFAULT;
1735        if (ret < 0)
1736                goto out;
1737        BUG_ON(!ret);
1738        /* len == 0 would wake all */
1739        range.len = ret;
1740        if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1741                range.start = uffdio_copy.dst;
1742                wake_userfault(ctx, &range);
1743        }
1744        ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1745out:
1746        return ret;
1747}
1748
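
/*
 * Example (editor's sketch): resolving a missing fault with
 * UFFDIO_COPY. As the handler above shows, cp.copy is filled with the
 * number of bytes copied (or a negative errno) and a short copy
 * surfaces as -EAGAIN, so the usual pattern is a retry loop that
 * advances past whatever was already resolved. "uffd", "src_buf",
 * "fault_addr" and "page_size" are placeholders, and no non-cooperative
 * events are assumed to be in flight.
 *
 *      struct uffdio_copy cp = {
 *              .dst  = fault_addr & ~(page_size - 1),
 *              .src  = (unsigned long)src_buf,
 *              .len  = page_size,
 *              .mode = 0,
 *      };
 *
 *      while (ioctl(uffd, UFFDIO_COPY, &cp) == -1) {
 *              if (errno != EAGAIN)
 *                      err(1, "UFFDIO_COPY");
 *              if (cp.copy > 0) {      // partial copy: skip resolved bytes
 *                      cp.dst += cp.copy;
 *                      cp.src += cp.copy;
 *                      cp.len -= cp.copy;
 *              }
 *      }
 */
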
1749static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1750                                unsigned long arg)
1751{
1752        __s64 ret;
1753        struct uffdio_zeropage uffdio_zeropage;
1754        struct uffdio_zeropage __user *user_uffdio_zeropage;
1755        struct userfaultfd_wake_range range;
1756
1757        user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1758
1759        ret = -EAGAIN;
1760        if (READ_ONCE(ctx->mmap_changing))
1761                goto out;
1762
1763        ret = -EFAULT;
1764        if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1765                           /* don't copy the last field, "zeropage" */
1766                           sizeof(uffdio_zeropage)-sizeof(__s64)))
1767                goto out;
1768
1769        ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1770                             uffdio_zeropage.range.len);
1771        if (ret)
1772                goto out;
1773        ret = -EINVAL;
1774        if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1775                goto out;
1776
1777        if (mmget_not_zero(ctx->mm)) {
1778                ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
1779                                     uffdio_zeropage.range.len,
1780                                     &ctx->mmap_changing);
1781                mmput(ctx->mm);
1782        } else {
1783                return -ESRCH;
1784        }
1785        if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1786                return -EFAULT;
1787        if (ret < 0)
1788                goto out;
1789        /* len == 0 would wake all */
1790        BUG_ON(!ret);
1791        range.len = ret;
1792        if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1793                range.start = uffdio_zeropage.range.start;
1794                wake_userfault(ctx, &range);
1795        }
1796        ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1797out:
1798        return ret;
1799}
1800
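
/*
 * Example (editor's sketch): UFFDIO_ZEROPAGE follows the same pattern
 * as UFFDIO_COPY, just without a source buffer; progress is reported
 * through zp.zeropage in the same way.
 *
 *      struct uffdio_zeropage zp = {
 *              .range = { .start = fault_addr & ~(page_size - 1),
 *                         .len   = page_size },
 *              .mode  = 0,
 *      };
 *
 *      if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
 *              err(1, "UFFDIO_ZEROPAGE");
 */
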
1801static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
1802                                    unsigned long arg)
1803{
1804        int ret;
1805        struct uffdio_writeprotect uffdio_wp;
1806        struct uffdio_writeprotect __user *user_uffdio_wp;
1807        struct userfaultfd_wake_range range;
1808        bool mode_wp, mode_dontwake;
1809
1810        if (READ_ONCE(ctx->mmap_changing))
1811                return -EAGAIN;
1812
1813        user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
1814
1815        if (copy_from_user(&uffdio_wp, user_uffdio_wp,
1816                           sizeof(struct uffdio_writeprotect)))
1817                return -EFAULT;
1818
1819        ret = validate_range(ctx->mm, uffdio_wp.range.start,
1820                             uffdio_wp.range.len);
1821        if (ret)
1822                return ret;
1823
1824        if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
1825                               UFFDIO_WRITEPROTECT_MODE_WP))
1826                return -EINVAL;
1827
1828        mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
1829        mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
1830
1831        if (mode_wp && mode_dontwake)
1832                return -EINVAL;
1833
1834        ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
1835                                  uffdio_wp.range.len, mode_wp,
1836                                  &ctx->mmap_changing);
1837        if (ret)
1838                return ret;
1839
1840        if (!mode_wp && !mode_dontwake) {
1841                range.start = uffdio_wp.range.start;
1842                range.len = uffdio_wp.range.len;
1843                wake_userfault(ctx, &range);
1844        }
1845        return ret;
1846}
1847
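
/*
 * Example (editor's sketch): write-protect a registered range, then
 * drop the protection later. Per the handler above, clearing WP
 * without DONTWAKE also wakes any faulters blocked on the range.
 *
 *      struct uffdio_writeprotect wp = {
 *              .range = { .start = (unsigned long)addr, .len = len },
 *              .mode  = UFFDIO_WRITEPROTECT_MODE_WP,   // protect
 *      };
 *
 *      if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1)
 *              err(1, "UFFDIO_WRITEPROTECT");
 *
 *      wp.mode = 0;    // unprotect and wake in one call
 *      if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1)
 *              err(1, "UFFDIO_WRITEPROTECT");
 */
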
1848static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1849{
1850        __s64 ret;
1851        struct uffdio_continue uffdio_continue;
1852        struct uffdio_continue __user *user_uffdio_continue;
1853        struct userfaultfd_wake_range range;
1854
1855        user_uffdio_continue = (struct uffdio_continue __user *)arg;
1856
1857        ret = -EAGAIN;
1858        if (READ_ONCE(ctx->mmap_changing))
1859                goto out;
1860
1861        ret = -EFAULT;
1862        if (copy_from_user(&uffdio_continue, user_uffdio_continue,
1863                           /* don't copy the output fields */
1864                           sizeof(uffdio_continue) - (sizeof(__s64))))
1865                goto out;
1866
1867        ret = validate_range(ctx->mm, uffdio_continue.range.start,
1868                             uffdio_continue.range.len);
1869        if (ret)
1870                goto out;
1871
1872        ret = -EINVAL;
1873        /* Double-check for wraparound, just in case. */
1874        if (uffdio_continue.range.start + uffdio_continue.range.len <=
1875            uffdio_continue.range.start) {
1876                goto out;
1877        }
1878        if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE)
1879                goto out;
1880
1881        if (mmget_not_zero(ctx->mm)) {
1882                ret = mcopy_continue(ctx->mm, uffdio_continue.range.start,
1883                                     uffdio_continue.range.len,
1884                                     &ctx->mmap_changing);
1885                mmput(ctx->mm);
1886        } else {
1887                return -ESRCH;
1888        }
1889
1890        if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
1891                return -EFAULT;
1892        if (ret < 0)
1893                goto out;
1894
1895        /* len == 0 would wake all */
1896        BUG_ON(!ret);
1897        range.len = ret;
1898        if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
1899                range.start = uffdio_continue.range.start;
1900                wake_userfault(ctx, &range);
1901        }
1902        ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
1903
1904out:
1905        return ret;
1906}
1907
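
/*
 * Example (editor's sketch): resolving a minor fault. This assumes the
 * range was registered with UFFDIO_REGISTER_MODE_MINOR and that the
 * page cache already holds up-to-date contents, so UFFDIO_CONTINUE
 * only has to install the page table entries.
 *
 *      struct uffdio_continue cont = {
 *              .range = { .start = fault_addr & ~(page_size - 1),
 *                         .len   = page_size },
 *              .mode  = 0,
 *      };
 *
 *      if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
 *              err(1, "UFFDIO_CONTINUE");      // cont.mapped reports progress
 */
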
1908static inline unsigned int uffd_ctx_features(__u64 user_features)
1909{
1910        /*
1911         * For the current set of features, the bits just coincide.
1912         */
1913        return (unsigned int)user_features;
1914}
1915
1916/*
1917 * Userland asks for a certain API version, and we return which feature
1918 * bits and ioctl commands are implemented in this kernel for that API
1919 * version, or -EINVAL if it is unknown.
1920 */
1921static int userfaultfd_api(struct userfaultfd_ctx *ctx,
1922                           unsigned long arg)
1923{
1924        struct uffdio_api uffdio_api;
1925        void __user *buf = (void __user *)arg;
1926        int ret;
1927        __u64 features;
1928
1929        ret = -EINVAL;
1930        if (ctx->state != UFFD_STATE_WAIT_API)
1931                goto out;
1932        ret = -EFAULT;
1933        if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
1934                goto out;
1935        features = uffdio_api.features;
1936        ret = -EINVAL;
1937        if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
1938                goto err_out;
1939        ret = -EPERM;
1940        if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
1941                goto err_out;
1942        /* report all available features and ioctls to userland */
1943        uffdio_api.features = UFFD_API_FEATURES;
1944#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1945        uffdio_api.features &=
1946                ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
1947#endif
1948#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1949        uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
1950#endif
1951        uffdio_api.ioctls = UFFD_API_IOCTLS;
1952        ret = -EFAULT;
1953        if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
1954                goto out;
1955        ctx->state = UFFD_STATE_RUNNING;
1956        /* only enable the requested features for this uffd context */
1957        ctx->features = uffd_ctx_features(features);
1958        ret = 0;
1959out:
1960        return ret;
1961err_out:
1962        memset(&uffdio_api, 0, sizeof(uffdio_api));
1963        if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
1964                ret = -EFAULT;
1965        goto out;
1966}
1967
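
/*
 * Example (editor's sketch): the mandatory handshake. Every other
 * ioctl fails with -EINVAL until UFFDIO_API has moved the context to
 * UFFD_STATE_RUNNING, so this must be the first call on a new fd.
 *
 *      struct uffdio_api api = {
 *              .api      = UFFD_API,
 *              .features = 0,  // or a subset of what the kernel returns
 *      };
 *
 *      if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *              err(1, "UFFDIO_API");
 *      // api.features and api.ioctls now describe kernel support
 */
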
1968static long userfaultfd_ioctl(struct file *file, unsigned cmd,
1969                              unsigned long arg)
1970{
1971        int ret = -EINVAL;
1972        struct userfaultfd_ctx *ctx = file->private_data;
1973
1974        if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
1975                return -EINVAL;
1976
1977        switch (cmd) {
1978        case UFFDIO_API:
1979                ret = userfaultfd_api(ctx, arg);
1980                break;
1981        case UFFDIO_REGISTER:
1982                ret = userfaultfd_register(ctx, arg);
1983                break;
1984        case UFFDIO_UNREGISTER:
1985                ret = userfaultfd_unregister(ctx, arg);
1986                break;
1987        case UFFDIO_WAKE:
1988                ret = userfaultfd_wake(ctx, arg);
1989                break;
1990        case UFFDIO_COPY:
1991                ret = userfaultfd_copy(ctx, arg);
1992                break;
1993        case UFFDIO_ZEROPAGE:
1994                ret = userfaultfd_zeropage(ctx, arg);
1995                break;
1996        case UFFDIO_WRITEPROTECT:
1997                ret = userfaultfd_writeprotect(ctx, arg);
1998                break;
1999        case UFFDIO_CONTINUE:
2000                ret = userfaultfd_continue(ctx, arg);
2001                break;
2002        }
2003        return ret;
2004}
2005
2006#ifdef CONFIG_PROC_FS
2007static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
2008{
2009        struct userfaultfd_ctx *ctx = f->private_data;
2010        wait_queue_entry_t *wq;
2011        unsigned long pending = 0, total = 0;
2012
2013        spin_lock_irq(&ctx->fault_pending_wqh.lock);
2014        list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
2015                pending++;
2016                total++;
2017        }
2018        list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
2019                total++;
2020        }
2021        spin_unlock_irq(&ctx->fault_pending_wqh.lock);
2022
2023        /*
2024         * If more protocols are added, they will all be shown here,
2025         * separated by a space, like this:
2026         *      protocols: aa:... bb:...
2027         */
2028        seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
2029                   pending, total, UFFD_API, ctx->features,
2030                   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
2031}
2032#endif
2033
2034static const struct file_operations userfaultfd_fops = {
2035#ifdef CONFIG_PROC_FS
2036        .show_fdinfo    = userfaultfd_show_fdinfo,
2037#endif
2038        .release        = userfaultfd_release,
2039        .poll           = userfaultfd_poll,
2040        .read           = userfaultfd_read,
2041        .unlocked_ioctl = userfaultfd_ioctl,
2042        .compat_ioctl   = compat_ptr_ioctl,
2043        .llseek         = noop_llseek,
2044};
2045
2046static void init_once_userfaultfd_ctx(void *mem)
2047{
2048        struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
2049
2050        init_waitqueue_head(&ctx->fault_pending_wqh);
2051        init_waitqueue_head(&ctx->fault_wqh);
2052        init_waitqueue_head(&ctx->event_wqh);
2053        init_waitqueue_head(&ctx->fd_wqh);
2054        seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
2055}
2056
2057SYSCALL_DEFINE1(userfaultfd, int, flags)
2058{
2059        struct userfaultfd_ctx *ctx;
2060        int fd;
2061
2062        if (!sysctl_unprivileged_userfaultfd &&
2063            (flags & UFFD_USER_MODE_ONLY) == 0 &&
2064            !capable(CAP_SYS_PTRACE)) {
2065                printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
2066                        "sysctl knob to 1 if kernel faults must be handled "
2067                        "without obtaining CAP_SYS_PTRACE capability\n");
2068                return -EPERM;
2069        }
2070
2071        BUG_ON(!current->mm);
2072
2073        /* Check the UFFD_* constants for consistency.  */
2074        BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
2075        BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
2076        BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
2077
2078        if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
2079                return -EINVAL;
2080
2081        ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
2082        if (!ctx)
2083                return -ENOMEM;
2084
2085        refcount_set(&ctx->refcount, 1);
2086        ctx->flags = flags;
2087        ctx->features = 0;
2088        ctx->state = UFFD_STATE_WAIT_API;
2089        ctx->released = false;
2090        ctx->mmap_changing = false;
2091        ctx->mm = current->mm;
2092        /* prevent the mm struct from being freed */
2093        mmgrab(ctx->mm);
2094
2095        fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
2096                        O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
2097        if (fd < 0) {
2098                mmdrop(ctx->mm);
2099                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
2100        }
2101        return fd;
2102}
2103
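
/*
 * Example (editor's sketch): creating the descriptor. Older libcs ship
 * no wrapper for userfaultfd(2), hence syscall(2). UFFD_USER_MODE_ONLY
 * limits the fd to userspace faults, which is what lets unprivileged
 * callers past the capability check above.
 *
 *      #include <err.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/userfaultfd.h>
 *
 *      int uffd = syscall(__NR_userfaultfd,
 *                         O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
 *      if (uffd == -1)
 *              err(1, "userfaultfd");
 */
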
2104static int __init userfaultfd_init(void)
2105{
2106        userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
2107                                                sizeof(struct userfaultfd_ctx),
2108                                                0,
2109                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2110                                                init_once_userfaultfd_ctx);
2111        return 0;
2112}
2113__initcall(userfaultfd_init);
2114