// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/userfaultfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *  Copyright (C) 2008-2009 Red Hat, Inc.
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  Some part derived from fs/eventfd.c (anon inode setup) and
 *  mm/vmalloc.c (mmu_notifier, clearing vma from rmap_pde)
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>

int sysctl_unprivileged_userfaultfd __read_mostly;

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

enum userfaultfd_state {
	UFFD_STATE_WAIT_API,
	UFFD_STATE_RUNNING,
};

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled while the above locks are
 * held, as a waitqueue head here may be the wait_queue_entry's private
 * queue of a waiter on another userfaultfd (see userfaultfd_wake_function).
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* state machine */
	enum userfaultfd_state state;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	bool mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the
		 * other CPUs, the waitqueue may disappear from under
		 * us, see the !list_empty_careful() in
		 * handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern
		 * function "wake_up_state" (which in turn calls
		 * try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired
 * either with userfaultfd_ctx_get() or userfaultfd_fork_ctx_get().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the paddings or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;
	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;
	msg.arg.pagefault.address = address;
	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}
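
/*
 * Illustrative sketch (not kernel code): how a userspace monitor might
 * decode the message built above after read()ing it from the uffd.  The
 * handle_*() names are hypothetical; only the uffd_msg layout and the
 * flag macros come from <linux/userfaultfd.h>.
 *
 *	struct uffd_msg msg;
 *	if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
 *	    msg.event == UFFD_EVENT_PAGEFAULT) {
 *		unsigned long long addr = msg.arg.pagefault.address;
 *		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP)
 *			handle_wp_fault(addr);		// hypothetical
 *		else if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR)
 *			handle_minor_fault(addr);	// hypothetical
 *		else
 *			handle_missing_fault(addr);	// hypothetical
 *	}
 */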

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *ptep, pte;
	bool ret = true;

	mmap_assert_locked(mm);

	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (huge_pte_none(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	mmap_assert_locked(mm);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	/*
	 * The pmd is stable (as in !pmd_trans_unstable), so the ptes
	 * it points to can be safely walked with pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (pte_none(*pte))
		ret = true;
	if (!pte_write(*pte) && (reason & VM_UFFD_WP))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_lock must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait;
	unsigned int blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * follow_hugetlb_page() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_lock and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_lock so we can only check that
	 * the mmap_lock is held, if PF_DUMPCORE was not set.
	 */
	mmap_assert_locked(mm);

	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	/* Any unrecognized flag is a bug. */
	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
	VM_BUG_ON(!reason || (reason & (reason - 1)));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;
	if ((vmf->flags & FAULT_FLAG_USER) == 0 &&
	    ctx->flags & UFFD_USER_MODE_ONLY) {
		printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
			"sysctl knob to 1 if kernel faults must be handled "
			"without obtaining CAP_SYS_PTRACE capability\n");
		goto out;
	}

	/*
	 * If it's already released don't get it. This avoids to loop
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_lock.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY without risking an involuntary
		 * SIGBUS in the process whose memory was registered.
		 * Returning VM_FAULT_NOPAGE simply retries the fault:
		 * if the page is still missing, the retry goes
		 * through the regular page fault path (which can
		 * still SIGBUS, but only for the usual reasons), and
		 * if an UFFDIO_COPY already resolved it, the retry
		 * just succeeds.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_lock */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
			ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock to happen before the list add in
	 * __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vmf->vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
						       vmf->address,
						       vmf->flags, reason);
	mmap_read_unlock(mm);

	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
	}

	__set_current_state(TASK_RUNNING);

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let any of the two pointers to point to
	 * self. So list_empty_careful won't risk to see both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}

static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;

		/* the various vma->vm_userfaultfd_ctx still points to it */
		mmap_write_lock(mm);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				vma->vm_flags &= ~__VM_UFFD_FLAGS;
			}
		mmap_write_unlock(mm);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	WRITE_ONCE(ctx->mmap_changing, false);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->state = UFFD_STATE_RUNNING;
		ctx->features = octx->features;
		ctx->released = false;
		ctx->mmap_changing = false;
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		WRITE_ONCE(octx->mmap_changing, true);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}

void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		WRITE_ONCE(ctx->mmap_changing, true);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	WRITE_ONCE(ctx->mmap_changing, true);
	mmap_read_unlock(mm);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end,
			   struct list_head *unmaps)
{
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
		struct userfaultfd_unmap_ctx *unmap_ctx;
		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
		    has_unmap_ctx(ctx, unmaps, start, end))
			continue;

		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
		if (!unmap_ctx)
			return -ENOMEM;

		userfaultfd_ctx_get(ctx);
		WRITE_ONCE(ctx->mmap_changing, true);
		unmap_ctx->ctx = ctx;
		unmap_ctx->start = start;
		unmap_ctx->end = end;
		list_add_tail(&unmap_ctx->list, unmaps);
	}

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX);
		if (prev)
			vma = prev;
		else
			prev = vma;
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	mmap_write_unlock(mm);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be hold by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	switch (ctx->state) {
	case UFFD_STATE_WAIT_API:
		return EPOLLERR;
	case UFFD_STATE_RUNNING:
		/*
		 * poll() never guarantees that read won't block.
		 * userfaults can be waited for before a read() is
		 * invoked synchronously or asynchronously.
		 */
		if (unlikely(!(file->f_flags & O_NONBLOCK)))
			return EPOLLERR;
		/*
		 * lockless access to see if there are pending faults
		 * __pollwait last action is the add_wait_queue but
		 * the spin_unlock would allow the waitqueue_active to
		 * pass above the actual list_add inside
		 * __add_wait_queue.
		 */
		ret = 0;
		smp_mb();
		if (waitqueue_active(&ctx->fault_pending_wqh))
			ret = EPOLLIN;
		else if (waitqueue_active(&ctx->event_wqh))
			ret = EPOLLIN;

		return ret;
	default:
		WARN_ON_ONCE(1);
		return EPOLLERR;
	}
}

static const struct file_operations userfaultfd_fops;

static int resolve_userfault_fork(struct userfaultfd_ctx *new,
				  struct inode *inode,
				  struct uffd_msg *msg)
{
	int fd;

	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
			O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
	if (fd < 0)
		return fd;

	msg->arg.reserved.reserved1 = 0;
	msg->arg.fork.ufd = fd;
	return 0;
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
				    struct uffd_msg *msg, struct inode *inode)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);
	struct userfaultfd_wait_queue *uwq;
	/*
	 * Handling fork events requires sleeping operations, so
	 * we drop the event_wqh lock, then do these ops, then
	 * lock it back and wake up the waiter. While the lock is
	 * dropped the ewq may go away so we keep track of it
	 * using the fork_event list.
	 */
	LIST_HEAD(fork_event);
	struct userfaultfd_ctx *fork_nctx = NULL;

	/* always take the fd_wqh lock before the fault_pending_wqh lock */
	spin_lock_irq(&ctx->fd_wqh.lock);
	__add_wait_queue(&ctx->fd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&ctx->fault_pending_wqh.lock);
		uwq = find_userfault(ctx);
		if (uwq) {
			/*
			 * Use a seqcount to repeat the lockless check
			 * in wake_userfault() to avoid missing
			 * wakeups because during the refile both
			 * waitqueue could become empty if this is the
			 * only userfault.
			 */
			write_seqcount_begin(&ctx->refile_seq);

			/*
			 * The fault_pending_wqh.lock prevents the uwq
			 * to disappear from under us.
			 *
			 * Refile this userfault from
			 * fault_pending_wqh to fault_wqh, it's not
			 * pending anymore after we read it.
			 *
			 * Use list_del() by hand (as
			 * userfaultfd_wake_function also uses
			 * list_del_init() by hand) to be sure nobody
			 * changes __remove_wait_queue() (in case of
			 * POLLFREE) to use list_del_init() in turn
			 * breaking the list_empty_careful check in
			 * handle_userfault(). The uwq->wq.head list
			 * must never be empty at any time during the
			 * refile, or the waitqueue could disappear
			 * from under us. The "wait_queue_head_t"
			 * parameter of __remove_wait_queue() is unused
			 * anyway.
			 */
			list_del(&uwq->wq.entry);
			add_wait_queue(&ctx->fault_wqh, &uwq->wq);

			write_seqcount_end(&ctx->refile_seq);

			/* careful to always initialize msg if ret == 0 */
			*msg = uwq->msg;
			spin_unlock(&ctx->fault_pending_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->fault_pending_wqh.lock);

		spin_lock(&ctx->event_wqh.lock);
		uwq = find_userfault_evt(ctx);
		if (uwq) {
			*msg = uwq->msg;

			if (uwq->msg.event == UFFD_EVENT_FORK) {
				fork_nctx = (struct userfaultfd_ctx *)
					(unsigned long)
					uwq->msg.arg.reserved.reserved1;
				list_move(&uwq->wq.entry, &fork_event);
				/*
				 * fork_nctx can be freed as soon as
				 * we drop the lock, unless we take a
				 * reference on it.
				 */
				userfaultfd_ctx_get(fork_nctx);
				spin_unlock(&ctx->event_wqh.lock);
				ret = 0;
				break;
			}

			userfaultfd_event_complete(ctx, uwq);
			spin_unlock(&ctx->event_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->event_wqh.lock);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (no_wait) {
			ret = -EAGAIN;
			break;
		}
		spin_unlock_irq(&ctx->fd_wqh.lock);
		schedule();
		spin_lock_irq(&ctx->fd_wqh.lock);
	}
	__remove_wait_queue(&ctx->fd_wqh, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->fd_wqh.lock);

	if (!ret && msg->event == UFFD_EVENT_FORK) {
		ret = resolve_userfault_fork(fork_nctx, inode, msg);
		spin_lock_irq(&ctx->event_wqh.lock);
		if (!list_empty(&fork_event)) {
			/*
			 * The fork thread didn't abort, so we can
			 * drop the temporary refcount.
			 */
			userfaultfd_ctx_put(fork_nctx);

			uwq = list_first_entry(&fork_event,
					       typeof(*uwq),
					       wq.entry);
			/*
			 * If fork_event list wasn't empty and in turn
			 * the event wasn't already released by fork
			 * (the event is allocated on fork kernel
			 * stack), put the event back to its place in
			 * the event_wq. fork_event head will be freed
			 * as soon as we return so the event cannot
			 * stay queued there no matter the current
			 * inode and file.
			 */
			list_del(&uwq->wq.entry);
			__add_wait_queue(&ctx->event_wqh, &uwq->wq);

			/*
			 * Leave the event in the waitqueue and report
			 * error to userland if we failed to resolve
			 * the userfault fork.
			 */
			if (likely(!ret))
				userfaultfd_event_complete(ctx, uwq);
		} else {
			/*
			 * Here the fork thread aborted and the
			 * refcount from the fork thread on fork_nctx
			 * has already been released. We still hold
			 * the reference we took before releasing the
			 * lock above. If resolve_userfault_fork
			 * failed we've to drop it because the
			 * fork_nctx has to be freed in such case. If
			 * it succeeded we'll hold it because the new
			 * uffd references it.
			 */
			if (ret)
				userfaultfd_ctx_put(fork_nctx);
		}
		spin_unlock_irq(&ctx->event_wqh.lock);
	}

	return ret;
}

static ssize_t userfaultfd_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	ssize_t _ret, ret = 0;
	struct uffd_msg msg;
	int no_wait = file->f_flags & O_NONBLOCK;
	struct inode *inode = file_inode(file);

	if (ctx->state == UFFD_STATE_WAIT_API)
		return -EINVAL;

	for (;;) {
		if (count < sizeof(msg))
			return ret ? ret : -EINVAL;
		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
		if (_ret < 0)
			return ret ? ret : _ret;
		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
			return ret ? ret : -EFAULT;
		ret += sizeof(msg);
		buf += sizeof(msg);
		count -= sizeof(msg);
		/*
		 * Allow to read more than one fault at time but only
		 * block if waiting for the very first one.
		 */
		no_wait = O_NONBLOCK;
	}
}

static void __wake_userfault(struct userfaultfd_ctx *ctx,
			     struct userfaultfd_wake_range *range)
{
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/* wake all in the range and autoremove */
	if (waitqueue_active(&ctx->fault_pending_wqh))
		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
				     range);
	if (waitqueue_active(&ctx->fault_wqh))
		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
}

static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
					   struct userfaultfd_wake_range *range)
{
	unsigned seq;
	bool need_wakeup;

	/*
	 * To be sure waitqueue_active() is not reordered by the CPU
	 * before the pagetable update, use an explicit SMP memory
	 * barrier here. PT lock release or mmap_read_unlock(mm) still
	 * have release semantics that can allow the
	 * waitqueue_active() to be reordered before the pte update.
	 */
	smp_mb();

	/*
	 * Use waitqueue_active because it's very frequent to
	 * change the address space atomically even if there are no
	 * userfaults yet. So we take the spinlock only when we're
	 * sure we've userfaults to wake.
	 */
	do {
		seq = read_seqcount_begin(&ctx->refile_seq);
		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
			waitqueue_active(&ctx->fault_wqh);
		cond_resched();
	} while (read_seqcount_retry(&ctx->refile_seq, seq));
	if (need_wakeup)
		__wake_userfault(ctx, range);
}

static __always_inline int validate_range(struct mm_struct *mm,
					  __u64 start, __u64 len)
{
	__u64 task_size = mm->task_size;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (len & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return -EINVAL;
	if (start < mmap_min_addr)
		return -EINVAL;
	if (start >= task_size)
		return -EINVAL;
	if (len > task_size - start)
		return -EINVAL;
	return 0;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags)
{
	/* FIXME: add WP support to hugetlbfs and shmem */
	if (vm_flags & VM_UFFD_WP) {
		if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma))
			return false;
	}

	if (vm_flags & VM_UFFD_MINOR) {
		if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma)))
			return false;
	}

	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	       vma_is_shmem(vma);
}

static int userfaultfd_register(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_register uffdio_register;
	struct uffdio_register __user *user_uffdio_register;
	unsigned long vm_flags, new_flags;
	bool found;
	bool basic_ioctls;
	unsigned long start, end, vma_end;

	user_uffdio_register = (struct uffdio_register __user *) arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_register, user_uffdio_register,
			   sizeof(uffdio_register)-sizeof(__u64)))
		goto out;

	ret = -EINVAL;
	if (!uffdio_register.mode)
		goto out;
	if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
		goto out;
	vm_flags = 0;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
		vm_flags |= VM_UFFD_MISSING;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
		goto out;
#endif
		vm_flags |= VM_UFFD_WP;
	}
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
		goto out;
#endif
		vm_flags |= VM_UFFD_MINOR;
	}

	ret = validate_range(mm, uffdio_register.range.start,
			     uffdio_register.range.len);
	if (ret)
		goto out;

	start = uffdio_register.range.start;
	end = start + uffdio_register.range.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	mmap_write_lock(mm);
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for not compatible vmas.
	 */
	found = false;
	basic_ioctls = false;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & __VM_UFFD_FLAGS));

		/* check not compatible vmas */
		ret = -EINVAL;
		if (!vma_can_userfault(cur, vm_flags))
			goto out_unlock;

		/*
		 * UFFDIO_COPY will fill file holes even without
		 * PROT_WRITE. This check enforces that if this is a
		 * MAP_SHARED, the process has write permission to the backing
		 * file. If VM_MAYWRITE is set it also enforces that on a
		 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
		 * F_WRITE_SEAL can be taken until the vma is destroyed.
		 */
		ret = -EPERM;
		if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
			goto out_unlock;

		/*
		 * If this vma contains ending address, and huge pages
		 * check alignment.
		 */
		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
		    end > cur->vm_start) {
			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);

			ret = -EINVAL;

			if (end & (vma_hpagesize - 1))
				goto out_unlock;
		}
		if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
			goto out_unlock;

		/*
		 * Check that this vma isn't already owned by a
		 * different userfaultfd. We can't allow more than one
		 * userfaultfd to own a single vma simultaneously or we
		 * wouldn't know which one to deliver the userfaults to.
		 */
		ret = -EBUSY;
		if (cur->vm_userfaultfd_ctx.ctx &&
		    cur->vm_userfaultfd_ctx.ctx != ctx)
			goto out_unlock;

		/*
		 * Note vmas containing huge pages
		 */
		if (is_vm_hugetlb_page(cur))
			basic_ioctls = true;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma, vm_flags));
		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
		       vma->vm_userfaultfd_ctx.ctx != ctx);
		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		/*
		 * Nothing to do: this vma is already registered into this
		 * userfaultfd and with the right tracking mode too.
		 */
		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
		    (vma->vm_flags & vm_flags) == vm_flags)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 ((struct vm_userfaultfd_ctx){ ctx }));
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx.ctx = ctx;

		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
			hugetlb_unshare_all_pmds(vma);

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	mmap_write_unlock(mm);
	mmput(mm);
	if (!ret) {
		__u64 ioctls_out;

		ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
		    UFFD_API_RANGE_IOCTLS;

		/*
		 * Declare the WP ioctl only if the WP mode is
		 * specified and all checks passed with the range
		 */
		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
			ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);

		/* CONTINUE ioctl is only supported for MINOR ranges. */
		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
			ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);

		/*
		 * Now that we scanned all vmas we can already tell
		 * userland which ioctls methods are guaranteed to
		 * succeed on this range.
		 */
		if (put_user(ioctls_out, &user_uffdio_register->ioctls))
			ret = -EFAULT;
	}
out:
	return ret;
}
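
/*
 * Illustrative sketch (not kernel code): a userspace caller of the
 * UFFDIO_REGISTER path above.  It assumes "area" and "len" are page
 * aligned and that "uffd" already completed the UFFDIO_API handshake.
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)area, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *		err(1, "UFFDIO_REGISTER");
 *	// reg.ioctls now carries the _UFFDIO_* bits valid on the range.
 */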

static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
				  unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_range uffdio_unregister;
	unsigned long new_flags;
	bool found;
	unsigned long start, end, vma_end;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
		goto out;

	ret = validate_range(mm, uffdio_unregister.start,
			     uffdio_unregister.len);
	if (ret)
		goto out;

	start = uffdio_unregister.start;
	end = start + uffdio_unregister.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	mmap_write_lock(mm);
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for not compatible vmas.
	 */
	found = false;
	ret = -EINVAL;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & __VM_UFFD_FLAGS));

		/*
		 * Check not compatible vmas, not strictly required
		 * here as not compatible vmas cannot have an
		 * userfaultfd_ctx registered on them, but this
		 * provides for more strict behavior to notice
		 * unregistration errors.
		 */
		if (!vma_can_userfault(cur, cur->vm_flags))
			goto out_unlock;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma, vma->vm_flags));

		/*
		 * Nothing to do: this vma is not registered with any
		 * userfaultfd, so there is nothing to unregister.
		 */
		if (!vma->vm_userfaultfd_ctx.ctx)
			goto skip;

		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		if (userfaultfd_missing(vma)) {
			/*
			 * Wake any concurrent pending userfault while
			 * we unregister, so they will not hang
			 * permanently and it avoids userland to call
			 * UFFDIO_WAKE explicitly.
			 */
			struct userfaultfd_wake_range range;
			range.start = start;
			range.len = vma_end - start;
			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
		}

		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX);
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	mmap_write_unlock(mm);
	mmput(mm);
out:
	return ret;
}

/*
 * userfaultfd_wake may be used in combination with the
 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
 */
static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
			    unsigned long arg)
{
	int ret;
	struct uffdio_range uffdio_wake;
	struct userfaultfd_wake_range range;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
	if (ret)
		goto out;

	range.start = uffdio_wake.start;
	range.len = uffdio_wake.len;

	/*
	 * len == 0 means wake all and we don't want to wake all here,
	 * so check it again to be sure.
	 */
	VM_BUG_ON(!range.len);

	wake_userfault(ctx, &range);
	ret = 0;

out:
	return ret;
}

static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
			    unsigned long arg)
{
	__s64 ret;
	struct uffdio_copy uffdio_copy;
	struct uffdio_copy __user *user_uffdio_copy;
	struct userfaultfd_wake_range range;

	user_uffdio_copy = (struct uffdio_copy __user *) arg;

	ret = -EAGAIN;
	if (READ_ONCE(ctx->mmap_changing))
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
			   /* don't copy "copy" last field */
			   sizeof(uffdio_copy)-sizeof(__s64)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
	if (ret)
		goto out;
	/*
	 * double check for wraparound just in case. copy_from_user()
	 * will later check uffdio_copy.src + uffdio_copy.len points
	 * in the userland range.
	 */
	ret = -EINVAL;
	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
		goto out;
	if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
		goto out;
	if (mmget_not_zero(ctx->mm)) {
		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
				   uffdio_copy.len, &ctx->mmap_changing,
				   uffdio_copy.mode);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}
	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
		return -EFAULT;
	if (ret < 0)
		goto out;
	BUG_ON(!ret);
	/* len == 0 would wake all */
	range.len = ret;
	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
		range.start = uffdio_copy.dst;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
out:
	return ret;
}
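
/*
 * Illustrative sketch (not kernel code): resolving a missing fault at
 * "addr" from userspace with UFFDIO_COPY.  "page" is an assumption: a
 * page-sized, page-aligned source buffer prepared by the monitor.
 *
 *	struct uffdio_copy copy = {
 *		.dst  = addr & ~((unsigned long)page_size - 1),
 *		.src  = (unsigned long)page,
 *		.len  = page_size,
 *		.mode = 0,	// wake the faulting thread right away
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1 && errno != EAGAIN)
 *		err(1, "UFFDIO_COPY");
 *	// copy.copy reports the number of bytes actually copied.
 */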

static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	__s64 ret;
	struct uffdio_zeropage uffdio_zeropage;
	struct uffdio_zeropage __user *user_uffdio_zeropage;
	struct userfaultfd_wake_range range;

	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;

	ret = -EAGAIN;
	if (READ_ONCE(ctx->mmap_changing))
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
			   /* don't copy "zeropage" last field */
			   sizeof(uffdio_zeropage)-sizeof(__s64)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
			     uffdio_zeropage.range.len);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
		goto out;

	if (mmget_not_zero(ctx->mm)) {
		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
				     uffdio_zeropage.range.len,
				     &ctx->mmap_changing);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}
	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
		return -EFAULT;
	if (ret < 0)
		goto out;
	/* len == 0 would wake all */
	BUG_ON(!ret);
	range.len = ret;
	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
		range.start = uffdio_zeropage.range.start;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
out:
	return ret;
}

static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
				    unsigned long arg)
{
	int ret;
	struct uffdio_writeprotect uffdio_wp;
	struct uffdio_writeprotect __user *user_uffdio_wp;
	struct userfaultfd_wake_range range;
	bool mode_wp, mode_dontwake;

	if (READ_ONCE(ctx->mmap_changing))
		return -EAGAIN;

	user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;

	if (copy_from_user(&uffdio_wp, user_uffdio_wp,
			   sizeof(struct uffdio_writeprotect)))
		return -EFAULT;

	ret = validate_range(ctx->mm, uffdio_wp.range.start,
			     uffdio_wp.range.len);
	if (ret)
		return ret;

	if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
			       UFFDIO_WRITEPROTECT_MODE_WP))
		return -EINVAL;

	mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
	mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;

	if (mode_wp && mode_dontwake)
		return -EINVAL;

	ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
				  uffdio_wp.range.len, mode_wp,
				  &ctx->mmap_changing);
	if (ret)
		return ret;

	if (!mode_wp && !mode_dontwake) {
		range.start = uffdio_wp.range.start;
		range.len = uffdio_wp.range.len;
		wake_userfault(ctx, &range);
	}
	return ret;
}

static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
{
	__s64 ret;
	struct uffdio_continue uffdio_continue;
	struct uffdio_continue __user *user_uffdio_continue;
	struct userfaultfd_wake_range range;

	user_uffdio_continue = (struct uffdio_continue __user *)arg;

	ret = -EAGAIN;
	if (READ_ONCE(ctx->mmap_changing))
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_continue, user_uffdio_continue,
			   /* don't copy the output fields */
			   sizeof(uffdio_continue) - (sizeof(__s64))))
		goto out;

	ret = validate_range(ctx->mm, uffdio_continue.range.start,
			     uffdio_continue.range.len);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* double check for wraparound just in case. */
	if (uffdio_continue.range.start + uffdio_continue.range.len <=
	    uffdio_continue.range.start) {
		goto out;
	}
	if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE)
		goto out;

	if (mmget_not_zero(ctx->mm)) {
		ret = mcopy_continue(ctx->mm, uffdio_continue.range.start,
				     uffdio_continue.range.len,
				     &ctx->mmap_changing);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}

	if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
		return -EFAULT;
	if (ret < 0)
		goto out;

	/* len == 0 would wake all */
	BUG_ON(!ret);
	range.len = ret;
	if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
		range.start = uffdio_continue.range.start;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;

out:
	return ret;
}

static inline unsigned int uffd_ctx_features(__u64 user_features)
{
	/*
	 * For the current set of features the bits just coincide
	 */
	return (unsigned int)user_features;
}

/*
 * userland asks for a certain API version and we return which bits
 * and ioctl commands are implemented in this kernel for such API
 * version or -EINVAL if unknown.
 */
static int userfaultfd_api(struct userfaultfd_ctx *ctx,
			   unsigned long arg)
{
	struct uffdio_api uffdio_api;
	void __user *buf = (void __user *)arg;
	int ret;
	__u64 features;

	ret = -EINVAL;
	if (ctx->state != UFFD_STATE_WAIT_API)
		goto out;
	ret = -EFAULT;
	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
		goto out;
	features = uffdio_api.features;
	ret = -EINVAL;
	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
		goto err_out;
	ret = -EPERM;
	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
		goto err_out;
	/* report all available features and ioctls to userland */
	uffdio_api.features = UFFD_API_FEATURES;
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
	uffdio_api.features &=
		~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
#endif
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
	uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
#endif
	uffdio_api.ioctls = UFFD_API_IOCTLS;
	ret = -EFAULT;
	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
		goto out;
	ctx->state = UFFD_STATE_RUNNING;
	/* only enable the requested features for this uffd context */
	ctx->features = uffd_ctx_features(features);
	ret = 0;
out:
	return ret;
err_out:
	memset(&uffdio_api, 0, sizeof(uffdio_api));
	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
		ret = -EFAULT;
	goto out;
}
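
/*
 * Illustrative sketch (not kernel code): the UFFDIO_API handshake a
 * monitor performs once, right after the userfaultfd() syscall and
 * before any UFFDIO_REGISTER.
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = 0,	// or e.g. UFFD_FEATURE_THREAD_ID
 *	};
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		err(1, "UFFDIO_API");
 *	// api.features and api.ioctls now hold what this kernel supports.
 */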

static long userfaultfd_ioctl(struct file *file, unsigned cmd,
			      unsigned long arg)
{
	int ret = -EINVAL;
	struct userfaultfd_ctx *ctx = file->private_data;

	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
		return -EINVAL;

	switch(cmd) {
	case UFFDIO_API:
		ret = userfaultfd_api(ctx, arg);
		break;
	case UFFDIO_REGISTER:
		ret = userfaultfd_register(ctx, arg);
		break;
	case UFFDIO_UNREGISTER:
		ret = userfaultfd_unregister(ctx, arg);
		break;
	case UFFDIO_WAKE:
		ret = userfaultfd_wake(ctx, arg);
		break;
	case UFFDIO_COPY:
		ret = userfaultfd_copy(ctx, arg);
		break;
	case UFFDIO_ZEROPAGE:
		ret = userfaultfd_zeropage(ctx, arg);
		break;
	case UFFDIO_WRITEPROTECT:
		ret = userfaultfd_writeprotect(ctx, arg);
		break;
	case UFFDIO_CONTINUE:
		ret = userfaultfd_continue(ctx, arg);
		break;
	}
	return ret;
}

#ifdef CONFIG_PROC_FS
static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct userfaultfd_ctx *ctx = f->private_data;
	wait_queue_entry_t *wq;
	unsigned long pending = 0, total = 0;

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
		pending++;
		total++;
	}
	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
		total++;
	}
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/*
	 * If more protocols will be added, there will be all shown
	 * separated by a space. Like this:
	 *	protocols: aa:... bb:...
	 */
	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
		   pending, total, UFFD_API, ctx->features,
		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif

static const struct file_operations userfaultfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= userfaultfd_show_fdinfo,
#endif
	.release	= userfaultfd_release,
	.poll		= userfaultfd_poll,
	.read		= userfaultfd_read,
	.unlocked_ioctl = userfaultfd_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.llseek		= noop_llseek,
};

static void init_once_userfaultfd_ctx(void *mem)
{
	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;

	init_waitqueue_head(&ctx->fault_pending_wqh);
	init_waitqueue_head(&ctx->fault_wqh);
	init_waitqueue_head(&ctx->event_wqh);
	init_waitqueue_head(&ctx->fd_wqh);
	seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
}

SYSCALL_DEFINE1(userfaultfd, int, flags)
{
	struct userfaultfd_ctx *ctx;
	int fd;

	if (!sysctl_unprivileged_userfaultfd &&
	    (flags & UFFD_USER_MODE_ONLY) == 0 &&
	    !capable(CAP_SYS_PTRACE)) {
		printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
			"sysctl knob to 1 if kernel faults must be handled "
			"without obtaining CAP_SYS_PTRACE capability\n");
		return -EPERM;
	}

	BUG_ON(!current->mm);

	/* Check the UFFD_* constants for consistency.  */
	BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
		return -EINVAL;

	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	refcount_set(&ctx->refcount, 1);
	ctx->flags = flags;
	ctx->features = 0;
	ctx->state = UFFD_STATE_WAIT_API;
	ctx->released = false;
	ctx->mmap_changing = false;
	ctx->mm = current->mm;
	/* prevent the mm struct to be freed */
	mmgrab(ctx->mm);

	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
			O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
	if (fd < 0) {
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
	return fd;
}
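
/*
 * Illustrative end-to-end sketch (not kernel code): a minimal userspace
 * monitor tying the pieces above together.  Error handling is elided;
 * "page_size" is sysconf(_SC_PAGESIZE) and "src_page" is a page-aligned
 * buffer prepared by the caller (both assumptions of this sketch).
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct uffdio_api api = { .api = UFFD_API };
 *	ioctl(uffd, UFFDIO_API, &api);
 *
 *	char *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)area, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	while (poll(&pfd, 1, -1) > 0) {
 *		struct uffd_msg msg;
 *		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *			continue;
 *		if (msg.event != UFFD_EVENT_PAGEFAULT)
 *			continue;
 *		struct uffdio_copy copy = {
 *			.dst = msg.arg.pagefault.address &
 *			       ~((unsigned long)page_size - 1),
 *			.src = (unsigned long)src_page,
 *			.len = page_size,
 *		};
 *		ioctl(uffd, UFFDIO_COPY, &copy);
 *	}
 */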

static int __init userfaultfd_init(void)
{
	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
						sizeof(struct userfaultfd_ctx),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						init_once_userfaultfd_ctx);
	return 0;
}
__initcall(userfaultfd_init);