// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/userfaultfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *  Copyright (C) 2008-2009 Red Hat, Inc.
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  Some part derived from fs/eventfd.c (anon inode setup) and
 *  mm/vmscan.c (zone_reclaim_mode logic)
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>

int sysctl_unprivileged_userfaultfd __read_mostly = 1;

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

enum userfaultfd_state {
	UFFD_STATE_WAIT_API,
	UFFD_STATE_RUNNING,
};

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the
 * above locks, since fd_wqh.lock is taken by aio_poll() while it's
 * held.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	struct seqcount refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* state machine */
	enum userfaultfd_state state;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	bool mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the
		 * other CPUs, the waitqueue may disappear from under
		 * us, see the !list_empty_careful() in
		 * handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern
		 * function "wake_up_state" (which in turn calls
		 * try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired
 * with userfaultfd_ctx_get().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the paddings or kernel data
	 * leaks to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;
	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;
	msg.arg.pagefault.address = address;
	if (flags & FAULT_FLAG_WRITE)
		/*
		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
		 * was a read fault, otherwise if set it means it's a
		 * write fault.
		 */
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		/*
		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
		 * a missing fault, otherwise if set it means it's a
		 * write protect fault.
		 */
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}
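
/*
 * Illustrative sketch (not part of the kernel build): this is roughly
 * how a userspace monitor consumes the uffd_msg structures produced by
 * userfault_msg() above. Field and constant names come from
 * <linux/userfaultfd.h>; the fd setup is assumed to have been done
 * already (see the example near the userfaultfd syscall at the end of
 * this file).
 *
 *	struct uffd_msg msg;
 *	ssize_t n = read(uffd, &msg, sizeof(msg));
 *	if (n == sizeof(msg) && msg.event == UFFD_EVENT_PAGEFAULT) {
 *		unsigned long addr = msg.arg.pagefault.address;
 *		int is_write = msg.arg.pagefault.flags &
 *			       UFFD_PAGEFAULT_FLAG_WRITE;
 *		// resolve with UFFDIO_COPY/UFFDIO_ZEROPAGE on the
 *		// page containing addr
 *	}
 */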

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *ptep, pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (huge_pte_none(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd))
		goto out;

	/*
	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
	 * and use the standard pte_offset_map() instead of parsing _pmd.
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (pte_none(*pte))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_sem must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait, return_to_userland;
	long blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * follow_hugetlb_page() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_sem and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_sem so we can only check that
	 * the mmap_sem is held, if PF_DUMPCORE was not set.
	 */
	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));

	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;

	/*
	 * If it's already released don't get it. This avoids to loop
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_sem.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without risking to trigger an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the non
		 * cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_sem */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
				ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	return_to_userland =
		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
			 TASK_KILLABLE;

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock to happen before the list add in
	 * __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vmf->vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
						       vmf->address,
						       vmf->flags, reason);
	up_read(&mm->mmap_sem);

	if (likely(must_wait && !READ_ONCE(ctx->released) &&
		   (return_to_userland ? !signal_pending(current) :
		    !fatal_signal_pending(current)))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
		ret |= VM_FAULT_MAJOR;

		/*
		 * False wakeups can originate even from rwsem before
		 * up_read() however userfaults will wait either for a
		 * targeted wakeup on the specific uwq waitqueue from
		 * wake_userfault() or for signals or for uffd
		 * release.
		 */
		while (!READ_ONCE(uwq.waken)) {
			/*
			 * This needs the full smp_store_mb()
			 * guarantee as the state write must be
			 * visible to other CPUs before reading
			 * uwq.waken from other CPUs.
			 */
			set_current_state(blocking_state);
			if (READ_ONCE(uwq.waken) ||
			    READ_ONCE(ctx->released) ||
			    (return_to_userland ? signal_pending(current) :
			     fatal_signal_pending(current)))
				break;
			schedule();
		}
	}

	__set_current_state(TASK_RUNNING);

	if (return_to_userland) {
		if (signal_pending(current) &&
		    !fatal_signal_pending(current)) {
			/*
			 * If we got a SIGSTOP or SIGCONT and this is
			 * a normal userland page fault, just let
			 * userland return so the signal will be
			 * handled and gdb debugging works.  The page
			 * fault code immediately after we return from
			 * this function is going to release the
			 * mmap_sem and it's not depending on it
			 * (unlike gup would if we were not to return
			 * VM_FAULT_RETRY).
			 *
			 * If a fatal signal is pending we still take
			 * the streamlined VM_FAULT_RETRY failure path
			 * and there's no need to retake the mmap_sem
			 * in such case.
			 */
			down_read(&mm->mmap_sem);
			ret = VM_FAULT_NOPAGE;
		}
	}

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the
	 * list_add side cannot possibly remove us from either list:
	 * if list_empty_careful() sees the entry on a list it is
	 * still queued somewhere, and taking fault_pending_wqh.lock
	 * (which serializes all refiles and wakeups) makes it safe
	 * to remove it.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}
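
/*
 * Illustrative sketch (not part of the kernel build): the round trip a
 * missing-page fault takes through handle_userfault() above, assuming a
 * cooperative userspace manager. Constant names are from
 * <linux/userfaultfd.h>.
 *
 *   faulting thread                     manager thread
 *   ---------------                     --------------
 *   touches unmapped page
 *   handle_userfault() queues uwq,
 *   wakes poll(), schedule()s
 *                                       read(uffd) -> UFFD_EVENT_PAGEFAULT
 *                                       ioctl(uffd, UFFDIO_COPY, &copy)
 *                                       -> wake_userfault() sets uwq.waken
 *   returns VM_FAULT_RETRY|VM_FAULT_MAJOR,
 *   the fault is retried and now succeeds
 */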

static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;

		/* the various vma->vm_userfaultfd_ctx still points to it */
		down_write(&mm->mmap_sem);
		/* no task can run (and in turn coredump) yet */
		VM_WARN_ON(!mmget_still_valid(mm));
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
			}
		up_write(&mm->mmap_sem);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	WRITE_ONCE(ctx->mmap_changing, false);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->state = UFFD_STATE_RUNNING;
		ctx->features = octx->features;
		ctx->released = false;
		ctx->mmap_changing = false;
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		WRITE_ONCE(octx->mmap_changing, true);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}

void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		WRITE_ONCE(ctx->mmap_changing, true);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	WRITE_ONCE(ctx->mmap_changing, true);
	up_read(&mm->mmap_sem);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end,
			   struct list_head *unmaps)
{
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
		struct userfaultfd_unmap_ctx *unmap_ctx;
		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
		    has_unmap_ctx(ctx, unmaps, start, end))
			continue;

		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
		if (!unmap_ctx)
			return -ENOMEM;

		userfaultfd_ctx_get(ctx);
		WRITE_ONCE(ctx->mmap_changing, true);
		unmap_ctx->ctx = ctx;
		unmap_ctx->start = start;
		unmap_ctx->end = end;
		list_add_tail(&unmap_ctx->list, unmaps);
	}

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;
	bool still_valid;

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_sem. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_sem for writing.
	 */
	down_write(&mm->mmap_sem);
	still_valid = mmget_still_valid(mm);
	prev = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
		if (still_valid) {
			prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
					 new_flags, vma->anon_vma,
					 vma->vm_file, vma->vm_pgoff,
					 vma_policy(vma),
					 NULL_VM_UFFD_CTX);
			if (prev)
				vma = prev;
			else
				prev = vma;
		}
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	up_write(&mm->mmap_sem);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	switch (ctx->state) {
	case UFFD_STATE_WAIT_API:
		return EPOLLERR;
	case UFFD_STATE_RUNNING:
		/*
		 * poll() never guarantees that read won't block.
		 * userfaults can be waken before they're read().
		 */
		if (unlikely(!(file->f_flags & O_NONBLOCK)))
			return EPOLLERR;
		/*
		 * lockless access to see if there are pending faults
		 * __pollwait last action is the add_wait_queue but
		 * the spin_unlock would allow the waitqueue_active to
		 * pass above the actual list_add inside
		 * add_wait_queue critical section. So use a full
		 * memory barrier to serialize the list_add write of
		 * add_wait_queue() with the waitqueue_active read
		 * below.
		 */
		ret = 0;
		smp_mb();
		if (waitqueue_active(&ctx->fault_pending_wqh))
			ret = EPOLLIN;
		else if (waitqueue_active(&ctx->event_wqh))
			ret = EPOLLIN;

		return ret;
	default:
		WARN_ON_ONCE(1);
		return EPOLLERR;
	}
}

static const struct file_operations userfaultfd_fops;

static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
				  struct userfaultfd_ctx *new,
				  struct uffd_msg *msg)
{
	int fd;

	fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new,
			      O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		return fd;

	msg->arg.reserved.reserved1 = 0;
	msg->arg.fork.ufd = fd;
	return 0;
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
				    struct uffd_msg *msg)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);
	struct userfaultfd_wait_queue *uwq;
	/*
	 * Handling fork event requires sleeping operations, so
	 * we drop the event_wqh lock, then do these ops, then
	 * lock it back and wake up the waiter. While the lock is
	 * dropped the ewq may go away so we keep track of it
	 * using the fork_event list.
	 */
	LIST_HEAD(fork_event);
	struct userfaultfd_ctx *fork_nctx = NULL;

	/* always take the fd_wqh lock before the fault_pending_wqh lock */
	spin_lock_irq(&ctx->fd_wqh.lock);
	__add_wait_queue(&ctx->fd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&ctx->fault_pending_wqh.lock);
		uwq = find_userfault(ctx);
		if (uwq) {
			/*
			 * Use a seqcount to repeat the lockless check
			 * in wake_userfault() to avoid missing
			 * wakeups because during the refile both
			 * waitqueue could become empty if this is the
			 * only userfault.
			 */
			write_seqcount_begin(&ctx->refile_seq);

			/*
			 * The fault_pending_wqh.lock prevents the uwq
			 * to disappear from under us.
			 *
			 * Refile this userfault from
			 * fault_pending_wqh to fault_wqh, it's not
			 * pending anymore after we read it.
			 *
			 * The refile happens under the
			 * fault_pending_wqh.lock, which also
			 * serializes against the list_del_init() run
			 * by userfaultfd_wake_function(), so the uwq
			 * cannot be woken or removed concurrently
			 * while we move it to fault_wqh.
			 */
			list_del(&uwq->wq.entry);
			add_wait_queue(&ctx->fault_wqh, &uwq->wq);

			write_seqcount_end(&ctx->refile_seq);

			/* careful to always initialize msg if ret == 0 */
			*msg = uwq->msg;
			spin_unlock(&ctx->fault_pending_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->fault_pending_wqh.lock);

		spin_lock(&ctx->event_wqh.lock);
		uwq = find_userfault_evt(ctx);
		if (uwq) {
			*msg = uwq->msg;

			if (uwq->msg.event == UFFD_EVENT_FORK) {
				fork_nctx = (struct userfaultfd_ctx *)
					(unsigned long)
					uwq->msg.arg.reserved.reserved1;
				list_move(&uwq->wq.entry, &fork_event);
				/*
				 * fork_nctx can be freed as soon as
				 * we drop the lock, unless we take a
				 * reference on it.
				 */
				userfaultfd_ctx_get(fork_nctx);
				spin_unlock(&ctx->event_wqh.lock);
				ret = 0;
				break;
			}

			userfaultfd_event_complete(ctx, uwq);
			spin_unlock(&ctx->event_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->event_wqh.lock);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (no_wait) {
			ret = -EAGAIN;
			break;
		}
		spin_unlock_irq(&ctx->fd_wqh.lock);
		schedule();
		spin_lock_irq(&ctx->fd_wqh.lock);
	}
	__remove_wait_queue(&ctx->fd_wqh, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->fd_wqh.lock);

	if (!ret && msg->event == UFFD_EVENT_FORK) {
		ret = resolve_userfault_fork(ctx, fork_nctx, msg);
		spin_lock_irq(&ctx->event_wqh.lock);
		if (!list_empty(&fork_event)) {
			/*
			 * The fork thread didn't abort, so we can
			 * drop the temporary refcount.
			 */
			userfaultfd_ctx_put(fork_nctx);

			uwq = list_first_entry(&fork_event,
					       typeof(*uwq),
					       wq.entry);
			/*
			 * If fork_event list wasn't empty and in turn
			 * the event wasn't already released by fork
			 * (the event is allocated on fork kernel
			 * stack), put the event back to its place in
			 * the event_wqh. fork_event head will be
			 * freed as soon as we return so the event
			 * cannot stay queued there no matter the
			 * current "ret" value.
			 */
			list_del(&uwq->wq.entry);
			__add_wait_queue(&ctx->event_wqh, &uwq->wq);

			/*
			 * Leave the event in the waitqueue and report
			 * error to userland if we failed to resolve
			 * the userfault fork correctly. Otherwise the
			 * fork event is completed and the thread
			 * waiting in dup_fctx() can continue.
			 */
			if (likely(!ret))
				userfaultfd_event_complete(ctx, uwq);
		} else {
			/*
			 * Here the fork thread aborted and the
			 * refcount from the fork thread on fork_nctx
			 * has already been released. We still hold
			 * the reference we took before releasing the
			 * lock above. If resolve_userfault_fork
			 * failed we've to drop it because the
			 * fork_nctx has to be freed in such case. If
			 * it succeeded we'll hold it because the new
			 * uffd references it.
			 */
			if (ret)
				userfaultfd_ctx_put(fork_nctx);
		}
		spin_unlock_irq(&ctx->event_wqh.lock);
	}

	return ret;
}

static ssize_t userfaultfd_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	ssize_t _ret, ret = 0;
	struct uffd_msg msg;
	int no_wait = file->f_flags & O_NONBLOCK;

	if (ctx->state == UFFD_STATE_WAIT_API)
		return -EINVAL;

	for (;;) {
		if (count < sizeof(msg))
			return ret ? ret : -EINVAL;
		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
		if (_ret < 0)
			return ret ? ret : _ret;
		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
			return ret ? ret : -EFAULT;
		ret += sizeof(msg);
		buf += sizeof(msg);
		count -= sizeof(msg);
		/*
		 * Allow to read more than one fault at time but only
		 * block if waiting for the very first one.
		 */
		no_wait = O_NONBLOCK;
	}
}

static void __wake_userfault(struct userfaultfd_ctx *ctx,
			     struct userfaultfd_wake_range *range)
{
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/* wake all in the range and autoremove */
	if (waitqueue_active(&ctx->fault_pending_wqh))
		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
				     range);
	if (waitqueue_active(&ctx->fault_wqh))
		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
}

static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
					   struct userfaultfd_wake_range *range)
{
	unsigned seq;
	bool need_wakeup;

	/*
	 * To be sure waitqueue_active() is not reordered by the CPU
	 * before the pagetable update, use an explicit SMP memory
	 * barrier here. PT lock release or up_read(mmap_sem) still
	 * have release semantics that can allow the
	 * waitqueue_active() to be reordered before the pte update.
	 */
	smp_mb();

	/*
	 * Use waitqueue_active because it's very frequent to
	 * change the address space atomically even if there are no
	 * userfaults yet. So we take the spinlock only when we're
	 * sure we've userfaults to wake.
	 */
	do {
		seq = read_seqcount_begin(&ctx->refile_seq);
		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
			waitqueue_active(&ctx->fault_wqh);
		cond_resched();
	} while (read_seqcount_retry(&ctx->refile_seq, seq));
	if (need_wakeup)
		__wake_userfault(ctx, range);
}

static __always_inline int validate_range(struct mm_struct *mm,
					  __u64 start, __u64 len)
{
	__u64 task_size = mm->task_size;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (len & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return -EINVAL;
	if (start < mmap_min_addr)
		return -EINVAL;
	if (start >= task_size)
		return -EINVAL;
	if (len > task_size - start)
		return -EINVAL;
	return 0;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma)
{
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
		vma_is_shmem(vma);
}

static int userfaultfd_register(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_register uffdio_register;
	struct uffdio_register __user *user_uffdio_register;
	unsigned long vm_flags, new_flags;
	bool found;
	bool basic_ioctls;
	unsigned long start, end, vma_end;

	user_uffdio_register = (struct uffdio_register __user *) arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_register, user_uffdio_register,
			   sizeof(uffdio_register)-sizeof(__u64)))
		goto out;

	ret = -EINVAL;
	if (!uffdio_register.mode)
		goto out;
	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
				     UFFDIO_REGISTER_MODE_WP))
		goto out;
	vm_flags = 0;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
		vm_flags |= VM_UFFD_MISSING;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
		vm_flags |= VM_UFFD_WP;
		/*
		 * FIXME: remove the below error constraint by
		 * extending the wrprotection support.
		 */
		ret = -EINVAL;
		goto out;
	}

	ret = validate_range(mm, uffdio_register.range.start,
			     uffdio_register.range.len);
	if (ret)
		goto out;

	start = uffdio_register.range.start;
	end = start + uffdio_register.range.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	down_write(&mm->mmap_sem);
	if (!mmget_still_valid(mm))
		goto out_unlock;
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for not compatible vmas.
	 */
	found = false;
	basic_ioctls = false;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));

		/* check not compatible vmas */
		ret = -EINVAL;
		if (!vma_can_userfault(cur))
			goto out_unlock;

		/*
		 * UFFDIO_COPY will fill file holes even without
		 * PROT_WRITE. This check enforces that if this is a
		 * MAP_SHARED, the process has write permission to the backing
		 * file. If VM_MAYWRITE is set it also enforces that on a
		 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
		 * F_WRITE_SEAL can be taken until the vma is destroyed.
		 */
		ret = -EPERM;
		if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
			goto out_unlock;

		/*
		 * If this vma contains ending address, and huge pages
		 * check alignment.
		 */
		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
		    end > cur->vm_start) {
			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);

			ret = -EINVAL;

			if (end & (vma_hpagesize - 1))
				goto out_unlock;
		}

		/*
		 * Check that this vma isn't already owned by a
		 * different userfaultfd. We can't allow more than one
		 * userfaultfd to own a single vma simultaneously or we
		 * wouldn't know which one to deliver the userfaults to.
		 */
		ret = -EBUSY;
		if (cur->vm_userfaultfd_ctx.ctx &&
		    cur->vm_userfaultfd_ctx.ctx != ctx)
			goto out_unlock;

		/*
		 * Note vmas containing huge pages
		 */
		if (is_vm_hugetlb_page(cur))
			basic_ioctls = true;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma));
		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
		       vma->vm_userfaultfd_ctx.ctx != ctx);
		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		/*
		 * Nothing to do: this vma is already registered into this
		 * userfaultfd and with the right tracking mode too.
		 */
		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
		    (vma->vm_flags & vm_flags) == vm_flags)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 ((struct vm_userfaultfd_ctx){ ctx }));
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx.ctx = ctx;

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	up_write(&mm->mmap_sem);
	mmput(mm);
	if (!ret) {
		/*
		 * Now that we scanned all vmas we can already tell
		 * userland which ioctls methods are guaranteed to
		 * succeed on this range.
		 */
		if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
			     UFFD_API_RANGE_IOCTLS,
			     &user_uffdio_register->ioctls))
			ret = -EFAULT;
	}
out:
	return ret;
}

static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
				  unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_range uffdio_unregister;
	unsigned long new_flags;
	bool found;
	unsigned long start, end, vma_end;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
		goto out;

	ret = validate_range(mm, uffdio_unregister.start,
			     uffdio_unregister.len);
	if (ret)
		goto out;

	start = uffdio_unregister.start;
	end = start + uffdio_unregister.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	down_write(&mm->mmap_sem);
	if (!mmget_still_valid(mm))
		goto out_unlock;
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for not compatible vmas.
	 */
	found = false;
	ret = -EINVAL;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));

		/*
		 * Check not compatible vmas, not strictly required
		 * here as not compatible vmas cannot have an
		 * userfaultfd_ctx registered on them, but this
		 * forbids to unregister ranges that include not
		 * compatible vmas.
		 */
		if (!vma_can_userfault(cur))
			goto out_unlock;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma));

		/*
		 * Nothing to do: this vma is not registered with this
		 * userfaultfd, so there is nothing to unregister.
		 */
		if (!vma->vm_userfaultfd_ctx.ctx)
			goto skip;

		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		if (userfaultfd_missing(vma)) {
			/*
			 * Wake any concurrent pending userfault while
			 * we unregister, so they will not hang
			 * permanently and it avoids userland to call
			 * UFFDIO_WAKE explicitly.
			 */
			struct userfaultfd_wake_range range;
			range.start = start;
			range.len = vma_end - start;
			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
		}

		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX);
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	return ret;
}

/*
 * userfaultfd_wake may be used in combination with the
 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
 */
static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
			    unsigned long arg)
{
	int ret;
	struct uffdio_range uffdio_wake;
	struct userfaultfd_wake_range range;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
	if (ret)
		goto out;

	range.start = uffdio_wake.start;
	range.len = uffdio_wake.len;

	/*
	 * len == 0 means wake all and we don't want to wake all here,
	 * so check it again to be sure.
	 */
	VM_BUG_ON(!range.len);

	wake_userfault(ctx, &range);
	ret = 0;

out:
	return ret;
}

static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
			    unsigned long arg)
{
	__s64 ret;
	struct uffdio_copy uffdio_copy;
	struct uffdio_copy __user *user_uffdio_copy;
	struct userfaultfd_wake_range range;

	user_uffdio_copy = (struct uffdio_copy __user *) arg;

	ret = -EAGAIN;
	if (READ_ONCE(ctx->mmap_changing))
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
			   /* don't copy "copy" last field */
			   sizeof(uffdio_copy)-sizeof(__s64)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
	if (ret)
		goto out;
	/*
	 * double check for wraparound just in case. copy_from_user()
	 * will later check uffdio_copy.src + uffdio_copy.len points
	 * in a valid src userland range.
	 */
	ret = -EINVAL;
	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
		goto out;
	if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
		goto out;
	if (mmget_not_zero(ctx->mm)) {
		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
				   uffdio_copy.len, &ctx->mmap_changing);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}
	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
		return -EFAULT;
	if (ret < 0)
		goto out;
	BUG_ON(!ret);
	/* len == 0 would wake all */
	range.len = ret;
	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
		range.start = uffdio_copy.dst;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
out:
	return ret;
}
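
/*
 * Illustrative sketch (not part of the kernel build): resolving a
 * pending missing-page fault from userspace with UFFDIO_COPY, assuming
 * "page" holds page_size bytes of payload and "addr" is the faulting
 * address reported in the uffd_msg:
 *
 *	struct uffdio_copy copy = {
 *		.dst = addr & ~(page_size - 1),
 *		.src = (unsigned long)page,
 *		.len = page_size,
 *		.mode = 0,	// or UFFDIO_COPY_MODE_DONTWAKE to batch
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
 *		// EAGAIN means the mappings changed (mmap_changing)
 *		// or only part was copied: inspect copy.copy, retry
 *	}
 */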

static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	__s64 ret;
	struct uffdio_zeropage uffdio_zeropage;
	struct uffdio_zeropage __user *user_uffdio_zeropage;
	struct userfaultfd_wake_range range;

	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;

	ret = -EAGAIN;
	if (READ_ONCE(ctx->mmap_changing))
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
			   /* don't copy "zeropage" last field */
			   sizeof(uffdio_zeropage)-sizeof(__s64)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
			     uffdio_zeropage.range.len);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
		goto out;

	if (mmget_not_zero(ctx->mm)) {
		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
				     uffdio_zeropage.range.len,
				     &ctx->mmap_changing);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}
	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
		return -EFAULT;
	if (ret < 0)
		goto out;
	/* len == 0 would wake all */
	BUG_ON(!ret);
	range.len = ret;
	if (!(uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE ?
	      0 : uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
		range.start = uffdio_zeropage.range.start;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
out:
	return ret;
}

static inline unsigned int uffd_ctx_features(__u64 user_features)
{
	/*
	 * For the current set of features the bits just coincide
	 */
	return (unsigned int)user_features;
}

/*
 * userland asks for a certain API version and we return which bits
 * and ioctl commands are implemented in this kernel for such API
 * version or -EINVAL if unknown.
 */
static int userfaultfd_api(struct userfaultfd_ctx *ctx,
			   unsigned long arg)
{
	struct uffdio_api uffdio_api;
	void __user *buf = (void __user *)arg;
	int ret;
	__u64 features;

	ret = -EINVAL;
	if (ctx->state != UFFD_STATE_WAIT_API)
		goto out;
	ret = -EFAULT;
	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
		goto out;
	features = uffdio_api.features;
	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
		memset(&uffdio_api, 0, sizeof(uffdio_api));
		if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
			goto out;
		ret = -EINVAL;
		goto out;
	}
	/* report all available features and ioctls to userland */
	uffdio_api.features = UFFD_API_FEATURES;
	uffdio_api.ioctls = UFFD_API_IOCTLS;
	ret = -EFAULT;
	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
		goto out;
	ctx->state = UFFD_STATE_RUNNING;
	/* only enable the requested features for this uffd context */
	ctx->features = uffd_ctx_features(features);
	ret = 0;
out:
	return ret;
}
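
/*
 * Illustrative sketch (not part of the kernel build): the UFFDIO_API
 * handshake as performed by userspace, which must happen once before
 * any other ioctl is accepted (see UFFD_STATE_WAIT_API above):
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = 0,	// or e.g. UFFD_FEATURE_EVENT_FORK
 *	};
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1) {
 *		// unsupported API version or unknown feature bits
 *	}
 *	// api.features/api.ioctls now report what the kernel supports
 */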

static long userfaultfd_ioctl(struct file *file, unsigned cmd,
			      unsigned long arg)
{
	int ret = -EINVAL;
	struct userfaultfd_ctx *ctx = file->private_data;

	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
		return -EINVAL;

	switch(cmd) {
	case UFFDIO_API:
		ret = userfaultfd_api(ctx, arg);
		break;
	case UFFDIO_REGISTER:
		ret = userfaultfd_register(ctx, arg);
		break;
	case UFFDIO_UNREGISTER:
		ret = userfaultfd_unregister(ctx, arg);
		break;
	case UFFDIO_WAKE:
		ret = userfaultfd_wake(ctx, arg);
		break;
	case UFFDIO_COPY:
		ret = userfaultfd_copy(ctx, arg);
		break;
	case UFFDIO_ZEROPAGE:
		ret = userfaultfd_zeropage(ctx, arg);
		break;
	}
	return ret;
}

#ifdef CONFIG_PROC_FS
static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct userfaultfd_ctx *ctx = f->private_data;
	wait_queue_entry_t *wq;
	unsigned long pending = 0, total = 0;

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
		pending++;
		total++;
	}
	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
		total++;
	}
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/*
	 * If more protocols will be added, there will be all shown
	 * separated by a space. Like this:
	 *	protocols: aa:... bb:...
	 */
	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
		   pending, total, UFFD_API, ctx->features,
		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif

static const struct file_operations userfaultfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= userfaultfd_show_fdinfo,
#endif
	.release	= userfaultfd_release,
	.poll		= userfaultfd_poll,
	.read		= userfaultfd_read,
	.unlocked_ioctl = userfaultfd_ioctl,
	.compat_ioctl	= userfaultfd_ioctl,
	.llseek		= noop_llseek,
};

static void init_once_userfaultfd_ctx(void *mem)
{
	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;

	init_waitqueue_head(&ctx->fault_pending_wqh);
	init_waitqueue_head(&ctx->fault_wqh);
	init_waitqueue_head(&ctx->event_wqh);
	init_waitqueue_head(&ctx->fd_wqh);
	seqcount_init(&ctx->refile_seq);
}

SYSCALL_DEFINE1(userfaultfd, int, flags)
{
	struct userfaultfd_ctx *ctx;
	int fd;

	if (!sysctl_unprivileged_userfaultfd && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	BUG_ON(!current->mm);

	/* Check the UFFD_* constants for consistency.  */
	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
		return -EINVAL;

	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	refcount_set(&ctx->refcount, 1);
	ctx->flags = flags;
	ctx->features = 0;
	ctx->state = UFFD_STATE_WAIT_API;
	ctx->released = false;
	ctx->mmap_changing = false;
	ctx->mm = current->mm;
	/* prevent the mm struct to be freed */
	mmgrab(ctx->mm);

	fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
			      O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
	if (fd < 0) {
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
	return fd;
}
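
/*
 * Illustrative end-to-end sketch (not part of the kernel build): a
 * minimal user of this syscall with error handling elided. It mirrors
 * the flow documented in userfaultfd(2); "region" and "len" are
 * assumptions of this sketch:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct uffdio_api api = { .api = UFFD_API };
 *	ioctl(uffd, UFFDIO_API, &api);
 *
 *	void *region = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)region, .len = len },
 *		.mode = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 *	// poll(uffd) / read(uffd) in a monitor thread, then resolve
 *	// each UFFD_EVENT_PAGEFAULT with UFFDIO_COPY or
 *	// UFFDIO_ZEROPAGE as sketched near userfaultfd_copy() above.
 */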

static int __init userfaultfd_init(void)
{
	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
						sizeof(struct userfaultfd_ctx),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						init_once_userfaultfd_ctx);
	return 0;
}
__initcall(userfaultfd_init);