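/*
 * fs/userfaultfd.c
 *
 * Core of the userfaultfd(2) pseudo file: page faults in ranges
 * registered with UFFDIO_REGISTER are queued on the context and
 * reported to userland through read()/poll() on the fd, to be
 * resolved there with UFFDIO_COPY, UFFDIO_ZEROPAGE or UFFDIO_WAKE.
 *
 * This file is released under the GPL.
 */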
#include <linux/hashtable.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

enum userfaultfd_state {
        UFFD_STATE_WAIT_API,
        UFFD_STATE_RUNNING,
};

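/*
 * One context per userfaultfd file descriptor: registered vmas point
 * back to it through vma->vm_userfaultfd_ctx.ctx and the context is
 * freed when the last reference is dropped in userfaultfd_ctx_put().
 */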
struct userfaultfd_ctx {
        /* waitqueue head for the pending (i.e. not read) userfaults */
        wait_queue_head_t fault_pending_wqh;
        /* waitqueue head for the userfaults read() already returned */
        wait_queue_head_t fault_wqh;
        /* waitqueue head for the pseudo fd to wake up poll/read */
        wait_queue_head_t fd_wqh;
        /* refile sequence count, written under fault_pending_wqh.lock */
        struct seqcount refile_seq;
        /* pseudo fd refcounting */
        atomic_t refcount;
        /* userfaultfd syscall flags */
        unsigned int flags;
        /* state machine */
        enum userfaultfd_state state;
        /* set when the userfaultfd file is released */
        bool released;
        /* mm with one or more vmas attached to this userfaultfd_ctx */
        struct mm_struct *mm;
};

struct userfaultfd_wait_queue {
        struct uffd_msg msg;
        wait_queue_t wq;
        struct userfaultfd_ctx *ctx;
};

struct userfaultfd_wake_range {
        unsigned long start;
        unsigned long len;
};

static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
                                     int wake_flags, void *key)
{
        struct userfaultfd_wake_range *range = key;
        int ret;
        struct userfaultfd_wait_queue *uwq;
        unsigned long start, len;

        uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
        ret = 0;
        /* len == 0 means wake all, otherwise wake only faults in range */
        start = range->start;
        len = range->len;
        if (len && (start > uwq->msg.arg.pagefault.address ||
                    start + len <= uwq->msg.arg.pagefault.address))
                goto out;
        ret = wake_up_state(wq->private, mode);
        if (ret)
                /*
                 * Wake only once, autoremove behavior.
                 *
                 * After list_del_init() is visible to the other
                 * CPUs, the waitqueue may disappear from under us,
                 * see the !list_empty_careful() check in
                 * handle_userfault().
                 */
                list_del_init(&wq->task_list);
out:
        return ret;
}
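/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 *
 * The caller must already hold a reference (or otherwise guarantee the
 * context stays alive): the refcount can never be zero here, hence the
 * BUG() on atomic_inc_not_zero() failure.
 */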
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
        if (!atomic_inc_not_zero(&ctx->refcount))
                BUG();
}
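/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired
 * either with userfaultfd_ctx_get() or userfaultfd_file_create(). When
 * the last reference goes away no waiter may be queued anymore and the
 * mm reference is dropped.
 */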
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
        if (atomic_dec_and_test(&ctx->refcount)) {
                VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
                VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
                VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
                VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
                mmput(ctx->mm);
                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
        }
}

static inline void msg_init(struct uffd_msg *msg)
{
        BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
        /*
         * Must use memset to zero out the paddings or kernel data is
         * leaked to userland if memcpy is used to copy out the struct.
         */
        memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
                                            unsigned int flags,
                                            unsigned long reason)
{
        struct uffd_msg msg;
        msg_init(&msg);
        msg.event = UFFD_EVENT_PAGEFAULT;
        msg.arg.pagefault.address = address;
        if (flags & FAULT_FLAG_WRITE)
                /*
                 * If UFFD_PAGEFAULT_FLAG_WRITE is not set in a
                 * UFFD_EVENT_PAGEFAULT it means it was a read fault,
                 * otherwise if set it means it's a write fault.
                 */
                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
        if (reason & VM_UFFD_WP)
                /*
                 * If UFFD_PAGEFAULT_FLAG_WP is not set in a
                 * UFFD_EVENT_PAGEFAULT it means it was a missing
                 * fault, otherwise if set it means it's a
                 * write protect fault.
                 */
                msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
        return msg;
}
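/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh, to avoid userland having to UFFDIO_WAKE any
 * userfault whose fault has been resolved in the meantime (e.g. by a
 * concurrent UFFDIO_COPY).
 */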
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
                                         unsigned long address,
                                         unsigned long flags,
                                         unsigned long reason)
{
        struct mm_struct *mm = ctx->mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd, _pmd;
        pte_t *pte;
        bool ret = true;

        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                goto out;
        pmd = pmd_offset(pud, address);
        /*
         * READ_ONCE must function as a barrier with narrower scope
         * and it must be equivalent to:
         *      _pmd = *pmd; barrier();
         *
         * This is to deal with the instability (as in
         * pmd_trans_unstable) of the pmd.
         */
        _pmd = READ_ONCE(*pmd);
        if (!pmd_present(_pmd))
                goto out;

        ret = false;
        if (pmd_trans_huge(_pmd))
                goto out;

        /*
         * The pmd is stable (as in !pmd_trans_unstable) so we can
         * map the pte and check it without further locking.
         */
        pte = pte_offset_map(pmd, address);
        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us.
         */
        if (pte_none(*pte))
                ret = true;
        pte_unmap(pte);

out:
        return ret;
}
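/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_sem must be released before
 * returning it.
 */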
int handle_userfault(struct vm_area_struct *vma, unsigned long address,
                     unsigned int flags, unsigned long reason)
{
        struct mm_struct *mm = vma->vm_mm;
        struct userfaultfd_ctx *ctx;
        struct userfaultfd_wait_queue uwq;
        int ret;
        bool must_wait, return_to_userland;

        BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

        ret = VM_FAULT_SIGBUS;
        ctx = vma->vm_userfaultfd_ctx.ctx;
        if (!ctx)
                goto out;

        BUG_ON(ctx->mm != mm);

        VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
        VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));

        /*
         * If it's already released don't get it. This avoids to loop
         * in __get_user_pages if userfaultfd_release waits on the
         * caller of handle_userfault to release the mmap_sem.
         */
        if (unlikely(ACCESS_ONCE(ctx->released)))
                goto out;

        /*
         * We don't do userfault handling for the final child pid update.
         */
        if (current->flags & PF_EXITING)
                goto out;

        /*
         * Check that we can return VM_FAULT_RETRY.
         *
         * Returning VM_FAULT_RETRY is only safe if the caller set
         * FAULT_FLAG_ALLOW_RETRY: get_user_pages()-like callers that
         * didn't are not prepared to have the fault handler drop the
         * mmap_sem, so the best we can do for them is SIGBUS.
         */
        if (unlikely(!(flags & FAULT_FLAG_ALLOW_RETRY))) {
                /*
                 * Validate the invariant that nowait must allow retry
                 * to be sure not to return SIGBUS erroneously on
                 * nowait invocations.
                 */
                BUG_ON(flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
                if (printk_ratelimit()) {
                        printk(KERN_WARNING
                               "FAULT_FLAG_ALLOW_RETRY missing %x\n", flags);
                        dump_stack();
                }
#endif
                goto out;
        }

        /*
         * Handle nowait, not much to do other than tell it to retry
         * and wait.
         */
        ret = VM_FAULT_RETRY;
        if (flags & FAULT_FLAG_RETRY_NOWAIT)
                goto out;

        /* take the refcount before dropping the mmap_sem */
        userfaultfd_ctx_get(ctx);

        init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
        uwq.wq.private = current;
        uwq.msg = userfault_msg(address, flags, reason);
        uwq.ctx = ctx;

        return_to_userland = (flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
                (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);

        spin_lock(&ctx->fault_pending_wqh.lock);
        /*
         * After the __add_wait_queue the uwq is visible to userland
         * through poll/read().
         */
        __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
        /*
         * The smp_mb() after __set_current_state prevents the reads
         * following the spin_unlock to happen before the list_add in
         * __add_wait_queue.
         */
        set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
                          TASK_KILLABLE);
        spin_unlock(&ctx->fault_pending_wqh.lock);

        must_wait = userfaultfd_must_wait(ctx, address, flags, reason);
        up_read(&mm->mmap_sem);

        if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
                   (return_to_userland ? !signal_pending(current) :
                    !fatal_signal_pending(current)))) {
                wake_up_poll(&ctx->fd_wqh, POLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;
        }

        __set_current_state(TASK_RUNNING);

        if (return_to_userland) {
                if (signal_pending(current) &&
                    !fatal_signal_pending(current)) {
                        /*
                         * If we got a SIGSTOP or SIGCONT and this is
                         * a normal userland page fault, just let
                         * userland return so the signal will be
                         * handled and gdb debugging works.  The page
                         * fault code immediately after we return from
                         * this function is going to release the
                         * mmap_sem and it's not depending on it
                         * (unlike gup would if we were not to return
                         * VM_FAULT_RETRY).
                         *
                         * If a fatal signal is pending we still take
                         * the streamlined VM_FAULT_RETRY failure path
                         * and there's no need to retake the mmap_sem
                         * in such case.
                         */
                        down_read(&mm->mmap_sem);
                        ret = 0;
                }
        }

        /*
         * Here we race with the list_del; list_add in
         * userfaultfd_ctx_read() (which refiles the uwq without
         * reinitializing its list head) and with the list_del_init
         * in userfaultfd_wake_function(). list_empty_careful() is
         * safe against the latter, and if the entry is still queued
         * on either waitqueue we must remove it under the
         * fault_pending_wqh.lock (which serializes both waitqueues)
         * before the uwq on the stack goes away.
         */
        if (!list_empty_careful(&uwq.wq.task_list)) {
                spin_lock(&ctx->fault_pending_wqh.lock);
                /*
                 * No need of list_del_init(), the uwq on the stack
                 * will be freed shortly anyway.
                 */
                list_del(&uwq.wq.task_list);
                spin_unlock(&ctx->fault_pending_wqh.lock);
        }

        /*
         * ctx may go away after this if the userfault pseudo fd is
         * already released.
         */
        userfaultfd_ctx_put(ctx);

out:
        return ret;
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
        struct userfaultfd_ctx *ctx = file->private_data;
        struct mm_struct *mm = ctx->mm;
        struct vm_area_struct *vma, *prev;
        /* len == 0 means wake all */
        struct userfaultfd_wake_range range = { .len = 0, };
        unsigned long new_flags;

        ACCESS_ONCE(ctx->released) = true;

        /*
         * Flush page faults out of all CPUs. NOTE: all page faults
         * must be retried without returning VM_FAULT_SIGBUS if
         * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
         * changes while handle_userfault released the mmap_sem. So
         * it's critical that released is set to true (above), before
         * taking the mmap_sem for writing.
         */
        down_write(&mm->mmap_sem);
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                cond_resched();
                BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
                       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
                if (vma->vm_userfaultfd_ctx.ctx != ctx) {
                        prev = vma;
                        continue;
                }
                new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
                prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
                                 new_flags, vma->anon_vma,
                                 vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
                                 NULL_VM_UFFD_CTX);
                if (prev)
                        vma = prev;
                else
                        prev = vma;
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
        up_write(&mm->mmap_sem);

        /*
         * After no new page faults can wait on this fault_*wqh, flush
         * the last page faults that may have been already waiting on
         * the fault_*wqh.
         */
        spin_lock(&ctx->fault_pending_wqh.lock);
        __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
        __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
        spin_unlock(&ctx->fault_pending_wqh.lock);

        wake_up_poll(&ctx->fd_wqh, POLLHUP);
        userfaultfd_ctx_put(ctx);
        return 0;
}
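/* fault_pending_wqh.lock must be held by the caller */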
static inline struct userfaultfd_wait_queue *find_userfault(
        struct userfaultfd_ctx *ctx)
{
        wait_queue_t *wq;
        struct userfaultfd_wait_queue *uwq;

        VM_BUG_ON(!spin_is_locked(&ctx->fault_pending_wqh.lock));

        uwq = NULL;
        if (!waitqueue_active(&ctx->fault_pending_wqh))
                goto out;
        /* walk in reverse to provide FIFO behavior to read userfaults */
        wq = list_last_entry(&ctx->fault_pending_wqh.task_list,
                             typeof(*wq), task_list);
        uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
        return uwq;
}

static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
{
        struct userfaultfd_ctx *ctx = file->private_data;
        unsigned int ret;

        poll_wait(file, &ctx->fd_wqh, wait);

        switch (ctx->state) {
        case UFFD_STATE_WAIT_API:
                return POLLERR;
        case UFFD_STATE_RUNNING:
                /*
                 * poll() never guarantees that read won't block.
                 * userfaults can be woken before they're read().
                 */
                if (unlikely(!(file->f_flags & O_NONBLOCK)))
                        return POLLERR;
                /*
                 * Lockless access to see if there are pending faults:
                 * poll_wait() already queued us on fd_wqh above, and
                 * the fault side does wake_up_poll() only after
                 * queueing the uwq, so a fault queued after the check
                 * below still generates a wakeup. The smp_mb() pairs
                 * with the barrier implied by set_current_state() on
                 * the fault side.
                 */
                ret = 0;
                smp_mb();
                if (waitqueue_active(&ctx->fault_pending_wqh))
                        ret = POLLIN;
                return ret;
        default:
                BUG();
        }
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                                    struct uffd_msg *msg)
{
        ssize_t ret;
        DECLARE_WAITQUEUE(wait, current);
        struct userfaultfd_wait_queue *uwq;

        /* always take the fd_wqh lock before the fault_pending_wqh lock */
        spin_lock(&ctx->fd_wqh.lock);
        __add_wait_queue(&ctx->fd_wqh, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock(&ctx->fault_pending_wqh.lock);
                uwq = find_userfault(ctx);
                if (uwq) {
                        /*
                         * Use a seqcount to repeat the lockless check
                         * in wake_userfault() to avoid missing
                         * wakeups because during the refile both
                         * waitqueues could become empty if this is
                         * the only userfault.
                         */
                        write_seqcount_begin(&ctx->refile_seq);

                        /*
                         * The fault_pending_wqh.lock prevents the uwq
                         * to disappear from under us.
                         *
                         * Refile this userfault from
                         * fault_pending_wqh to fault_wqh, it's not
                         * pending anymore after we read it.
                         *
                         * Use list_del() by hand (as
                         * userfaultfd_wake_function also uses
                         * list_del_init() by hand) to be sure nobody
                         * changes __remove_wait_queue() to use
                         * list_del_init() in turn breaking the
                         * !list_empty_careful() check in
                         * handle_userfault(). The uwq->wq.task_list
                         * must never be empty at any time during the
                         * refile, or the waitqueue could disappear
                         * from under us.
                         */
                        list_del(&uwq->wq.task_list);
                        __add_wait_queue(&ctx->fault_wqh, &uwq->wq);

                        write_seqcount_end(&ctx->refile_seq);

                        /* careful to always initialize msg if ret == 0 */
                        *msg = uwq->msg;
                        spin_unlock(&ctx->fault_pending_wqh.lock);
                        ret = 0;
                        break;
                }
                spin_unlock(&ctx->fault_pending_wqh.lock);
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                if (no_wait) {
                        ret = -EAGAIN;
                        break;
                }
                spin_unlock(&ctx->fd_wqh.lock);
                schedule();
                spin_lock(&ctx->fd_wqh.lock);
        }
        __remove_wait_queue(&ctx->fd_wqh, &wait);
        __set_current_state(TASK_RUNNING);
        spin_unlock(&ctx->fd_wqh.lock);

        return ret;
}

static ssize_t userfaultfd_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct userfaultfd_ctx *ctx = file->private_data;
        ssize_t _ret, ret = 0;
        struct uffd_msg msg;
        int no_wait = file->f_flags & O_NONBLOCK;

        if (ctx->state == UFFD_STATE_WAIT_API)
                return -EINVAL;

        for (;;) {
                if (count < sizeof(msg))
                        return ret ? ret : -EINVAL;
                _ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
                if (_ret < 0)
                        return ret ? ret : _ret;
                if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
                        return ret ? ret : -EFAULT;
                ret += sizeof(msg);
                buf += sizeof(msg);
                count -= sizeof(msg);
                /*
                 * Allow to read more than one fault at time but only
                 * block if waiting for the very first one.
                 */
                no_wait = O_NONBLOCK;
        }
}

static void __wake_userfault(struct userfaultfd_ctx *ctx,
                             struct userfaultfd_wake_range *range)
{
        unsigned long start, end;

        start = range->start;
        end = range->start + range->len;

        spin_lock(&ctx->fault_pending_wqh.lock);
        /* wake all in the range and autoremove */
        if (waitqueue_active(&ctx->fault_pending_wqh))
                __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
                                     range);
        if (waitqueue_active(&ctx->fault_wqh))
                __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
        spin_unlock(&ctx->fault_pending_wqh.lock);
}

static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
                                           struct userfaultfd_wake_range *range)
{
        unsigned seq;
        bool need_wakeup;

        /*
         * To be sure waitqueue_active() is not reordered by the CPU
         * before the pagetable update, use an explicit SMP memory
         * barrier here. PT lock release or up_read(mmap_sem) still
         * have release semantics that can allow the
         * waitqueue_active() to be reordered before the pte update.
         */
        smp_mb();

        /*
         * Use waitqueue_active because it's very frequent to
         * change the address space atomically even if there are no
         * userfaults yet. So we take the spinlock only when we're
         * sure we've userfaults to wake.
         */
        do {
                seq = read_seqcount_begin(&ctx->refile_seq);
                need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
                        waitqueue_active(&ctx->fault_wqh);
                cond_resched();
        } while (read_seqcount_retry(&ctx->refile_seq, seq));
        if (need_wakeup)
                __wake_userfault(ctx, range);
}

static __always_inline int validate_range(struct mm_struct *mm,
                                          __u64 start, __u64 len)
{
        __u64 task_size = mm->task_size;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (len & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return -EINVAL;
        if (start < mmap_min_addr)
                return -EINVAL;
        if (start >= task_size)
                return -EINVAL;
        if (len > task_size - start)
                return -EINVAL;
        return 0;
}

static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                                unsigned long arg)
{
        struct mm_struct *mm = ctx->mm;
        struct vm_area_struct *vma, *prev, *cur;
        int ret;
        struct uffdio_register uffdio_register;
        struct uffdio_register __user *user_uffdio_register;
        unsigned long vm_flags, new_flags;
        bool found;
        unsigned long start, end, vma_end;

        user_uffdio_register = (struct uffdio_register __user *) arg;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_register, user_uffdio_register,
                           sizeof(uffdio_register)-sizeof(__u64)))
                goto out;

        ret = -EINVAL;
        if (!uffdio_register.mode)
                goto out;
        if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
                                     UFFDIO_REGISTER_MODE_WP))
                goto out;
        vm_flags = 0;
        if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
                vm_flags |= VM_UFFD_MISSING;
        if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
                vm_flags |= VM_UFFD_WP;
                /*
                 * FIXME: remove the below error constraint once
                 * write protect tracking is implemented.
                 */
                ret = -EINVAL;
                goto out;
        }

        ret = validate_range(mm, uffdio_register.range.start,
                             uffdio_register.range.len);
        if (ret)
                goto out;

        start = uffdio_register.range.start;
        end = start + uffdio_register.range.len;

        down_write(&mm->mmap_sem);
        vma = find_vma_prev(mm, start, &prev);

        ret = -ENOMEM;
        if (!vma)
                goto out_unlock;

        /* check that there's at least one vma in the range */
        ret = -EINVAL;
        if (vma->vm_start >= end)
                goto out_unlock;

        /*
         * Search for not compatible vmas.
         *
         * FIXME: this shall be relaxed later so that it doesn't fail
         * on tmpfs backed vmas (in addition to the current allowance
         * on anonymous vmas).
         */
        found = false;
        for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
                cond_resched();

                BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
                       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));

                /* check not compatible vmas */
                ret = -EINVAL;
                if (cur->vm_ops)
                        goto out_unlock;

                /*
                 * Check that this vma isn't already owned by a
                 * different userfaultfd. We can't allow more than one
                 * userfaultfd to own a single vma simultaneously or we
                 * wouldn't know which one to deliver the userfaults to.
                 */
                ret = -EBUSY;
                if (cur->vm_userfaultfd_ctx.ctx &&
                    cur->vm_userfaultfd_ctx.ctx != ctx)
                        goto out_unlock;

                found = true;
        }
        BUG_ON(!found);

        if (vma->vm_start < start)
                prev = vma;

        ret = 0;
        do {
                cond_resched();

                BUG_ON(vma->vm_ops);
                BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
                       vma->vm_userfaultfd_ctx.ctx != ctx);

                /*
                 * Nothing to do: this vma is already registered into this
                 * userfaultfd and with the right tracking mode too.
                 */
                if (vma->vm_userfaultfd_ctx.ctx == ctx &&
                    (vma->vm_flags & vm_flags) == vm_flags)
                        goto skip;

                if (vma->vm_start > start)
                        start = vma->vm_start;
                vma_end = min(end, vma->vm_end);

                new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
                prev = vma_merge(mm, prev, start, vma_end, new_flags,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
                                 ((struct vm_userfaultfd_ctx){ ctx }));
                if (prev) {
                        vma = prev;
                        goto next;
                }
                if (vma->vm_start < start) {
                        ret = split_vma(mm, vma, start, 1);
                        if (ret)
                                break;
                }
                if (vma->vm_end > end) {
                        ret = split_vma(mm, vma, end, 0);
                        if (ret)
                                break;
                }
        next:
                /*
                 * In the vma_merge() successful mprotect-like case 8:
                 * the next vma was merged into the current one and
                 * the current one still needs its flags and ctx set.
                 */
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx.ctx = ctx;

        skip:
                prev = vma;
                start = vma->vm_end;
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
out_unlock:
        up_write(&mm->mmap_sem);
        if (!ret) {
                /*
                 * Now that we scanned all vmas we can already tell
                 * userland which ioctls methods are guaranteed to
                 * succeed on this range.
                 */
                if (put_user(UFFD_API_RANGE_IOCTLS,
                             &user_uffdio_register->ioctls))
                        ret = -EFAULT;
        }
out:
        return ret;
}

static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                                  unsigned long arg)
{
        struct mm_struct *mm = ctx->mm;
        struct vm_area_struct *vma, *prev, *cur;
        int ret;
        struct uffdio_range uffdio_unregister;
        unsigned long new_flags;
        bool found;
        unsigned long start, end, vma_end;
        const void __user *buf = (void __user *)arg;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
                goto out;

        ret = validate_range(mm, uffdio_unregister.start,
                             uffdio_unregister.len);
        if (ret)
                goto out;

        start = uffdio_unregister.start;
        end = start + uffdio_unregister.len;

        down_write(&mm->mmap_sem);
        vma = find_vma_prev(mm, start, &prev);

        ret = -ENOMEM;
        if (!vma)
                goto out_unlock;

        /* check that there's at least one vma in the range */
        ret = -EINVAL;
        if (vma->vm_start >= end)
                goto out_unlock;

        /*
         * Search for not compatible vmas.
         *
         * FIXME: this shall be relaxed later so that it doesn't fail
         * on tmpfs backed vmas (in addition to the current allowance
         * on anonymous vmas).
         */
        found = false;
        ret = -EINVAL;
        for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
                cond_resched();

                BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
                       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));

                /*
                 * Check not compatible vmas, not strictly required
                 * here as not compatible vmas cannot have an
                 * userfaultfd_ctx registered on them, but this
                 * provides for more strict behavior to notice
                 * unregistration errors.
                 */
                if (cur->vm_ops)
                        goto out_unlock;

                found = true;
        }
        BUG_ON(!found);

        if (vma->vm_start < start)
                prev = vma;

        ret = 0;
        do {
                cond_resched();

                BUG_ON(vma->vm_ops);

                /*
                 * Nothing to do: this vma is not registered into this
                 * userfaultfd, so there is nothing to unregister on it.
                 */
                if (!vma->vm_userfaultfd_ctx.ctx)
                        goto skip;

                if (vma->vm_start > start)
                        start = vma->vm_start;
                vma_end = min(end, vma->vm_end);

                new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
                prev = vma_merge(mm, prev, start, vma_end, new_flags,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
                                 NULL_VM_UFFD_CTX);
                if (prev) {
                        vma = prev;
                        goto next;
                }
                if (vma->vm_start < start) {
                        ret = split_vma(mm, vma, start, 1);
                        if (ret)
                                break;
                }
                if (vma->vm_end > end) {
                        ret = split_vma(mm, vma, end, 0);
                        if (ret)
                                break;
                }
        next:
                /*
                 * In the vma_merge() successful mprotect-like case 8:
                 * the next vma was merged into the current one and
                 * the current one still needs its flags and ctx cleared.
                 */
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;

        skip:
                prev = vma;
                start = vma->vm_end;
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
out_unlock:
        up_write(&mm->mmap_sem);
out:
        return ret;
}
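/*
 * userfaultfd_wake may be used in combination with the
 * UFFDIO_*_MODE_DONTWAKE to wake up userfaults in batches.
 */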
static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
                            unsigned long arg)
{
        int ret;
        struct uffdio_range uffdio_wake;
        struct userfaultfd_wake_range range;
        const void __user *buf = (void __user *)arg;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
                goto out;

        ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
        if (ret)
                goto out;

        range.start = uffdio_wake.start;
        range.len = uffdio_wake.len;

        /*
         * len == 0 means wake all and we don't want to wake all here,
         * so check it again to be sure.
         */
        VM_BUG_ON(!range.len);

        wake_userfault(ctx, &range);
        ret = 0;

out:
        return ret;
}

static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
                            unsigned long arg)
{
        __s64 ret;
        struct uffdio_copy uffdio_copy;
        struct uffdio_copy __user *user_uffdio_copy;
        struct userfaultfd_wake_range range;

        user_uffdio_copy = (struct uffdio_copy __user *) arg;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_copy, user_uffdio_copy,
                           /* don't copy "copy" last field */
                           sizeof(uffdio_copy)-sizeof(__s64)))
                goto out;

        ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
        if (ret)
                goto out;
        /*
         * double check for wraparound of the src range just in case,
         * validate_range() only covered the dst range.
         */
        ret = -EINVAL;
        if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
                goto out;
        if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
                goto out;

        ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
                           uffdio_copy.len);
        if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
                return -EFAULT;
        if (ret < 0)
                goto out;
        BUG_ON(!ret);
        /* len == 0 would wake all */
        range.len = ret;
        if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
                range.start = uffdio_copy.dst;
                wake_userfault(ctx, &range);
        }
        ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
out:
        return ret;
}

static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
                                unsigned long arg)
{
        __s64 ret;
        struct uffdio_zeropage uffdio_zeropage;
        struct uffdio_zeropage __user *user_uffdio_zeropage;
        struct userfaultfd_wake_range range;

        user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
                           /* don't copy "zeropage" last field */
                           sizeof(uffdio_zeropage)-sizeof(__s64)))
                goto out;

        ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
                             uffdio_zeropage.range.len);
        if (ret)
                goto out;
        ret = -EINVAL;
        if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
                goto out;

        ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
                             uffdio_zeropage.range.len);
        if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
                return -EFAULT;
        if (ret < 0)
                goto out;
        /* len == 0 would wake all */
        BUG_ON(!ret);
        range.len = ret;
        if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
                range.start = uffdio_zeropage.range.start;
                wake_userfault(ctx, &range);
        }
        ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
out:
        return ret;
}
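/*
 * userland asks for a certain API version and we return which bits and
 * ioctl commands are implemented in this kernel for such API version or
 * -EINVAL if unknown.
 */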
static int userfaultfd_api(struct userfaultfd_ctx *ctx,
                           unsigned long arg)
{
        struct uffdio_api uffdio_api;
        void __user *buf = (void __user *)arg;
        int ret;

        ret = -EINVAL;
        if (ctx->state != UFFD_STATE_WAIT_API)
                goto out;
        ret = -EFAULT;
        if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
                goto out;
        if (uffdio_api.api != UFFD_API || uffdio_api.features) {
                memset(&uffdio_api, 0, sizeof(uffdio_api));
                if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
                        goto out;
                ret = -EINVAL;
                goto out;
        }
        uffdio_api.features = UFFD_API_FEATURES;
        uffdio_api.ioctls = UFFD_API_IOCTLS;
        ret = -EFAULT;
        if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
                goto out;
        ctx->state = UFFD_STATE_RUNNING;
        ret = 0;
out:
        return ret;
}

static long userfaultfd_ioctl(struct file *file, unsigned cmd,
                              unsigned long arg)
{
        int ret = -EINVAL;
        struct userfaultfd_ctx *ctx = file->private_data;

        if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
                return -EINVAL;

        switch (cmd) {
        case UFFDIO_API:
                ret = userfaultfd_api(ctx, arg);
                break;
        case UFFDIO_REGISTER:
                ret = userfaultfd_register(ctx, arg);
                break;
        case UFFDIO_UNREGISTER:
                ret = userfaultfd_unregister(ctx, arg);
                break;
        case UFFDIO_WAKE:
                ret = userfaultfd_wake(ctx, arg);
                break;
        case UFFDIO_COPY:
                ret = userfaultfd_copy(ctx, arg);
                break;
        case UFFDIO_ZEROPAGE:
                ret = userfaultfd_zeropage(ctx, arg);
                break;
        }
        return ret;
}

#ifdef CONFIG_PROC_FS
static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct userfaultfd_ctx *ctx = f->private_data;
        wait_queue_t *wq;
        struct userfaultfd_wait_queue *uwq;
        unsigned long pending = 0, total = 0;

        spin_lock(&ctx->fault_pending_wqh.lock);
        list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) {
                uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
                pending++;
                total++;
        }
        list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) {
                uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
                total++;
        }
        spin_unlock(&ctx->fault_pending_wqh.lock);

        /*
         * If more protocols will be added, there will be all shown
         * separated by a space. Like this:
         *      protocols: aa:... bb:...
         */
        seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
                   pending, total, UFFD_API, UFFD_API_FEATURES,
                   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif

static const struct file_operations userfaultfd_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = userfaultfd_show_fdinfo,
#endif
        .release        = userfaultfd_release,
        .poll           = userfaultfd_poll,
        .read           = userfaultfd_read,
        .unlocked_ioctl = userfaultfd_ioctl,
        .compat_ioctl   = userfaultfd_ioctl,
        .llseek         = noop_llseek,
};

static void init_once_userfaultfd_ctx(void *mem)
{
        struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;

        init_waitqueue_head(&ctx->fault_pending_wqh);
        init_waitqueue_head(&ctx->fault_wqh);
        init_waitqueue_head(&ctx->fd_wqh);
        seqcount_init(&ctx->refile_seq);
}
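/**
 * userfaultfd_file_create - Creates an userfaultfd file pointer.
 * @flags: Flags for the userfaultfd file.
 *
 * This function creates an userfaultfd file pointer, w/out installing
 * it into the fd table. This is useful when the userfaultfd file is
 * used during the initialization of data structures that require
 * extra setup after the userfaultfd creation. So the userfaultfd
 * creation is split into the file pointer creation phase, and the
 * file descriptor installation phase. In this way races with
 * userspace closing the newly installed file descriptor can be
 * avoided. Returns an userfaultfd file pointer, or a proper error
 * pointer.
 */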
static struct file *userfaultfd_file_create(int flags)
{
        struct file *file;
        struct userfaultfd_ctx *ctx;

        BUG_ON(!current->mm);

        /* Check the UFFD_* constants for consistency.  */
        BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);

        file = ERR_PTR(-EINVAL);
        if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
                goto out;

        file = ERR_PTR(-ENOMEM);
        ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
        if (!ctx)
                goto out;

        atomic_set(&ctx->refcount, 1);
        ctx->flags = flags;
        ctx->state = UFFD_STATE_WAIT_API;
        ctx->released = false;
        ctx->mm = current->mm;
        /* prevent the mm struct to be freed */
        atomic_inc(&ctx->mm->mm_users);

        file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
                                  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
        if (IS_ERR(file)) {
                mmput(ctx->mm);
                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
        }
out:
        return file;
}

SYSCALL_DEFINE1(userfaultfd, int, flags)
{
        int fd, error;
        struct file *file;

        error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
        if (error < 0)
                return error;
        fd = error;

        file = userfaultfd_file_create(flags);
        if (IS_ERR(file)) {
                error = PTR_ERR(file);
                goto err_put_unused_fd;
        }
        fd_install(fd, file);

        return fd;

err_put_unused_fd:
        put_unused_fd(fd);

        return error;
}

static int __init userfaultfd_init(void)
{
        userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
                                                sizeof(struct userfaultfd_ctx),
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                                init_once_userfaultfd_ctx);
        return 0;
}
__initcall(userfaultfd_init);
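
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * minimal sequence a monitor goes through against the fd implemented
 * above: the UFFDIO_API handshake, UFFDIO_REGISTER of an anonymous
 * mapping, read() of one uffd_msg and resolution via UFFDIO_COPY.
 * It assumes <linux/userfaultfd.h> and a libc exposing syscall(2);
 * __NR_userfaultfd comes from the installed kernel headers. Error
 * handling is reduced to early returns to keep the sketch short.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <linux/userfaultfd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *		struct uffdio_api api = { .api = UFFD_API };
 *		struct uffdio_register reg = { .mode =
 *			UFFDIO_REGISTER_MODE_MISSING };
 *		struct uffdio_copy copy = { .mode = 0 };
 *		struct uffd_msg msg;
 *		char *area, *src;
 *
 *		if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
 *			return 1;
 *		area = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		reg.range.start = (unsigned long) area;
 *		reg.range.len = page;
 *		if (ioctl(uffd, UFFDIO_REGISTER, &reg))
 *			return 1;
 *
 *		(another thread is assumed to touch "area" here and
 *		block in handle_userfault() until the UFFDIO_COPY)
 *
 *		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg) ||
 *		    msg.event != UFFD_EVENT_PAGEFAULT)
 *			return 1;
 *		src = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		memset(src, 0xaa, page);
 *		copy.dst = msg.arg.pagefault.address &
 *			   ~(unsigned long)(page - 1);
 *		copy.src = (unsigned long) src;
 *		copy.len = page;
 *		if (ioctl(uffd, UFFDIO_COPY, &copy))
 *			return 1;
 *		return 0;
 *	}
 */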