2#include <linux/kernel.h>
3#include <linux/errno.h>
4#include <linux/err.h>
5#include <linux/spinlock.h>
6
7#include <linux/mm.h>
8#include <linux/memremap.h>
9#include <linux/pagemap.h>
10#include <linux/rmap.h>
11#include <linux/swap.h>
12#include <linux/swapops.h>
13
14#include <linux/sched/signal.h>
15#include <linux/rwsem.h>
16#include <linux/hugetlb.h>
17#include <linux/migrate.h>
18#include <linux/mm_inline.h>
19#include <linux/sched/mm.h>
20
21#include <asm/mmu_context.h>
22#include <asm/pgtable.h>
23#include <asm/tlbflush.h>
24
25#include "internal.h"
26
27struct follow_page_context {
28 struct dev_pagemap *pgmap;
29 unsigned int page_mask;
};

/**
 * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to release; entries may point at tail pages
 * @npages: number of pages in the @pages array
 * @make_dirty: whether to mark the pages dirty before releasing them
 *
 * For each page in @pages, mark the head page dirty if @make_dirty is set and
 * the page is not already dirty, then drop the reference that was taken by a
 * get_user_pages*() call, using put_user_page().
 *
 * set_page_dirty_lock() is used rather than set_page_dirty() because this
 * code does not otherwise hold the page lock.
 */
54void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
55 bool make_dirty)
56{
57 unsigned long index;
65 if (!make_dirty) {
66 put_user_pages(pages, npages);
67 return;
68 }
69
70 for (index = 0; index < npages; index++) {
		struct page *page = compound_head(pages[index]);

		/*
		 * The !PageDirty() check can race with writeback clearing the
		 * dirty bit, but the race is benign: at worst the page is
		 * marked dirty one extra time and written back again.
		 */
92 if (!PageDirty(page))
93 set_page_dirty_lock(page);
94 put_user_page(page);
95 }
96}
EXPORT_SYMBOL(put_user_pages_dirty_lock);

/**
 * put_user_pages() - release an array of gup-pinned pages
 * @pages:  array of pages to release
 * @npages: number of pages in the @pages array
 *
 * For each page in @pages, drop the reference that was taken by a
 * get_user_pages*() call, using put_user_page().
 */
108void put_user_pages(struct page **pages, unsigned long npages)
109{
110 unsigned long index;
111
117 for (index = 0; index < npages; index++)
118 put_user_page(pages[index]);
119}
120EXPORT_SYMBOL(put_user_pages);
121
122#ifdef CONFIG_MMU
123static struct page *no_page_table(struct vm_area_struct *vma,
124 unsigned int flags)
125{
	/*
	 * During core dumps of huge, untouched anonymous areas we do not want
	 * to allocate pages or page tables just to report a hole. Returning
	 * an error here (rather than NULL) makes __get_user_pages() skip
	 * faulting, and get_dump_page() then leaves a zero-filled hole in the
	 * dump. This is only done when the hole would have been zero-filled
	 * anyway, i.e. when the vma has no fault handler.
	 */
134 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
135 return ERR_PTR(-EFAULT);
136 return NULL;
137}
138
139static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
140 pte_t *pte, unsigned int flags)
141{
	/* No struct page to take a reference on */
143 if (flags & FOLL_GET)
144 return -EFAULT;
145
146 if (flags & FOLL_TOUCH) {
147 pte_t entry = *pte;
148
149 if (flags & FOLL_WRITE)
150 entry = pte_mkdirty(entry);
151 entry = pte_mkyoung(entry);
152
153 if (!pte_same(*pte, entry)) {
154 set_pte_at(vma->vm_mm, address, pte, entry);
155 update_mmu_cache(vma, address, pte);
156 }
157 }
	/* Proper page table entry exists, but no corresponding struct page */
160 return -EEXIST;
161}
162
/*
 * FOLL_FORCE can write to even unwritable ptes, but only after a COW cycle
 * has happened and the pte is dirty (see the FOLL_COW handling in
 * faultin_page()).
 */
167static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
168{
169 return pte_write(pte) ||
170 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
171}
172
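/*
 * Look up the single page mapped by the pte under @pmd at @address, applying
 * the FOLL_* flags: wait for migration entries (FOLL_MIGRATION), take a
 * reference (FOLL_GET), mark the page accessed/dirty (FOLL_TOUCH), split
 * THPs (FOLL_SPLIT) and mlock the page (FOLL_MLOCK). Returns the page, NULL
 * if the caller should fault the page in, or an ERR_PTR().
 */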
173static struct page *follow_page_pte(struct vm_area_struct *vma,
174 unsigned long address, pmd_t *pmd, unsigned int flags,
175 struct dev_pagemap **pgmap)
176{
177 struct mm_struct *mm = vma->vm_mm;
178 struct page *page;
179 spinlock_t *ptl;
180 pte_t *ptep, pte;
181
182retry:
183 if (unlikely(pmd_bad(*pmd)))
184 return no_page_table(vma, flags);
185
186 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
187 pte = *ptep;
188 if (!pte_present(pte)) {
189 swp_entry_t entry;
190
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page even
		 * while it is being migrated, so for that case we need
		 * migration_entry_wait().
		 */
195 if (likely(!(flags & FOLL_MIGRATION)))
196 goto no_page;
197 if (pte_none(pte))
198 goto no_page;
199 entry = pte_to_swp_entry(pte);
200 if (!is_migration_entry(entry))
201 goto no_page;
202 pte_unmap_unlock(ptep, ptl);
203 migration_entry_wait(mm, pmd, address);
204 goto retry;
205 }
206 if ((flags & FOLL_NUMA) && pte_protnone(pte))
207 goto no_page;
208 if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
209 pte_unmap_unlock(ptep, ptl);
210 return NULL;
211 }
212
213 page = vm_normal_page(vma, address, pte);
214 if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
219 *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
220 if (*pgmap)
221 page = pte_page(pte);
222 else
223 goto no_page;
224 } else if (unlikely(!page)) {
225 if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
227 page = ERR_PTR(-EFAULT);
228 goto out;
229 }
230
231 if (is_zero_pfn(pte_pfn(pte))) {
232 page = pte_page(pte);
233 } else {
234 int ret;
235
236 ret = follow_pfn_pte(vma, address, ptep, flags);
237 page = ERR_PTR(ret);
238 goto out;
239 }
240 }
241
242 if (flags & FOLL_SPLIT && PageTransCompound(page)) {
243 int ret;
244 get_page(page);
245 pte_unmap_unlock(ptep, ptl);
246 lock_page(page);
247 ret = split_huge_page(page);
248 unlock_page(page);
249 put_page(page);
250 if (ret)
251 return ERR_PTR(ret);
252 goto retry;
253 }
254
255 if (flags & FOLL_GET) {
256 if (unlikely(!try_get_page(page))) {
257 page = ERR_PTR(-ENOMEM);
258 goto out;
259 }
260 }
261 if (flags & FOLL_TOUCH) {
262 if ((flags & FOLL_WRITE) &&
263 !pte_dirty(pte) && !PageDirty(page))
264 set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to
		 * just mark the struct page accessed.
		 */
270 mark_page_accessed(page);
271 }
272 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
274 if (PageTransCompound(page))
275 goto out;
276
277
278
279
280
281
282
283
284
285
286 if (page->mapping && trylock_page(page)) {
287 lru_add_drain();
288
289
290
291
292
293
294 mlock_vma_page(page);
295 unlock_page(page);
296 }
297 }
298out:
299 pte_unmap_unlock(ptep, ptl);
300 return page;
301no_page:
302 pte_unmap_unlock(ptep, ptl);
303 if (!pte_none(pte))
304 return NULL;
305 return no_page_table(vma, flags);
306}
307
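/*
 * Walk one PMD entry for follow_page_mask(): handles huge, devmap and
 * migration entries at the PMD level, optionally splitting THPs, and
 * otherwise descends to follow_page_pte().
 */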
308static struct page *follow_pmd_mask(struct vm_area_struct *vma,
309 unsigned long address, pud_t *pudp,
310 unsigned int flags,
311 struct follow_page_context *ctx)
312{
313 pmd_t *pmd, pmdval;
314 spinlock_t *ptl;
315 struct page *page;
316 struct mm_struct *mm = vma->vm_mm;
317
318 pmd = pmd_offset(pudp, address);
319
320
321
322
323 pmdval = READ_ONCE(*pmd);
324 if (pmd_none(pmdval))
325 return no_page_table(vma, flags);
326 if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
327 page = follow_huge_pmd(mm, address, pmd, flags);
328 if (page)
329 return page;
330 return no_page_table(vma, flags);
331 }
332 if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
333 page = follow_huge_pd(vma, address,
334 __hugepd(pmd_val(pmdval)), flags,
335 PMD_SHIFT);
336 if (page)
337 return page;
338 return no_page_table(vma, flags);
339 }
340retry:
341 if (!pmd_present(pmdval)) {
342 if (likely(!(flags & FOLL_MIGRATION)))
343 return no_page_table(vma, flags);
344 VM_BUG_ON(thp_migration_supported() &&
345 !is_pmd_migration_entry(pmdval));
346 if (is_pmd_migration_entry(pmdval))
347 pmd_migration_entry_wait(mm, pmd);
348 pmdval = READ_ONCE(*pmd);
		/*
		 * Another thread may have zapped the pmd in the meantime
		 * (e.g. via MADV_DONTNEED), since mmap_sem is only held for
		 * read here.
		 */
353 if (pmd_none(pmdval))
354 return no_page_table(vma, flags);
355 goto retry;
356 }
357 if (pmd_devmap(pmdval)) {
358 ptl = pmd_lock(mm, pmd);
359 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
360 spin_unlock(ptl);
361 if (page)
362 return page;
363 }
364 if (likely(!pmd_trans_huge(pmdval)))
365 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
366
367 if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
368 return no_page_table(vma, flags);
369
370retry_locked:
371 ptl = pmd_lock(mm, pmd);
372 if (unlikely(pmd_none(*pmd))) {
373 spin_unlock(ptl);
374 return no_page_table(vma, flags);
375 }
376 if (unlikely(!pmd_present(*pmd))) {
377 spin_unlock(ptl);
378 if (likely(!(flags & FOLL_MIGRATION)))
379 return no_page_table(vma, flags);
380 pmd_migration_entry_wait(mm, pmd);
381 goto retry_locked;
382 }
383 if (unlikely(!pmd_trans_huge(*pmd))) {
384 spin_unlock(ptl);
385 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
386 }
387 if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
388 int ret;
389 page = pmd_page(*pmd);
390 if (is_huge_zero_page(page)) {
391 spin_unlock(ptl);
392 ret = 0;
393 split_huge_pmd(vma, pmd, address);
394 if (pmd_trans_unstable(pmd))
395 ret = -EBUSY;
396 } else if (flags & FOLL_SPLIT) {
397 if (unlikely(!try_get_page(page))) {
398 spin_unlock(ptl);
399 return ERR_PTR(-ENOMEM);
400 }
401 spin_unlock(ptl);
402 lock_page(page);
403 ret = split_huge_page(page);
404 unlock_page(page);
405 put_page(page);
406 if (pmd_none(*pmd))
407 return no_page_table(vma, flags);
408 } else {
409 spin_unlock(ptl);
410 split_huge_pmd(vma, pmd, address);
411 ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
412 }
413
414 return ret ? ERR_PTR(ret) :
415 follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
416 }
417 page = follow_trans_huge_pmd(vma, address, pmd, flags);
418 spin_unlock(ptl);
419 ctx->page_mask = HPAGE_PMD_NR - 1;
420 return page;
421}
422
423static struct page *follow_pud_mask(struct vm_area_struct *vma,
424 unsigned long address, p4d_t *p4dp,
425 unsigned int flags,
426 struct follow_page_context *ctx)
427{
428 pud_t *pud;
429 spinlock_t *ptl;
430 struct page *page;
431 struct mm_struct *mm = vma->vm_mm;
432
433 pud = pud_offset(p4dp, address);
434 if (pud_none(*pud))
435 return no_page_table(vma, flags);
436 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
437 page = follow_huge_pud(mm, address, pud, flags);
438 if (page)
439 return page;
440 return no_page_table(vma, flags);
441 }
442 if (is_hugepd(__hugepd(pud_val(*pud)))) {
443 page = follow_huge_pd(vma, address,
444 __hugepd(pud_val(*pud)), flags,
445 PUD_SHIFT);
446 if (page)
447 return page;
448 return no_page_table(vma, flags);
449 }
450 if (pud_devmap(*pud)) {
451 ptl = pud_lock(mm, pud);
452 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
453 spin_unlock(ptl);
454 if (page)
455 return page;
456 }
457 if (unlikely(pud_bad(*pud)))
458 return no_page_table(vma, flags);
459
460 return follow_pmd_mask(vma, address, pud, flags, ctx);
461}
462
463static struct page *follow_p4d_mask(struct vm_area_struct *vma,
464 unsigned long address, pgd_t *pgdp,
465 unsigned int flags,
466 struct follow_page_context *ctx)
467{
468 p4d_t *p4d;
469 struct page *page;
470
471 p4d = p4d_offset(pgdp, address);
472 if (p4d_none(*p4d))
473 return no_page_table(vma, flags);
474 BUILD_BUG_ON(p4d_huge(*p4d));
475 if (unlikely(p4d_bad(*p4d)))
476 return no_page_table(vma, flags);
477
478 if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
479 page = follow_huge_pd(vma, address,
480 __hugepd(p4d_val(*p4d)), flags,
481 P4D_SHIFT);
482 if (page)
483 return page;
484 return no_page_table(vma, flags);
485 }
486 return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: carries the dev_pagemap reference taken for ZONE_DEVICE pages and
 *       returns the page_mask for huge mappings
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>.
 *
 * On output, ctx->page_mask is set according to the size of the mapping that
 * was found, and ctx->pgmap, if set, must be released by the caller with
 * put_dev_pagemap().
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
508static struct page *follow_page_mask(struct vm_area_struct *vma,
509 unsigned long address, unsigned int flags,
510 struct follow_page_context *ctx)
511{
512 pgd_t *pgd;
513 struct page *page;
514 struct mm_struct *mm = vma->vm_mm;
515
516 ctx->page_mask = 0;
517
518
519 page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
520 if (!IS_ERR(page)) {
521 BUG_ON(flags & FOLL_GET);
522 return page;
523 }
524
525 pgd = pgd_offset(mm, address);
526
527 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
528 return no_page_table(vma, flags);
529
530 if (pgd_huge(*pgd)) {
531 page = follow_huge_pgd(mm, address, pgd, flags);
532 if (page)
533 return page;
534 return no_page_table(vma, flags);
535 }
536 if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
537 page = follow_huge_pd(vma, address,
538 __hugepd(pgd_val(*pgd)), flags,
539 PGDIR_SHIFT);
540 if (page)
541 return page;
542 return no_page_table(vma, flags);
543 }
544
545 return follow_p4d_mask(vma, address, pgd, flags, ctx);
546}
547
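/*
 * follow_page() - look up the page mapped at @address in @vma. A thin
 * wrapper around follow_page_mask() that also drops any ZONE_DEVICE pagemap
 * reference the walk may have taken. Returns the page, NULL, or an
 * ERR_PTR(), as follow_page_mask() does.
 */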
548struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
549 unsigned int foll_flags)
550{
551 struct follow_page_context ctx = { NULL };
552 struct page *page;
553
554 page = follow_page_mask(vma, address, foll_flags, &ctx);
555 if (ctx.pgmap)
556 put_dev_pagemap(ctx.pgmap);
557 return page;
558}
559
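/*
 * Look up a page in the gate area (a kernel-provided mapping, such as the
 * vsyscall page on x86, that is not covered by a normal vma). Only read
 * access is supported; the gate vma and, optionally, the referenced page
 * are returned through @vma and @page.
 */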
560static int get_gate_page(struct mm_struct *mm, unsigned long address,
561 unsigned int gup_flags, struct vm_area_struct **vma,
562 struct page **page)
563{
564 pgd_t *pgd;
565 p4d_t *p4d;
566 pud_t *pud;
567 pmd_t *pmd;
568 pte_t *pte;
569 int ret = -EFAULT;
570
571
572 if (gup_flags & FOLL_WRITE)
573 return -EFAULT;
574 if (address > TASK_SIZE)
575 pgd = pgd_offset_k(address);
576 else
577 pgd = pgd_offset_gate(mm, address);
578 if (pgd_none(*pgd))
579 return -EFAULT;
580 p4d = p4d_offset(pgd, address);
581 if (p4d_none(*p4d))
582 return -EFAULT;
583 pud = pud_offset(p4d, address);
584 if (pud_none(*pud))
585 return -EFAULT;
586 pmd = pmd_offset(pud, address);
587 if (!pmd_present(*pmd))
588 return -EFAULT;
589 VM_BUG_ON(pmd_trans_huge(*pmd));
590 pte = pte_offset_map(pmd, address);
591 if (pte_none(*pte))
592 goto unmap;
593 *vma = get_gate_vma(mm);
594 if (!page)
595 goto out;
596 *page = vm_normal_page(*vma, address, *pte);
597 if (!*page) {
598 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
599 goto unmap;
600 *page = pte_page(*pte);
601 }
602 if (unlikely(!try_get_page(*page))) {
603 ret = -ENOMEM;
604 goto unmap;
605 }
606out:
607 ret = 0;
608unmap:
609 pte_unmap(pte);
610 return ret;
611}
612
613
614
615
616
617
618static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
619 unsigned long address, unsigned int *flags, int *nonblocking)
620{
621 unsigned int fault_flags = 0;
622 vm_fault_t ret;
623
624
625 if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
626 return -ENOENT;
627 if (*flags & FOLL_WRITE)
628 fault_flags |= FAULT_FLAG_WRITE;
629 if (*flags & FOLL_REMOTE)
630 fault_flags |= FAULT_FLAG_REMOTE;
631 if (nonblocking)
632 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
633 if (*flags & FOLL_NOWAIT)
634 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
635 if (*flags & FOLL_TRIED) {
636 VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
637 fault_flags |= FAULT_FLAG_TRIED;
638 }
639
640 ret = handle_mm_fault(vma, address, fault_flags);
641 if (ret & VM_FAULT_ERROR) {
642 int err = vm_fault_to_errno(ret, *flags);
643
644 if (err)
645 return err;
646 BUG();
647 }
648
649 if (tsk) {
650 if (ret & VM_FAULT_MAJOR)
651 tsk->maj_flt++;
652 else
653 tsk->min_flt++;
654 }
655
656 if (ret & VM_FAULT_RETRY) {
657 if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
658 *nonblocking = 0;
659 return -EBUSY;
660 }
661
662
663
664
665
666
667
668
669
670
671 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
672 *flags |= FOLL_COW;
673 return 0;
674}
675
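/*
 * Verify that @gup_flags are compatible with @vma's protections, including
 * the FOLL_FORCE exceptions for copy-on-write mappings. Returns 0 if the
 * access may proceed, -EFAULT otherwise.
 */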
676static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
677{
678 vm_flags_t vm_flags = vma->vm_flags;
679 int write = (gup_flags & FOLL_WRITE);
680 int foreign = (gup_flags & FOLL_REMOTE);
681
682 if (vm_flags & (VM_IO | VM_PFNMAP))
683 return -EFAULT;
684
685 if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
686 return -EFAULT;
687
688 if (write) {
689 if (!(vm_flags & VM_WRITE)) {
690 if (!(gup_flags & FOLL_FORCE))
691 return -EFAULT;
692
693
694
695
696
697
698
699
700
701 if (!is_cow_mapping(vm_flags))
702 return -EFAULT;
703 }
704 } else if (!(vm_flags & VM_READ)) {
705 if (!(gup_flags & FOLL_FORCE))
706 return -EFAULT;
707
708
709
710
711 if (!(vm_flags & VM_MAYREAD))
712 return -EFAULT;
713 }
714
715
716
717
718 if (!arch_vma_access_permitted(vma, write, false, foreign))
719 return -EFAULT;
720 return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task, used for fault accounting, or NULL
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if the caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page,
 *		or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention is allowed
 *
 * Returns the number of pages pinned (which may be less than the number
 * requested), or an errno if no pages were pinned. Pages pinned with
 * FOLL_GET must be released with put_page() when the caller is done with
 * them.
 *
 * Must be called with mmap_sem held for read or write. If @nonblocking is
 * not NULL, the fault handler is allowed to drop mmap_sem while waiting; in
 * that case *@nonblocking is set to 0 and the caller must not assume the
 * lock is still held when -EBUSY is returned.
 */
779static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
780 unsigned long start, unsigned long nr_pages,
781 unsigned int gup_flags, struct page **pages,
782 struct vm_area_struct **vmas, int *nonblocking)
783{
784 long ret = 0, i = 0;
785 struct vm_area_struct *vma = NULL;
786 struct follow_page_context ctx = { NULL };
787
788 if (!nr_pages)
789 return 0;
790
791 start = untagged_addr(start);
792
793 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
794
	/*
	 * Unless FOLL_FORCE is set, treat PROT_NONE ptes as not present
	 * (FOLL_NUMA) so that NUMA hinting faults are taken instead of being
	 * silently bypassed.
	 */
800 if (!(gup_flags & FOLL_FORCE))
801 gup_flags |= FOLL_NUMA;
802
803 do {
804 struct page *page;
805 unsigned int foll_flags = gup_flags;
806 unsigned int page_increm;
807
808
809 if (!vma || start >= vma->vm_end) {
810 vma = find_extend_vma(mm, start);
811 if (!vma && in_gate_area(mm, start)) {
812 ret = get_gate_page(mm, start & PAGE_MASK,
813 gup_flags, &vma,
814 pages ? &pages[i] : NULL);
815 if (ret)
816 goto out;
817 ctx.page_mask = 0;
818 goto next_page;
819 }
820
821 if (!vma || check_vma_flags(vma, gup_flags)) {
822 ret = -EFAULT;
823 goto out;
824 }
825 if (is_vm_hugetlb_page(vma)) {
826 i = follow_hugetlb_page(mm, vma, pages, vmas,
827 &start, &nr_pages, i,
828 gup_flags, nonblocking);
829 continue;
830 }
831 }
832retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
837 if (fatal_signal_pending(current)) {
838 ret = -ERESTARTSYS;
839 goto out;
840 }
841 cond_resched();
842
843 page = follow_page_mask(vma, start, foll_flags, &ctx);
844 if (!page) {
845 ret = faultin_page(tsk, vma, start, &foll_flags,
846 nonblocking);
847 switch (ret) {
848 case 0:
849 goto retry;
850 case -EBUSY:
851 ret = 0;
				/* fall through */
853 case -EFAULT:
854 case -ENOMEM:
855 case -EHWPOISON:
856 goto out;
857 case -ENOENT:
858 goto next_page;
859 }
860 BUG();
861 } else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no
			 * corresponding struct page.
			 */
866 goto next_page;
867 } else if (IS_ERR(page)) {
868 ret = PTR_ERR(page);
869 goto out;
870 }
871 if (pages) {
872 pages[i] = page;
873 flush_anon_page(vma, page, start);
874 flush_dcache_page(page);
875 ctx.page_mask = 0;
876 }
877next_page:
878 if (vmas) {
879 vmas[i] = vma;
880 ctx.page_mask = 0;
881 }
882 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
883 if (page_increm > nr_pages)
884 page_increm = nr_pages;
885 i += page_increm;
886 start += page_increm * PAGE_SIZE;
887 nr_pages -= page_increm;
888 } while (nr_pages);
889out:
890 if (ctx.pgmap)
891 put_dev_pagemap(ctx.pgmap);
892 return i ? i : ret;
893}
894
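/*
 * Lightweight check used by fixup_user_fault(): does the vma allow the
 * access described by @fault_flags (read or write, possibly on behalf of
 * another mm via FAULT_FLAG_REMOTE, and subject to any architecture-specific
 * protection such as protection keys)?
 */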
895static bool vma_permits_fault(struct vm_area_struct *vma,
896 unsigned int fault_flags)
897{
898 bool write = !!(fault_flags & FAULT_FLAG_WRITE);
899 bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
900 vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
901
902 if (!(vm_flags & vma->vm_flags))
903 return false;
904
905
906
907
908
909
910
911
912 if (!arch_vma_access_permitted(vma, write, false, foreign))
913 return false;
914
915 return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	set to true if mmap_sem was dropped while retrying; may be
 *		NULL if the caller does not allow retries
 *
 * This is meant for the case where user memory was accessed with page
 * faults disabled (returning -EFAULT), and the fault needs to be resolved
 * before trying the access again; the futex code is the typical user.
 *
 * Must be called with mmap_sem held for read; the lock is still held on
 * return, even if it was temporarily dropped and re-taken for a retry (in
 * which case *@unlocked is set to true).
 *
 * Return: 0 on success, or a negative errno if the fault could not be
 * resolved.
 */
948int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
949 unsigned long address, unsigned int fault_flags,
950 bool *unlocked)
951{
952 struct vm_area_struct *vma;
953 vm_fault_t ret, major = 0;
954
955 address = untagged_addr(address);
956
957 if (unlocked)
958 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
959
960retry:
961 vma = find_extend_vma(mm, address);
962 if (!vma || address < vma->vm_start)
963 return -EFAULT;
964
965 if (!vma_permits_fault(vma, fault_flags))
966 return -EFAULT;
967
968 ret = handle_mm_fault(vma, address, fault_flags);
969 major |= ret & VM_FAULT_MAJOR;
970 if (ret & VM_FAULT_ERROR) {
971 int err = vm_fault_to_errno(ret, 0);
972
973 if (err)
974 return err;
975 BUG();
976 }
977
978 if (ret & VM_FAULT_RETRY) {
979 down_read(&mm->mmap_sem);
980 if (!(fault_flags & FAULT_FLAG_TRIED)) {
981 *unlocked = true;
982 fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
983 fault_flags |= FAULT_FLAG_TRIED;
984 goto retry;
985 }
986 }
987
988 if (tsk) {
989 if (major)
990 tsk->maj_flt++;
991 else
992 tsk->min_flt++;
993 }
994 return 0;
995}
996EXPORT_SYMBOL_GPL(fixup_user_fault);
997
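/*
 * Core of the *_locked/_unlocked GUP variants: calls __get_user_pages() and,
 * if the fault handler dropped mmap_sem (VM_FAULT_RETRY), retakes the lock
 * and retries the remaining pages one at a time with FOLL_TRIED. If the lock
 * had to be dropped at any point, it is released again before returning and
 * *@locked is cleared.
 */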
998static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
999 struct mm_struct *mm,
1000 unsigned long start,
1001 unsigned long nr_pages,
1002 struct page **pages,
1003 struct vm_area_struct **vmas,
1004 int *locked,
1005 unsigned int flags)
1006{
1007 long ret, pages_done;
1008 bool lock_dropped;
1009
1010 if (locked) {
1011
1012 BUG_ON(vmas);
1013
1014 BUG_ON(*locked != 1);
1015 }
1016
1017 if (pages)
1018 flags |= FOLL_GET;
1019
1020 pages_done = 0;
1021 lock_dropped = false;
1022 for (;;) {
1023 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
1024 vmas, locked);
1025 if (!locked)
1026
1027 return ret;
1028
1029
1030 if (!*locked) {
1031 BUG_ON(ret < 0);
1032 BUG_ON(ret >= nr_pages);
1033 }
1034
1035 if (ret > 0) {
1036 nr_pages -= ret;
1037 pages_done += ret;
1038 if (!nr_pages)
1039 break;
1040 }
1041 if (*locked) {
1042
1043
1044
1045
1046 if (!pages_done)
1047 pages_done = ret;
1048 break;
1049 }
1050
1051
1052
1053
1054 if (likely(pages))
1055 pages += ret;
1056 start += ret << PAGE_SHIFT;
1057
1058
1059
1060
1061
1062
1063 *locked = 1;
1064 lock_dropped = true;
1065 down_read(&mm->mmap_sem);
1066 ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
1067 pages, NULL, NULL);
1068 if (ret != 1) {
1069 BUG_ON(ret > 1);
1070 if (!pages_done)
1071 pages_done = ret;
1072 break;
1073 }
1074 nr_pages--;
1075 pages_done++;
1076 if (!nr_pages)
1077 break;
1078 if (likely(pages))
1079 pages++;
1080 start += PAGE_SIZE;
1081 }
1082 if (lock_dropped && *locked) {
1083
1084
1085
1086
1087 up_read(&mm->mmap_sem);
1088 *locked = 0;
1089 }
1090 return pages_done;
}

/**
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if the caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page,
 *		or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether mmap_sem is held and
 *		whether VM_FAULT_RETRY functionality can be used. The lock
 *		must initially be held.
 *
 * Returns the number of pages pinned (which may be less than the number
 * requested), or an errno if no pages were pinned. Pinned pages hold an
 * elevated reference count and must be released with put_page() when the
 * caller is done with them.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * This is the "remote" form of get_user_pages(): it can operate on another
 * task's address space, which is why the task and mm are passed explicitly
 * and FOLL_REMOTE is implied.
 */
1149long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1150 unsigned long start, unsigned long nr_pages,
1151 unsigned int gup_flags, struct page **pages,
1152 struct vm_area_struct **vmas, int *locked)
1153{
	/*
	 * FOLL_LONGTERM is not supported here: the long-term pin checks need
	 * the vmas array and are incompatible with the FAULT_FLAG_ALLOW_RETRY
	 * handling used by the locked variants, so reject it explicitly.
	 */
1160 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1161 return -EINVAL;
1162
1163 return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
1164 locked,
1165 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1166}
EXPORT_SYMBOL(get_user_pages_remote);

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: passed through to __get_user_pages()
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Returns the number of pages faulted in, or a negative error code.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will be
 * unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released. If it is released, *@nonblocking will be set to 0.
 */
1188long populate_vma_page_range(struct vm_area_struct *vma,
1189 unsigned long start, unsigned long end, int *nonblocking)
1190{
1191 struct mm_struct *mm = vma->vm_mm;
1192 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1193 int gup_flags;
1194
1195 VM_BUG_ON(start & ~PAGE_MASK);
1196 VM_BUG_ON(end & ~PAGE_MASK);
1197 VM_BUG_ON_VMA(start < vma->vm_start, vma);
1198 VM_BUG_ON_VMA(end > vma->vm_end, vma);
1199 VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
1200
1201 gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1202 if (vma->vm_flags & VM_LOCKONFAULT)
1203 gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
1223 return __get_user_pages(current, mm, start, nr_pages, gup_flags,
1224 NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
1234int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1235{
1236 struct mm_struct *mm = current->mm;
1237 unsigned long end, nstart, nend;
1238 struct vm_area_struct *vma = NULL;
1239 int locked = 0;
1240 long ret = 0;
1241
1242 end = start + len;
1243
1244 for (nstart = start; nstart < end; nstart = nend) {
1245
1246
1247
1248
1249 if (!locked) {
1250 locked = 1;
1251 down_read(&mm->mmap_sem);
1252 vma = find_vma(mm, nstart);
1253 } else if (nstart >= vma->vm_end)
1254 vma = vma->vm_next;
1255 if (!vma || vma->vm_start >= end)
1256 break;
1257
1258
1259
1260
1261 nend = min(end, vma->vm_end);
1262 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1263 continue;
1264 if (nstart < vma->vm_start)
1265 nstart = vma->vm_start;
1266
1267
1268
1269
1270
1271 ret = populate_vma_page_range(vma, nstart, nend, &locked);
1272 if (ret < 0) {
1273 if (ignore_errors) {
1274 ret = 0;
1275 continue;
1276 }
1277 break;
1278 }
1279 nend = nstart + ret * PAGE_SIZE;
1280 ret = 0;
1281 }
1282 if (locked)
1283 up_read(&mm->mmap_sem);
1284 return ret;
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump, to be freed
 * afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save disk space.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
1301#ifdef CONFIG_ELF_CORE
1302struct page *get_dump_page(unsigned long addr)
1303{
1304 struct vm_area_struct *vma;
1305 struct page *page;
1306
1307 if (__get_user_pages(current, current->mm, addr, 1,
1308 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
1309 NULL) < 1)
1310 return NULL;
1311 flush_cache_page(vma, addr, page_to_pfn(page));
1312 return page;
1313}
1314#endif
1315#else
1316static long __get_user_pages_locked(struct task_struct *tsk,
1317 struct mm_struct *mm, unsigned long start,
1318 unsigned long nr_pages, struct page **pages,
1319 struct vm_area_struct **vmas, int *locked,
1320 unsigned int foll_flags)
1321{
1322 struct vm_area_struct *vma;
1323 unsigned long vm_flags;
1324 int i;
1325
1326
1327
1328
1329 vm_flags = (foll_flags & FOLL_WRITE) ?
1330 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1331 vm_flags &= (foll_flags & FOLL_FORCE) ?
1332 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1333
1334 for (i = 0; i < nr_pages; i++) {
1335 vma = find_vma(mm, start);
1336 if (!vma)
1337 goto finish_or_fault;
1338
1339
1340 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1341 !(vm_flags & vma->vm_flags))
1342 goto finish_or_fault;
1343
1344 if (pages) {
1345 pages[i] = virt_to_page(start);
1346 if (pages[i])
1347 get_page(pages[i]);
1348 }
1349 if (vmas)
1350 vmas[i] = vma;
1351 start = (start + PAGE_SIZE) & PAGE_MASK;
1352 }
1353
1354 return i;
1355
1356finish_or_fault:
1357 return i ? : -EFAULT;
1358}
1359#endif
1360
1361#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
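/*
 * Return true if any of the vmas backing the pinned range is a
 * filesystem-DAX mapping; such pages must not be held across a long-term
 * pin.
 */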
1362static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
1363{
1364 long i;
1365 struct vm_area_struct *vma_prev = NULL;
1366
1367 for (i = 0; i < nr_pages; i++) {
1368 struct vm_area_struct *vma = vmas[i];
1369
1370 if (vma == vma_prev)
1371 continue;
1372
1373 vma_prev = vma;
1374
1375 if (vma_is_fsdax(vma))
1376 return true;
1377 }
1378 return false;
1379}
1380
1381#ifdef CONFIG_CMA
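/*
 * Allocation callback for migrate_pages(): allocate a replacement page
 * outside of CMA, preferably on the same node as the original, so that a
 * long-term pin does not keep CMA memory busy. Hugetlb and THP sources are
 * handled as well.
 */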
1382static struct page *new_non_cma_page(struct page *page, unsigned long private)
1383{
1384
1385
1386
1387
1388 int nid = page_to_nid(page);
1389
1390
1391
1392
1393
1394
1395
1396 gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
1397
1398 if (PageHighMem(page))
1399 gfp_mask |= __GFP_HIGHMEM;
1400
1401#ifdef CONFIG_HUGETLB_PAGE
1402 if (PageHuge(page)) {
1403 struct hstate *h = page_hstate(page);
1404
1405
1406
1407
1408 return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1409 }
1410#endif
1411 if (PageTransHuge(page)) {
1412 struct page *thp;
1413
1414
1415
1416 gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
1417
1418
1419
1420
1421
1422 thp_gfpmask &= ~__GFP_MOVABLE;
1423 thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
1424 if (!thp)
1425 return NULL;
1426 prep_transhuge_page(thp);
1427 return thp;
1428 }
1429
1430 return __alloc_pages_node(nid, gfp_mask, 0);
1431}
1432
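/*
 * Scan the pages just pinned and, for any that live in a CMA area, drop the
 * pins, isolate and migrate those pages to non-CMA memory, and then re-pin
 * the whole range before the long-term pin is allowed to proceed.
 */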
1433static long check_and_migrate_cma_pages(struct task_struct *tsk,
1434 struct mm_struct *mm,
1435 unsigned long start,
1436 unsigned long nr_pages,
1437 struct page **pages,
1438 struct vm_area_struct **vmas,
1439 unsigned int gup_flags)
1440{
1441 unsigned long i;
1442 unsigned long step;
1443 bool drain_allow = true;
1444 bool migrate_allow = true;
1445 LIST_HEAD(cma_page_list);
1446
1447check_again:
1448 for (i = 0; i < nr_pages;) {
1449
1450 struct page *head = compound_head(pages[i]);
1451
1452
1453
1454
1455
1456 step = compound_nr(head) - (pages[i] - head);
1457
1458
1459
1460
1461
1462 if (is_migrate_cma_page(head)) {
1463 if (PageHuge(head))
1464 isolate_huge_page(head, &cma_page_list);
1465 else {
1466 if (!PageLRU(head) && drain_allow) {
1467 lru_add_drain_all();
1468 drain_allow = false;
1469 }
1470
1471 if (!isolate_lru_page(head)) {
1472 list_add_tail(&head->lru, &cma_page_list);
1473 mod_node_page_state(page_pgdat(head),
1474 NR_ISOLATED_ANON +
1475 page_is_file_cache(head),
1476 hpage_nr_pages(head));
1477 }
1478 }
1479 }
1480
1481 i += step;
1482 }
1483
1484 if (!list_empty(&cma_page_list)) {
1485
1486
1487
1488 for (i = 0; i < nr_pages; i++)
1489 put_page(pages[i]);
1490
1491 if (migrate_pages(&cma_page_list, new_non_cma_page,
1492 NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
1493
1494
1495
1496
1497 migrate_allow = false;
1498
1499 if (!list_empty(&cma_page_list))
1500 putback_movable_pages(&cma_page_list);
1501 }
1502
1503
1504
1505
1506
1507 nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages,
1508 pages, vmas, NULL,
1509 gup_flags);
1510
1511 if ((nr_pages > 0) && migrate_allow) {
1512 drain_allow = true;
1513 goto check_again;
1514 }
1515 }
1516
1517 return nr_pages;
1518}
1519#else
1520static long check_and_migrate_cma_pages(struct task_struct *tsk,
1521 struct mm_struct *mm,
1522 unsigned long start,
1523 unsigned long nr_pages,
1524 struct page **pages,
1525 struct vm_area_struct **vmas,
1526 unsigned int gup_flags)
1527{
1528 return nr_pages;
1529}
1530#endif
1531
/*
 * __gup_longterm_locked() is a front-end for __get_user_pages_locked() that
 * additionally validates the pages for a long-term pin: FS DAX mappings are
 * rejected and CMA pages are migrated out of the CMA region first.
 */
1536static long __gup_longterm_locked(struct task_struct *tsk,
1537 struct mm_struct *mm,
1538 unsigned long start,
1539 unsigned long nr_pages,
1540 struct page **pages,
1541 struct vm_area_struct **vmas,
1542 unsigned int gup_flags)
1543{
1544 struct vm_area_struct **vmas_tmp = vmas;
1545 unsigned long flags = 0;
1546 long rc, i;
1547
1548 if (gup_flags & FOLL_LONGTERM) {
1549 if (!pages)
1550 return -EINVAL;
1551
1552 if (!vmas_tmp) {
1553 vmas_tmp = kcalloc(nr_pages,
1554 sizeof(struct vm_area_struct *),
1555 GFP_KERNEL);
1556 if (!vmas_tmp)
1557 return -ENOMEM;
1558 }
1559 flags = memalloc_nocma_save();
1560 }
1561
1562 rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
1563 vmas_tmp, NULL, gup_flags);
1564
1565 if (gup_flags & FOLL_LONGTERM) {
1566 memalloc_nocma_restore(flags);
1567 if (rc < 0)
1568 goto out;
1569
1570 if (check_dax_vmas(vmas_tmp, rc)) {
1571 for (i = 0; i < rc; i++)
1572 put_page(pages[i]);
1573 rc = -EOPNOTSUPP;
1574 goto out;
1575 }
1576
1577 rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
1578 vmas_tmp, gup_flags);
1579 }
1580
1581out:
1582 if (vmas_tmp != vmas)
1583 kfree(vmas_tmp);
1584 return rc;
1585}
1586#else
1587static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
1588 struct mm_struct *mm,
1589 unsigned long start,
1590 unsigned long nr_pages,
1591 struct page **pages,
1592 struct vm_area_struct **vmas,
1593 unsigned int flags)
1594{
1595 return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
1596 NULL, flags);
1597}
#endif /* CONFIG_FS_DAX || CONFIG_CMA */

/*
 * get_user_pages() - pin user pages in memory, for the current task's mm.
 *
 * This is the same as get_user_pages_remote(), but with a less flexible
 * calling convention: it always operates on current/current->mm, does not
 * take a locked parameter, and additionally honours FOLL_LONGTERM.
 */
1607long get_user_pages(unsigned long start, unsigned long nr_pages,
1608 unsigned int gup_flags, struct page **pages,
1609 struct vm_area_struct **vmas)
1610{
1611 return __gup_longterm_locked(current, current->mm, start, nr_pages,
1612 pages, vmas, gup_flags | FOLL_TOUCH);
1613}
EXPORT_SYMBOL(get_user_pages);

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(start, nr_pages, gup_flags, pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
1637long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1638 unsigned int gup_flags, struct page **pages,
1639 int *locked)
1640{
1641
1642
1643
1644
1645
1646
1647 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1648 return -EINVAL;
1649
1650 return __get_user_pages_locked(current, current->mm, start, nr_pages,
1651 pages, NULL, locked,
1652 gup_flags | FOLL_TOUCH);
1653}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
 *
 * It is functionally equivalent to get_user_pages_fast(), so
 * get_user_pages_fast() should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
1671long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1672 struct page **pages, unsigned int gup_flags)
1673{
1674 struct mm_struct *mm = current->mm;
1675 int locked = 1;
1676 long ret;
1677
1678
1679
1680
1681
1682
1683
1684 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1685 return -EINVAL;
1686
1687 down_read(&mm->mmap_sem);
1688 ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
1689 &locked, gup_flags | FOLL_TOUCH);
1690 if (locked)
1691 up_read(&mm->mmap_sem);
1692 return ret;
1693}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * Fast GUP
 *
 * get_user_pages_fast() attempts to pin user pages by walking the page
 * tables directly, without taking mmap_sem or the page table locks. The
 * walker therefore has to be protected against the page table pages being
 * freed or split underneath it.
 *
 * This is done by disabling interrupts for the duration of the walk:
 * architectures either broadcast an IPI before freeing page table pages
 * (so the free cannot complete while we walk), or use RCU table freeing
 * (HAVE_RCU_TABLE_FREE), whose grace period is likewise held off while
 * interrupts are disabled. THP splits are blocked by the same mechanism.
 *
 * The walker also assumes that ptes can be read atomically by the
 * architecture (see gup_get_pte()) and that access_ok() is sufficient to
 * validate userspace address ranges.
 */
1729#ifdef CONFIG_HAVE_FAST_GUP
1730#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking any
 * locks. For this we would like to load the pointers atomically, but that is
 * not possible on 32-bit architectures with 64-bit ptes (e.g. x86 PAE). What
 * we do have is the guarantee that a pte will only either go from not present
 * to present, or present to not present, or both -- it will not switch to a
 * completely different present page without a TLB flush in between; something
 * that we are blocking by holding interrupts off.
 *
 * Loading pte_high after pte_low, and then re-checking pte_low, guarantees
 * that the two halves we return belong to the same pte: if the pte was
 * modified in between, pte_low will have changed and we retry. Because
 * get_user_pages_fast() only operates on present ptes, a torn read of a
 * non-present pte is harmless.
 */
1762static inline pte_t gup_get_pte(pte_t *ptep)
1763{
1764 pte_t pte;
1765
1766 do {
1767 pte.pte_low = ptep->pte_low;
1768 smp_rmb();
1769 pte.pte_high = ptep->pte_high;
1770 smp_rmb();
1771 } while (unlikely(pte.pte_low != ptep->pte_low));
1772
1773 return pte;
1774}
1775#else
/*
 * We require that the pte can be read atomically.
 */
1779static inline pte_t gup_get_pte(pte_t *ptep)
1780{
1781 return READ_ONCE(*ptep);
1782}
1783#endif
1784
1785static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
1786 struct page **pages)
1787{
1788 while ((*nr) - nr_start) {
1789 struct page *page = pages[--(*nr)];
1790
1791 ClearPageReferenced(page);
1792 put_page(page);
1793 }
1794}
1795
/*
 * Return the compound head page of @page with its reference count increased
 * by @refs, or NULL if that could not be done safely.
 */
1800static inline struct page *try_get_compound_head(struct page *page, int refs)
1801{
1802 struct page *head = compound_head(page);
1803 if (WARN_ON_ONCE(page_ref_count(head) < 0))
1804 return NULL;
1805 if (unlikely(!page_cache_add_speculative(head, refs)))
1806 return NULL;
1807 return head;
1808}
1809
1810#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
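/*
 * Fast-path walk of the pte level: pick up references on present,
 * non-special ptes without taking the page table lock. Returns 1 if the
 * whole range was handled, 0 to make the caller fall back to the slow path.
 */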
1811static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1812 unsigned int flags, struct page **pages, int *nr)
1813{
1814 struct dev_pagemap *pgmap = NULL;
1815 int nr_start = *nr, ret = 0;
1816 pte_t *ptep, *ptem;
1817
1818 ptem = ptep = pte_offset_map(&pmd, addr);
1819 do {
1820 pte_t pte = gup_get_pte(ptep);
1821 struct page *head, *page;
1822
		/*
		 * Similar to the PMD case below, NUMA hinting must take the
		 * slow path, using the pte_protnone check.
		 */
1827 if (pte_protnone(pte))
1828 goto pte_unmap;
1829
1830 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
1831 goto pte_unmap;
1832
1833 if (pte_devmap(pte)) {
1834 if (unlikely(flags & FOLL_LONGTERM))
1835 goto pte_unmap;
1836
1837 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
1838 if (unlikely(!pgmap)) {
1839 undo_dev_pagemap(nr, nr_start, pages);
1840 goto pte_unmap;
1841 }
1842 } else if (pte_special(pte))
1843 goto pte_unmap;
1844
1845 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1846 page = pte_page(pte);
1847
1848 head = try_get_compound_head(page, 1);
1849 if (!head)
1850 goto pte_unmap;
1851
1852 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1853 put_page(head);
1854 goto pte_unmap;
1855 }
1856
1857 VM_BUG_ON_PAGE(compound_head(page) != head, page);
1858
1859 SetPageReferenced(page);
1860 pages[*nr] = page;
1861 (*nr)++;
1862
1863 } while (ptep++, addr += PAGE_SIZE, addr != end);
1864
1865 ret = 1;
1866
1867pte_unmap:
1868 if (pgmap)
1869 put_dev_pagemap(pgmap);
1870 pte_unmap(ptem);
1871 return ret;
1872}
1873#else
/*
 * If the architecture cannot distinguish special ptes (no
 * CONFIG_ARCH_HAS_PTE_SPECIAL), the fast path must not take references on
 * individual ptes at all, so always fall back to the slow path here.
 * Huge mappings are still handled by the huge-page helpers, which keeps
 * __get_user_pages_fast() useful for hugetlb and THP even on such
 * architectures.
 */
1884static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1885 unsigned int flags, struct page **pages, int *nr)
1886{
1887 return 0;
1888}
1889#endif
1890
1891#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
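/*
 * Fast-path pinning of ZONE_DEVICE pages covered by a huge devmap entry:
 * each pfn's dev_pagemap is looked up to make sure the device memory is
 * still live before the page reference is taken.
 */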
1892static int __gup_device_huge(unsigned long pfn, unsigned long addr,
1893 unsigned long end, struct page **pages, int *nr)
1894{
1895 int nr_start = *nr;
1896 struct dev_pagemap *pgmap = NULL;
1897
1898 do {
1899 struct page *page = pfn_to_page(pfn);
1900
1901 pgmap = get_dev_pagemap(pfn, pgmap);
1902 if (unlikely(!pgmap)) {
1903 undo_dev_pagemap(nr, nr_start, pages);
1904 return 0;
1905 }
1906 SetPageReferenced(page);
1907 pages[*nr] = page;
1908 get_page(page);
1909 (*nr)++;
1910 pfn++;
1911 } while (addr += PAGE_SIZE, addr != end);
1912
1913 if (pgmap)
1914 put_dev_pagemap(pgmap);
1915 return 1;
1916}
1917
1918static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
1919 unsigned long end, struct page **pages, int *nr)
1920{
1921 unsigned long fault_pfn;
1922 int nr_start = *nr;
1923
1924 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1925 if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
1926 return 0;
1927
1928 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
1929 undo_dev_pagemap(nr, nr_start, pages);
1930 return 0;
1931 }
1932 return 1;
1933}
1934
1935static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
1936 unsigned long end, struct page **pages, int *nr)
1937{
1938 unsigned long fault_pfn;
1939 int nr_start = *nr;
1940
1941 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1942 if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
1943 return 0;
1944
1945 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
1946 undo_dev_pagemap(nr, nr_start, pages);
1947 return 0;
1948 }
1949 return 1;
1950}
1951#else
1952static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
1953 unsigned long end, struct page **pages, int *nr)
1954{
1955 BUILD_BUG();
1956 return 0;
1957}
1958
1959static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
1960 unsigned long end, struct page **pages, int *nr)
1961{
1962 BUILD_BUG();
1963 return 0;
1964}
1965#endif
1966
1967#ifdef CONFIG_ARCH_HAS_HUGEPD
1968static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
1969 unsigned long sz)
1970{
1971 unsigned long __boundary = (addr + sz) & ~(sz-1);
1972 return (__boundary - 1 < end - 1) ? __boundary : end;
1973}
1974
1975static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
1976 unsigned long end, unsigned int flags,
1977 struct page **pages, int *nr)
1978{
1979 unsigned long pte_end;
1980 struct page *head, *page;
1981 pte_t pte;
1982 int refs;
1983
1984 pte_end = (addr + sz) & ~(sz-1);
1985 if (pte_end < end)
1986 end = pte_end;
1987
1988 pte = READ_ONCE(*ptep);
1989
1990 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
1991 return 0;
1992
1993
1994 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1995
1996 refs = 0;
1997 head = pte_page(pte);
1998
1999 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
2000 do {
2001 VM_BUG_ON(compound_head(page) != head);
2002 pages[*nr] = page;
2003 (*nr)++;
2004 page++;
2005 refs++;
2006 } while (addr += PAGE_SIZE, addr != end);
2007
2008 head = try_get_compound_head(head, refs);
2009 if (!head) {
2010 *nr -= refs;
2011 return 0;
2012 }
2013
2014 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2015
2016 *nr -= refs;
2017 while (refs--)
2018 put_page(head);
2019 return 0;
2020 }
2021
2022 SetPageReferenced(head);
2023 return 1;
2024}
2025
2026static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2027 unsigned int pdshift, unsigned long end, unsigned int flags,
2028 struct page **pages, int *nr)
2029{
2030 pte_t *ptep;
2031 unsigned long sz = 1UL << hugepd_shift(hugepd);
2032 unsigned long next;
2033
2034 ptep = hugepte_offset(hugepd, addr, pdshift);
2035 do {
2036 next = hugepte_addr_end(addr, end, sz);
2037 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2038 return 0;
2039 } while (ptep++, addr = next, addr != end);
2040
2041 return 1;
2042}
2043#else
2044static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2045 unsigned int pdshift, unsigned long end, unsigned int flags,
2046 struct page **pages, int *nr)
2047{
2048 return 0;
2049}
2050#endif
2051
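/*
 * Pin all the base pages covered by a huge PMD mapping, re-checking the PMD
 * afterwards to catch a concurrent split or unmap; devmap PMDs are handed
 * off to __gup_device_huge_pmd().
 */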
2052static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2053 unsigned long end, unsigned int flags,
2054 struct page **pages, int *nr)
2055{
2056 struct page *head, *page;
2057 int refs;
2058
2059 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2060 return 0;
2061
2062 if (pmd_devmap(orig)) {
2063 if (unlikely(flags & FOLL_LONGTERM))
2064 return 0;
2065 return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
2066 }
2067
2068 refs = 0;
2069 page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2070 do {
2071 pages[*nr] = page;
2072 (*nr)++;
2073 page++;
2074 refs++;
2075 } while (addr += PAGE_SIZE, addr != end);
2076
2077 head = try_get_compound_head(pmd_page(orig), refs);
2078 if (!head) {
2079 *nr -= refs;
2080 return 0;
2081 }
2082
2083 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2084 *nr -= refs;
2085 while (refs--)
2086 put_page(head);
2087 return 0;
2088 }
2089
2090 SetPageReferenced(head);
2091 return 1;
2092}
2093
2094static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2095 unsigned long end, unsigned int flags, struct page **pages, int *nr)
2096{
2097 struct page *head, *page;
2098 int refs;
2099
2100 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2101 return 0;
2102
2103 if (pud_devmap(orig)) {
2104 if (unlikely(flags & FOLL_LONGTERM))
2105 return 0;
2106 return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
2107 }
2108
2109 refs = 0;
2110 page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2111 do {
2112 pages[*nr] = page;
2113 (*nr)++;
2114 page++;
2115 refs++;
2116 } while (addr += PAGE_SIZE, addr != end);
2117
2118 head = try_get_compound_head(pud_page(orig), refs);
2119 if (!head) {
2120 *nr -= refs;
2121 return 0;
2122 }
2123
2124 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2125 *nr -= refs;
2126 while (refs--)
2127 put_page(head);
2128 return 0;
2129 }
2130
2131 SetPageReferenced(head);
2132 return 1;
2133}
2134
2135static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2136 unsigned long end, unsigned int flags,
2137 struct page **pages, int *nr)
2138{
2139 int refs;
2140 struct page *head, *page;
2141
2142 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2143 return 0;
2144
2145 BUILD_BUG_ON(pgd_devmap(orig));
2146 refs = 0;
2147 page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2148 do {
2149 pages[*nr] = page;
2150 (*nr)++;
2151 page++;
2152 refs++;
2153 } while (addr += PAGE_SIZE, addr != end);
2154
2155 head = try_get_compound_head(pgd_page(orig), refs);
2156 if (!head) {
2157 *nr -= refs;
2158 return 0;
2159 }
2160
2161 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2162 *nr -= refs;
2163 while (refs--)
2164 put_page(head);
2165 return 0;
2166 }
2167
2168 SetPageReferenced(head);
2169 return 1;
2170}
2171
2172static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
2173 unsigned int flags, struct page **pages, int *nr)
2174{
2175 unsigned long next;
2176 pmd_t *pmdp;
2177
2178 pmdp = pmd_offset(&pud, addr);
2179 do {
2180 pmd_t pmd = READ_ONCE(*pmdp);
2181
2182 next = pmd_addr_end(addr, end);
2183 if (!pmd_present(pmd))
2184 return 0;
2185
2186 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2187 pmd_devmap(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
2193 if (pmd_protnone(pmd))
2194 return 0;
2195
2196 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2197 pages, nr))
2198 return 0;
2199
2200 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2201
2202
2203
2204
2205 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2206 PMD_SHIFT, next, flags, pages, nr))
2207 return 0;
2208 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2209 return 0;
2210 } while (pmdp++, addr = next, addr != end);
2211
2212 return 1;
2213}
2214
2215static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
2216 unsigned int flags, struct page **pages, int *nr)
2217{
2218 unsigned long next;
2219 pud_t *pudp;
2220
2221 pudp = pud_offset(&p4d, addr);
2222 do {
2223 pud_t pud = READ_ONCE(*pudp);
2224
2225 next = pud_addr_end(addr, end);
2226 if (pud_none(pud))
2227 return 0;
2228 if (unlikely(pud_huge(pud))) {
2229 if (!gup_huge_pud(pud, pudp, addr, next, flags,
2230 pages, nr))
2231 return 0;
2232 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2233 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2234 PUD_SHIFT, next, flags, pages, nr))
2235 return 0;
2236 } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
2237 return 0;
2238 } while (pudp++, addr = next, addr != end);
2239
2240 return 1;
2241}
2242
2243static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
2244 unsigned int flags, struct page **pages, int *nr)
2245{
2246 unsigned long next;
2247 p4d_t *p4dp;
2248
2249 p4dp = p4d_offset(&pgd, addr);
2250 do {
2251 p4d_t p4d = READ_ONCE(*p4dp);
2252
2253 next = p4d_addr_end(addr, end);
2254 if (p4d_none(p4d))
2255 return 0;
2256 BUILD_BUG_ON(p4d_huge(p4d));
2257 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2258 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2259 P4D_SHIFT, next, flags, pages, nr))
2260 return 0;
2261 } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
2262 return 0;
2263 } while (p4dp++, addr = next, addr != end);
2264
2265 return 1;
2266}
2267
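/*
 * Lockless walk of the page tables for the fast GUP path. Stops (without
 * reporting an error) at the first entry that cannot be handled; *nr then
 * tells the caller how many pages were pinned before that point.
 */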
2268static void gup_pgd_range(unsigned long addr, unsigned long end,
2269 unsigned int flags, struct page **pages, int *nr)
2270{
2271 unsigned long next;
2272 pgd_t *pgdp;
2273
2274 pgdp = pgd_offset(current->mm, addr);
2275 do {
2276 pgd_t pgd = READ_ONCE(*pgdp);
2277
2278 next = pgd_addr_end(addr, end);
2279 if (pgd_none(pgd))
2280 return;
2281 if (unlikely(pgd_huge(pgd))) {
2282 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2283 pages, nr))
2284 return;
2285 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2286 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2287 PGDIR_SHIFT, next, flags, pages, nr))
2288 return;
2289 } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
2290 return;
2291 } while (pgdp++, addr = next, addr != end);
2292}
2293#else
2294static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2295 unsigned int flags, struct page **pages, int *nr)
2296{
2297}
2298#endif
2299
2300#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use get_user_pages_fast() for the range, or
 * whether we need to fall back to the slow version:
 */
2305static bool gup_fast_permitted(unsigned long start, unsigned long end)
2306{
2307 return true;
2308}
2309#endif
2310
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back
 * to the regular GUP.
 * Note a difference with get_user_pages_fast(): this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 */
2320int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
2321 struct page **pages)
2322{
2323 unsigned long len, end;
2324 unsigned long flags;
2325 int nr = 0;
2326
2327 start = untagged_addr(start) & PAGE_MASK;
2328 len = (unsigned long) nr_pages << PAGE_SHIFT;
2329 end = start + len;
2330
2331 if (end <= start)
2332 return 0;
2333 if (unlikely(!access_ok((void __user *)start, len)))
2334 return 0;
2335
	/*
	 * Disable interrupts. The nested form is used to allow full, general
	 * purpose use of this routine, e.g. from contexts that already have
	 * interrupts disabled.
	 *
	 * With interrupts disabled, page table pages cannot be freed from
	 * under us (see the Fast GUP comment above and the mmu_table_batch
	 * comments in include/asm-generic/tlb.h), and THP splits are blocked
	 * as well.
	 */
2348 if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
2349 gup_fast_permitted(start, end)) {
2350 local_irq_save(flags);
2351 gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
2352 local_irq_restore(flags);
2353 }
2354
2355 return nr;
2356}
2357EXPORT_SYMBOL_GPL(__get_user_pages_fast);
2358
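/*
 * Slow-path helper for get_user_pages_fast(): FOLL_LONGTERM needs the
 * DAX/CMA checks of __gup_longterm_locked(); everything else can use
 * get_user_pages_unlocked() directly.
 */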
2359static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2360 unsigned int gup_flags, struct page **pages)
2361{
2362 int ret;
2363
2364
2365
2366
2367
2368 if (gup_flags & FOLL_LONGTERM) {
		down_read(&current->mm->mmap_sem);
2370 ret = __gup_longterm_locked(current, current->mm,
2371 start, nr_pages,
2372 pages, NULL, gup_flags);
		up_read(&current->mm->mmap_sem);
2374 } else {
2375 ret = get_user_pages_unlocked(start, nr_pages,
2376 pages, gup_flags);
2377 }
2378
2379 return ret;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
2398int get_user_pages_fast(unsigned long start, int nr_pages,
2399 unsigned int gup_flags, struct page **pages)
2400{
2401 unsigned long addr, len, end;
2402 int nr = 0, ret = 0;
2403
2404 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM)))
2405 return -EINVAL;
2406
2407 start = untagged_addr(start) & PAGE_MASK;
2408 addr = start;
2409 len = (unsigned long) nr_pages << PAGE_SHIFT;
2410 end = start + len;
2411
2412 if (end <= start)
2413 return 0;
2414 if (unlikely(!access_ok((void __user *)start, len)))
2415 return -EFAULT;
2416
2417 if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
2418 gup_fast_permitted(start, end)) {
2419 local_irq_disable();
2420 gup_pgd_range(addr, end, gup_flags, pages, &nr);
2421 local_irq_enable();
2422 ret = nr;
2423 }
2424
2425 if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
2427 start += nr << PAGE_SHIFT;
2428 pages += nr;
2429
2430 ret = __gup_longterm_unlocked(start, nr_pages - nr,
2431 gup_flags, pages);
		/* Have to be a bit careful with return values */
2434 if (nr > 0) {
2435 if (ret < 0)
2436 ret = nr;
2437 else
2438 ret += nr;
2439 }
2440 }
2441
2442 return ret;
2443}
2444EXPORT_SYMBOL_GPL(get_user_pages_fast);
2445