#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has ever faulted in, don't allocate page tables just to
	 * return zero pages: report -EFAULT instead so that
	 * get_dump_page() leaves a hole in the dump.  This is only
	 * safe where such a hole would have read back as zeroes
	 * anyway, i.e. where there is no ->fault handler.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * Callers that pass FOLL_MIGRATION need to recognize a page
		 * even while it is being migrated (e.g. KSM's break_ksm()),
		 * so for that case we wait on the migration entry here
		 * instead of failing the lookup.
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte) || pte_file(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to
		 * just use mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE, which
		 * might bounce very badly if there is contention.
		 *
		 * If the page is already locked, there is no need to handle
		 * it now - vmscan will handle it later if and when it
		 * attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because the page is locked here, migration is
			 * blocked by the pte's page reference, and the page
			 * is known to still be mapped, so there is no need
			 * to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptl);
	return page;
bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

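/*
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour (FOLL_* flags from <linux/mm.h>)
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */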
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		if (flags & FOLL_GET)
			return NULL;
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		if (flags & FOLL_GET) {
			/*
			 * Refcounts on tail pages are not well-defined and
			 * shouldn't be taken: the caller must handle a NULL
			 * return when trying to follow tail pages.
			 */
			if (PageHead(page))
				get_page(page);
			else
				page = NULL;
		}
		return page;
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

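/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released
 * by handle_mm_fault(); if it is, *@nonblocking is set to 0 and
 * -EBUSY is returned.
 */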
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* For mlock, just skip the stack guard page. */
	if ((*flags & FOLL_MLOCK) &&
	    (stack_guard_page_start(vma, address) ||
	     stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write.  We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * The write,force case historically did COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so that ptrace
			 * could set a breakpoint in a read-only mapping of an
			 * executable without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising:
			 * now just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}

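/*
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of the target task (for fault accounting), or NULL
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long, or NULL if the caller
 *		only wants to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page,
 *		or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention is
 *		allowed; if a retry would have been needed, *@nonblocking
 *		is cleared.
 *
 * Returns the number of pages pinned, which may be fewer than the number
 * requested; if no pages were pinned, returns -errno.  Each returned page
 * must be released with put_page() when the caller is done with it, and
 * the vmas only remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held; it may be released, see faultin_page().
 */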
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space.
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		}
		if (IS_ERR(page))
			return i ? i : PTR_ERR(page);
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);

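/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Unlike get_user_pages(), this calls handle_mm_fault() unconditionally,
 * so any software fixup of the dirty and young bits in the PTE is always
 * performed.  It is typically used when an access from atomic context
 * (with page faults disabled) returned -EFAULT and the fault needs to be
 * resolved before retrying.
 *
 * The caller must hold mmap_sem, as required by handle_mm_fault().
 */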
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}

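/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when the user mapping is
 *		currently protected
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long, or NULL if the caller
 *		only wants to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page,
 *		or NULL if the caller does not require them.
 *
 * Returns the number of pages pinned, which may be fewer than the number
 * requested; if no pages were pinned, returns -errno.  Each returned page
 * must be released with put_page() when the caller is done with it.
 *
 * Must be called with mmap_sem held.
 *
 * If the caller writes into a returned page, set_page_dirty() (or
 * set_page_dirty_lock(), as appropriate) must be called before the page
 * is released, and @write should be set so that a writable mapping
 * (breaking COW if necessary) is what gets pinned.
 */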
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);

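/*
 * get_dump_page() - pin a user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns the struct page pinned for the dump, to be released afterwards
 * with put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, faulted in but
 * not dumped, to preserve a hole where a page was never mapped.
 */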
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

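/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks.  The walker therefore needs
 * to be protected from page table pages being freed from under it, and
 * needs to block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed.  This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way is to batch up page table containing pages belonging to
 * more than one mm_user, then rcu_sched a callback to free those pages.
 * Disabling interrupts then blocks both the rcu_sched callback (so page
 * table pages are not freed under the walker) and any THP splits.
 *
 * A plain rcu_read_lock() is not used here because IPIs that come from
 * THPs splitting also need to be blocked.
 */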
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically.  If this is not the case for your architecture,
		 * wrap this in a helper function (for an example see
		 * gup_get_pte in arch/x86/mm/gup.c).
		 */
		pte_t pte = ACCESS_ONCE(*ptep);
		struct page *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take the
		 * slow path.
		 */
		if (!pte_present(pte) || pte_special(pte) ||
		    pte_numa(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		if (!page_cache_get_speculative(page))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			goto pte_unmap;
		}

		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else
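
/*
 * If we can't determine whether or not a pte is special, then fail
 * immediately for ptes.  Note, we can still pin HugeTLB and THP as
 * these are guaranteed not to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages.  Thus it's
 * still useful to have gup_huge_pmd even if we can't operate on ptes.
 */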
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return (this allows the THP code to bump their reference count
	 * when they are split into base pages).
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page, *tail;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = ACCESS_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_numa(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
					pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures with a non-standard hugetlb page
			 * table layout describe huge mappings with hugepd
			 * entries; walk them with the hugepd helper.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

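/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.  It will only return non-negative values.
 */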
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts.  The nested form is used because interrupts
	 * may already be disabled by get_futex_key.
	 *
	 * With interrupts disabled, page table pages are blocked from being
	 * freed from under us (see mmu_gather in asm-generic/tlb.h for
	 * details), and IPIs coming from THP splits are blocked as well,
	 * which is why a plain rcu_read_lock() is not sufficient here.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = ACCESS_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

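/*
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempts to pin user pages without taking mm->mmap_sem, by walking the
 * page tables directly; falls back to the slow path (get_user_pages) for
 * whatever could not be pinned that way.
 *
 * Returns the number of pages pinned, which may be fewer than the number
 * requested; if no pages were pinned, returns -errno.
 */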
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
				     nr_pages - nr, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */