// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPU's that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

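/*
 * A quick sketch of the !MMU mapping model implemented below (inferred
 * from this file; see Documentation/nommu-mmap.txt for the authoritative
 * description): each struct vm_area_struct describes what one process
 * sees, while the backing storage is tracked by a refcounted struct
 * vm_region kept in nommu_region_tree so shareable mappings can be
 * reused:
 *
 *	process A VMA ---+
 *	                 +--> vm_region (vm_usage == 2) --> backing pages
 *	process B VMA ---+
 *
 * A region's vm_top records the real top of the allocation, which may lie
 * above vm_end when the page allocator handed back more than was asked
 * for; see do_mmap_private() below.
 */
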
/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     unsigned int foll_flags, struct page **pages,
			     struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				get_page(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may need to grab the mmap_sem a lot
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas)
{
	return __get_user_pages(current, current->mm, start, nr_pages,
				gup_flags, pages, vmas, NULL);
}
EXPORT_SYMBOL(get_user_pages);

long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);

static long __get_user_pages_unlocked(struct task_struct *tsk,
			struct mm_struct *mm, unsigned long start,
			unsigned long nr_pages, struct page **pages,
			unsigned int gup_flags)
{
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
			       NULL, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}

long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

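/*
 * Example use of follow_pfn() (an illustrative sketch, not taken from
 * in-tree code; "my_vma" and "uaddr" are assumed to come from the
 * caller's context):
 *
 *	unsigned long pfn;
 *	int err = follow_pfn(my_vma, uaddr, &pfn);
 *
 *	if (!err)
 *		pr_debug("uaddr %#lx -> pfn %#lx\n", uaddr, pfn);
 *
 * On !MMU the translation is the identity mapping, so the function only
 * validates that the VMA really is an IO/PFN mapping.
 */
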
LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 *  kmalloc doesn't like __GFP_HIGHMEM for some reason
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
{
	return __vmalloc(size, flags, PAGE_KERNEL);
}

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

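/*
 * Illustrative pairing (a sketch, not a complete in-tree example): a
 * buffer allocated with vmalloc_user() is zeroed and marked VM_USERMAP,
 * so a hypothetical driver can later hand it to userspace from its
 * ->mmap() handler via remap_vmalloc_range() further down in this file:
 *
 *	buf = vmalloc_user(PAGE_ALIGN(bufsize));
 *	...
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, 0);
 *	}
 */
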
struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

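/*
 * The overflow clamp above works as follows (worked example on a 32-bit
 * box, values chosen purely for illustration): if buf == 0xfffff000 and
 * count == 0x2000, then buf + count wraps to 0x1000, which is < count,
 * so count is clamped to -(unsigned long)buf == 0x1000, i.e. exactly the
 * number of bytes left before the top of the address space.
 */
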
/**
 * vmalloc - allocate virtually contiguous memory
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * On !MMU there is only one node, so this falls back to vmalloc().
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size:	allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for N_USER_MAP
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement stubs for vmalloc_sync_mappings()/vmalloc_sync_unmappings()
 * if the architecture chose not to have them.
 */
void __weak vmalloc_sync_mappings(void)
{
}

void __weak vmalloc_sync_unmappings(void)
{
}

struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
			unsigned long num)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages);

int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages_zero);

/*
 * The brk heap lives in the [start_brk, context.end_brk) window that was
 * reserved when the binary was set up; growing it never allocates new
 * pages, so all this needs to do is range-check the request and, on
 * growth, flush the instruction cache over the newly exposed bytes.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

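/*
 * Worked example of the above (values are purely illustrative): with
 * start_brk == 0x90000000, brk == 0x90002000 and context.end_brk ==
 * 0x90008000, a request for 0x90004000 succeeds and simply moves mm->brk,
 * while a request for 0x90010000 is beyond end_brk, so the syscall
 * returns the old break: the no-MMU brk can never grow past its initial
 * reservation.
 */
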
/*
 * initialise the percpu counter & VM and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_end
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree, and add it to the address space's page tree also if it is not an
 * anonymous mapping
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache.vmas[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	vm_area_free(vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl through the list looking for the VMA for addr */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl through the list looking for the VMA for addr */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED)
		return -EINVAL;

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		if (file->f_op->mmap_capabilities) {
			capabilities = file->f_op->mmap_capabilities(file);
		} else {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = NOMMU_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					NOMMU_MAP_DIRECT |
					NOMMU_MAP_READ |
					NOMMU_MAP_WRITE;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~NOMMU_MAP_DIRECT;
		if (!(file->f_mode & FMODE_CAN_READ))
			capabilities &= ~NOMMU_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & NOMMU_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~NOMMU_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & NOMMU_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~NOMMU_MAP_DIRECT;
		}

		if (capabilities & NOMMU_MAP_DIRECT) {
			if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) ||
			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
			    ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC))
			    ) {
				capabilities &= ~NOMMU_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					pr_warn("MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (path_noexec(&file->f_path)) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & NOMMU_MAP_EXEC)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & NOMMU_MAP_EXEC)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~NOMMU_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = NOMMU_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);

	if (!(capabilities & NOMMU_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = call_mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total, point;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & NOMMU_MAP_DIRECT) {
		ret = call_mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	total = 1 << order;
	point = len >> PAGE_SHIFT;

	/* we don't want to allocate a power-of-2 sized page set */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
		total = point;

	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
	if (!base)
		goto enomem;

	atomic_long_add(total, &mmap_pages_allocated);

	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end = region->vm_start + len;
	region->vm_top = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		ret = kernel_read(vma->vm_file, base, len, &fpos);
		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	} else {
		vma_set_anonymous(vma);
	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end = vma->vm_end = 0;
	region->vm_top = 0;
	return ret;

enomem:
	pr_err("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0, NULL);
	return -ENOMEM;
}

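/*
 * Worked example of the trim logic above (numbers chosen for
 * illustration, PAGE_SIZE == 4KiB assumed): a 100KiB request is 25
 * pages, but get_order() rounds the buddy allocation up to order 5,
 * i.e. total == 32 pages.  Since total - point == 7 is at least the
 * default sysctl_nr_trim_pages of 1, total is cut back to 25 and
 * alloc_pages_exact() hands back the 7 excess pages, so only 100KiB of
 * RAM is pinned rather than 128KiB.
 */
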
/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap(struct file *file,
			unsigned long addr,
			unsigned long len,
			unsigned long prot,
			unsigned long flags,
			vm_flags_t vm_flags,
			unsigned long pgoff,
			unsigned long *populate,
			struct list_head *uf)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, result;
	int ret;

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we ignore the address hint and always allocate space */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = vm_area_alloc(current->mm);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping regions between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & NOMMU_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY)
				vma->vm_flags |= VM_MAPPED_COPY;
			else {
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & NOMMU_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & NOMMU_MAP_COPY))
					goto error_just_free;

				capabilities &= ~NOMMU_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	vm_area_free(vma);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	pr_warn("Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;

error_getting_region:
	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;
}

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = vm_area_dup(vma);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
					current->pid, current->comm,
					start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start)
				return -EINVAL;
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end)
			return -EINVAL;
		if (offset_in_page(start))
			return -EINVAL;
		if (end != vma->vm_end && offset_in_page(end))
			return -EINVAL;
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0)
				return ret;
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	return 0;
}
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len, NULL);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

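/*
 * Illustrative consequence of the rules above (a sketch, not a test
 * case): if a process maps two anonymous pages with one mmap() call and
 * then munmap()s just the second page, the anonymous VMA is shrunk via
 * shrink_vma() and the freed page really is returned to the allocator.
 * The same partial munmap() on a file-backed mapping would instead fail
 * with -EINVAL, as only whole file-backed VMAs can be unmapped on !MMU.
 */
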
/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}
}

int vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and we
 * must not move it
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (offset_in_page(addr))
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

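/*
 * Example of the size check above (made-up numbers): a 64KiB anonymous
 * mapping can be mremap()ed in place down to 16KiB and later back up to
 * 64KiB, since the backing vm_region stays 64KiB long; asking for 80KiB
 * fails with -ENOMEM because a mapping can never outgrow its region, and
 * regions are never moved on !MMU.
 */
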
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	/* with no MMU, the mapping must already be in place */
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

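/*
 * Since there is no page table to edit, remap_pfn_range() can only
 * succeed when the requested virtual address already equals the physical
 * address; all it then does is mark the VMA as an IO/PFN mapping.  A
 * hypothetical driver sketch (illustrative only):
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       vma->vm_start >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */
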
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

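/*
 * On !MMU, remap_vmalloc_range() does not copy or map anything: it simply
 * repoints the VMA at the kernel buffer.  The VM_USERMAP check is the
 * only guard, so the buffer handed in should come from vmalloc_user() or
 * vmalloc_32_user(), which zero the memory to avoid leaking kernel data
 * to userspace.  See the vmalloc_user() example earlier in this file for
 * the allocation half of the pattern.
 */
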
unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

vm_fault_t filemap_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	int write = gup_flags & FOLL_WRITE;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
		unsigned int gup_flags)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);

	mmput(mm);
	return len;
}
EXPORT_SYMBOL_GPL(access_process_vm);

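/*
 * Typical use (an illustrative sketch of the ptrace-style pattern, not a
 * verbatim in-tree caller): peeking a word of a child's memory might look
 * like
 *
 *	long word;
 *	int copied = access_process_vm(child, addr, &word, sizeof(word),
 *				       FOLL_FORCE);
 *
 *	if (copied != sizeof(word))
 *		return -EIO;
 *
 * which is essentially what ptrace(PTRACE_PEEKDATA) does.
 */
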
/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that no outstanding VMAs would be broken, and then shrink the
 * vm_regions that extend beyond the new size so that do_mmap() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	i_mmap_lock_read(inode->i_mapping);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			i_mmap_unlock_read(inode->i_mapping);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	i_mmap_unlock_read(inode->i_mapping);
	up_write(&nommu_region_sem);
	return 0;
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB); the 128MB cap is
 * reached once roughly 4GB is free.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS mode. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);

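/*
 * Worked example for the two reserves above (4KiB pages assumed): with
 * 1GiB free, free_kbytes = 262144 << (12 - 10) = 1048576 KiB.  One 32nd
 * of that (~3%) is 32768 KiB, so the user reserve becomes
 * min(32768, 131072) = 32 MiB and the admin reserve min(32768, 8192)
 * = 8 MiB.  The caps of 1UL << 17 and 1UL << 13 KiB correspond to 128 MiB
 * and 8 MiB respectively.
 */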