/*
 * linux/mm/nommu.c
 *
 * Replacement code for mm functions to support CPUs that don't
 * have any form of memory management unit (thus no virtual memory).
 *
 * See Documentation/nommu-mmap.txt
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

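/* list of mapped, potentially shareable regions */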
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

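/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */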
unsigned int kobjsize(const void *objp)
{
	struct page *page;

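	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */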
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

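	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */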
	if (PageSlab(page))
		return ksize(objp);

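	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */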
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

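	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */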
	return PAGE_SIZE << compound_order(page);
}

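/*
 * The nommu variant of __get_user_pages(): there are no page tables to walk,
 * so we simply look up the VMA covering each page in turn
 */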
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     unsigned int foll_flags, struct page **pages,
			     struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

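	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */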
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				get_page(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

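/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end incrementing the page count of a
 *   slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */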
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas)
{
	return __get_user_pages(current, current->mm, start, nr_pages,
				gup_flags, pages, vmas, NULL);
}
EXPORT_SYMBOL(get_user_pages);

long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);

static long __get_user_pages_unlocked(struct task_struct *tsk,
			struct mm_struct *mm, unsigned long start,
			unsigned long nr_pages, struct page **pages,
			unsigned int gup_flags)
{
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
				NULL, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}

long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

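/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */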
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
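	/*
	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */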
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
{
	return __vmalloc(size, flags, PAGE_KERNEL);
}

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

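/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */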
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

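/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */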
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

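/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */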
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

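/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */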
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

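/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */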
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

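/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */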
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

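/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */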
void *vmalloc_32_user(unsigned long size)
{
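	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */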
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

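/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */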
void __weak vmalloc_sync_all(void)
{
}

struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

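/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case we must not allow mmap_sem to be
 *  acquired.
 */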
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

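	/*
	 * Always allow shrinking brk
	 */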
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

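	/*
	 * Ok, looks good - let it rip.
	 */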
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

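/*
 * initialise the percpu counter for VM and region record slabs
 */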
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

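/*
 * validate the region tree
 * - the caller must hold the region lock
 */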
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

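/*
 * add a region into the global tree
 */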
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

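/*
 * delete a region from the global tree
 */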
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

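/*
 * free a contiguous series of pages
 */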
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

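/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */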
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

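/*
 * release a reference to a region
 */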
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

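/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */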
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

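/*
 * delete a VMA from its owning mm_struct and address space
 */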
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache.vmas[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

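/*
 * destroy a VMA record
 */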
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	vm_area_free(vma);
}

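/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */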
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl through the list exhaustively */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

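/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */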
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

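/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */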
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

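/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */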
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl through the list exhaustively */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

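/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */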
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	if (flags & MAP_FIXED)
		return -EINVAL;

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		if (!file->f_op->mmap)
			return -ENODEV;

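		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */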
		if (file->f_op->mmap_capabilities) {
			capabilities = file->f_op->mmap_capabilities(file);
		} else {
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = NOMMU_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					NOMMU_MAP_DIRECT |
					NOMMU_MAP_READ |
					NOMMU_MAP_WRITE;
				break;

			default:
				return -EINVAL;
			}
		}

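		/* eliminate any capabilities that we can't support on this
		 * device */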
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~NOMMU_MAP_DIRECT;
		if (!(file->f_mode & FMODE_CAN_READ))
			capabilities &= ~NOMMU_MAP_COPY;

		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & NOMMU_MAP_DIRECT))
				return -ENODEV;

			capabilities &= ~NOMMU_MAP_COPY;
		} else {
			if (!(capabilities & NOMMU_MAP_COPY))
				return -ENODEV;

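			/* we don't permit a private writable mapping to be
			 * shared with the backing device */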
			if (prot & PROT_WRITE)
				capabilities &= ~NOMMU_MAP_DIRECT;
		}

		if (capabilities & NOMMU_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
			    ) {
				capabilities &= ~NOMMU_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					pr_warn("MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

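		/* handle executable mappings and implied executable
		 * mappings */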
		if (path_noexec(&file->f_path)) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & NOMMU_MAP_EXEC)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & NOMMU_MAP_EXEC)
			 ) {
			capabilities &= ~NOMMU_MAP_DIRECT;
		}
	} else {
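		/* anonymous mappings are always memory backed and can be
		 * privately mapped */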
		capabilities = NOMMU_MAP_COPY;

		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	*_capabilities = capabilities;
	return 0;
}

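/*
 * we've determined that we can make the mapping, now translate what we
 * want into VMA flags
 */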
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);

	if (!(capabilities & NOMMU_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
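		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */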
		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

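	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */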
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

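/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */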
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = call_mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

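	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */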
	return -ENODEV;
}

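/*
 * set up a private mapping or an anonymous shared mapping
 */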
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total, point;
	void *base;
	int ret, order;

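	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */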
	if (capabilities & NOMMU_MAP_DIRECT) {
		ret = call_mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

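		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */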
	}

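	/* allocate a physically contiguous chunk to hold the mapping
	 * - round up to a whole power-of-2 set of pages; the excess over len
	 *   may be trimmed back down according to sysctl_nr_trim_pages
	 */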
	order = get_order(len);
	total = 1 << order;
	point = len >> PAGE_SHIFT;

	/* we don't want to allocate a power-of-2 sized page set */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
		total = point;

	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
	if (!base)
		goto enomem;

	atomic_long_add(total, &mmap_pages_allocated);

	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end = region->vm_start + len;
	region->vm_top = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		ret = kernel_read(vma->vm_file, base, len, &fpos);
		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	} else {
		vma_set_anonymous(vma);
	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end = vma->vm_end = 0;
	region->vm_top = 0;
	return ret;

enomem:
	pr_err("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0, NULL);
	return -ENOMEM;
}

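/*
 * handle mapping creation for uClinux
 */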
unsigned long do_mmap(struct file *file,
			unsigned long addr,
			unsigned long len,
			unsigned long prot,
			unsigned long flags,
			vm_flags_t vm_flags,
			unsigned long pgoff,
			unsigned long *populate,
			struct list_head *uf)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, result;
	int ret;

	*populate = 0;

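	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */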
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

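	/* we ignore the address hint */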
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * want into VMA flags */
	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = vm_area_alloc(current->mm);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

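	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */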
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

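			/* handle inexactly overlapping matches between
			 * mappings */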
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & NOMMU_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY)
				vma->vm_flags |= VM_MAPPED_COPY;
			else {
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

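		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   specify where the region should be mapped */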
		if (capabilities & NOMMU_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

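				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */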
				ret = -ENODEV;
				if (!(capabilities & NOMMU_MAP_COPY))
					goto error_just_free;

				capabilities &= ~NOMMU_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

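	/* set up the mapping
	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
	 */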
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

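	/* we flush the region from the icache only when the first executable
	 * mapping of it is made  */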
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	vm_area_free(vma);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	pr_warn("Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;

error_getting_region:
	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;
}

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif

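/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */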
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

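	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */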
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = vm_area_dup(vma);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

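/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */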
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

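	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * or the list */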
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

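/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */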
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
					current->pid, current->comm,
					start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start)
				return -EINVAL;
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end)
			return -EINVAL;
		if (offset_in_page(start))
			return -EINVAL;
		if (end != vma->vm_end && offset_in_page(end))
			return -EINVAL;
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0)
				return ret;
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	return 0;
}
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len, NULL);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

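/*
 * release all the mappings made in a process's VM space
 */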
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}
}

int vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

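/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and we
 * must make sure it stays within the region allocated by do_mmap_private()
 * and that the block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */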
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (offset_in_page(addr))
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

vm_fault_t filemap_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	int write = gup_flags & FOLL_WRITE;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

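/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */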
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}

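/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */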
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
		unsigned int gup_flags)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);

	mmput(mm);
	return len;
}
EXPORT_SYMBOL_GPL(access_process_vm);

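/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond so that do_mmap() doesn't automatically grant
 * mappings that are too big.
 */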
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	i_mmap_lock_read(inode->i_mapping);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			i_mmap_unlock_read(inode->i_mapping);
			up_write(&nommu_region_sem);
			return -ETXTBSY;
		}
	}

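	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */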
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	i_mmap_unlock_read(inode->i_mapping);
	up_write(&nommu_region_sem);
	return 0;
}

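/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OOM_DISABLE mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */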
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);

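/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top/kill at least.
 */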
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);