/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPU's that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;
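
/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux deals
 * with hypervisors. The balloon driver could in concept decide to
 * release some memory to get closer to the target commitment.
 *
 * So, vm_memory_committed() reports the amount of memory currently
 * committed, as tracked in vm_committed_as.
 */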
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}

EXPORT_SYMBOL_GPL(vm_memory_committed);

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};
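
/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */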
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
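
/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end incrementing the page count of a
 *   slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */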
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);

long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return get_user_pages(tsk, mm, start, nr_pages, write, force,
			      pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);

long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       int write, int force, struct page **pages,
			       unsigned int gup_flags)
{
	long ret;
	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
			     pages, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
					 force, pages, 0);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
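
/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */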
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	       unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
{
	return __vmalloc(size, flags, PAGE_KERNEL);
}

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}
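
/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */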
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
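
/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */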
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			 PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);
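
/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */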
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);
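
/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */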
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
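
/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */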
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
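
/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */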
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
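
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */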
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
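
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */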
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
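
/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 */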
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);
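
/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case we must get the write lock
 *  around the pages we map.
 */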
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
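
/*
 * initialise the VMA and region record slabs
 */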
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}
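
/*
 * validate the region tree
 * - the caller must hold the region lock
 */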
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif
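
/*
 * add a region into the global tree
 */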
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}
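
/*
 * delete a region from the global tree
 */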
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}
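
/*
 * free a contiguous series of pages
 */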
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}
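
/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */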
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}
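
/*
 * release a reference to a region
 */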
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}
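
/*
 * update protection on a vma
 */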
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}
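
/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */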
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}
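
/*
 * delete a VMA from its owning mm_struct and address space
 */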
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = NULL;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}
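
/*
 * destroy a VMA record
 */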
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}
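
/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */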
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = ACCESS_ONCE(mm->mmap_cache);
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
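
/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */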
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}
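
/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */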
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}
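
/*
 * look up the first VMA which exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */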
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start == addr && vma->vm_end == end)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
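
/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */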
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file_inode(file)->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (path_noexec(&file->f_path)) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & BDI_CAP_EXEC_MAP)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}
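
/*
 * we've determined that we can make the mapping, now translate what we
 * want into VMA flags
 */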
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* a private mapping belonging to a traced process mustn't be shared
	 * behind the tracer's back, as we can't be certain the pages would
	 * stay shared once written to via ptrace, so drop VM_MAYSHARE for
	 * MAP_PRIVATE mappings of ptraced processes */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}
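
/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */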
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}
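
/*
 * set up a private mapping or an anonymous shared mapping
 */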
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = len >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}
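
/*
 * handle mapping creation for uClinux
 */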
unsigned long do_mmap(struct file *file,
		      unsigned long addr,
		      unsigned long len,
		      unsigned long prot,
		      unsigned long flags,
		      vm_flags_t vm_flags,
		      unsigned long pgoff,
		      unsigned long *populate,
		      struct list_head *uf_unused)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * want into VMA flags */
	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping regions: the region
			 * must be a superset of our proposal to be usable */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made  */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
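
/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */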
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}
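
/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */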
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}
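
/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */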
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len, NULL);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}
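
/*
 * release all the mappings made in a process's VM space
 */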
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}
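
/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */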
static unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);
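
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */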
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed, reserve;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min(mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}
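
/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */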
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
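
/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */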
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}
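
/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond so that do_mmap() doesn't
 * automatically grant mappings that are too large.
 */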
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
				  0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);
	return 0;
}
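
/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is reasonable to recover with sshd/login, bash, and top/kill.
 */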
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
module_init(init_user_reserve)
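
/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top/kill at least. Systems with less than 256MB will
 * only reserve 3% of free pages by default.
 */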
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
module_init(init_admin_reserve)