/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPU's that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/sched/sysctl.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

/*
 * The global memory commitment made in the system can be a metric that can
 * be used to drive ballooning decisions when Linux deals with hypervisors.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}

EXPORT_SYMBOL_GPL(vm_memory_committed);

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
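
/*
 * Illustrative only (not part of the original source): a caller that wants
 * to know how much memory really backs an allocation might do the
 * following; "obj" is a hypothetical kmalloc'd pointer.
 *
 *	char *obj = kmalloc(100, GFP_KERNEL);
 *	unsigned int sz = kobjsize(obj);	// >= 100, e.g. 128
 */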

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end incrementing the page count of a
 *   slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
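
/*
 * Illustrative only: pinning a single page of a hypothetical user buffer
 * "ubuf" for writing, then releasing it; error handling is elided.
 *
 *	struct page *pg;
 *	long n = get_user_pages(current, current->mm,
 *				(unsigned long) ubuf, 1, 1, 0, &pg, NULL);
 *	if (n == 1)
 *		page_cache_release(pg);
 */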

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
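
/*
 * Illustrative only: on !MMU, virtual == physical, so for a VM_PFNMAP vma
 * the PFN of a hypothetical address "addr" inside it is simply the address
 * shifted down:
 *
 *	unsigned long pfn;
 *	if (!follow_pfn(vma, addr, &pfn))
 *		kdebug("pfn=%lx", pfn);		// == addr >> PAGE_SHIFT
 */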

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
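
/*
 * Illustrative only: on !MMU, vmalloc() is just a kmalloc() wrapper, so
 * vmalloc()/vfree() pairs behave as usual but the memory is physically
 * contiguous and large requests fail more readily than on MMU systems:
 *
 *	void *p = vmalloc(16 * 1024);
 *	...
 *	vfree(p);
 */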

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 * the brk heap on NOMMU lives inside a region fixed at exec time; it may
 * shrink freely, but it can only grow up to mm->context.end_brk - no new
 * memory is allocated here
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
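
/*
 * Illustrative only: from userspace, growing the heap on a !MMU kernel
 * succeeds only within the preallocated brk region; sbrk() in a C library
 * is typically implemented on top of this syscall.
 *
 *	void *old = sbrk(0);
 *	if (sbrk(4096) != (void *) -1)
 *		... 4096 more bytes usable at "old" ...
 */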

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(curr->mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
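
/*
 * Illustrative only: a typical lookup under the read lock; "addr" is a
 * hypothetical user address.
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		... addr lies inside vma ...
 *	up_read(&mm->mmap_sem);
 */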

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the VMA that exactly matches the given address range
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file_inode(file)->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened at least for read */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* a private mapping made by a traced process mustn't share pages with
	 * other users, as the tracer may want to modify them without the
	 * changes being visible elsewhere */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = len >> PAGE_SHIFT;

	/* we don't want to allocate a power-of-2 sized page set, so trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}
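
/*
 * Worked example of the trimming above (illustrative): a 3-page request
 * (len = 12KB with 4KB pages) gets order = get_order(len) = 2, i.e.
 * total = 4 pages. With point = 3, the loop shaves ilog2(4 - 3) = order 0,
 * freeing the single excess page so only 3 pages stay accounted.
 */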

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff,
			    unsigned long *populate)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping regions */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}
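
/*
 * Illustrative only: a userspace anonymous mapping on a !MMU kernel; the
 * address hint is ignored and MAP_FIXED is rejected by
 * validate_mmap_request() above.
 *
 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */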

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);
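
/*
 * Illustrative only: unmapping the tail of a hypothetical two-page
 * anonymous mapping "p" shrinks the VMA in place via shrink_vma() rather
 * than splitting it:
 *
 *	munmap(p + PAGE_SIZE, PAGE_SIZE);	// from userspace
 */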

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and we
 * must not move it
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
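
/*
 * Illustrative only: on !MMU, mremap() never moves a mapping; a private
 * mapping "p" of two pages can be shrunk and later regrown up to the size
 * its backing region was created with, but no further:
 *
 *	mremap(p, 2 * PAGE_SIZE, PAGE_SIZE, 0);	// ok, shrinks in place
 *	mremap(p, PAGE_SIZE, 2 * PAGE_SIZE, 0);	// ok, region still owns it
 *	mremap(p, 2 * PAGE_SIZE, 4 * PAGE_SIZE, 0);	// fails with ENOMEM
 */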

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed, reserve;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();

	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min(mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
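
/*
 * Worked example of the accounting above (illustrative, 4KB pages): with
 * sysctl_admin_reserve_kbytes = 8192, a non-root caller forfeits
 * 8192 >> (12 - 10) = 2048 pages (8MB) of headroom before the committed
 * total is compared against the limit.
 */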

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					  (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}
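
/*
 * Illustrative only: this is the primitive behind ptrace peek/poke; a
 * hypothetical debugger helper reading one word from a tracee "child"
 * might do:
 *
 *	unsigned long word;
 *	if (access_process_vm(child, addr, &word, sizeof(word), 0)
 *	    == sizeof(word))
 *		... word now holds the tracee's data ...
 */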

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
				  0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);
	return 0;
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
module_init(init_user_reserve)
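
/*
 * Worked example (illustrative): with 2GB free, free_kbytes is 2097152,
 * so free_kbytes / 32 = 65536KB (64MB) wins over the 1UL << 17 = 131072KB
 * (128MB) cap and the user reserve becomes 64MB.
 */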

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * The default value is min(3% of free memory, 8MB)
 * 8MB is enough to recover with sshd, bash, and top/kill.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
module_init(init_admin_reserve)