/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)	(addr)
#endif

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

/* Keep the expensive rbtree consistency checks compiled out by default. */
#undef DEBUG_MM_RB

/*
 * protection_map maps the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of
 * vma->vm_flags to arch page protection values: the first eight entries
 * are for private (copy-on-write) mappings, the last eight for shared
 * mappings.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
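
/*
 * The total of pages committed to the system is tracked in a single
 * percpu counter, placed in its own cacheline so that frequent updates
 * from different CPUs do not bounce a line shared with unrelated data.
 */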
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
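
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping of @pages pages.  Returns 0 if there is enough memory for the
 * allocation to succeed and -ENOMEM if there is not.  Behaviour depends
 * on the vm.overcommit_memory policy (always / guess / never).
 * cap_sys_admin is 1 if the caller has admin privileges, 0 otherwise.
 */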
112int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
113{
114 unsigned long free, allowed;
115
116 vm_acct_memory(pages);
117
118
119
120
121 if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
122 return 0;
123
124 if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
125 free = global_page_state(NR_FREE_PAGES);
126 free += global_page_state(NR_FILE_PAGES);
127
128
129
130
131
132
133
134 free -= global_page_state(NR_SHMEM);
135
136 free += nr_swap_pages;
137
138
139
140
141
142
143
144 free += global_page_state(NR_SLAB_RECLAIMABLE);
145
146
147
148
149 if (free <= totalreserve_pages)
150 goto error;
151 else
152 free -= totalreserve_pages;
153
154
155
156
157 if (!cap_sys_admin)
158 free -= free / 32;
159
160 if (free > pages)
161 return 0;
162
163 goto error;
164 }
165
166 allowed = (totalram_pages - hugetlb_total_pages())
167 * sysctl_overcommit_ratio / 100;
168
169
170
171 if (!cap_sys_admin)
172 allowed -= allowed / 32;
173 allowed += total_swap_pages;
174
175
176
177 if (mm)
178 allowed -= mm->total_vm / 32;
179
180 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
181 return 0;
182error:
183 vm_unacct_memory(pages);
184
185 return -ENOMEM;
186}
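
/*
 * Unlink a file-backed vma from its address_space's i_mmap tree (or the
 * nonlinear list).  The caller must hold mapping->i_mmap_mutex.
 */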
191static void __remove_shared_vm_struct(struct vm_area_struct *vma,
192 struct file *file, struct address_space *mapping)
193{
194 if (vma->vm_flags & VM_DENYWRITE)
195 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
196 if (vma->vm_flags & VM_SHARED)
197 mapping->i_mmap_writable--;
198
199 flush_dcache_mmap_lock(mapping);
200 if (unlikely(vma->vm_flags & VM_NONLINEAR))
201 list_del_init(&vma->shared.vm_set.list);
202 else
203 vma_prio_tree_remove(vma, &mapping->i_mmap);
204 flush_dcache_mmap_unlock(mapping);
205}
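
/*
 * Unlink a file-based vm structure from its address_space, to hide the
 * vma from rmap and truncation before freeing its page tables.
 */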
211void unlink_file_vma(struct vm_area_struct *vma)
212{
213 struct file *file = vma->vm_file;
214
215 if (file) {
216 struct address_space *mapping = file->f_mapping;
217 mutex_lock(&mapping->i_mmap_mutex);
218 __remove_shared_vm_struct(vma, file, mapping);
219 mutex_unlock(&mapping->i_mmap_mutex);
220 }
221}
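
/*
 * Close a vm structure and free it, returning the next vma in the list.
 */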
226static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
227{
228 struct vm_area_struct *next = vma->vm_next;
229
230 might_sleep();
231 if (vma->vm_ops && vma->vm_ops->close)
232 vma->vm_ops->close(vma);
233 if (vma->vm_file) {
234 fput(vma->vm_file);
235 if (vma->vm_flags & VM_EXECUTABLE)
236 removed_exe_file_vma(vma->vm_mm);
237 }
238 mpol_put(vma_policy(vma));
239 kmem_cache_free(vm_area_cachep, vma);
240 return next;
241}
242
243SYSCALL_DEFINE1(brk, unsigned long, brk)
244{
245 unsigned long rlim, retval;
246 unsigned long newbrk, oldbrk;
247 struct mm_struct *mm = current->mm;
248 unsigned long min_brk;
249
250 down_write(&mm->mmap_sem);
251
252#ifdef CONFIG_COMPAT_BRK
253
254
255
256
257
258 if (current->brk_randomized)
259 min_brk = mm->start_brk;
260 else
261 min_brk = mm->end_data;
262#else
263 min_brk = mm->start_brk;
264#endif
265 if (brk < min_brk)
266 goto out;
267
268
269
270
271
272
273
274 rlim = rlimit(RLIMIT_DATA);
275 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
276 (mm->end_data - mm->start_data) > rlim)
277 goto out;
278
279 newbrk = PAGE_ALIGN(brk);
280 oldbrk = PAGE_ALIGN(mm->brk);
281 if (oldbrk == newbrk)
282 goto set_brk;
283
284
285 if (brk <= mm->brk) {
286 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
287 goto set_brk;
288 goto out;
289 }
290
291
292 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
293 goto out;
294
295
296 if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
297 goto out;
298set_brk:
299 mm->brk = brk;
300out:
301 retval = mm->brk;
302 up_write(&mm->mmap_sem);
303 return retval;
304}
305
306#ifdef DEBUG_MM_RB
307static int browse_rb(struct rb_root *root)
308{
309 int i = 0, j;
310 struct rb_node *nd, *pn = NULL;
311 unsigned long prev = 0, pend = 0;
312
313 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
314 struct vm_area_struct *vma;
315 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
316 if (vma->vm_start < prev)
317 printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
318 if (vma->vm_start < pend)
319 printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
320 if (vma->vm_start > vma->vm_end)
321 printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
322 i++;
323 pn = nd;
324 prev = vma->vm_start;
325 pend = vma->vm_end;
326 }
327 j = 0;
328 for (nd = pn; nd; nd = rb_prev(nd)) {
329 j++;
330 }
331 if (i != j)
332 printk("backwards %d, forwards %d\n", j, i), i = 0;
333 return i;
334}
335
336void validate_mm(struct mm_struct *mm)
337{
338 int bug = 0;
339 int i = 0;
340 struct vm_area_struct *tmp = mm->mmap;
341 while (tmp) {
342 tmp = tmp->vm_next;
343 i++;
344 }
345 if (i != mm->map_count)
346 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
347 i = browse_rb(&mm->mm_rb);
348 if (i != mm->map_count)
349 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
350 BUG_ON(bug);
351}
352#else
353#define validate_mm(mm) do { } while (0)
354#endif
355
356static struct vm_area_struct *
357find_vma_prepare(struct mm_struct *mm, unsigned long addr,
358 struct vm_area_struct **pprev, struct rb_node ***rb_link,
359 struct rb_node ** rb_parent)
360{
361 struct vm_area_struct * vma;
362 struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
363
364 __rb_link = &mm->mm_rb.rb_node;
365 rb_prev = __rb_parent = NULL;
366 vma = NULL;
367
368 while (*__rb_link) {
369 struct vm_area_struct *vma_tmp;
370
371 __rb_parent = *__rb_link;
372 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
373
374 if (vma_tmp->vm_end > addr) {
375 vma = vma_tmp;
376 if (vma_tmp->vm_start <= addr)
377 break;
378 __rb_link = &__rb_parent->rb_left;
379 } else {
380 rb_prev = __rb_parent;
381 __rb_link = &__rb_parent->rb_right;
382 }
383 }
384
385 *pprev = NULL;
386 if (rb_prev)
387 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
388 *rb_link = __rb_link;
389 *rb_parent = __rb_parent;
390 return vma;
391}
392
393void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
394 struct rb_node **rb_link, struct rb_node *rb_parent)
395{
396 rb_link_node(&vma->vm_rb, rb_parent, rb_link);
397 rb_insert_color(&vma->vm_rb, &mm->mm_rb);
398}
399
400static void __vma_link_file(struct vm_area_struct *vma)
401{
402 struct file *file;
403
404 file = vma->vm_file;
405 if (file) {
406 struct address_space *mapping = file->f_mapping;
407
408 if (vma->vm_flags & VM_DENYWRITE)
409 atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
410 if (vma->vm_flags & VM_SHARED)
411 mapping->i_mmap_writable++;
412
413 flush_dcache_mmap_lock(mapping);
414 if (unlikely(vma->vm_flags & VM_NONLINEAR))
415 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
416 else
417 vma_prio_tree_insert(vma, &mapping->i_mmap);
418 flush_dcache_mmap_unlock(mapping);
419 }
420}
421
422static void
423__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
424 struct vm_area_struct *prev, struct rb_node **rb_link,
425 struct rb_node *rb_parent)
426{
427 __vma_link_list(mm, vma, prev, rb_parent);
428 __vma_link_rb(mm, vma, rb_link, rb_parent);
429}
430
431static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
432 struct vm_area_struct *prev, struct rb_node **rb_link,
433 struct rb_node *rb_parent)
434{
435 struct address_space *mapping = NULL;
436
437 if (vma->vm_file)
438 mapping = vma->vm_file->f_mapping;
439
440 if (mapping)
441 mutex_lock(&mapping->i_mmap_mutex);
442
443 __vma_link(mm, vma, prev, rb_link, rb_parent);
444 __vma_link_file(vma);
445
446 if (mapping)
447 mutex_unlock(&mapping->i_mmap_mutex);
448
449 mm->map_count++;
450 validate_mm(mm);
451}
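
/*
 * Helper for vma_adjust() in the insert case: link the vma into the
 * mm's list and rbtree.  The file linkage (i_mmap) has already been
 * set up by the caller.
 */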
458static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
459{
460 struct vm_area_struct *__vma, *prev;
461 struct rb_node **rb_link, *rb_parent;
462
463 __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
464 BUG_ON(__vma && __vma->vm_start < vma->vm_end);
465 __vma_link(mm, vma, prev, rb_link, rb_parent);
466 mm->map_count++;
467}
468
469static inline void
470__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
471 struct vm_area_struct *prev)
472{
473 struct vm_area_struct *next = vma->vm_next;
474
475 prev->vm_next = next;
476 if (next)
477 next->vm_prev = prev;
478 rb_erase(&vma->vm_rb, &mm->mm_rb);
479 if (mm->mmap_cache == vma)
480 mm->mmap_cache = prev;
481}
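
/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before the vma it is adjusting.
 */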
490int vma_adjust(struct vm_area_struct *vma, unsigned long start,
491 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
492{
493 struct mm_struct *mm = vma->vm_mm;
494 struct vm_area_struct *next = vma->vm_next;
495 struct vm_area_struct *importer = NULL;
496 struct address_space *mapping = NULL;
497 struct prio_tree_root *root = NULL;
498 struct anon_vma *anon_vma = NULL;
499 struct file *file = vma->vm_file;
500 long adjust_next = 0;
501 int remove_next = 0;
502
503 if (next && !insert) {
504 struct vm_area_struct *exporter = NULL;
505
506 if (end >= next->vm_end) {
507
508
509
510
511again: remove_next = 1 + (end > next->vm_end);
512 end = next->vm_end;
513 exporter = next;
514 importer = vma;
515 } else if (end > next->vm_start) {
516
517
518
519
520 adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
521 exporter = next;
522 importer = vma;
523 } else if (end < vma->vm_end) {
524
525
526
527
528
529 adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
530 exporter = vma;
531 importer = next;
532 }
533
534
535
536
537
538
539 if (exporter && exporter->anon_vma && !importer->anon_vma) {
540 if (anon_vma_clone(importer, exporter))
541 return -ENOMEM;
542 importer->anon_vma = exporter->anon_vma;
543 }
544 }
545
546 if (file) {
547 mapping = file->f_mapping;
548 if (!(vma->vm_flags & VM_NONLINEAR))
549 root = &mapping->i_mmap;
550 mutex_lock(&mapping->i_mmap_mutex);
551 if (insert) {
552
553
554
555
556
557
558 __vma_link_file(insert);
559 }
560 }
561
562 vma_adjust_trans_huge(vma, start, end, adjust_next);
563
564
565
566
567
568
569
570 if (vma->anon_vma && (importer || start != vma->vm_start)) {
571 anon_vma = vma->anon_vma;
572 anon_vma_lock(anon_vma);
573 }
574
575 if (root) {
576 flush_dcache_mmap_lock(mapping);
577 vma_prio_tree_remove(vma, root);
578 if (adjust_next)
579 vma_prio_tree_remove(next, root);
580 }
581
582 vma->vm_start = start;
583 vma->vm_end = end;
584 vma->vm_pgoff = pgoff;
585 if (adjust_next) {
586 next->vm_start += adjust_next << PAGE_SHIFT;
587 next->vm_pgoff += adjust_next;
588 }
589
590 if (root) {
591 if (adjust_next)
592 vma_prio_tree_insert(next, root);
593 vma_prio_tree_insert(vma, root);
594 flush_dcache_mmap_unlock(mapping);
595 }
596
597 if (remove_next) {
598
599
600
601
602 __vma_unlink(mm, next, vma);
603 if (file)
604 __remove_shared_vm_struct(next, file, mapping);
605 } else if (insert) {
606
607
608
609
610
611 __insert_vm_struct(mm, insert);
612 }
613
614 if (anon_vma)
615 anon_vma_unlock(anon_vma);
616 if (mapping)
617 mutex_unlock(&mapping->i_mmap_mutex);
618
619 if (remove_next) {
620 if (file) {
621 fput(file);
622 if (next->vm_flags & VM_EXECUTABLE)
623 removed_exe_file_vma(mm);
624 }
625 if (next->anon_vma)
626 anon_vma_merge(vma, next);
627 mm->map_count--;
628 mpol_put(vma_policy(next));
629 kmem_cache_free(vm_area_cachep, next);
630
631
632
633
634
635 if (remove_next == 2) {
636 next = vma->vm_next;
637 goto again;
638 }
639 }
640
641 validate_mm(mm);
642
643 return 0;
644}
645
646
647
648
649
650static inline int is_mergeable_vma(struct vm_area_struct *vma,
651 struct file *file, unsigned long vm_flags)
652{
653
654 if ((vma->vm_flags ^ vm_flags) & ~VM_CAN_NONLINEAR)
655 return 0;
656 if (vma->vm_file != file)
657 return 0;
658 if (vma->vm_ops && vma->vm_ops->close)
659 return 0;
660 return 1;
661}
662
663static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
664 struct anon_vma *anon_vma2,
665 struct vm_area_struct *vma)
666{
667
668
669
670
671 if ((!anon_vma1 || !anon_vma2) && (!vma ||
672 list_is_singular(&vma->anon_vma_chain)))
673 return 1;
674 return anon_vma1 == anon_vma2;
675}
676
677
678
679
680
681
682
683
684
685
686
687
688static int
689can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
690 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
691{
692 if (is_mergeable_vma(vma, file, vm_flags) &&
693 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
694 if (vma->vm_pgoff == vm_pgoff)
695 return 1;
696 }
697 return 0;
698}
699
700
701
702
703
704
705
706
707static int
708can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
709 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
710{
711 if (is_mergeable_vma(vma, file, vm_flags) &&
712 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
713 pgoff_t vm_pglen;
714 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
715 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
716 return 1;
717 }
718 return 0;
719}
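
/*
 * Given a new mapping request (addr, end, vm_flags, file, pgoff),
 * figure out whether it can be merged with its predecessor and/or its
 * successor vma.  If so, extend the existing vma(s) via vma_adjust()
 * and return the resulting vma; otherwise return NULL and let the
 * caller allocate a new vma.  "prev" is the vma preceding addr.
 */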
750struct vm_area_struct *vma_merge(struct mm_struct *mm,
751 struct vm_area_struct *prev, unsigned long addr,
752 unsigned long end, unsigned long vm_flags,
753 struct anon_vma *anon_vma, struct file *file,
754 pgoff_t pgoff, struct mempolicy *policy)
755{
756 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
757 struct vm_area_struct *area, *next;
758 int err;
759
760
761
762
763
764 if (vm_flags & VM_SPECIAL)
765 return NULL;
766
767 if (prev)
768 next = prev->vm_next;
769 else
770 next = mm->mmap;
771 area = next;
772 if (next && next->vm_end == end)
773 next = next->vm_next;
774
775
776
777
778 if (prev && prev->vm_end == addr &&
779 mpol_equal(vma_policy(prev), policy) &&
780 can_vma_merge_after(prev, vm_flags,
781 anon_vma, file, pgoff)) {
782
783
784
785 if (next && end == next->vm_start &&
786 mpol_equal(policy, vma_policy(next)) &&
787 can_vma_merge_before(next, vm_flags,
788 anon_vma, file, pgoff+pglen) &&
789 is_mergeable_anon_vma(prev->anon_vma,
790 next->anon_vma, NULL)) {
791
792 err = vma_adjust(prev, prev->vm_start,
793 next->vm_end, prev->vm_pgoff, NULL);
794 } else
795 err = vma_adjust(prev, prev->vm_start,
796 end, prev->vm_pgoff, NULL);
797 if (err)
798 return NULL;
799 khugepaged_enter_vma_merge(prev);
800 return prev;
801 }
802
803
804
805
806 if (next && end == next->vm_start &&
807 mpol_equal(policy, vma_policy(next)) &&
808 can_vma_merge_before(next, vm_flags,
809 anon_vma, file, pgoff+pglen)) {
810 if (prev && addr < prev->vm_end)
811 err = vma_adjust(prev, prev->vm_start,
812 addr, prev->vm_pgoff, NULL);
813 else
814 err = vma_adjust(area, addr, next->vm_end,
815 next->vm_pgoff - pglen, NULL);
816 if (err)
817 return NULL;
818 khugepaged_enter_vma_merge(area);
819 return area;
820 }
821
822 return NULL;
823}
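
/*
 * Rough compatibility check to quickly see whether sharing an anon_vma
 * is even worth considering: the vmas must be adjacent, with the same
 * file, policy and offset, and may differ only in the protection bits
 * that mprotect can change.
 */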
838static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
839{
840 return a->vm_end == b->vm_start &&
841 mpol_equal(vma_policy(a), vma_policy(b)) &&
842 a->vm_file == b->vm_file &&
843 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
844 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
845}
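
/*
 * Decide whether the anon_vma of "old" can be reused for a new vma
 * adjacent to "a" and "b".  Only reuse it when the vmas are compatible
 * and "old" has a single, simple anon_vma chain.
 */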
869static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
870{
871 if (anon_vma_compatible(a, b)) {
872 struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
873
874 if (anon_vma && list_is_singular(&old->anon_vma_chain))
875 return anon_vma;
876 }
877 return NULL;
878}
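
/*
 * find_mergeable_anon_vma() is used by anon_vma_prepare() to check the
 * neighbouring vmas for a suitable anon_vma to share, before it goes
 * off and allocates a new one.
 */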
888struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
889{
890 struct anon_vma *anon_vma;
891 struct vm_area_struct *near;
892
893 near = vma->vm_next;
894 if (!near)
895 goto try_prev;
896
897 anon_vma = reusable_anon_vma(near, vma, near);
898 if (anon_vma)
899 return anon_vma;
900try_prev:
901 near = vma->vm_prev;
902 if (!near)
903 goto none;
904
905 anon_vma = reusable_anon_vma(near, near, vma);
906 if (anon_vma)
907 return anon_vma;
908none:
909
910
911
912
913
914
915
916
917 return NULL;
918}
919
920#ifdef CONFIG_PROC_FS
921void vm_stat_account(struct mm_struct *mm, unsigned long flags,
922 struct file *file, long pages)
923{
924 const unsigned long stack_flags
925 = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
926
927 if (file) {
928 mm->shared_vm += pages;
929 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
930 mm->exec_vm += pages;
931 } else if (flags & stack_flags)
932 mm->stack_vm += pages;
933 if (flags & (VM_RESERVED|VM_IO))
934 mm->reserved_vm += pages;
935}
936#endif
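
/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */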
942unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
943 unsigned long len, unsigned long prot,
944 unsigned long flags, unsigned long pgoff)
945{
946 struct mm_struct * mm = current->mm;
947 struct inode *inode;
948 vm_flags_t vm_flags;
949 int error;
950 unsigned long reqprot = prot;
951
952
953
954
955
956
957
958 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
959 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
960 prot |= PROT_EXEC;
961
962 if (!len)
963 return -EINVAL;
964
965 if (!(flags & MAP_FIXED))
966 addr = round_hint_to_min(addr);
967
968
969 len = PAGE_ALIGN(len);
970 if (!len)
971 return -ENOMEM;
972
973
974 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
975 return -EOVERFLOW;
976
977
978 if (mm->map_count > sysctl_max_map_count)
979 return -ENOMEM;
980
981
982
983
984 addr = get_unmapped_area(file, addr, len, pgoff, flags);
985 if (addr & ~PAGE_MASK)
986 return addr;
987
988
989
990
991
992 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
993 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
994
995 if (flags & MAP_LOCKED)
996 if (!can_do_mlock())
997 return -EPERM;
998
999
1000 if (vm_flags & VM_LOCKED) {
1001 unsigned long locked, lock_limit;
1002 locked = len >> PAGE_SHIFT;
1003 locked += mm->locked_vm;
1004 lock_limit = rlimit(RLIMIT_MEMLOCK);
1005 lock_limit >>= PAGE_SHIFT;
1006 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1007 return -EAGAIN;
1008 }
1009
1010 inode = file ? file->f_path.dentry->d_inode : NULL;
1011
1012 if (file) {
1013 switch (flags & MAP_TYPE) {
1014 case MAP_SHARED:
1015 if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1016 return -EACCES;
1017
1018
1019
1020
1021
1022 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1023 return -EACCES;
1024
1025
1026
1027
1028 if (locks_verify_locked(inode))
1029 return -EAGAIN;
1030
1031 vm_flags |= VM_SHARED | VM_MAYSHARE;
1032 if (!(file->f_mode & FMODE_WRITE))
1033 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1034
1035
1036 case MAP_PRIVATE:
1037 if (!(file->f_mode & FMODE_READ))
1038 return -EACCES;
1039 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1040 if (vm_flags & VM_EXEC)
1041 return -EPERM;
1042 vm_flags &= ~VM_MAYEXEC;
1043 }
1044
1045 if (!file->f_op || !file->f_op->mmap)
1046 return -ENODEV;
1047 break;
1048
1049 default:
1050 return -EINVAL;
1051 }
1052 } else {
1053 switch (flags & MAP_TYPE) {
1054 case MAP_SHARED:
1055
1056
1057
1058 pgoff = 0;
1059 vm_flags |= VM_SHARED | VM_MAYSHARE;
1060 break;
1061 case MAP_PRIVATE:
1062
1063
1064
1065 pgoff = addr >> PAGE_SHIFT;
1066 break;
1067 default:
1068 return -EINVAL;
1069 }
1070 }
1071
1072 error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1073 if (error)
1074 return error;
1075
1076 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1077}
1078EXPORT_SYMBOL(do_mmap_pgoff);
1079
1080SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1081 unsigned long, prot, unsigned long, flags,
1082 unsigned long, fd, unsigned long, pgoff)
1083{
1084 struct file *file = NULL;
1085 unsigned long retval = -EBADF;
1086
1087 if (!(flags & MAP_ANONYMOUS)) {
1088 audit_mmap_fd(fd, flags);
1089 if (unlikely(flags & MAP_HUGETLB))
1090 return -EINVAL;
1091 file = fget(fd);
1092 if (!file)
1093 goto out;
1094 } else if (flags & MAP_HUGETLB) {
1095 struct user_struct *user = NULL;
1096
1097
1098
1099
1100
1101
1102 len = ALIGN(len, huge_page_size(&default_hstate));
1103 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
1104 &user, HUGETLB_ANONHUGE_INODE);
1105 if (IS_ERR(file))
1106 return PTR_ERR(file);
1107 }
1108
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);
1114
1115 if (file)
1116 fput(file);
1117out:
1118 return retval;
1119}
1120
1121#ifdef __ARCH_WANT_SYS_OLD_MMAP
1122struct mmap_arg_struct {
1123 unsigned long addr;
1124 unsigned long len;
1125 unsigned long prot;
1126 unsigned long flags;
1127 unsigned long fd;
1128 unsigned long offset;
1129};
1130
1131SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1132{
1133 struct mmap_arg_struct a;
1134
1135 if (copy_from_user(&a, arg, sizeof(a)))
1136 return -EFAULT;
1137 if (a.offset & ~PAGE_MASK)
1138 return -EINVAL;
1139
1140 return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1141 a.offset >> PAGE_SHIFT);
1142}
1143#endif
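
/*
 * Some shared mappings want their pages marked read-only so that the
 * first write can be intercepted (for dirty tracking / page_mkwrite).
 * Return 1 if vm_page_prot should be downgraded to the write-protected
 * private version for this vma.
 */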
1151int vma_wants_writenotify(struct vm_area_struct *vma)
1152{
1153 vm_flags_t vm_flags = vma->vm_flags;
1154
1155
1156 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1157 return 0;
1158
1159
1160 if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1161 return 1;
1162
1163
1164 if (pgprot_val(vma->vm_page_prot) !=
1165 pgprot_val(vm_get_page_prot(vm_flags)))
1166 return 0;
1167
1168
1169 if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
1170 return 0;
1171
1172
1173 return vma->vm_file && vma->vm_file->f_mapping &&
1174 mapping_cap_account_dirty(vma->vm_file->f_mapping);
1175}
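
/*
 * We account for memory only if it is a private writable mapping,
 * is not backed by hugepages, and VM_NORESERVE was not requested.
 */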
1181static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1182{
1183
1184
1185
1186
1187 if (file && is_file_hugepages(file))
1188 return 0;
1189
1190 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1191}
1192
1193unsigned long mmap_region(struct file *file, unsigned long addr,
1194 unsigned long len, unsigned long flags,
1195 vm_flags_t vm_flags, unsigned long pgoff)
1196{
1197 struct mm_struct *mm = current->mm;
1198 struct vm_area_struct *vma, *prev;
1199 int correct_wcount = 0;
1200 int error;
1201 struct rb_node **rb_link, *rb_parent;
1202 unsigned long charged = 0;
1203 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
1204
1205
1206 error = -ENOMEM;
1207munmap_back:
1208 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1209 if (vma && vma->vm_start < addr + len) {
1210 if (do_munmap(mm, addr, len))
1211 return -ENOMEM;
1212 goto munmap_back;
1213 }
1214
1215
1216 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1217 return -ENOMEM;
1218
1219
1220
1221
1222
1223 if ((flags & MAP_NORESERVE)) {
1224
1225 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1226 vm_flags |= VM_NORESERVE;
1227
1228
1229 if (file && is_file_hugepages(file))
1230 vm_flags |= VM_NORESERVE;
1231 }
1232
1233
1234
1235
1236 if (accountable_mapping(file, vm_flags)) {
1237 charged = len >> PAGE_SHIFT;
1238 if (security_vm_enough_memory(charged))
1239 return -ENOMEM;
1240 vm_flags |= VM_ACCOUNT;
1241 }
1242
1243
1244
1245
1246 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
1247 if (vma)
1248 goto out;
1249
1250
1251
1252
1253
1254
1255 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1256 if (!vma) {
1257 error = -ENOMEM;
1258 goto unacct_error;
1259 }
1260
1261 vma->vm_mm = mm;
1262 vma->vm_start = addr;
1263 vma->vm_end = addr + len;
1264 vma->vm_flags = vm_flags;
1265 vma->vm_page_prot = vm_get_page_prot(vm_flags);
1266 vma->vm_pgoff = pgoff;
1267 INIT_LIST_HEAD(&vma->anon_vma_chain);
1268
1269 if (file) {
1270 error = -EINVAL;
1271 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1272 goto free_vma;
1273 if (vm_flags & VM_DENYWRITE) {
1274 error = deny_write_access(file);
1275 if (error)
1276 goto free_vma;
1277 correct_wcount = 1;
1278 }
1279 vma->vm_file = file;
1280 get_file(file);
1281 error = file->f_op->mmap(file, vma);
1282 if (error)
1283 goto unmap_and_free_vma;
1284 if (vm_flags & VM_EXECUTABLE)
1285 added_exe_file_vma(mm);
1286
1287
1288
1289
1290
1291
1292 addr = vma->vm_start;
1293 pgoff = vma->vm_pgoff;
1294 vm_flags = vma->vm_flags;
1295 } else if (vm_flags & VM_SHARED) {
1296 error = shmem_zero_setup(vma);
1297 if (error)
1298 goto free_vma;
1299 }
1300
1301 if (vma_wants_writenotify(vma)) {
1302 pgprot_t pprot = vma->vm_page_prot;
1303
1304
1305
1306
1307
1308
1309
1310
1311 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
1312 if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
1313 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1314 }
1315
1316 vma_link(mm, vma, prev, rb_link, rb_parent);
1317 file = vma->vm_file;
1318
1319
1320 if (correct_wcount)
1321 atomic_inc(&inode->i_writecount);
1322out:
1323 perf_event_mmap(vma);
1324
1325 mm->total_vm += len >> PAGE_SHIFT;
1326 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1327 if (vm_flags & VM_LOCKED) {
1328 if (!mlock_vma_pages_range(vma, addr, addr + len))
1329 mm->locked_vm += (len >> PAGE_SHIFT);
1330 } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
1331 make_pages_present(addr, addr + len);
1332 return addr;
1333
1334unmap_and_free_vma:
1335 if (correct_wcount)
1336 atomic_inc(&inode->i_writecount);
1337 vma->vm_file = NULL;
1338 fput(file);
1339
1340
1341 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1342 charged = 0;
1343free_vma:
1344 kmem_cache_free(vm_area_cachep, vma);
1345unacct_error:
1346 if (charged)
1347 vm_unacct_memory(charged);
1348 return error;
1349}
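
/*
 * Get an address range which is currently unmapped.
 *
 * Ugly calling convention alert: a return value with the low bits set
 * means an error value, ie.
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 * This function "knows" that -ENOMEM has the bits set.
 */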
1362#ifndef HAVE_ARCH_UNMAPPED_AREA
1363unsigned long
1364arch_get_unmapped_area(struct file *filp, unsigned long addr,
1365 unsigned long len, unsigned long pgoff, unsigned long flags)
1366{
1367 struct mm_struct *mm = current->mm;
1368 struct vm_area_struct *vma;
1369 unsigned long start_addr;
1370
1371 if (len > TASK_SIZE)
1372 return -ENOMEM;
1373
1374 if (flags & MAP_FIXED)
1375 return addr;
1376
1377 if (addr) {
1378 addr = PAGE_ALIGN(addr);
1379 vma = find_vma(mm, addr);
1380 if (TASK_SIZE - len >= addr &&
1381 (!vma || addr + len <= vma->vm_start))
1382 return addr;
1383 }
1384 if (len > mm->cached_hole_size) {
1385 start_addr = addr = mm->free_area_cache;
1386 } else {
1387 start_addr = addr = TASK_UNMAPPED_BASE;
1388 mm->cached_hole_size = 0;
1389 }
1390
1391full_search:
1392 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1393
1394 if (TASK_SIZE - len < addr) {
1395
1396
1397
1398
1399 if (start_addr != TASK_UNMAPPED_BASE) {
1400 addr = TASK_UNMAPPED_BASE;
1401 start_addr = addr;
1402 mm->cached_hole_size = 0;
1403 goto full_search;
1404 }
1405 return -ENOMEM;
1406 }
1407 if (!vma || addr + len <= vma->vm_start) {
1408
1409
1410
1411 mm->free_area_cache = addr + len;
1412 return addr;
1413 }
1414 if (addr + mm->cached_hole_size < vma->vm_start)
1415 mm->cached_hole_size = vma->vm_start - addr;
1416 addr = vma->vm_end;
1417 }
1418}
1419#endif
1420
1421void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1422{
1423
1424
1425
1426 if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
1427 mm->free_area_cache = addr;
1428 mm->cached_hole_size = ~0UL;
1429 }
1430}
1431
1432
1433
1434
1435
1436#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1437unsigned long
1438arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1439 const unsigned long len, const unsigned long pgoff,
1440 const unsigned long flags)
1441{
1442 struct vm_area_struct *vma;
1443 struct mm_struct *mm = current->mm;
1444 unsigned long addr = addr0;
1445
1446
1447 if (len > TASK_SIZE)
1448 return -ENOMEM;
1449
1450 if (flags & MAP_FIXED)
1451 return addr;
1452
1453
1454 if (addr) {
1455 addr = PAGE_ALIGN(addr);
1456 vma = find_vma(mm, addr);
1457 if (TASK_SIZE - len >= addr &&
1458 (!vma || addr + len <= vma->vm_start))
1459 return addr;
1460 }
1461
1462
1463 if (len <= mm->cached_hole_size) {
1464 mm->cached_hole_size = 0;
1465 mm->free_area_cache = mm->mmap_base;
1466 }
1467
1468
1469 addr = mm->free_area_cache;
1470
1471
1472 if (addr > len) {
1473 vma = find_vma(mm, addr-len);
1474 if (!vma || addr <= vma->vm_start)
1475
1476 return (mm->free_area_cache = addr-len);
1477 }
1478
1479 if (mm->mmap_base < len)
1480 goto bottomup;
1481
1482 addr = mm->mmap_base-len;
1483
1484 do {
1485
1486
1487
1488
1489
1490 vma = find_vma(mm, addr);
1491 if (!vma || addr+len <= vma->vm_start)
1492
1493 return (mm->free_area_cache = addr);
1494
1495
1496 if (addr + mm->cached_hole_size < vma->vm_start)
1497 mm->cached_hole_size = vma->vm_start - addr;
1498
1499
1500 addr = vma->vm_start-len;
1501 } while (len < vma->vm_start);
1502
1503bottomup:
1504
1505
1506
1507
1508
1509
1510 mm->cached_hole_size = ~0UL;
1511 mm->free_area_cache = TASK_UNMAPPED_BASE;
1512 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
1513
1514
1515
1516 mm->free_area_cache = mm->mmap_base;
1517 mm->cached_hole_size = ~0UL;
1518
1519 return addr;
1520}
1521#endif
1522
1523void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1524{
1525
1526
1527
1528 if (addr > mm->free_area_cache)
1529 mm->free_area_cache = addr;
1530
1531
1532 if (mm->free_area_cache > mm->mmap_base)
1533 mm->free_area_cache = mm->mmap_base;
1534}
1535
1536unsigned long
1537get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1538 unsigned long pgoff, unsigned long flags)
1539{
1540 unsigned long (*get_area)(struct file *, unsigned long,
1541 unsigned long, unsigned long, unsigned long);
1542
1543 unsigned long error = arch_mmap_check(addr, len, flags);
1544 if (error)
1545 return error;
1546
1547
1548 if (len > TASK_SIZE)
1549 return -ENOMEM;
1550
1551 get_area = current->mm->get_unmapped_area;
1552 if (file && file->f_op && file->f_op->get_unmapped_area)
1553 get_area = file->f_op->get_unmapped_area;
1554 addr = get_area(file, addr, len, pgoff, flags);
1555 if (IS_ERR_VALUE(addr))
1556 return addr;
1557
1558 if (addr > TASK_SIZE - len)
1559 return -ENOMEM;
1560 if (addr & ~PAGE_MASK)
1561 return -EINVAL;
1562
1563 return arch_rebalance_pgtables(addr, len);
1564}
1565
1566EXPORT_SYMBOL(get_unmapped_area);
1567
1568
1569struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1570{
1571 struct vm_area_struct *vma = NULL;
1572
1573 if (mm) {
1574
1575
1576 vma = mm->mmap_cache;
1577 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1578 struct rb_node * rb_node;
1579
1580 rb_node = mm->mm_rb.rb_node;
1581 vma = NULL;
1582
1583 while (rb_node) {
1584 struct vm_area_struct * vma_tmp;
1585
1586 vma_tmp = rb_entry(rb_node,
1587 struct vm_area_struct, vm_rb);
1588
1589 if (vma_tmp->vm_end > addr) {
1590 vma = vma_tmp;
1591 if (vma_tmp->vm_start <= addr)
1592 break;
1593 rb_node = rb_node->rb_left;
1594 } else
1595 rb_node = rb_node->rb_right;
1596 }
1597 if (vma)
1598 mm->mmap_cache = vma;
1599 }
1600 }
1601 return vma;
1602}
1603
1604EXPORT_SYMBOL(find_vma);
1605
1606
1607struct vm_area_struct *
1608find_vma_prev(struct mm_struct *mm, unsigned long addr,
1609 struct vm_area_struct **pprev)
1610{
1611 struct vm_area_struct *vma = NULL, *prev = NULL;
1612 struct rb_node *rb_node;
1613 if (!mm)
1614 goto out;
1615
1616
1617 vma = mm->mmap;
1618
1619
1620 rb_node = mm->mm_rb.rb_node;
1621
1622 while (rb_node) {
1623 struct vm_area_struct *vma_tmp;
1624 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1625
1626 if (addr < vma_tmp->vm_end) {
1627 rb_node = rb_node->rb_left;
1628 } else {
1629 prev = vma_tmp;
1630 if (!prev->vm_next || (addr < prev->vm_next->vm_end))
1631 break;
1632 rb_node = rb_node->rb_right;
1633 }
1634 }
1635
1636out:
1637 *pprev = prev;
1638 return prev ? prev->vm_next : vma;
1639}
1640
1641
1642
1643
1644
1645
1646static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
1647{
1648 struct mm_struct *mm = vma->vm_mm;
1649 struct rlimit *rlim = current->signal->rlim;
1650 unsigned long new_start;
1651
1652
1653 if (!may_expand_vm(mm, grow))
1654 return -ENOMEM;
1655
1656
1657 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
1658 return -ENOMEM;
1659
1660
1661 if (vma->vm_flags & VM_LOCKED) {
1662 unsigned long locked;
1663 unsigned long limit;
1664 locked = mm->locked_vm + grow;
1665 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
1666 limit >>= PAGE_SHIFT;
1667 if (locked > limit && !capable(CAP_IPC_LOCK))
1668 return -ENOMEM;
1669 }
1670
1671
1672 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1673 vma->vm_end - size;
1674 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1675 return -EFAULT;
1676
1677
1678
1679
1680
1681 if (security_vm_enough_memory_mm(mm, grow))
1682 return -ENOMEM;
1683
1684
1685 mm->total_vm += grow;
1686 if (vma->vm_flags & VM_LOCKED)
1687 mm->locked_vm += grow;
1688 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
1689 return 0;
1690}
1691
1692#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1693
1694
1695
1696
1697int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1698{
1699 int error;
1700
1701 if (!(vma->vm_flags & VM_GROWSUP))
1702 return -EFAULT;
1703
1704
1705
1706
1707
1708 if (unlikely(anon_vma_prepare(vma)))
1709 return -ENOMEM;
1710 vma_lock_anon_vma(vma);
1711
1712
1713
1714
1715
1716
1717
1718 if (address < PAGE_ALIGN(address+4))
1719 address = PAGE_ALIGN(address+4);
1720 else {
1721 vma_unlock_anon_vma(vma);
1722 return -ENOMEM;
1723 }
1724 error = 0;
1725
1726
1727 if (address > vma->vm_end) {
1728 unsigned long size, grow;
1729
1730 size = address - vma->vm_start;
1731 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1732
1733 error = -ENOMEM;
1734 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1735 error = acct_stack_growth(vma, size, grow);
1736 if (!error) {
1737 vma->vm_end = address;
1738 perf_event_mmap(vma);
1739 }
1740 }
1741 }
1742 vma_unlock_anon_vma(vma);
1743 khugepaged_enter_vma_merge(vma);
1744 return error;
1745}
1746#endif
1747
1748
1749
1750
1751int expand_downwards(struct vm_area_struct *vma,
1752 unsigned long address)
1753{
1754 int error;
1755
1756
1757
1758
1759
1760 if (unlikely(anon_vma_prepare(vma)))
1761 return -ENOMEM;
1762
1763 address &= PAGE_MASK;
1764 error = security_file_mmap(NULL, 0, 0, 0, address, 1);
1765 if (error)
1766 return error;
1767
1768 vma_lock_anon_vma(vma);
1769
1770
1771
1772
1773
1774
1775
1776
1777 if (address < vma->vm_start) {
1778 unsigned long size, grow;
1779
1780 size = vma->vm_end - address;
1781 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1782
1783 error = -ENOMEM;
1784 if (grow <= vma->vm_pgoff) {
1785 error = acct_stack_growth(vma, size, grow);
1786 if (!error) {
1787 vma->vm_start = address;
1788 vma->vm_pgoff -= grow;
1789 perf_event_mmap(vma);
1790 }
1791 }
1792 }
1793 vma_unlock_anon_vma(vma);
1794 khugepaged_enter_vma_merge(vma);
1795 return error;
1796}
1797
1798#ifdef CONFIG_STACK_GROWSUP
1799int expand_stack(struct vm_area_struct *vma, unsigned long address)
1800{
1801 return expand_upwards(vma, address);
1802}
1803
1804struct vm_area_struct *
1805find_extend_vma(struct mm_struct *mm, unsigned long addr)
1806{
1807 struct vm_area_struct *vma, *prev;
1808
1809 addr &= PAGE_MASK;
1810 vma = find_vma_prev(mm, addr, &prev);
1811 if (vma && (vma->vm_start <= addr))
1812 return vma;
1813 if (!prev || expand_stack(prev, addr))
1814 return NULL;
1815 if (prev->vm_flags & VM_LOCKED) {
1816 mlock_vma_pages_range(prev, addr, prev->vm_end);
1817 }
1818 return prev;
1819}
1820#else
1821int expand_stack(struct vm_area_struct *vma, unsigned long address)
1822{
1823 return expand_downwards(vma, address);
1824}
1825
1826struct vm_area_struct *
1827find_extend_vma(struct mm_struct * mm, unsigned long addr)
1828{
1829 struct vm_area_struct * vma;
1830 unsigned long start;
1831
1832 addr &= PAGE_MASK;
1833 vma = find_vma(mm,addr);
1834 if (!vma)
1835 return NULL;
1836 if (vma->vm_start <= addr)
1837 return vma;
1838 if (!(vma->vm_flags & VM_GROWSDOWN))
1839 return NULL;
1840 start = vma->vm_start;
1841 if (expand_stack(vma, addr))
1842 return NULL;
1843 if (vma->vm_flags & VM_LOCKED) {
1844 mlock_vma_pages_range(vma, addr, start);
1845 }
1846 return vma;
1847}
1848#endif
1849
1850
1851
1852
1853
1854
1855
1856static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
1857{
1858
1859 update_hiwater_vm(mm);
1860 do {
1861 long nrpages = vma_pages(vma);
1862
1863 mm->total_vm -= nrpages;
1864 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
1865 vma = remove_vma(vma);
1866 } while (vma);
1867 validate_mm(mm);
1868}
1869
1870
1871
1872
1873
1874
1875static void unmap_region(struct mm_struct *mm,
1876 struct vm_area_struct *vma, struct vm_area_struct *prev,
1877 unsigned long start, unsigned long end)
1878{
1879 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1880 struct mmu_gather tlb;
1881 unsigned long nr_accounted = 0;
1882
1883 lru_add_drain();
1884 tlb_gather_mmu(&tlb, mm, 0);
1885 update_hiwater_rss(mm);
1886 unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1887 vm_unacct_memory(nr_accounted);
1888 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
1889 next ? next->vm_start : 0);
1890 tlb_finish_mmu(&tlb, start, end);
1891}
1892
1893
1894
1895
1896
1897static void
1898detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1899 struct vm_area_struct *prev, unsigned long end)
1900{
1901 struct vm_area_struct **insertion_point;
1902 struct vm_area_struct *tail_vma = NULL;
1903 unsigned long addr;
1904
1905 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1906 vma->vm_prev = NULL;
1907 do {
1908 rb_erase(&vma->vm_rb, &mm->mm_rb);
1909 mm->map_count--;
1910 tail_vma = vma;
1911 vma = vma->vm_next;
1912 } while (vma && vma->vm_start < end);
1913 *insertion_point = vma;
1914 if (vma)
1915 vma->vm_prev = prev;
1916 tail_vma->vm_next = NULL;
1917 if (mm->unmap_area == arch_unmap_area)
1918 addr = prev ? prev->vm_end : mm->mmap_base;
1919 else
1920 addr = vma ? vma->vm_start : mm->mmap_base;
1921 mm->unmap_area(mm, addr);
1922 mm->mmap_cache = NULL;
1923}
1924
1925
1926
1927
1928
1929static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1930 unsigned long addr, int new_below)
1931{
1932 struct mempolicy *pol;
1933 struct vm_area_struct *new;
1934 int err = -ENOMEM;
1935
1936 if (is_vm_hugetlb_page(vma) && (addr &
1937 ~(huge_page_mask(hstate_vma(vma)))))
1938 return -EINVAL;
1939
1940 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1941 if (!new)
1942 goto out_err;
1943
1944
1945 *new = *vma;
1946
1947 INIT_LIST_HEAD(&new->anon_vma_chain);
1948
1949 if (new_below)
1950 new->vm_end = addr;
1951 else {
1952 new->vm_start = addr;
1953 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1954 }
1955
1956 pol = mpol_dup(vma_policy(vma));
1957 if (IS_ERR(pol)) {
1958 err = PTR_ERR(pol);
1959 goto out_free_vma;
1960 }
1961 vma_set_policy(new, pol);
1962
1963 if (anon_vma_clone(new, vma))
1964 goto out_free_mpol;
1965
1966 if (new->vm_file) {
1967 get_file(new->vm_file);
1968 if (vma->vm_flags & VM_EXECUTABLE)
1969 added_exe_file_vma(mm);
1970 }
1971
1972 if (new->vm_ops && new->vm_ops->open)
1973 new->vm_ops->open(new);
1974
1975 if (new_below)
1976 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
1977 ((addr - new->vm_start) >> PAGE_SHIFT), new);
1978 else
1979 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
1980
1981
1982 if (!err)
1983 return 0;
1984
1985
1986 if (new->vm_ops && new->vm_ops->close)
1987 new->vm_ops->close(new);
1988 if (new->vm_file) {
1989 if (vma->vm_flags & VM_EXECUTABLE)
1990 removed_exe_file_vma(mm);
1991 fput(new->vm_file);
1992 }
1993 unlink_anon_vmas(new);
1994 out_free_mpol:
1995 mpol_put(pol);
1996 out_free_vma:
1997 kmem_cache_free(vm_area_cachep, new);
1998 out_err:
1999 return err;
2000}
2001
2002
2003
2004
2005
2006int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2007 unsigned long addr, int new_below)
2008{
2009 if (mm->map_count >= sysctl_max_map_count)
2010 return -ENOMEM;
2011
2012 return __split_vma(mm, vma, addr, new_below);
2013}
2014
2015
2016
2017
2018
2019
2020int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2021{
2022 unsigned long end;
2023 struct vm_area_struct *vma, *prev, *last;
2024
2025 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
2026 return -EINVAL;
2027
2028 if ((len = PAGE_ALIGN(len)) == 0)
2029 return -EINVAL;
2030
2031
2032 vma = find_vma(mm, start);
2033 if (!vma)
2034 return 0;
2035 prev = vma->vm_prev;
2036
2037
2038
2039 end = start + len;
2040 if (vma->vm_start >= end)
2041 return 0;
2042
2043
2044
2045
2046
2047
2048
2049
2050 if (start > vma->vm_start) {
2051 int error;
2052
2053
2054
2055
2056
2057
2058 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2059 return -ENOMEM;
2060
2061 error = __split_vma(mm, vma, start, 0);
2062 if (error)
2063 return error;
2064 prev = vma;
2065 }
2066
2067
2068 last = find_vma(mm, end);
2069 if (last && end > last->vm_start) {
2070 int error = __split_vma(mm, last, end, 1);
2071 if (error)
2072 return error;
2073 }
2074 vma = prev? prev->vm_next: mm->mmap;
2075
2076
2077
2078
2079 if (mm->locked_vm) {
2080 struct vm_area_struct *tmp = vma;
2081 while (tmp && tmp->vm_start < end) {
2082 if (tmp->vm_flags & VM_LOCKED) {
2083 mm->locked_vm -= vma_pages(tmp);
2084 munlock_vma_pages_all(tmp);
2085 }
2086 tmp = tmp->vm_next;
2087 }
2088 }
2089
2090
2091
2092
2093 detach_vmas_to_be_unmapped(mm, vma, prev, end);
2094 unmap_region(mm, vma, prev, start, end);
2095
2096
2097 remove_vma_list(mm, vma);
2098
2099 return 0;
2100}
2101
2102EXPORT_SYMBOL(do_munmap);
2103
2104SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2105{
2106 int ret;
2107 struct mm_struct *mm = current->mm;
2108
2109 profile_munmap(addr);
2110
2111 down_write(&mm->mmap_sem);
2112 ret = do_munmap(mm, addr, len);
2113 up_write(&mm->mmap_sem);
2114 return ret;
2115}
2116
2117static inline void verify_mm_writelocked(struct mm_struct *mm)
2118{
2119#ifdef CONFIG_DEBUG_VM
2120 if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2121 WARN_ON(1);
2122 up_read(&mm->mmap_sem);
2123 }
2124#endif
2125}
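
/*
 * This is really a simplified do_mmap(): it only handles anonymous,
 * private mappings and is used to grow the brk segment.  The caller
 * must hold the mmap_sem for writing.
 */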
2132unsigned long do_brk(unsigned long addr, unsigned long len)
2133{
2134 struct mm_struct * mm = current->mm;
2135 struct vm_area_struct * vma, * prev;
2136 unsigned long flags;
2137 struct rb_node ** rb_link, * rb_parent;
2138 pgoff_t pgoff = addr >> PAGE_SHIFT;
2139 int error;
2140
2141 len = PAGE_ALIGN(len);
2142 if (!len)
2143 return addr;
2144
2145 error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
2146 if (error)
2147 return error;
2148
2149 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2150
2151 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2152 if (error & ~PAGE_MASK)
2153 return error;
2154
2155
2156
2157
2158 if (mm->def_flags & VM_LOCKED) {
2159 unsigned long locked, lock_limit;
2160 locked = len >> PAGE_SHIFT;
2161 locked += mm->locked_vm;
2162 lock_limit = rlimit(RLIMIT_MEMLOCK);
2163 lock_limit >>= PAGE_SHIFT;
2164 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2165 return -EAGAIN;
2166 }
2167
2168
2169
2170
2171
2172 verify_mm_writelocked(mm);
2173
2174
2175
2176
2177 munmap_back:
2178 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2179 if (vma && vma->vm_start < addr + len) {
2180 if (do_munmap(mm, addr, len))
2181 return -ENOMEM;
2182 goto munmap_back;
2183 }
2184
2185
2186 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2187 return -ENOMEM;
2188
2189 if (mm->map_count > sysctl_max_map_count)
2190 return -ENOMEM;
2191
2192 if (security_vm_enough_memory(len >> PAGE_SHIFT))
2193 return -ENOMEM;
2194
2195
2196 vma = vma_merge(mm, prev, addr, addr + len, flags,
2197 NULL, NULL, pgoff, NULL);
2198 if (vma)
2199 goto out;
2200
2201
2202
2203
2204 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2205 if (!vma) {
2206 vm_unacct_memory(len >> PAGE_SHIFT);
2207 return -ENOMEM;
2208 }
2209
2210 INIT_LIST_HEAD(&vma->anon_vma_chain);
2211 vma->vm_mm = mm;
2212 vma->vm_start = addr;
2213 vma->vm_end = addr + len;
2214 vma->vm_pgoff = pgoff;
2215 vma->vm_flags = flags;
2216 vma->vm_page_prot = vm_get_page_prot(flags);
2217 vma_link(mm, vma, prev, rb_link, rb_parent);
2218out:
2219 perf_event_mmap(vma);
2220 mm->total_vm += len >> PAGE_SHIFT;
2221 if (flags & VM_LOCKED) {
2222 if (!mlock_vma_pages_range(vma, addr, addr + len))
2223 mm->locked_vm += (len >> PAGE_SHIFT);
2224 }
2225 return addr;
2226}
2227
2228EXPORT_SYMBOL(do_brk);
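
/* Release all mmaps: called on mm teardown, when there are no more users. */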
2231void exit_mmap(struct mm_struct *mm)
2232{
2233 struct mmu_gather tlb;
2234 struct vm_area_struct *vma;
2235 unsigned long nr_accounted = 0;
2236 unsigned long end;
2237
2238
2239 mmu_notifier_release(mm);
2240
2241 if (mm->locked_vm) {
2242 vma = mm->mmap;
2243 while (vma) {
2244 if (vma->vm_flags & VM_LOCKED)
2245 munlock_vma_pages_all(vma);
2246 vma = vma->vm_next;
2247 }
2248 }
2249
2250 arch_exit_mmap(mm);
2251
2252 vma = mm->mmap;
2253 if (!vma)
2254 return;
2255
2256 lru_add_drain();
2257 flush_cache_mm(mm);
2258 tlb_gather_mmu(&tlb, mm, 1);
2259
2260
2261 end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
2262 vm_unacct_memory(nr_accounted);
2263
2264 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
2265 tlb_finish_mmu(&tlb, 0, end);
2266
2267
2268
2269
2270
2271 while (vma)
2272 vma = remove_vma(vma);
2273
2274 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2275}
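
/*
 * Insert vm structure into the process's address-ordered list and
 * rbtree, and into the file's i_mmap tree.  If vm_file is non-NULL,
 * the i_mmap_mutex is taken inside vma_link().
 */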
2281int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
2282{
2283 struct vm_area_struct * __vma, * prev;
2284 struct rb_node ** rb_link, * rb_parent;
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298 if (!vma->vm_file) {
2299 BUG_ON(vma->anon_vma);
2300 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2301 }
2302 __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
2303 if (__vma && __vma->vm_start < vma->vm_end)
2304 return -ENOMEM;
2305 if ((vma->vm_flags & VM_ACCOUNT) &&
2306 security_vm_enough_memory_mm(mm, vma_pages(vma)))
2307 return -ENOMEM;
2308 vma_link(mm, vma, prev, rb_link, rb_parent);
2309 return 0;
2310}
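
/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */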
2316struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2317 unsigned long addr, unsigned long len, pgoff_t pgoff)
2318{
2319 struct vm_area_struct *vma = *vmap;
2320 unsigned long vma_start = vma->vm_start;
2321 struct mm_struct *mm = vma->vm_mm;
2322 struct vm_area_struct *new_vma, *prev;
2323 struct rb_node **rb_link, *rb_parent;
2324 struct mempolicy *pol;
2325
2326
2327
2328
2329
2330 if (!vma->vm_file && !vma->anon_vma)
2331 pgoff = addr >> PAGE_SHIFT;
2332
2333 find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2334 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2335 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2336 if (new_vma) {
2337
2338
2339
2340 if (vma_start >= new_vma->vm_start &&
2341 vma_start < new_vma->vm_end)
2342 *vmap = new_vma;
2343 } else {
2344 new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2345 if (new_vma) {
2346 *new_vma = *vma;
2347 pol = mpol_dup(vma_policy(vma));
2348 if (IS_ERR(pol))
2349 goto out_free_vma;
2350 INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2351 if (anon_vma_clone(new_vma, vma))
2352 goto out_free_mempol;
2353 vma_set_policy(new_vma, pol);
2354 new_vma->vm_start = addr;
2355 new_vma->vm_end = addr + len;
2356 new_vma->vm_pgoff = pgoff;
2357 if (new_vma->vm_file) {
2358 get_file(new_vma->vm_file);
2359 if (vma->vm_flags & VM_EXECUTABLE)
2360 added_exe_file_vma(mm);
2361 }
2362 if (new_vma->vm_ops && new_vma->vm_ops->open)
2363 new_vma->vm_ops->open(new_vma);
2364 vma_link(mm, new_vma, prev, rb_link, rb_parent);
2365 }
2366 }
2367 return new_vma;
2368
2369 out_free_mempol:
2370 mpol_put(pol);
2371 out_free_vma:
2372 kmem_cache_free(vm_area_cachep, new_vma);
2373 return NULL;
2374}
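
/*
 * Return true if the calling process may expand its vm space
 * by the passed number of pages without exceeding RLIMIT_AS.
 */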
2380int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2381{
2382 unsigned long cur = mm->total_vm;
2383 unsigned long lim;
2384
2385 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
2386
2387 if (cur + npages > lim)
2388 return 0;
2389 return 1;
2390}
2391
2392
2393static int special_mapping_fault(struct vm_area_struct *vma,
2394 struct vm_fault *vmf)
2395{
2396 pgoff_t pgoff;
2397 struct page **pages;
2398
2399
2400
2401
2402
2403
2404
2405 pgoff = vmf->pgoff - vma->vm_pgoff;
2406
2407 for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2408 pgoff--;
2409
2410 if (*pages) {
2411 struct page *page = *pages;
2412 get_page(page);
2413 vmf->page = page;
2414 return 0;
2415 }
2416
2417 return VM_FAULT_SIGBUS;
2418}
2419
2420
2421
2422
2423static void special_mapping_close(struct vm_area_struct *vma)
2424{
2425}
2426
2427static const struct vm_operations_struct special_mapping_vmops = {
2428 .close = special_mapping_close,
2429 .fault = special_mapping_fault,
2430};
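
/*
 * Called with mm->mmap_sem held for writing.  Insert a special mapping
 * covering [addr, addr + len) whose faults are satisfied from the given
 * page array (terminated by a NULL entry), as used e.g. for the vDSO.
 * The array and the pages it points to must stay alive for as long as
 * the mapping may exist.
 *
 * Illustrative use from arch setup code (names are hypothetical):
 *
 *	install_special_mapping(mm, vdso_base, vdso_size,
 *				VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
 *				vdso_pagelist);
 */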
2441int install_special_mapping(struct mm_struct *mm,
2442 unsigned long addr, unsigned long len,
2443 unsigned long vm_flags, struct page **pages)
2444{
2445 int ret;
2446 struct vm_area_struct *vma;
2447
2448 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2449 if (unlikely(vma == NULL))
2450 return -ENOMEM;
2451
2452 INIT_LIST_HEAD(&vma->anon_vma_chain);
2453 vma->vm_mm = mm;
2454 vma->vm_start = addr;
2455 vma->vm_end = addr + len;
2456
2457 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
2458 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2459
2460 vma->vm_ops = &special_mapping_vmops;
2461 vma->vm_private_data = pages;
2462
2463 ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
2464 if (ret)
2465 goto out;
2466
2467 ret = insert_vm_struct(mm, vma);
2468 if (ret)
2469 goto out;
2470
2471 mm->total_vm += len >> PAGE_SHIFT;
2472
2473 perf_event_mmap(vma);
2474
2475 return 0;
2476
2477out:
2478 kmem_cache_free(vm_area_cachep, vma);
2479 return ret;
2480}
2481
2482static DEFINE_MUTEX(mm_all_locks_mutex);
2483
2484static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2485{
2486 if (!test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
2487
2488
2489
2490
2491 mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501 if (__test_and_set_bit(0, (unsigned long *)
2502 &anon_vma->root->head.next))
2503 BUG();
2504 }
2505}
2506
2507static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2508{
2509 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2520 BUG();
2521 mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
2522 }
2523}
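
/*
 * mm_take_all_locks() takes, for every vma in @mm, the file's
 * i_mmap_mutex and the anon_vma lock, so that no vma can be added,
 * removed or otherwise modified while the caller (for example, mmu
 * notifier registration) operates on the whole address space.  The
 * caller must already hold mmap_sem for writing; mm_all_locks_mutex
 * serialises concurrent users of this interface.  Returns 0 on success
 * or -EINTR if a signal arrived while taking the locks, in which case
 * all locks taken so far are dropped again.  Release with
 * mm_drop_all_locks().
 */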
2557int mm_take_all_locks(struct mm_struct *mm)
2558{
2559 struct vm_area_struct *vma;
2560 struct anon_vma_chain *avc;
2561 int ret = -EINTR;
2562
2563 BUG_ON(down_read_trylock(&mm->mmap_sem));
2564
2565 mutex_lock(&mm_all_locks_mutex);
2566
2567 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2568 if (signal_pending(current))
2569 goto out_unlock;
2570 if (vma->vm_file && vma->vm_file->f_mapping)
2571 vm_lock_mapping(mm, vma->vm_file->f_mapping);
2572 }
2573
2574 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2575 if (signal_pending(current))
2576 goto out_unlock;
2577 if (vma->anon_vma)
2578 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2579 vm_lock_anon_vma(mm, avc->anon_vma);
2580 }
2581
2582 ret = 0;
2583
2584out_unlock:
2585 if (ret)
2586 mm_drop_all_locks(mm);
2587
2588 return ret;
2589}
2590
2591static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2592{
2593 if (test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606 if (!__test_and_clear_bit(0, (unsigned long *)
2607 &anon_vma->root->head.next))
2608 BUG();
2609 anon_vma_unlock(anon_vma);
2610 }
2611}
2612
2613static void vm_unlock_mapping(struct address_space *mapping)
2614{
2615 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2616
2617
2618
2619
2620 mutex_unlock(&mapping->i_mmap_mutex);
2621 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2622 &mapping->flags))
2623 BUG();
2624 }
2625}
2626
2627
2628
2629
2630
2631void mm_drop_all_locks(struct mm_struct *mm)
2632{
2633 struct vm_area_struct *vma;
2634 struct anon_vma_chain *avc;
2635
2636 BUG_ON(down_read_trylock(&mm->mmap_sem));
2637 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2638
2639 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2640 if (vma->anon_vma)
2641 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2642 vm_unlock_anon_vma(avc->anon_vma);
2643 if (vma->vm_file && vma->vm_file->f_mapping)
2644 vm_unlock_mapping(vma->vm_file->f_mapping);
2645 }
2646
2647 mutex_unlock(&mm_all_locks_mutex);
2648}
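
/* initialise the percpu counter used for VM accounting */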
2653void __init mmap_init(void)
2654{
2655 int ret;
2656
2657 ret = percpu_counter_init(&vm_committed_as, 0);
2658 VM_BUG_ON(ret);
2659}
2660