1
2
3
4
5
6
7
8
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/slab.h>
14#include <linux/backing-dev.h>
15#include <linux/mm.h>
16#include <linux/vmacache.h>
17#include <linux/shm.h>
18#include <linux/mman.h>
19#include <linux/pagemap.h>
20#include <linux/swap.h>
21#include <linux/syscalls.h>
22#include <linux/capability.h>
23#include <linux/init.h>
24#include <linux/file.h>
25#include <linux/fs.h>
26#include <linux/personality.h>
27#include <linux/security.h>
28#include <linux/hugetlb.h>
29#include <linux/shmem_fs.h>
30#include <linux/profile.h>
31#include <linux/export.h>
32#include <linux/mount.h>
33#include <linux/mempolicy.h>
34#include <linux/rmap.h>
35#include <linux/mmu_notifier.h>
36#include <linux/mmdebug.h>
37#include <linux/perf_event.h>
38#include <linux/audit.h>
39#include <linux/khugepaged.h>
40#include <linux/uprobes.h>
41#include <linux/rbtree_augmented.h>
42#include <linux/notifier.h>
43#include <linux/memory.h>
44#include <linux/printk.h>
45#include <linux/userfaultfd_k.h>
46#include <linux/moduleparam.h>
47#include <linux/pkeys.h>
48#include <linux/oom.h>
49#include <linux/sched/mm.h>
50
51#include <linux/uaccess.h>
52#include <asm/cacheflush.h>
53#include <asm/tlb.h>
54#include <asm/mmu_context.h>
55
56#include "internal.h"
57
/* Fallback for architectures that supply no extra mmap argument check. */
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
#endif

/* Min/max/current entropy bits used for mmap base randomization. */
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
/* Same, for 32-bit compat tasks. */
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

/*
 * Writable core parameter (mode 0644): when set, RLIMIT_DATA enforcement
 * is relaxed (consulted by the data-rlimit accounting elsewhere in mm).
 */
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

/* Forward declaration; the definition appears later in this file. */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
/*
 * Map the low four VM_{READ,WRITE,EXEC,SHARED} flag bits to the arch
 * page protections: first 8 entries private (__P...), last 8 shared (__S...).
 * Indexed by vm_get_page_prot() below; read-only after init.
 */
pgprot_t protection_map[16] __ro_after_init = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
104
/* No-op fallback when the arch does not filter final page protections. */
#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return prot;
}
#endif
111
112pgprot_t vm_get_page_prot(unsigned long vm_flags)
113{
114 pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
115 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
116 pgprot_val(arch_vm_get_page_prot(vm_flags)));
117
118 return arch_filter_pgprot(ret);
119}
120EXPORT_SYMBOL(vm_get_page_prot);
121
/* Re-derive page protections for @vm_flags, preserving arch-managed bits
 * of @oldprot via pgprot_modify(). */
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
126
127
128void vma_set_page_prot(struct vm_area_struct *vma)
129{
130 unsigned long vm_flags = vma->vm_flags;
131 pgprot_t vm_page_prot;
132
133 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
134 if (vma_wants_writenotify(vma, vm_page_prot)) {
135 vm_flags &= ~VM_SHARED;
136 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
137 }
138
139 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
140}
141
142
143
144
/*
 * Undo what __vma_link_file() did: restore the inode's i_writecount if
 * this was a MAP_DENYWRITE mapping, drop the writable-mapping count for
 * shared mappings, and unlink the vma from the file's interval tree.
 * Caller must hold the mapping's i_mmap lock for writing.
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file_inode(file)->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}
157
158
159
160
161
162void unlink_file_vma(struct vm_area_struct *vma)
163{
164 struct file *file = vma->vm_file;
165
166 if (file) {
167 struct address_space *mapping = file->f_mapping;
168 i_mmap_lock_write(mapping);
169 __remove_shared_vm_struct(vma, file, mapping);
170 i_mmap_unlock_write(mapping);
171 }
172}
173
174
175
176
/*
 * Release all resources held by a vma (driver ->close hook, file
 * reference, NUMA policy) and free it.  Returns the next vma in the
 * list so callers can iterate while tearing down.  May sleep.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	vm_area_free(vma);
	return next;
}
190
static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
		struct list_head *uf);
/*
 * brk(2): move the program break to @brk.
 *
 * Shrinking unmaps [newbrk, oldbrk) via __do_munmap() (which may
 * downgrade mmap_sem to read mode, signalled by a return of 1);
 * growing maps [oldbrk, newbrk) via do_brk_flags().  On any failure
 * the original break is returned unchanged.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long retval;
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *next;
	unsigned long min_brk;
	bool populate;
	bool downgraded = false;
	LIST_HEAD(uf);

	brk = untagged_addr(brk);

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK: if the break was not randomized, old binaries
	 * may rely on the heap starting right after end_data, so only
	 * forbid going below end_data in that case.
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/* Enforce RLIMIT_DATA on the combined data + brk size. */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		/* Same page: just record the new break value. */
		mm->brk = brk;
		goto success;
	}

	/* Shrinking the break: unmap the now-unused pages. */
	if (brk <= mm->brk) {
		int ret;

		/*
		 * Set mm->brk before unmapping; restore origbrk if the
		 * munmap fails.
		 */
		mm->brk = brk;
		ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
		if (ret < 0) {
			mm->brk = origbrk;
			goto out;
		} else if (ret == 1) {
			/* __do_munmap downgraded mmap_sem to read. */
			downgraded = true;
		}
		goto success;
	}

	/* Growing: refuse if the new break would run into the next vma
	 * (including its guard gap). */
	next = find_vma(mm, oldbrk);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	/* Map the new area; this does not fault pages in. */
	if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
		goto out;
	mm->brk = brk;

success:
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	if (downgraded)
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		/* mlock()ed mm: fault the new pages in immediately. */
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	retval = origbrk;
	up_write(&mm->mmap_sem);
	return retval;
}
293
/*
 * Size of the free gap immediately before @vma: the distance between the
 * previous vma's end and this vma's start, both adjusted for stack guard
 * gaps (vm_start_gap()/vm_end_gap()).  Zero if the gap-adjusted ranges
 * touch or overlap.
 */
static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
{
	unsigned long gap, prev_end;

	gap = vm_start_gap(vma);
	if (vma->vm_prev) {
		prev_end = vm_end_gap(vma->vm_prev);
		/* Guard gaps can make prev_end exceed gap; clamp to zero. */
		if (gap > prev_end)
			gap -= prev_end;
		else
			gap = 0;
	}
	return gap;
}
314
315#ifdef CONFIG_DEBUG_VM_RB
/*
 * Debug helper: recompute the value rb_subtree_gap should hold for @vma —
 * the maximum of its own preceding gap and the cached subtree gaps of its
 * rbtree children.  Used only to cross-check the augmented tree.
 */
static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
	unsigned long max = vma_compute_gap(vma), subtree_gap;
	if (vma->vm_rb.rb_left) {
		subtree_gap = rb_entry(vma->vm_rb.rb_left,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	if (vma->vm_rb.rb_right) {
		subtree_gap = rb_entry(vma->vm_rb.rb_right,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	return max;
}
333
/*
 * Walk the mm's vma rbtree checking ordering, overlap, vm_start <= vm_end
 * and cached subtree gaps.  Returns the number of vmas found, or -1 if
 * any inconsistency was detected (each one is reported via pr_emerg).
 */
static int browse_rb(struct mm_struct *mm)
{
	struct rb_root *root = &mm->mm_rb;
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev) {
			pr_emerg("vm_start %lx < prev %lx\n",
				  vma->vm_start, prev);
			bug = 1;
		}
		if (vma->vm_start < pend) {
			pr_emerg("vm_start %lx < pend %lx\n",
				  vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			pr_emerg("vm_start %lx > vm_end %lx\n",
				  vma->vm_start, vma->vm_end);
			bug = 1;
		}
		/* page_table_lock stabilizes rb_subtree_gap for the check. */
		spin_lock(&mm->page_table_lock);
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			pr_emerg("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		spin_unlock(&mm->page_table_lock);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	/* Walk backwards from the last node; counts must agree. */
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		pr_emerg("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ? -1 : i;
}
381
/*
 * Assert every vma in the tree has an up-to-date rb_subtree_gap, except
 * @ignore, which the caller is about to erase or re-link.
 */
static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
	struct rb_node *nd;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		VM_BUG_ON_VMA(vma != ignore &&
			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
			vma);
	}
}
394
/*
 * Full consistency check of an mm's vma bookkeeping: anon_vma interval
 * trees, list length vs map_count, highest_vm_end, and the rbtree (via
 * browse_rb()).  BUGs the mm if anything is wrong.
 */
static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;

	while (vma) {
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;

		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}

		highest_address = vm_end_gap(vma);
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
			  mm->highest_vm_end, highest_address);
		bug = 1;
	}
	i = browse_rb(mm);
	if (i != mm->map_count) {
		if (i != -1)
			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
434#else
435#define validate_mm_rb(root, ignore) do { } while (0)
436#define validate_mm(mm) do { } while (0)
437#endif
438
/*
 * Generate the augmented-rbtree callbacks that keep rb_subtree_gap (the
 * max vma_compute_gap() over each subtree) up to date on rotations.
 */
RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
			 struct vm_area_struct, vm_rb,
			 unsigned long, rb_subtree_gap, vma_compute_gap)

/*
 * Re-propagate the cached gap from @vma up to the root after vm_start,
 * vm_end or vm_prev changed.  The vma must already be in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
456
/* Insert an already-linked node into the augmented rbtree; the tree is
 * expected to be fully consistent beforehand (debug-checked). */
static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
465
/* Raw augmented erase; callers use the vma_rb_erase*() wrappers which
 * pick the right validation mode first. */
static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
475
/*
 * Erase @vma while tolerating a stale rb_subtree_gap on @ignore — used
 * by __vma_adjust() when another vma is concurrently being resized as
 * part of the same operation.
 */
static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
						struct rb_root *root,
						struct vm_area_struct *ignore)
{
	validate_mm_rb(root, ignore);

	__vma_rb_erase(vma, root);
}
489
/* Erase @vma from the rbtree; only @vma itself may have a stale cached
 * gap at this point (debug-checked). */
static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
					 struct rb_root *root)
{
	validate_mm_rb(root, vma);

	__vma_rb_erase(vma, root);
}
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
/*
 * Temporarily drop @vma from all its anon_vma interval trees before
 * vm_start/vm_pgoff are changed; pair with the _post_update_ variant.
 * Caller must hold the anon_vma lock for writing.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}
524
/* Re-insert @vma into its anon_vma interval trees after an update;
 * counterpart of anon_vma_interval_tree_pre_update_vma(). */
static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
533
/*
 * Locate where a new vma covering [addr, end) would be linked: returns
 * via @pprev the predecessor vma (NULL if first), and via @rb_link /
 * @rb_parent the rbtree insertion point.  Returns -ENOMEM if an existing
 * vma overlaps the range.
 */
static int find_vma_links(struct mm_struct *mm, unsigned long addr,
		unsigned long end, struct vm_area_struct **pprev,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			/* Overlap: vma_tmp ends after addr and starts before end. */
			if (vma_tmp->vm_start < end)
				return -ENOMEM;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return 0;
}
567
/*
 * Count how many pages of [addr, end) are already covered by existing
 * vmas (only the overlapping portions are counted).
 */
static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	unsigned long nr_pages = 0;
	struct vm_area_struct *vma;

	/* First vma intersecting the range, if any. */
	vma = find_vma_intersection(mm, addr, end);
	if (!vma)
		return 0;

	nr_pages = (min(end, vma->vm_end) -
		max(addr, vma->vm_start)) >> PAGE_SHIFT;

	/* Subsequent vmas start at or after addr; add their overlap. */
	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
		unsigned long overlap_len;

		if (vma->vm_start > end)
			break;

		overlap_len = min(end, vma->vm_end) - vma->vm_start;
		nr_pages += overlap_len >> PAGE_SHIFT;
	}

	return nr_pages;
}
595
/*
 * Link @vma into the rbtree at the position found by find_vma_links().
 * Also updates the successor's cached gap (its predecessor changed) or
 * mm->highest_vm_end when @vma becomes the last mapping.
 */
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Fix up neighbours before the tree sees the new node. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vm_end_gap(vma);

	/*
	 * Link the node, seed rb_subtree_gap with 0, then propagate the
	 * real gap before rebalancing inserts it for good.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}
619
620static void __vma_link_file(struct vm_area_struct *vma)
621{
622 struct file *file;
623
624 file = vma->vm_file;
625 if (file) {
626 struct address_space *mapping = file->f_mapping;
627
628 if (vma->vm_flags & VM_DENYWRITE)
629 atomic_dec(&file_inode(file)->i_writecount);
630 if (vma->vm_flags & VM_SHARED)
631 atomic_inc(&mapping->i_mmap_writable);
632
633 flush_dcache_mmap_lock(mapping);
634 vma_interval_tree_insert(vma, &mapping->i_mmap);
635 flush_dcache_mmap_unlock(mapping);
636 }
637}
638
/* Link @vma into both the mm's list (after @prev) and the rbtree. */
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}
647
/*
 * Fully link a new vma: list + rbtree + (for file mappings) the file's
 * i_mmap tree, taking the i_mmap write lock around the file linkage.
 * Bumps mm->map_count and runs the debug consistency check.
 */
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		i_mmap_lock_write(mapping);
	}

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		i_mmap_unlock_write(mapping);

	mm->map_count++;
	validate_mm(mm);
}
668
669
670
671
672
/*
 * Insert a vma whose range is known not to conflict (BUG otherwise).
 * Used by __vma_adjust(); unlike vma_link() it does not touch the
 * file's i_mmap tree — the caller handles that.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}
684
/*
 * Unlink @vma from rbtree and list.  @has_prev tells whether @prev is
 * the valid predecessor; otherwise it is looked up from vma->vm_prev.
 * @ignore is passed through to the rbtree erase so a concurrently
 * adjusted vma's stale gap is tolerated.  Invalidates the vma cache.
 */
static __always_inline void __vma_unlink_common(struct mm_struct *mm,
						struct vm_area_struct *vma,
						struct vm_area_struct *prev,
						bool has_prev,
						struct vm_area_struct *ignore)
{
	struct vm_area_struct *next;

	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
	next = vma->vm_next;
	if (has_prev)
		prev->vm_next = next;
	else {
		prev = vma->vm_prev;
		if (prev)
			prev->vm_next = next;
		else
			mm->mmap = next;
	}
	if (next)
		next->vm_prev = prev;

	/* Stale pointers may live in the per-thread vma cache. */
	vmacache_invalidate(mm);
}
710
/* Unlink @vma when its predecessor @prev is already known. */
static inline void __vma_unlink_prev(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     struct vm_area_struct *prev)
{
	__vma_unlink_common(mm, vma, prev, true, vma);
}
717
718
719
720
721
722
723
724
/*
 * __vma_adjust - resize @vma to [start, end) / @pgoff, fixing up every
 * data structure that indexes it: mm list, rbtree (with cached gaps),
 * file i_mmap interval tree, anon_vma interval trees, uprobes.
 *
 * @insert: an extra vma to link in (split case); @expand: the vma the
 * caller expects to grow (merge case).  When the adjustment swallows
 * the following vma(s) completely, they are unlinked and freed here.
 * Returns 0 on success, or the error from anon_vma_clone().
 *
 * NOTE(review): the remove_next encoding (1/2/3) mirrors upstream
 * mm/mmap.c; its stripped commentary is summarized inline below.
 */
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
	struct address_space *mapping = NULL;
	struct rb_root_cached *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL, *importer = NULL;

		if (end >= next->vm_end) {
			/* vma expands over next (and maybe beyond). */
			if (next == expand) {
				/*
				 * remove_next == 3: next expands backwards
				 * over vma; vma itself will be removed.
				 * Swap so "vma" is the surviving one.
				 */
				VM_WARN_ON(end != next->vm_end);
				remove_next = 3;
				VM_WARN_ON(file != next->vm_file);
				swap(vma, next);
			} else {
				VM_WARN_ON(expand != vma);
				/*
				 * remove_next == 1: next is removed;
				 * remove_next == 2: next and next->vm_next
				 * are both removed (end reaches past next).
				 */
				remove_next = 1 + (end > next->vm_end);
				VM_WARN_ON(remove_next == 2 &&
					   end != next->vm_next->vm_end);
				VM_WARN_ON(remove_next == 1 &&
					   end != next->vm_end);
				/* trim 'end' to the removal boundary */
				end = next->vm_end;
			}

			exporter = next;
			importer = vma;

			/*
			 * If next doesn't have an anon_vma, the one from
			 * next->vm_next (also being removed) must be cloned.
			 */
			if (remove_next == 2 && !next->anon_vma)
				exporter = next->vm_next;

		} else if (end > next->vm_start) {
			/* vma grows; next shrinks from the front. */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			exporter = next;
			importer = vma;
			VM_WARN_ON(expand != importer);
		} else if (end < vma->vm_end) {
			/* vma shrinks; next grows backwards over the gap. */
			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
			exporter = vma;
			importer = next;
			VM_WARN_ON(expand != importer);
		}

		/*
		 * The importer gains pages that rmap must still be able
		 * to find via the exporter's anon_vma: clone it across.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			int error;

			importer->anon_vma = exporter->anon_vma;
			error = anon_vma_clone(importer, exporter);
			if (error)
				return error;
		}
	}
again:
	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);

	if (file) {
		mapping = file->f_mapping;
		root = &mapping->i_mmap;
		uprobe_munmap(vma, vma->vm_start, vma->vm_end);

		if (adjust_next)
			uprobe_munmap(next, next->vm_start, next->vm_end);

		i_mmap_lock_write(mapping);
		if (insert) {
			/*
			 * Link @insert into the file tree now, while the
			 * lock is held, so rmap never misses the range.
			 */
			__vma_link_file(insert);
		}
	}

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_WARN_ON(adjust_next && next->anon_vma &&
			   anon_vma != next->anon_vma);
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (root) {
		/* Drop from the file tree while coordinates change. */
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	if (start != vma->vm_start) {
		vma->vm_start = start;
		start_changed = true;
	}
	if (end != vma->vm_end) {
		vma->vm_end = end;
		end_changed = true;
	}
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		/* Re-insert with the updated coordinates. */
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has already arranged the surviving vma to
		 * cover next's range; unlink next from list + rbtree.
		 * For remove_next == 3 the predecessor is unknown here.
		 */
		if (remove_next != 3)
			__vma_unlink_prev(mm, next, vma);
		else
			__vma_unlink_common(mm, next, NULL, false, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma inserted a fully prepared vma; the range is
		 * known not to conflict.
		 */
		__insert_vm_struct(mm, insert);
	} else {
		/* Only geometry changed: refresh the cached gaps. */
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = vm_end_gap(vma);
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_write(mapping);

	if (root) {
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		/* Free the swallowed vma and its remaining references. */
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		vm_area_free(next);

		if (remove_next != 3) {
			/* Continue with the vma now following us. */
			next = vma->vm_next;
		} else {
			/*
			 * remove_next == 3: "next" (the expanded vma, due
			 * to the earlier swap) is the one to re-examine.
			 */
			next = vma;
		}
		if (remove_next == 2) {
			/* Second vma to remove: loop once more. */
			remove_next = 1;
			end = next->vm_end;
			goto again;
		}
		else if (next)
			vma_gap_update(next);
		else {
			/*
			 * No successor: vma expanded to the previous
			 * highest end, so highest_vm_end is unchanged.
			 */
			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
		}
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}
1013
1014
1015
1016
1017
/*
 * Can @vma be merged with a hypothetical neighbour that has the given
 * file, flags and userfaultfd context?  VM_SOFTDIRTY is ignored in the
 * flag comparison so merging doesn't depend on soft-dirty state, and
 * vmas with a ->close hook are never merged (the hook is per-vma).
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
				struct file *file, unsigned long vm_flags,
				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
		return 0;
	return 1;
}
1040
/*
 * Are two anon_vmas compatible for merging?  Identical anon_vmas always
 * are; a NULL on either side is acceptable when @vma's anon_vma chain is
 * (at most) singular, so the merge cannot lose rmap coverage.
 */
static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
/*
 * Can a new region be merged in front of @vma (new region ends where
 * @vma starts)?  Requires matching mergeability and that @vma's file
 * offset equals the new region's @vm_pgoff (i.e. offsets are contiguous).
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		     struct anon_vma *anon_vma, struct file *file,
		     pgoff_t vm_pgoff,
		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}
1079
1080
1081
1082
1083
1084
1085
1086
/*
 * Can a new region be merged after @vma (new region starts where @vma
 * ends)?  The file offsets must be contiguous: vma's offset plus its
 * page count must equal the new region's @vm_pgoff.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		    struct anon_vma *anon_vma, struct file *file,
		    pgoff_t vm_pgoff,
		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = vma_pages(vma);
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
/*
 * vma_merge - try to merge a new/changed region [addr, end) with its
 * neighbours @prev and/or the following vma.
 *
 * Cases: extend prev forward; extend next backward; or collapse
 * prev + region + next into one vma.  Returns the resulting merged vma,
 * or NULL when no merge is possible (VM_SPECIAL mappings never merge).
 * The actual surgery is done by __vma_adjust().
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/* Special mappings (e.g. VM_PFNMAP) must not be merged. */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	/* If the region exactly replaces 'area', look one further along. */
	if (area && area->vm_end == end)
		next = next->vm_next;

	/* Sanity: caller-provided ordering must hold. */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(area && end > area->vm_end);
	VM_WARN_ON(addr >= end);

	/*
	 * Case: the region attaches to the end of prev.
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
					    anon_vma, file, pgoff,
					    vm_userfaultfd_ctx)) {
		/*
		 * ...and possibly also to the start of next, collapsing
		 * all three into prev.
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
						     anon_vma, file,
						     pgoff+pglen,
						     vm_userfaultfd_ctx) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			err = __vma_adjust(prev, prev->vm_start,
					 next->vm_end, prev->vm_pgoff, NULL,
					 prev);
		} else					/* cases 2, 5, 7 */
			err = __vma_adjust(prev, prev->vm_start,
					 end, prev->vm_pgoff, NULL, prev);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev, vm_flags);
		return prev;
	}

	/*
	 * Case: the region attaches only to the start of next.
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					     anon_vma, file, pgoff+pglen,
					     vm_userfaultfd_ctx)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = __vma_adjust(prev, prev->vm_start,
					 addr, prev->vm_pgoff, NULL, next);
		else {					/* cases 3, 8 */
			err = __vma_adjust(area, addr, next->vm_end,
					 next->vm_pgoff - pglen, NULL, next);
			/*
			 * In case 3 'area' was replaced by 'next' inside
			 * __vma_adjust (remove_next == 3 swap).
			 */
			area = next;
		}
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area, vm_flags);
		return area;
	}

	return NULL;
}
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1250{
1251 return a->vm_end == b->vm_start &&
1252 mpol_equal(vma_policy(a), vma_policy(b)) &&
1253 a->vm_file == b->vm_file &&
1254 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
1255 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1256}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1281{
1282 if (anon_vma_compatible(a, b)) {
1283 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1284
1285 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1286 return anon_vma;
1287 }
1288 return NULL;
1289}
1290
1291
1292
1293
1294
1295
1296
1297
1298
/*
 * Find an existing anon_vma that @vma could adopt, checking the next
 * vma first and then the previous one.  Returns NULL if neither
 * neighbour's anon_vma is reusable; the caller then allocates a new one.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *near;

	near = vma->vm_next;
	if (!near)
		goto try_prev;

	anon_vma = reusable_anon_vma(near, vma, near);
	if (anon_vma)
		return anon_vma;
try_prev:
	near = vma->vm_prev;
	if (!near)
		goto none;

	anon_vma = reusable_anon_vma(near, near, vma);
	if (anon_vma)
		return anon_vma;
none:
	/* No reusable neighbour; caller will allocate a fresh anon_vma. */
	return NULL;
}
1330
1331
1332
1333
1334
1335static inline unsigned long round_hint_to_min(unsigned long hint)
1336{
1337 hint &= PAGE_MASK;
1338 if (((void *)hint != NULL) &&
1339 (hint < mmap_min_addr))
1340 return PAGE_ALIGN(mmap_min_addr);
1341 return hint;
1342}
1343
/*
 * Would locking @len more bytes exceed RLIMIT_MEMLOCK?  Only checked
 * for VM_LOCKED requests; CAP_IPC_LOCK bypasses the limit.  Returns 0
 * if allowed, -EAGAIN otherwise.
 */
static inline int mlock_future_check(struct mm_struct *mm,
				     unsigned long flags,
				     unsigned long len)
{
	unsigned long locked, lock_limit;

	if (flags & VM_LOCKED) {
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}
	return 0;
}
1361
1362static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1363{
1364 if (S_ISREG(inode->i_mode))
1365 return MAX_LFS_FILESIZE;
1366
1367 if (S_ISBLK(inode->i_mode))
1368 return MAX_LFS_FILESIZE;
1369
1370 if (S_ISSOCK(inode->i_mode))
1371 return MAX_LFS_FILESIZE;
1372
1373
1374 if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1375 return 0;
1376
1377
1378 return ULONG_MAX;
1379}
1380
/*
 * Check that mapping @len bytes at page offset @pgoff stays within the
 * file's maximum mappable size.  A maxsize of 0 means "no length limit"
 * (the subtraction then wraps, making the pgoff check effectively pass
 * — presumably intentional for FMODE_UNSIGNED_OFFSET files; see
 * file_mmap_size_max()).
 */
static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	u64 maxsize = file_mmap_size_max(file, inode);

	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}
1393
1394
1395
1396
/*
 * do_mmap - the core of mmap(2): validate arguments, derive vm_flags,
 * pick an address and create the mapping via mmap_region().
 *
 * On success returns the mapped address and sets *populate to the length
 * to pre-fault (VM_LOCKED or MAP_POPULATE); on failure returns a negative
 * errno encoded in the address.  Caller holds mmap_sem for writing.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, vm_flags_t vm_flags,
			unsigned long pgoff, unsigned long *populate,
			struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * READ_IMPLIES_EXEC personality: a readable mapping is also made
	 * executable, unless the backing file is on a noexec mount.
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* MAP_FIXED_NOREPLACE implies an exact-address request. */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Page-align; a length that rounds to zero has overflowed. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset + length must not wrap in page units. */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/*
	 * Pick an address; a non-page-aligned result encodes an errno.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (offset_in_page(addr))
		return addr;

	if (flags & MAP_FIXED_NOREPLACE) {
		struct vm_area_struct *vma = find_vma(mm, addr);

		if (vma && vma->vm_start < addr + len)
			return -EEXIST;
	}

	if (prot == PROT_EXEC) {
		/* Execute-only: try a dedicated protection key. */
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/*
	 * Combine caller flags, prot-derived bits and the mm defaults;
	 * mprotect() may later enable anything in VM_MAY*.
	 */
	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (mlock_future_check(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);
		unsigned long flags_mask;

		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Plain MAP_SHARED ignores non-legacy bits, then
			 * shares the MAP_SHARED_VALIDATE checks below.
			 */
			flags &= LEGACY_MAP_MASK;
			/* fall through */
		case MAP_SHARED_VALIDATE:
			if (flags & ~flags_mask)
				return -EOPNOTSUPP;
			if (prot & PROT_WRITE) {
				if (!(file->f_mode & FMODE_WRITE))
					return -EACCES;
				if (IS_SWAPFILE(file->f_mapping->host))
					return -ETXTBSY;
			}

			/* Append-only files may not be mapped writably. */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/* Mandatory locking conflicts with shared mmap. */
			if (locks_verify_locked(file))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through - remaining checks apply to both */
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/* Anonymous shared memory ignores pgoff. */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/* Private anonymous: pgoff is the virtual page. */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * MAP_NORESERVE: skip commit accounting unless overcommit is
	 * disabled; hugetlb does its own reservation tracking.
	 */
	if (flags & MAP_NORESERVE) {
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}
1584
/*
 * Common entry point for the mmap-family syscalls: resolve @fd to a
 * struct file (or construct an anonymous hugetlbfs file for
 * MAP_HUGETLB without an fd), then hand off to vm_mmap_pgoff().
 */
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	addr = untagged_addr(addr);

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file))
			/* hugetlb mappings are sized in huge-page units */
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		retval = -EINVAL;
		/* MAP_HUGETLB is only valid on a hugetlbfs-backed file */
		if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
			goto out_fput;
	} else if (flags & MAP_HUGETLB) {
		struct user_struct *user = NULL;
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called.
		 * A dummy user value is used because we are not locking
		 * memory so no accounting is necessary.
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				&user, HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}
1635
/* mmap_pgoff(2): thin syscall wrapper around ksys_mmap_pgoff(). */
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
1642
1643#ifdef __ARCH_WANT_SYS_OLD_MMAP
/* Argument block for the legacy one-pointer mmap(2) ABI. */
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;	/* byte offset; must be page-aligned */
};

/*
 * Legacy mmap(2) entry: arguments are passed in a user-space struct
 * rather than in registers.  The byte offset is converted to a page
 * offset before delegating to ksys_mmap_pgoff().
 */
SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
1665#endif
1666
1667
1668
1669
1670
1671
1672
/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).  Returns 1 when write-notification is wanted.
 */
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	vm_flags_t vm_flags = vma->vm_flags;
	const struct vm_operations_struct *vm_ops = vma->vm_ops;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
		return 1;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
		return 0;

	/* Do we need to track softdirty? */
	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
		return 1;

	/* Specialty mapping? */
	if (vm_flags & VM_PFNMAP)
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}
1704
1705
1706
1707
1708
1709static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1710{
1711
1712
1713
1714
1715 if (file && is_file_hugepages(file))
1716 return 0;
1717
1718 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1719}
1720
/*
 * Establish a new mapping of [addr, addr+len) in current->mm, backed by
 * @file (or anonymous/shmem when @file is NULL).  Caller holds mmap_sem
 * for write.  Returns the mapped address or a negative errno.
 */
unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;

	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
		unsigned long nr_pages;

		/*
		 * MAP_FIXED may remove pages of mappings that intersects with
		 * requested mapping. Account for the pages it would unmap.
		 */
		nr_pages = count_vma_pages_range(mm, addr, addr + len);

		if (!may_expand_vm(mm, vm_flags,
					(len >> PAGE_SHIFT) - nr_pages))
			return -ENOMEM;
	}

	/* Clear old maps overlapping the requested range */
	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
			      &rb_parent)) {
		if (do_munmap(mm, addr, len, uf))
			return -ENOMEM;
	}

	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = vm_area_alloc(mm);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
		}
		if (vm_flags & VM_SHARED) {
			error = mapping_map_writable(file->f_mapping);
			if (error)
				goto allow_write_and_free_vma;
		}

		/* ->mmap() can change vma->vm_file, but must guarantee that
		 * vma_link() below can deny write-access if VM_DENYWRITE is set
		 * and map writably if VM_SHARED is set. This usually means the
		 * new file must not have been exposed to user-space, yet.
		 */
		vma->vm_file = get_file(file);
		error = call_mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;

		/* Can addr have changed??
		 *
		 * Answer: Yes, several device drivers can do it in their
		 *         f_op->mmap method. -DaveM
		 * Bug: If addr is changed, prev, rb_link, rb_parent should
		 *      be updated for vma_link()
		 */
		WARN_ON_ONCE(addr != vma->vm_start);

		addr = vma->vm_start;
		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	} else {
		vma_set_anonymous(vma);
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	/* Once vma denies write, undo our temporary denial count */
	if (file) {
		if (vm_flags & VM_SHARED)
			mapping_unmap_writable(file->f_mapping);
		if (vm_flags & VM_DENYWRITE)
			allow_write_access(file);
	}
	file = vma->vm_file;
out:
	perf_event_mmap(vma);

	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		/* mlock is refused on special/gate/hugetlb/DAX mappings */
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(current->mm))
			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
		else
			mm->locked_vm += (len >> PAGE_SHIFT);
	}

	if (file)
		uprobe_mmap(vma);

	/*
	 * New (or expanded) vma always get soft dirty status.
	 * Otherwise user-space soft-dirty page tracker won't
	 * be able to distinguish situation when vma area unmapped,
	 * then new mapped in-place (which must be aimed as
	 * a completely new data area).
	 */
	vma->vm_flags |= VM_SOFTDIRTY;

	vma_set_page_prot(vma);

	return addr;

unmap_and_free_vma:
	vma->vm_file = NULL;
	fput(file);

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	charged = 0;
	if (vm_flags & VM_SHARED)
		mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
	if (vm_flags & VM_DENYWRITE)
		allow_write_access(file);
free_vma:
	vm_area_free(vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}
1886
unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	/*
	 * We implement the search by looking for an rbtree node that
	 * immediately follows a suitable gap. That is,
	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
	 * - gap_end   = vma->vm_start        >= info->low_limit  + length;
	 * - gap_end - gap_start >= length
	 */

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/* Adjust search limits by the desired length */
	if (info->high_limit < length)
		return -ENOMEM;
	high_limit = info->high_limit - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		goto check_highest;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		goto check_highest;

	while (true) {
		/* Visit left subtree if it looks promising */
		gap_end = vm_start_gap(vma);
		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
		/* Check if current node has a suitable gap */
		if (gap_start > high_limit)
			return -ENOMEM;
		if (gap_end >= low_limit &&
		    gap_end > gap_start && gap_end - gap_start >= length)
			goto found;

		/* Visit right subtree if it looks promising */
		if (vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				goto check_highest;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_left) {
				gap_start = vm_end_gap(vma->vm_prev);
				gap_end = vm_start_gap(vma);
				goto check_current;
			}
		}
	}

check_highest:
	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
	if (gap_start > high_limit)
		return -ENOMEM;

found:
	/* We found a suitable gap. Clip it with the original low_limit. */
	if (gap_start < info->low_limit)
		gap_start = info->low_limit;

	/* Adjust gap address to the desired alignment */
	gap_start += (info->align_offset - gap_start) & info->align_mask;

	VM_BUG_ON(gap_start + info->length > info->high_limit);
	VM_BUG_ON(gap_start + info->length > gap_end);
	return gap_start;
}
1989
/*
 * Mirror of unmapped_area(): find the highest suitable gap at or below
 * info->high_limit by walking the rbtree top-down (right subtree first).
 */
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/*
	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
	 */
	gap_end = info->high_limit;
	if (gap_end < length)
		return -ENOMEM;
	high_limit = gap_end - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	if (gap_start <= high_limit)
		goto found_highest;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		return -ENOMEM;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		return -ENOMEM;

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vm_start_gap(vma);
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit &&
		    gap_end > gap_start && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */
		if (vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				return -ENOMEM;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vm_end_gap(vma->vm_prev) : 0;
				goto check_current;
			}
		}
	}

found:
	/* We found a suitable gap. Clip it with the original high_limit. */
	if (gap_end > info->high_limit)
		gap_end = info->high_limit;

found_highest:
	/* Compute highest gap address at the desired alignment */
	gap_end -= info->length;
	gap_end -= (gap_end - info->align_offset) & info->align_mask;

	VM_BUG_ON(gap_end < info->low_limit);
	VM_BUG_ON(gap_end < gap_start);
	return gap_end;
}
2088
2089
2090#ifndef arch_get_mmap_end
2091#define arch_get_mmap_end(addr) (TASK_SIZE)
2092#endif
2093
2094#ifndef arch_get_mmap_base
2095#define arch_get_mmap_base(addr, base) (base)
2096#endif
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109#ifndef HAVE_ARCH_UNMAPPED_AREA
/*
 * Default bottom-up placement for a new mapping: honour MAP_FIXED and a
 * usable address hint, otherwise search upward from mm->mmap_base.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct vm_unmapped_area_info info;
	const unsigned long mmap_end = arch_get_mmap_end(addr);

	/* requested length too big for entire address space */
	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address: use it if the gap there fits */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = mmap_end;
	/*
	 * align_mask == 0 makes align_offset irrelevant (x & 0 == 0 in
	 * unmapped_area()), so leaving info.align_offset unset is harmless.
	 */
	info.align_mask = 0;
	return vm_unmapped_area(&info);
}
2141#endif
2142
2143
2144
2145
2146
2147#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
 * Default top-down placement: search downward from the mmap base and
 * fall back to a bottom-up search if that fails.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;
	const unsigned long mmap_end = arch_get_mmap_end(addr);

	/* requested length too big for entire address space */
	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vm_start_gap(vma)) &&
				(!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
	info.align_mask = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = mmap_end;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
2198#endif
2199
/*
 * Find a free address range of @len bytes, dispatching to the file's
 * get_unmapped_area (if any), shmem's (for anonymous shared mappings),
 * or the mm default.  Validates the result against TASK_SIZE and the
 * security hook before returning it.
 */
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	if (file) {
		if (file->f_op->get_unmapped_area)
			get_area = file->f_op->get_unmapped_area;
	} else if (flags & MAP_SHARED) {
		/*
		 * mmap_region() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge.
		 * do_mmap_pgoff() will clear pgoff, so match alignment.
		 */
		pgoff = 0;
		get_area = shmem_get_unmapped_area;
	}

	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (offset_in_page(addr))
		return -EINVAL;

	error = security_mmap_addr(addr);
	return error ? error : addr;
}
2241
2242EXPORT_SYMBOL(get_unmapped_area);
2243
2244
2245struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
2246{
2247 struct rb_node *rb_node;
2248 struct vm_area_struct *vma;
2249
2250
2251 vma = vmacache_find(mm, addr);
2252 if (likely(vma))
2253 return vma;
2254
2255 rb_node = mm->mm_rb.rb_node;
2256
2257 while (rb_node) {
2258 struct vm_area_struct *tmp;
2259
2260 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2261
2262 if (tmp->vm_end > addr) {
2263 vma = tmp;
2264 if (tmp->vm_start <= addr)
2265 break;
2266 rb_node = rb_node->rb_left;
2267 } else
2268 rb_node = rb_node->rb_right;
2269 }
2270
2271 if (vma)
2272 vmacache_update(addr, vma);
2273 return vma;
2274}
2275
2276EXPORT_SYMBOL(find_vma);
2277
2278
2279
2280
2281struct vm_area_struct *
2282find_vma_prev(struct mm_struct *mm, unsigned long addr,
2283 struct vm_area_struct **pprev)
2284{
2285 struct vm_area_struct *vma;
2286
2287 vma = find_vma(mm, addr);
2288 if (vma) {
2289 *pprev = vma->vm_prev;
2290 } else {
2291 struct rb_node *rb_node = rb_last(&mm->mm_rb);
2292
2293 *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
2294 }
2295 return vma;
2296}
2297
2298
2299
2300
2301
2302
/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma,
			     unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long new_start;

	/* address space limit tests */
	if (!may_expand_vm(mm, vma->vm_flags, grow))
		return -ENOMEM;

	/* Stack limit test */
	if (size > rlimit(RLIMIT_STACK))
		return -ENOMEM;

	/* mlock limit tests */
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked;
		unsigned long limit;
		locked = mm->locked_vm + grow;
		limit = rlimit(RLIMIT_MEMLOCK);
		limit >>= PAGE_SHIFT;
		if (locked > limit && !capable(CAP_IPC_LOCK))
			return -ENOMEM;
	}

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
			vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit..  This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory_mm(mm, grow))
		return -ENOMEM;

	return 0;
}
2343
2344#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
2345
2346
2347
2348
/*
 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
 * vma is the last one with address > vma->vm_end.  Have to extend vma.
 */
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next;
	unsigned long gap_addr;
	int error = 0;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/* Guard against exceeding limits of the address space. */
	address &= PAGE_MASK;
	if (address >= (TASK_SIZE & PAGE_MASK))
		return -ENOMEM;
	address += PAGE_SIZE;

	/* Enforce stack_guard_gap */
	gap_addr = address + stack_guard_gap;

	/* Guard against overflow */
	if (gap_addr < address || gap_addr > TASK_SIZE)
		gap_addr = TASK_SIZE;

	next = vma->vm_next;
	if (next && next->vm_start < gap_addr &&
			(next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */
	}

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_sem
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_end = address;
				anon_vma_interval_tree_post_update_vma(vma);
				if (vma->vm_next)
					vma_gap_update(vma->vm_next);
				else
					mm->highest_vm_end = vm_end_gap(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(mm);
	return error;
}
2435#endif
2436
2437
2438
2439
/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma
 * downwards to cover @address.
 */
int expand_downwards(struct vm_area_struct *vma,
				   unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *prev;
	int error = 0;

	address &= PAGE_MASK;
	if (address < mmap_min_addr)
		return -EPERM;

	/* Enforce stack_guard_gap */
	prev = vma->vm_prev;
	/* Check that both stack segments have the same anon_vma? */
	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
			(prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
		if (address - prev->vm_end < stack_guard_gap)
			return -ENOMEM;
	}

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (grow <= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_sem
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_start = address;
				vma->vm_pgoff -= grow;
				anon_vma_interval_tree_post_update_vma(vma);
				vma_gap_update(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(mm);
	return error;
}
2513
2514
/* enforced gap between the expanding stack and other mappings */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2516
2517static int __init cmdline_parse_stack_guard_gap(char *p)
2518{
2519 unsigned long val;
2520 char *endptr;
2521
2522 val = simple_strtoul(p, &endptr, 10);
2523 if (!*endptr)
2524 stack_guard_gap = val << PAGE_SHIFT;
2525
2526 return 0;
2527}
2528__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
2529
2530#ifdef CONFIG_STACK_GROWSUP
/* On grow-up architectures the stack is extended upwards. */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_upwards(vma, address);
}
2535
/*
 * Find the VMA containing @addr, growing the preceding stack VMA up to
 * cover it if necessary (CONFIG_STACK_GROWSUP variant).
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	/* don't alter vm_end if the coredump is running */
	if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED)
		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
2552#else
/* On grow-down architectures the stack is extended downwards. */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}
2557
/*
 * Find the VMA containing @addr, growing a following VM_GROWSDOWN
 * stack VMA down to cover it if necessary.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	/* don't alter vm_start if the coredump is running */
	if (!mmget_still_valid(mm))
		return NULL;
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		populate_vma_page_range(vma, addr, start, NULL);
	return vma;
}
2582#endif
2583
2584EXPORT_SYMBOL_GPL(find_extend_vma);
2585
2586
2587
2588
2589
2590
2591
/*
 * Ok - we have the memory areas we should free on the vma list,
 * so release them, and do the vma updates.
 *
 * Called with the mm semaphore held.
 */
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	unsigned long nr_accounted = 0;

	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	do {
		long nrpages = vma_pages(vma);

		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += nrpages;
		vm_stat_account(mm, vma->vm_flags, -nrpages);
		vma = remove_vma(vma);	/* returns the next vma in the list */
	} while (vma);
	vm_unacct_memory(nr_accounted);
	validate_mm(mm);
}
2609
2610
2611
2612
2613
2614
/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);
	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
				 next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);
}
2630
2631
2632
2633
2634
/*
 * Create a list of vma's touched by the unmap, removing them from the mm's
 * vma list as we go..
 */
static void
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, unsigned long end)
{
	struct vm_area_struct **insertion_point;
	struct vm_area_struct *tail_vma = NULL;

	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
	vma->vm_prev = NULL;
	do {
		vma_rb_erase(vma, &mm->mm_rb);
		mm->map_count--;
		tail_vma = vma;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
	*insertion_point = vma;
	if (vma) {
		vma->vm_prev = prev;
		vma_gap_update(vma);
	} else
		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
	tail_vma->vm_next = NULL;

	/* Kill the cache */
	vmacache_invalidate(mm);
}
2661
2662
2663
2664
2665
/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
 * has already been checked or doesn't make sense to fail.
 */
int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	int err;

	/* let the backing object veto/prepare the split */
	if (vma->vm_ops && vma->vm_ops->split) {
		err = vma->vm_ops->split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below)
		new->vm_end = addr;
	else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vma;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	if (new_below)
		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
			((addr - new->vm_start) >> PAGE_SHIFT), new);
	else
		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);

	/* Success. */
	if (!err)
		return 0;

	/* Clean everything up if vma_adjust failed. */
	if (new->vm_ops && new->vm_ops->close)
		new->vm_ops->close(new);
	if (new->vm_file)
		fput(new->vm_file);
	unlink_anon_vmas(new);
 out_free_mpol:
	mpol_put(vma_policy(new));
 out_free_vma:
	vm_area_free(new);
	return err;
}
2725
2726
2727
2728
2729
/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(mm, vma, addr, new_below);
}
2738
2739
2740
2741
2742
2743
/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@goop.org>
 *
 * Returns 1 (with mmap_sem downgraded to read) when @downgrade was
 * requested and succeeded, 0 on plain success, negative errno on error.
 */
int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
		struct list_head *uf, bool downgrade)
{
	unsigned long end;
	struct vm_area_struct *vma, *prev, *last;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (len == 0)
		return -EINVAL;

	/*
	 * arch_unmap() might do unmaps itself.  It must be called
	 * and finish any rbtree manipulation before this code
	 * runs and also starts to manipulate the rbtree.
	 */
	arch_unmap(mm, start, end);

	/* Find the first overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma)
		return 0;
	prev = vma->vm_prev;
	/* we have  start < vma->vm_end  */

	/* if it doesn't overlap, we have nothing.. */
	if (vma->vm_start >= end)
		return 0;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */
	if (start > vma->vm_start) {
		int error;

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
			return -ENOMEM;

		error = __split_vma(mm, vma, start, 0);
		if (error)
			return error;
		prev = vma;
	}

	/* Does it split the last one? */
	last = find_vma(mm, end);
	if (last && end > last->vm_start) {
		int error = __split_vma(mm, last, end, 1);
		if (error)
			return error;
	}
	vma = prev ? prev->vm_next : mm->mmap;

	if (unlikely(uf)) {
		/*
		 * If userfaultfd_unmap_prep returns an error the vmas
		 * will remain splitted, but userland will get a
		 * highly unexpected error anyway. This is no
		 * different than the case where the first of the two
		 * __split_vma fails, but we don't undo the first
		 * split, despite we could. This is unlikely enough
		 * failure that it's not worth optimizing it for.
		 */
		int error = userfaultfd_unmap_prep(vma, start, end, uf);
		if (error)
			return error;
	}

	/*
	 * unlock any mlock()ed ranges before detaching vmas
	 */
	if (mm->locked_vm) {
		struct vm_area_struct *tmp = vma;
		while (tmp && tmp->vm_start < end) {
			if (tmp->vm_flags & VM_LOCKED) {
				mm->locked_vm -= vma_pages(tmp);
				munlock_vma_pages_all(tmp);
			}

			tmp = tmp->vm_next;
		}
	}

	/* Detach vmas from rbtree */
	detach_vmas_to_be_unmapped(mm, vma, prev, end);

	if (downgrade)
		downgrade_write(&mm->mmap_sem);

	unmap_region(mm, vma, prev, start, end);

	/* Fix up all other VM information */
	remove_vma_list(mm, vma);

	return downgrade ? 1 : 0;
}
2852
/* Unmap [start, start+len) without downgrading mmap_sem. */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	      struct list_head *uf)
{
	return __do_munmap(mm, start, len, uf, false);
}
2858
/*
 * Take mmap_sem, unmap the range in current->mm, then complete any
 * pending userfaultfd unmap notifications outside the lock.
 */
static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
{
	int ret;
	struct mm_struct *mm = current->mm;
	LIST_HEAD(uf);

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = __do_munmap(mm, start, len, &uf, downgrade);
	/*
	 * Returning 1 indicates mmap_sem is downgraded.
	 * But 1 is not legal return value of vm_munmap() and munmap(), reset
	 * it to 0 before return.
	 */
	if (ret == 1) {
		up_read(&mm->mmap_sem);
		ret = 0;
	} else
		up_write(&mm->mmap_sem);

	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}
2883
/* In-kernel munmap: never downgrades mmap_sem. */
int vm_munmap(unsigned long start, size_t len)
{
	return __vm_munmap(start, len, false);
}
2888EXPORT_SYMBOL(vm_munmap);
2889
/* munmap(2): downgrade the lock during teardown to reduce contention. */
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	addr = untagged_addr(addr);
	profile_munmap(addr);
	return __vm_munmap(addr, len, true);
}
2896
2897
2898
2899
2900
/*
 * Emulation of deprecated remap_file_pages() syscall: re-mmap an
 * existing shared mapping at a different page offset.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long populate = 0;
	unsigned long ret = -EINVAL;
	struct file *file;

	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
		     current->comm, current->pid);

	if (prot)
		return ret;
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	if (start + size <= start)
		return ret;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	vma = find_vma(mm, start);

	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (start < vma->vm_start)
		goto out;

	/* range spans several vmas: they must be contiguous and compatible */
	if (start + size > vma->vm_end) {
		struct vm_area_struct *next;

		for (next = vma->vm_next; next; next = next->vm_next) {
			/* hole between vmas ? */
			if (next->vm_start != next->vm_prev->vm_end)
				goto out;

			if (next->vm_file != vma->vm_file)
				goto out;

			if (next->vm_flags != vma->vm_flags)
				goto out;

			if (start + size <= next->vm_end)
				break;
		}

		if (!next)
			goto out;
	}

	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;

	flags &= MAP_NONBLOCK;
	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
	if (vma->vm_flags & VM_LOCKED) {
		struct vm_area_struct *tmp;
		flags |= MAP_LOCKED;

		/* drop PG_Mlocked flag for over-mapped range */
		/*
		 * NOTE(review): the continuation condition
		 * `tmp->vm_start >= start + size` looks inverted — since
		 * vma->vm_start <= start here, the body rarely (if ever)
		 * runs.  Confirm intent against upstream history before
		 * relying on this munlock pass.
		 */
		for (tmp = vma; tmp->vm_start >= start + size;
				tmp = tmp->vm_next) {
			/*
			 * Split pmd and munlock page on the border
			 * of the range.
			 */
			vma_adjust_trans_huge(tmp, start, start + size, 0);

			munlock_vma_pages_range(tmp,
					max(tmp->vm_start, start),
					min(tmp->vm_end, start + size));
		}
	}

	file = get_file(vma->vm_file);
	ret = do_mmap_pgoff(vma->vm_file, start, size,
			prot, flags, pgoff, &populate, NULL);
	fput(file);
out:
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(ret, populate);
	if (!IS_ERR_VALUE(ret))
		ret = 0;
	return ret;
}
2996
2997
2998
2999
3000
3001
/*
 *  this is really a simplified "do_mmap".  it only handles
 *  anonymous maps.  eventually we may be able to do some
 *  brk-specific accounting here.
 */
static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;

	/* Until we need other flags, refuse anything except VM_EXEC. */
	if ((flags & (~VM_EXEC)) != 0)
		return -EINVAL;
	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (offset_in_page(error))
		return error;

	error = mlock_future_check(mm, mm->def_flags, len);
	if (error)
		return error;

	/*
	 * Clear old maps.  this also does some error checking for us
	 */
	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
			      &rb_parent)) {
		if (do_munmap(mm, addr, len, uf))
			return -ENOMEM;
	}

	/* Check against address space limits *after* clearing old maps... */
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */
	vma = vma_merge(mm, prev, addr, addr + len, flags,
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * create a vma struct for an anonymous mapping
	 */
	vma = vm_area_alloc(mm);
	if (!vma) {
		vm_unacct_memory(len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	vma_set_anonymous(vma);
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;
	vma->vm_flags = flags;
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_link(mm, vma, prev, rb_link, rb_parent);
out:
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	mm->data_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vma->vm_flags |= VM_SOFTDIRTY;
	return 0;
}
3073
3074int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3075{
3076 struct mm_struct *mm = current->mm;
3077 unsigned long len;
3078 int ret;
3079 bool populate;
3080 LIST_HEAD(uf);
3081
3082 len = PAGE_ALIGN(request);
3083 if (len < request)
3084 return -ENOMEM;
3085 if (!len)
3086 return 0;
3087
3088 if (down_write_killable(&mm->mmap_sem))
3089 return -EINTR;
3090
3091 ret = do_brk_flags(addr, len, flags, &uf);
3092 populate = ((mm->def_flags & VM_LOCKED) != 0);
3093 up_write(&mm->mmap_sem);
3094 userfaultfd_unmap_complete(mm, &uf);
3095 if (populate && !ret)
3096 mm_populate(addr, len);
3097 return ret;
3098}
3099EXPORT_SYMBOL(vm_brk_flags);
3100
/*
 * vm_brk - vm_brk_flags() with no extra VM flags.
 * See vm_brk_flags() for locking and return semantics.
 */
int vm_brk(unsigned long addr, unsigned long len)
{
	return vm_brk_flags(addr, len, 0);
}
EXPORT_SYMBOL(vm_brk);
3106
3107
/* Release all mmaps of @mm.  Called on the last mmput(). */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;

	/* mm's last user has gone, and it is about to be pulled down */
	mmu_notifier_release(mm);

	if (unlikely(mm_is_oom_victim(mm))) {
		/*
		 * Manually reap the mm to free as much memory as possible,
		 * then set MMF_OOM_SKIP (as the oom reaper does) so the oom
		 * killer stops considering this mm.
		 *
		 * Taking mmap_sem for write *after* setting MMF_OOM_SKIP
		 * guarantees the oom reaper will not run on this mm again
		 * once the semaphore is dropped: the reaper takes the lock
		 * and rechecks the flag under it.
		 *
		 * Nothing else can be holding mm->mmap_sem here, and the
		 * mmu_notifier_release() above ensures mmu-notifier
		 * callbacks in __oom_reap_task_mm() will not block.
		 *
		 * This must happen before munlock_vma_pages_all() below,
		 * which clears VM_LOCKED — presumably the reaper needs to
		 * see that flag intact (NOTE(review): confirm against the
		 * oom reaper's VM_LOCKED handling).
		 */
		(void)__oom_reap_task_mm(mm);

		set_bit(MMF_OOM_SKIP, &mm->flags);
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}

	/* Drop PG_mlocked on all pages of any still-locked vmas. */
	if (mm->locked_vm) {
		vma = mm->mmap;
		while (vma) {
			if (vma->vm_flags & VM_LOCKED)
				munlock_vma_pages_all(vma);
			vma = vma->vm_next;
		}
	}

	arch_exit_mmap(mm);

	vma = mm->mmap;
	if (!vma)	/* Can happen if dup_mmap() received an OOM */
		return;

	lru_add_drain();
	flush_cache_mm(mm);
	/* full-range gather: tear down everything in one pass */
	tlb_gather_mmu(&tlb, mm, 0, -1);

	/* update_hiwater_rss(mm) here? but nobody should be looking */
	unmap_vmas(&tlb, vma, 0, -1);
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, 0, -1);

	/*
	 * Walk the list again, freeing the vma structures and totting up
	 * the VM_ACCOUNT pages so the commitment can be returned in one
	 * vm_unacct_memory() call.
	 */
	while (vma) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma = remove_vma(vma);
	}
	vm_unacct_memory(nr_accounted);
}
3176
3177
3178
3179
3180
/*
 * Insert vm structure into process list sorted by address and into the
 * inode's i_mmap tree.  If vm_file is non-NULL then i_mmap_rwsem is taken
 * inside vma_link().  Returns 0 or -ENOMEM.
 */
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	/* Fails if the range already overlaps an existing vma. */
	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		return -ENOMEM;
	if ((vma->vm_flags & VM_ACCOUNT) &&
	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
		return -ENOMEM;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when the page's anon_vma and index
	 * are set.  But set here the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first fault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 */
	if (vma_is_anonymous(vma)) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}
3213
3214
3215
3216
3217
/*
 * Copy the vma structure to a new location in the same mm, prior to
 * moving page table entries, to effect an mremap move.  On success
 * returns the new vma (and may update *vmap if the old vma was merged
 * away); returns NULL on failure.  *need_rmap_locks tells the caller
 * whether rmap locks must be held while moving the page tables.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	bool faulted_in_anon_vma = true;

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	/* Destination range must be free of other vmas. */
	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
		return NULL;
	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			    vma->vm_userfaultfd_ctx);
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen.  mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe.  It is only safe to keep the vm_pgoff
			 * the same while differing the ends.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		/* Moving backwards (or merged) may need rmap locks. */
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		/* No merge possible: duplicate the vma at the new range. */
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		new_vma->vm_start = addr;
		new_vma->vm_end = addr + len;
		new_vma->vm_pgoff = pgoff;
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		vma_link(mm, new_vma, prev, rb_link, rb_parent);
		*need_rmap_locks = false;
	}
	return new_vma;

out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	vm_area_free(new_vma);
out:
	return NULL;
}
3292
3293
3294
3295
3296
3297bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3298{
3299 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3300 return false;
3301
3302 if (is_data_mapping(flags) &&
3303 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3304
3305 if (rlimit(RLIMIT_DATA) == 0 &&
3306 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3307 return true;
3308
3309 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3310 current->comm, current->pid,
3311 (mm->data_vm + npages) << PAGE_SHIFT,
3312 rlimit(RLIMIT_DATA),
3313 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3314
3315 if (!ignore_rlimit_data)
3316 return false;
3317 }
3318
3319 return true;
3320}
3321
3322void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3323{
3324 mm->total_vm += npages;
3325
3326 if (is_exec_mapping(flags))
3327 mm->exec_vm += npages;
3328 else if (is_stack_mapping(flags))
3329 mm->stack_vm += npages;
3330 else if (is_data_mapping(flags))
3331 mm->data_vm += npages;
3332}
3333
3334static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3335
3336
3337
3338
/*
 * Deliberately empty: having a close hook prevents vma merging
 * regardless of the vma's flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
}
3342
3343static const char *special_mapping_name(struct vm_area_struct *vma)
3344{
3345 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3346}
3347
3348static int special_mapping_mremap(struct vm_area_struct *new_vma)
3349{
3350 struct vm_special_mapping *sm = new_vma->vm_private_data;
3351
3352 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3353 return -EFAULT;
3354
3355 if (sm->mremap)
3356 return sm->mremap(sm, new_vma);
3357
3358 return 0;
3359}
3360
/* vm_ops for mappings installed via _install_special_mapping(). */
static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.mremap = special_mapping_mremap,
	.name = special_mapping_name,
};
3367
/* vm_ops for the older install_special_mapping() page-array interface. */
static const struct vm_operations_struct legacy_special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
};
3372
/*
 * Fault handler shared by both special-mapping flavours: resolve the
 * faulting page offset against the mapping's NULL-terminated page
 * array, or delegate to the vm_special_mapping's own fault callback.
 */
static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgoff_t pgoff;
	struct page **pages;

	if (vma->vm_ops == &legacy_special_mapping_vmops) {
		/* legacy interface: vm_private_data is the page array */
		pages = vma->vm_private_data;
	} else {
		struct vm_special_mapping *sm = vma->vm_private_data;

		if (sm->fault)
			return sm->fault(sm, vmf->vma, vmf);

		pages = sm->pages;
	}

	/* Walk to the pgoff'th entry; array is NULL-terminated. */
	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
		pgoff--;

	if (*pages) {
		struct page *page = *pages;
		get_page(page);
		vmf->page = page;
		return 0;
	}

	/* Faults past the supplied pages always SIGBUS. */
	return VM_FAULT_SIGBUS;
}
3402
/*
 * Allocate, initialise and insert a special-mapping vma covering
 * [addr, addr+len) with the given vm_ops and private data.
 * Called with mm->mmap_sem held for writing.  Returns the new vma,
 * or an ERR_PTR on failure.
 */
static struct vm_area_struct *__install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, void *priv,
	const struct vm_operations_struct *ops)
{
	int ret;
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);
	if (unlikely(vma == NULL))
		return ERR_PTR(-ENOMEM);

	vma->vm_start = addr;
	vma->vm_end = addr + len;

	/* Special mappings never expand and always start soft-dirty. */
	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = ops;
	vma->vm_private_data = priv;

	ret = insert_vm_struct(mm, vma);
	if (ret)
		goto out;

	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);

	perf_event_mmap(vma);

	return vma;

out:
	vm_area_free(vma);
	return ERR_PTR(ret);
}
3439
3440bool vma_is_special_mapping(const struct vm_area_struct *vma,
3441 const struct vm_special_mapping *sm)
3442{
3443 return vma->vm_private_data == sm &&
3444 (vma->vm_ops == &special_mapping_vmops ||
3445 vma->vm_ops == &legacy_special_mapping_vmops);
3446}
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
/*
 * Called with mm->mmap_sem held for writing.
 * Insert a new vma covering the given region, with the given flags,
 * backed by the vm_special_mapping descriptor @spec (its page array
 * may be shorter than len >> PAGE_SHIFT if it is NULL-terminated;
 * the region past the last supplied page produces SIGBUS).  @spec and
 * the pages it points to must stay alive as long as the mapping can
 * exist.  Returns the vma or an ERR_PTR.
 */
struct vm_area_struct *_install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, const struct vm_special_mapping *spec)
{
	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
					&special_mapping_vmops);
}
3465
3466int install_special_mapping(struct mm_struct *mm,
3467 unsigned long addr, unsigned long len,
3468 unsigned long vm_flags, struct page **pages)
3469{
3470 struct vm_area_struct *vma = __install_special_mapping(
3471 mm, addr, len, vm_flags, (void *)pages,
3472 &legacy_special_mapping_vmops);
3473
3474 return PTR_ERR_OR_ZERO(vma);
3475}
3476
/* Serializes mm_take_all_locks() callers system-wide. */
static DEFINE_MUTEX(mm_all_locks_mutex);
3478
/*
 * Lock one anon_vma root for mm_take_all_locks(), marking it as taken
 * by setting the LSB of its rb_root node pointer so a shared root is
 * not locked twice.
 */
static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of the rb_node pointer can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
		/*
		 * We can safely modify the bit after taking the
		 * anon_vma->root->rwsem.  If some other vma in this mm
		 * shares the same anon_vma we won't take it again.
		 *
		 * No need for atomic instructions here: the bit can't
		 * change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}
3501
/*
 * Lock one address_space for mm_take_all_locks(), using the
 * AS_MM_ALL_LOCKS flag to avoid locking a mapping shared by several
 * vmas of this mm more than once.
 */
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
	}
}
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm.  The caller must
 * already hold mm->mmap_sem for writing and may not release it until
 * mm_drop_all_locks() returns.
 *
 * File mappings are locked before anonymous ones, with hugetlb
 * mappings taken first within the file pass; the ordering must be
 * consistent for all callers (it is what keeps concurrent
 * mm_take_all_locks() invocations, serialized by mm_all_locks_mutex,
 * deadlock-free).
 *
 * Returns 0 on success, or -EINTR if a signal arrived (everything
 * taken so far is dropped before returning).
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(down_read_trylock(&mm->mmap_sem));

	mutex_lock(&mm_all_locks_mutex);

	/* Pass 1: hugetlb file mappings. */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	/* Pass 2: all other file mappings. */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	/* Pass 3: anon_vmas. */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
3596
/*
 * Undo vm_lock_anon_vma(): clear the LSB marker on the anon_vma root
 * and release its rwsem.  Only acts if this root was actually taken.
 */
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of the rb_node pointer can't change to 0 from
		 * under us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the anon_vma, so that rb_insert/rb_erase can change
		 * the pointer after taking the anon_vma lock.
		 *
		 * No need of atomic instructions here: the bit can't
		 * change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}
3618
/*
 * Undo vm_lock_mapping(): release i_mmap_rwsem and clear the
 * AS_MM_ALL_LOCKS marker.  Only acts if this mapping was actually taken.
 */
static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}
3632
3633
3634
3635
3636
/*
 * The mmap_sem cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(down_read_trylock(&mm->mmap_sem));
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	/* Release every lock taken by mm_take_all_locks(). */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}
3655
3656
3657
3658
/*
 * initialise the percpu counter for VM
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
}
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory
 * hogging process, such that they cannot recover (kill the hog) in
 * OVERCOMMIT_NEVER mode.
 *
 * The default value is min(3% of free memory, 128MB);
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	/* free_kbytes / 32 == 3.125% of free memory; cap = 1<<17 kB = 128MB */
	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of the reserve is to allow root to log in and kill a
 * memory hogging process in OVERCOMMIT_GUESS or OVERCOMMIT_NEVER mode.
 *
 * The default value is min(3% of free memory, 8MB);
 * 8MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	/* free_kbytes / 32 == 3.125% of free memory; cap = 1<<13 kB = 8MB */
	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
/*
 * Keep the user/admin reserves proportional to available memory across
 * memory hotplug events: grow them (up to their default caps) when
 * memory comes online, and shrink them when they would exceed the
 * remaining free memory after memory goes offline.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 17))
			init_user_reserve();

		/* Default max is 8MB.  Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 13))
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
3765
/* Hotplug-memory notifier that keeps the reserves scaled; see above. */
static struct notifier_block reserve_mem_nb = {
	.notifier_call = reserve_mem_notifier,
};
3769
3770static int __meminit init_reserve_notifier(void)
3771{
3772 if (register_hotmemory_notifier(&reserve_mem_nb))
3773 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3774
3775 return 0;
3776}
3777subsys_initcall(init_reserve_notifier);
3778