/*
 * mm/mmap.c
 *
 * Address space mapping, unmapping and accounting for user processes.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)	(addr)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data = true;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

/*
 * Map architecture-independent VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * combinations to the architecture's page protection bits.  The first
 * eight entries (__P000..__P111) describe private (copy-on-write)
 * mappings, the last eight (__S000..__S111) describe shared mappings.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);

static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/*
 * Update vma->vm_page_prot to reflect vma->vm_flags.  If the vma wants
 * write notifications (e.g. for dirty tracking), drop VM_SHARED from
 * the flags used to compute the protection, so that shared writable
 * pages start out write-protected and the first write can be caught.
 */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;

	vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma)) {
		vm_flags &= ~VM_SHARED;
		vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
						     vm_flags);
	}
}

/*
 * Unlink a file-backed vma from the address_space's i_mmap interval
 * tree and undo the writecount/writable-mapping accounting.  Requires
 * that the caller holds mapping->i_mmap_rwsem.
 */
129static void __remove_shared_vm_struct(struct vm_area_struct *vma,
130 struct file *file, struct address_space *mapping)
131{
132 if (vma->vm_flags & VM_DENYWRITE)
133 atomic_inc(&file_inode(file)->i_writecount);
134 if (vma->vm_flags & VM_SHARED)
135 mapping_unmap_writable(mapping);
136
137 flush_dcache_mmap_lock(mapping);
138 vma_interval_tree_remove(vma, &mapping->i_mmap);
139 flush_dcache_mmap_unlock(mapping);
140}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
146void unlink_file_vma(struct vm_area_struct *vma)
147{
148 struct file *file = vma->vm_file;
149
150 if (file) {
151 struct address_space *mapping = file->f_mapping;
152 i_mmap_lock_write(mapping);
153 __remove_shared_vm_struct(vma, file, mapping);
154 i_mmap_unlock_write(mapping);
155 }
156}

/*
 * Close a vm structure and free it, returning the next.
 */
161static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
162{
163 struct vm_area_struct *next = vma->vm_next;
164
165 might_sleep();
166 if (vma->vm_ops && vma->vm_ops->close)
167 vma->vm_ops->close(vma);
168 if (vma->vm_file)
169 fput(vma->vm_file);
170 mpol_put(vma_policy(vma));
171 kmem_cache_free(vm_area_cachep, vma);
172 return next;
173}
174
175static unsigned long do_brk(unsigned long addr, unsigned long len);
176
177SYSCALL_DEFINE1(brk, unsigned long, brk)
178{
179 unsigned long retval;
180 unsigned long newbrk, oldbrk;
181 struct mm_struct *mm = current->mm;
182 unsigned long min_brk;
183 bool populate;
184
185 down_write(&mm->mmap_sem);
186
187#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted.
	 */
193 if (current->brk_randomized)
194 min_brk = mm->start_brk;
195 else
196 min_brk = mm->end_data;
197#else
198 min_brk = mm->start_brk;
199#endif
200 if (brk < min_brk)
201 goto out;

	/*
	 * Check against rlimit here.  If this check is done later, after the
	 * test of oldbrk with newbrk, then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is lower than (mm->end_data - mm->start_data).  Take care of this
	 * by only being willing to grow the brk early enough.
	 */
209 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
210 mm->end_data, mm->start_data))
211 goto out;
212
213 newbrk = PAGE_ALIGN(brk);
214 oldbrk = PAGE_ALIGN(mm->brk);
215 if (oldbrk == newbrk)
216 goto set_brk;

	/* Always allow shrinking brk. */
219 if (brk <= mm->brk) {
220 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
221 goto set_brk;
222 goto out;
223 }

	/* Check against existing mmap mappings. */
226 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
227 goto out;

	/* Ok, looks good - let it rip. */
230 if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
231 goto out;
232
233set_brk:
234 mm->brk = brk;
235 populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
236 up_write(&mm->mmap_sem);
237 if (populate)
238 mm_populate(oldbrk, newbrk - oldbrk);
239 return brk;
240
241out:
242 retval = mm->brk;
243 up_write(&mm->mmap_sem);
244 return retval;
245}
246
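/*
 * vma_compute_subtree_gap() returns the largest free gap immediately
 * below any vma in this rbtree subtree: the gap below the node itself
 * (vm_start minus the previous vma's vm_end) and the rb_subtree_gap
 * values cached in its children.  The augmented rbtree keeps this
 * per node so that the gap search in unmapped_area() can skip whole
 * subtrees that cannot contain a large enough hole.
 */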
247static long vma_compute_subtree_gap(struct vm_area_struct *vma)
248{
249 unsigned long max, subtree_gap;
250 max = vma->vm_start;
251 if (vma->vm_prev)
252 max -= vma->vm_prev->vm_end;
253 if (vma->vm_rb.rb_left) {
254 subtree_gap = rb_entry(vma->vm_rb.rb_left,
255 struct vm_area_struct, vm_rb)->rb_subtree_gap;
256 if (subtree_gap > max)
257 max = subtree_gap;
258 }
259 if (vma->vm_rb.rb_right) {
260 subtree_gap = rb_entry(vma->vm_rb.rb_right,
261 struct vm_area_struct, vm_rb)->rb_subtree_gap;
262 if (subtree_gap > max)
263 max = subtree_gap;
264 }
265 return max;
266}
267
268#ifdef CONFIG_DEBUG_VM_RB
269static int browse_rb(struct mm_struct *mm)
270{
271 struct rb_root *root = &mm->mm_rb;
272 int i = 0, j, bug = 0;
273 struct rb_node *nd, *pn = NULL;
274 unsigned long prev = 0, pend = 0;
275
276 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
277 struct vm_area_struct *vma;
278 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
279 if (vma->vm_start < prev) {
280 pr_emerg("vm_start %lx < prev %lx\n",
281 vma->vm_start, prev);
282 bug = 1;
283 }
284 if (vma->vm_start < pend) {
285 pr_emerg("vm_start %lx < pend %lx\n",
286 vma->vm_start, pend);
287 bug = 1;
288 }
289 if (vma->vm_start > vma->vm_end) {
290 pr_emerg("vm_start %lx > vm_end %lx\n",
291 vma->vm_start, vma->vm_end);
292 bug = 1;
293 }
294 spin_lock(&mm->page_table_lock);
295 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
296 pr_emerg("free gap %lx, correct %lx\n",
297 vma->rb_subtree_gap,
298 vma_compute_subtree_gap(vma));
299 bug = 1;
300 }
301 spin_unlock(&mm->page_table_lock);
302 i++;
303 pn = nd;
304 prev = vma->vm_start;
305 pend = vma->vm_end;
306 }
307 j = 0;
308 for (nd = pn; nd; nd = rb_prev(nd))
309 j++;
310 if (i != j) {
311 pr_emerg("backwards %d, forwards %d\n", j, i);
312 bug = 1;
313 }
314 return bug ? -1 : i;
315}
316
317static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
318{
319 struct rb_node *nd;
320
321 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
322 struct vm_area_struct *vma;
323 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
324 VM_BUG_ON_VMA(vma != ignore &&
325 vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
326 vma);
327 }
328}
329
330static void validate_mm(struct mm_struct *mm)
331{
332 int bug = 0;
333 int i = 0;
334 unsigned long highest_address = 0;
335 struct vm_area_struct *vma = mm->mmap;
336
337 while (vma) {
338 struct anon_vma *anon_vma = vma->anon_vma;
339 struct anon_vma_chain *avc;
340
341 if (anon_vma) {
342 anon_vma_lock_read(anon_vma);
343 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
344 anon_vma_interval_tree_verify(avc);
345 anon_vma_unlock_read(anon_vma);
346 }
347
348 highest_address = vma->vm_end;
349 vma = vma->vm_next;
350 i++;
351 }
352 if (i != mm->map_count) {
353 pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
354 bug = 1;
355 }
356 if (highest_address != mm->highest_vm_end) {
357 pr_emerg("mm->highest_vm_end %lx, found %lx\n",
358 mm->highest_vm_end, highest_address);
359 bug = 1;
360 }
361 i = browse_rb(mm);
362 if (i != mm->map_count) {
363 if (i != -1)
364 pr_emerg("map_count %d rb %d\n", mm->map_count, i);
365 bug = 1;
366 }
367 VM_BUG_ON_MM(bug, mm);
368}
369#else
370#define validate_mm_rb(root, ignore) do { } while (0)
371#define validate_mm(mm) do { } while (0)
372#endif
373
374RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
375 unsigned long, rb_subtree_gap, vma_compute_subtree_gap)

/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, propagating up the rbtree to
 * fix the cached gap values on the path to the root.
 */
382static void vma_gap_update(struct vm_area_struct *vma)
383{
384
385
386
387
388 vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
389}
390
391static inline void vma_rb_insert(struct vm_area_struct *vma,
392 struct rb_root *root)
393{
394
395 validate_mm_rb(root, NULL);
396
397 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
398}
399
400static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
401{
402
403
404
405
406 validate_mm_rb(root, vma);
407
408
409
410
411
412
413 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
414}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_sem and by
 * the root anon_vma's lock.
 */
430static inline void
431anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
432{
433 struct anon_vma_chain *avc;
434
435 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
436 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
437}
438
439static inline void
440anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
441{
442 struct anon_vma_chain *avc;
443
444 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
445 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
446}
447
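/*
 * find_vma_links() walks the rbtree looking for the place where a vma
 * covering [addr, end) would be inserted.  On success it fills in the
 * rb_link/rb_parent insertion point and *pprev (the vma preceding the
 * hole); if an existing vma overlaps the range it returns -ENOMEM.
 */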
448static int find_vma_links(struct mm_struct *mm, unsigned long addr,
449 unsigned long end, struct vm_area_struct **pprev,
450 struct rb_node ***rb_link, struct rb_node **rb_parent)
451{
452 struct rb_node **__rb_link, *__rb_parent, *rb_prev;
453
454 __rb_link = &mm->mm_rb.rb_node;
455 rb_prev = __rb_parent = NULL;
456
457 while (*__rb_link) {
458 struct vm_area_struct *vma_tmp;
459
460 __rb_parent = *__rb_link;
461 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
462
463 if (vma_tmp->vm_end > addr) {
464
465 if (vma_tmp->vm_start < end)
466 return -ENOMEM;
467 __rb_link = &__rb_parent->rb_left;
468 } else {
469 rb_prev = __rb_parent;
470 __rb_link = &__rb_parent->rb_right;
471 }
472 }
473
474 *pprev = NULL;
475 if (rb_prev)
476 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
477 *rb_link = __rb_link;
478 *rb_parent = __rb_parent;
479 return 0;
480}
481
482static unsigned long count_vma_pages_range(struct mm_struct *mm,
483 unsigned long addr, unsigned long end)
484{
485 unsigned long nr_pages = 0;
486 struct vm_area_struct *vma;
487
488
489 vma = find_vma_intersection(mm, addr, end);
490 if (!vma)
491 return 0;
492
493 nr_pages = (min(end, vma->vm_end) -
494 max(addr, vma->vm_start)) >> PAGE_SHIFT;
495
496
497 for (vma = vma->vm_next; vma; vma = vma->vm_next) {
498 unsigned long overlap_len;
499
500 if (vma->vm_start > end)
501 break;
502
503 overlap_len = min(end, vma->vm_end) - vma->vm_start;
504 nr_pages += overlap_len >> PAGE_SHIFT;
505 }
506
507 return nr_pages;
508}
509
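/*
 * __vma_link_rb() first updates the gap bookkeeping of the vma that
 * will follow the new one (or mm->highest_vm_end if there is none),
 * then links the new node with a temporary rb_subtree_gap of 0,
 * corrects it with vma_gap_update(), and finally lets the augmented
 * rbtree insertion rebalance the tree.
 */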
510void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
511 struct rb_node **rb_link, struct rb_node *rb_parent)
512{
513
514 if (vma->vm_next)
515 vma_gap_update(vma->vm_next);
516 else
517 mm->highest_vm_end = vma->vm_end;
518
519
520
521
522
523
524
525
526
527
528 rb_link_node(&vma->vm_rb, rb_parent, rb_link);
529 vma->rb_subtree_gap = 0;
530 vma_gap_update(vma);
531 vma_rb_insert(vma, &mm->mm_rb);
532}
533
534static void __vma_link_file(struct vm_area_struct *vma)
535{
536 struct file *file;
537
538 file = vma->vm_file;
539 if (file) {
540 struct address_space *mapping = file->f_mapping;
541
542 if (vma->vm_flags & VM_DENYWRITE)
543 atomic_dec(&file_inode(file)->i_writecount);
544 if (vma->vm_flags & VM_SHARED)
545 atomic_inc(&mapping->i_mmap_writable);
546
547 flush_dcache_mmap_lock(mapping);
548 vma_interval_tree_insert(vma, &mapping->i_mmap);
549 flush_dcache_mmap_unlock(mapping);
550 }
551}
552
553static void
554__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
555 struct vm_area_struct *prev, struct rb_node **rb_link,
556 struct rb_node *rb_parent)
557{
558 __vma_link_list(mm, vma, prev, rb_parent);
559 __vma_link_rb(mm, vma, rb_link, rb_parent);
560}
561
562static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
563 struct vm_area_struct *prev, struct rb_node **rb_link,
564 struct rb_node *rb_parent)
565{
566 struct address_space *mapping = NULL;
567
568 if (vma->vm_file) {
569 mapping = vma->vm_file->f_mapping;
570 i_mmap_lock_write(mapping);
571 }
572
573 __vma_link(mm, vma, prev, rb_link, rb_parent);
574 __vma_link_file(vma);
575
576 if (mapping)
577 i_mmap_unlock_write(mapping);
578
579 mm->map_count++;
580 validate_mm(mm);
581}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma
 * into the mm's list and rbtree.  The caller (vma_adjust) handles the
 * file's i_mmap linkage and holds the necessary locks.
 */
587static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
588{
589 struct vm_area_struct *prev;
590 struct rb_node **rb_link, *rb_parent;
591
592 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
593 &prev, &rb_link, &rb_parent))
594 BUG();
595 __vma_link(mm, vma, prev, rb_link, rb_parent);
596 mm->map_count++;
597}
598
599static inline void
600__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
601 struct vm_area_struct *prev)
602{
603 struct vm_area_struct *next;
604
605 vma_rb_erase(vma, &mm->mm_rb);
606 prev->vm_next = next = vma->vm_next;
607 if (next)
608 next->vm_prev = prev;
609
610
611 vmacache_invalidate(mm);
612}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
621int vma_adjust(struct vm_area_struct *vma, unsigned long start,
622 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
623{
624 struct mm_struct *mm = vma->vm_mm;
625 struct vm_area_struct *next = vma->vm_next;
626 struct vm_area_struct *importer = NULL;
627 struct address_space *mapping = NULL;
628 struct rb_root *root = NULL;
629 struct anon_vma *anon_vma = NULL;
630 struct file *file = vma->vm_file;
631 bool start_changed = false, end_changed = false;
632 long adjust_next = 0;
633 int remove_next = 0;
634
635 if (next && !insert) {
636 struct vm_area_struct *exporter = NULL;
637
638 if (end >= next->vm_end) {
639
640
641
642
643again: remove_next = 1 + (end > next->vm_end);
644 end = next->vm_end;
645 exporter = next;
646 importer = vma;
647 } else if (end > next->vm_start) {
648
649
650
651
652 adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
653 exporter = next;
654 importer = vma;
655 } else if (end < vma->vm_end) {
656
657
658
659
660
661 adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
662 exporter = vma;
663 importer = next;
664 }
665
666
667
668
669
670
671 if (exporter && exporter->anon_vma && !importer->anon_vma) {
672 int error;
673
674 importer->anon_vma = exporter->anon_vma;
675 error = anon_vma_clone(importer, exporter);
676 if (error)
677 return error;
678 }
679 }
680
681 if (file) {
682 mapping = file->f_mapping;
683 root = &mapping->i_mmap;
684 uprobe_munmap(vma, vma->vm_start, vma->vm_end);
685
686 if (adjust_next)
687 uprobe_munmap(next, next->vm_start, next->vm_end);
688
689 i_mmap_lock_write(mapping);
690 if (insert) {
691
692
693
694
695
696
697 __vma_link_file(insert);
698 }
699 }
700
701 vma_adjust_trans_huge(vma, start, end, adjust_next);
702
703 anon_vma = vma->anon_vma;
704 if (!anon_vma && adjust_next)
705 anon_vma = next->anon_vma;
706 if (anon_vma) {
707 VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
708 anon_vma != next->anon_vma, next);
709 anon_vma_lock_write(anon_vma);
710 anon_vma_interval_tree_pre_update_vma(vma);
711 if (adjust_next)
712 anon_vma_interval_tree_pre_update_vma(next);
713 }
714
715 if (root) {
716 flush_dcache_mmap_lock(mapping);
717 vma_interval_tree_remove(vma, root);
718 if (adjust_next)
719 vma_interval_tree_remove(next, root);
720 }
721
722 if (start != vma->vm_start) {
723 vma->vm_start = start;
724 start_changed = true;
725 }
726 if (end != vma->vm_end) {
727 vma->vm_end = end;
728 end_changed = true;
729 }
730 vma->vm_pgoff = pgoff;
731 if (adjust_next) {
732 next->vm_start += adjust_next << PAGE_SHIFT;
733 next->vm_pgoff += adjust_next;
734 }
735
736 if (root) {
737 if (adjust_next)
738 vma_interval_tree_insert(next, root);
739 vma_interval_tree_insert(vma, root);
740 flush_dcache_mmap_unlock(mapping);
741 }
742
743 if (remove_next) {
744
745
746
747
748 __vma_unlink(mm, next, vma);
749 if (file)
750 __remove_shared_vm_struct(next, file, mapping);
751 } else if (insert) {
752
753
754
755
756
757 __insert_vm_struct(mm, insert);
758 } else {
759 if (start_changed)
760 vma_gap_update(vma);
761 if (end_changed) {
762 if (!next)
763 mm->highest_vm_end = end;
764 else if (!adjust_next)
765 vma_gap_update(next);
766 }
767 }
768
769 if (anon_vma) {
770 anon_vma_interval_tree_post_update_vma(vma);
771 if (adjust_next)
772 anon_vma_interval_tree_post_update_vma(next);
773 anon_vma_unlock_write(anon_vma);
774 }
775 if (mapping)
776 i_mmap_unlock_write(mapping);
777
778 if (root) {
779 uprobe_mmap(vma);
780
781 if (adjust_next)
782 uprobe_mmap(next);
783 }
784
785 if (remove_next) {
786 if (file) {
787 uprobe_munmap(next, next->vm_start, next->vm_end);
788 fput(file);
789 }
790 if (next->anon_vma)
791 anon_vma_merge(vma, next);
792 mm->map_count--;
793 mpol_put(vma_policy(next));
794 kmem_cache_free(vm_area_cachep, next);
795
796
797
798
799
800 next = vma->vm_next;
801 if (remove_next == 2)
802 goto again;
803 else if (next)
804 vma_gap_update(next);
805 else
806 mm->highest_vm_end = end;
807 }
808 if (insert && file)
809 uprobe_mmap(insert);
810
811 validate_mm(mm);
812
813 return 0;
814}

/*
 * If the vma has a ->close operation then the driver probably needs to
 * release per-vma resources, so we don't attempt to merge those.
 */
820static inline int is_mergeable_vma(struct vm_area_struct *vma,
821 struct file *file, unsigned long vm_flags,
822 struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
823{
	/*
	 * VM_SOFTDIRTY should not prevent VMA merging: if the flags match
	 * apart from the soft-dirty bit, the caller marks the merged VMA
	 * as dirty.  Comparing the bit here would only force the kernel
	 * to create new VMAs where existing ones could have been extended.
	 */
832 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
833 return 0;
834 if (vma->vm_file != file)
835 return 0;
836 if (vma->vm_ops && vma->vm_ops->close)
837 return 0;
838 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
839 return 0;
840 return 1;
841}
842
843static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
844 struct anon_vma *anon_vma2,
845 struct vm_area_struct *vma)
846{
847
848
849
850
851 if ((!anon_vma1 || !anon_vma2) && (!vma ||
852 list_is_singular(&vma->anon_vma_chain)))
853 return 1;
854 return anon_vma1 == anon_vma2;
855}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but offsets are
 * incompatible.
 */
868static int
869can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
870 struct anon_vma *anon_vma, struct file *file,
871 pgoff_t vm_pgoff,
872 struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
873{
874 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
875 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
876 if (vma->vm_pgoff == vm_pgoff)
877 return 1;
878 }
879 return 0;
880}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but offsets are
 * incompatible.
 */
889static int
890can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
891 struct anon_vma *anon_vma, struct file *file,
892 pgoff_t vm_pgoff,
893 struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
894{
895 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
896 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
897 pgoff_t vm_pglen;
898 vm_pglen = vma_pages(vma);
899 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
900 return 1;
901 }
902 return 0;
903}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset or at the start of the vma).
 *
 * On success the vma that was extended (prev, or the existing area
 * merged with its successor) is returned; NULL means no merge was
 * possible and the caller has to create a new vma.
 */
934struct vm_area_struct *vma_merge(struct mm_struct *mm,
935 struct vm_area_struct *prev, unsigned long addr,
936 unsigned long end, unsigned long vm_flags,
937 struct anon_vma *anon_vma, struct file *file,
938 pgoff_t pgoff, struct mempolicy *policy,
939 struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
940{
941 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
942 struct vm_area_struct *area, *next;
943 int err;
944
945
946
947
948
949 if (vm_flags & VM_SPECIAL)
950 return NULL;
951
952 if (prev)
953 next = prev->vm_next;
954 else
955 next = mm->mmap;
956 area = next;
957 if (next && next->vm_end == end)
958 next = next->vm_next;
959
960
961
962
963 if (prev && prev->vm_end == addr &&
964 mpol_equal(vma_policy(prev), policy) &&
965 can_vma_merge_after(prev, vm_flags,
966 anon_vma, file, pgoff,
967 vm_userfaultfd_ctx)) {
968
969
970
971 if (next && end == next->vm_start &&
972 mpol_equal(policy, vma_policy(next)) &&
973 can_vma_merge_before(next, vm_flags,
974 anon_vma, file,
975 pgoff+pglen,
976 vm_userfaultfd_ctx) &&
977 is_mergeable_anon_vma(prev->anon_vma,
978 next->anon_vma, NULL)) {
979
980 err = vma_adjust(prev, prev->vm_start,
981 next->vm_end, prev->vm_pgoff, NULL);
982 } else
983 err = vma_adjust(prev, prev->vm_start,
984 end, prev->vm_pgoff, NULL);
985 if (err)
986 return NULL;
987 khugepaged_enter_vma_merge(prev, vm_flags);
988 return prev;
989 }
990
991
992
993
994 if (next && end == next->vm_start &&
995 mpol_equal(policy, vma_policy(next)) &&
996 can_vma_merge_before(next, vm_flags,
997 anon_vma, file, pgoff+pglen,
998 vm_userfaultfd_ctx)) {
999 if (prev && addr < prev->vm_end)
1000 err = vma_adjust(prev, prev->vm_start,
1001 addr, prev->vm_pgoff, NULL);
1002 else
1003 err = vma_adjust(area, addr, next->vm_end,
1004 next->vm_pgoff - pglen, NULL);
1005 if (err)
1006 return NULL;
1007 khugepaged_enter_vma_merge(area, vm_flags);
1008 return area;
1009 }
1010
1011 return NULL;
1012}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma: the vmas must be adjacent, with the same
 * memory policy and vm_file, matching file offsets, and flags that
 * differ only in bits mprotect or soft-dirty tracking may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't mean that we will
 * actually happen to merge these two vmas: there are other factors
 * that can prevent a merge.
 */
1027static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1028{
1029 return a->vm_end == b->vm_start &&
1030 mpol_equal(vma_policy(a), vma_policy(b)) &&
1031 a->vm_file == b->vm_file &&
1032 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
1033 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1034}
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1059{
1060 if (anon_vma_compatible(a, b)) {
1061 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1062
1063 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1064 return anon_vma;
1065 }
1066 return NULL;
1067}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * whether a fault in an anonymous vma could reuse the anon_vma of a
 * neighbouring vma (which must be compatible with it), instead of
 * allocating a new one.
 */
1077struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1078{
1079 struct anon_vma *anon_vma;
1080 struct vm_area_struct *near;
1081
1082 near = vma->vm_next;
1083 if (!near)
1084 goto try_prev;
1085
1086 anon_vma = reusable_anon_vma(near, vma, near);
1087 if (anon_vma)
1088 return anon_vma;
1089try_prev:
1090 near = vma->vm_prev;
1091 if (!near)
1092 goto none;
1093
1094 anon_vma = reusable_anon_vma(near, near, vma);
1095 if (anon_vma)
1096 return anon_vma;
1097none:
1098
1099
1100
1101
1102
1103
1104
1105
1106 return NULL;
1107}

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr.
 */
1113static inline unsigned long round_hint_to_min(unsigned long hint)
1114{
1115 hint &= PAGE_MASK;
1116 if (((void *)hint != NULL) &&
1117 (hint < mmap_min_addr))
1118 return PAGE_ALIGN(mmap_min_addr);
1119 return hint;
1120}
1121
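/*
 * mlock_future_check(): for a VM_LOCKED request, verify that the
 * additional locked pages would not push mm->locked_vm over
 * RLIMIT_MEMLOCK (unless the caller has CAP_IPC_LOCK).
 */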
1122static inline int mlock_future_check(struct mm_struct *mm,
1123 unsigned long flags,
1124 unsigned long len)
1125{
1126 unsigned long locked, lock_limit;
1127
1128
1129 if (flags & VM_LOCKED) {
1130 locked = len >> PAGE_SHIFT;
1131 locked += mm->locked_vm;
1132 lock_limit = rlimit(RLIMIT_MEMLOCK);
1133 lock_limit >>= PAGE_SHIFT;
1134 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1135 return -EAGAIN;
1136 }
1137 return 0;
1138}

/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */
1143unsigned long do_mmap(struct file *file, unsigned long addr,
1144 unsigned long len, unsigned long prot,
1145 unsigned long flags, vm_flags_t vm_flags,
1146 unsigned long pgoff, unsigned long *populate)
1147{
1148 struct mm_struct *mm = current->mm;
1149 int pkey = 0;
1150
1151 *populate = 0;
1152
1153 if (!len)
1154 return -EINVAL;
1155
1156
1157
1158
1159
1160
1161
1162 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1163 if (!(file && path_noexec(&file->f_path)))
1164 prot |= PROT_EXEC;
1165
1166 if (!(flags & MAP_FIXED))
1167 addr = round_hint_to_min(addr);
1168
1169
1170 len = PAGE_ALIGN(len);
1171 if (!len)
1172 return -ENOMEM;
1173
1174
1175 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1176 return -EOVERFLOW;
1177
1178
1179 if (mm->map_count > sysctl_max_map_count)
1180 return -ENOMEM;
1181
1182
1183
1184
1185 addr = get_unmapped_area(file, addr, len, pgoff, flags);
1186 if (offset_in_page(addr))
1187 return addr;
1188
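	/*
	 * A PROT_EXEC-only mapping may be able to use an execute-only
	 * protection key on architectures that support them; fall back
	 * to the default key (0) if none can be allocated.
	 */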
1189 if (prot == PROT_EXEC) {
1190 pkey = execute_only_pkey(mm);
1191 if (pkey < 0)
1192 pkey = 0;
1193 }
1194
1195
1196
1197
1198
1199 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
1200 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1201
1202 if (flags & MAP_LOCKED)
1203 if (!can_do_mlock())
1204 return -EPERM;
1205
1206 if (mlock_future_check(mm, vm_flags, len))
1207 return -EAGAIN;
1208
1209 if (file) {
1210 struct inode *inode = file_inode(file);
1211
1212 switch (flags & MAP_TYPE) {
1213 case MAP_SHARED:
1214 if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1215 return -EACCES;
1216
1217
1218
1219
1220
1221 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1222 return -EACCES;
1223
1224
1225
1226
1227 if (locks_verify_locked(file))
1228 return -EAGAIN;
1229
1230 vm_flags |= VM_SHARED | VM_MAYSHARE;
1231 if (!(file->f_mode & FMODE_WRITE))
1232 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through */
1235 case MAP_PRIVATE:
1236 if (!(file->f_mode & FMODE_READ))
1237 return -EACCES;
1238 if (path_noexec(&file->f_path)) {
1239 if (vm_flags & VM_EXEC)
1240 return -EPERM;
1241 vm_flags &= ~VM_MAYEXEC;
1242 }
1243
1244 if (!file->f_op->mmap)
1245 return -ENODEV;
1246 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1247 return -EINVAL;
1248 break;
1249
1250 default:
1251 return -EINVAL;
1252 }
1253 } else {
1254 switch (flags & MAP_TYPE) {
1255 case MAP_SHARED:
1256 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1257 return -EINVAL;
1258
1259
1260
1261 pgoff = 0;
1262 vm_flags |= VM_SHARED | VM_MAYSHARE;
1263 break;
1264 case MAP_PRIVATE:
1265
1266
1267
1268 pgoff = addr >> PAGE_SHIFT;
1269 break;
1270 default:
1271 return -EINVAL;
1272 }
1273 }
1274
1275
1276
1277
1278
1279 if (flags & MAP_NORESERVE) {
1280
1281 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1282 vm_flags |= VM_NORESERVE;
1283
1284
1285 if (file && is_file_hugepages(file))
1286 vm_flags |= VM_NORESERVE;
1287 }
1288
1289 addr = mmap_region(file, addr, len, vm_flags, pgoff);
1290 if (!IS_ERR_VALUE(addr) &&
1291 ((vm_flags & VM_LOCKED) ||
1292 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1293 *populate = len;
1294 return addr;
1295}
1296
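/*
 * sys_mmap_pgoff(): resolve the fd (or set up a hugetlb pseudo-file
 * for MAP_HUGETLB | MAP_ANONYMOUS requests) and hand the request to
 * vm_mmap_pgoff() (in mm/util.c), which takes mmap_sem and calls
 * do_mmap() above.
 */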
1297SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1298 unsigned long, prot, unsigned long, flags,
1299 unsigned long, fd, unsigned long, pgoff)
1300{
1301 struct file *file = NULL;
1302 unsigned long retval;
1303
1304 if (!(flags & MAP_ANONYMOUS)) {
1305 audit_mmap_fd(fd, flags);
1306 file = fget(fd);
1307 if (!file)
1308 return -EBADF;
1309 if (is_file_hugepages(file))
1310 len = ALIGN(len, huge_page_size(hstate_file(file)));
1311 retval = -EINVAL;
1312 if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
1313 goto out_fput;
1314 } else if (flags & MAP_HUGETLB) {
1315 struct user_struct *user = NULL;
1316 struct hstate *hs;
1317
1318 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK);
1319 if (!hs)
1320 return -EINVAL;
1321
1322 len = ALIGN(len, huge_page_size(hs));
1323
1324
1325
1326
1327
1328
1329 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1330 VM_NORESERVE,
1331 &user, HUGETLB_ANONHUGE_INODE,
1332 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1333 if (IS_ERR(file))
1334 return PTR_ERR(file);
1335 }
1336
1337 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1338
1339 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1340out_fput:
1341 if (file)
1342 fput(file);
1343 return retval;
1344}
1345
1346#ifdef __ARCH_WANT_SYS_OLD_MMAP
1347struct mmap_arg_struct {
1348 unsigned long addr;
1349 unsigned long len;
1350 unsigned long prot;
1351 unsigned long flags;
1352 unsigned long fd;
1353 unsigned long offset;
1354};
1355
1356SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1357{
1358 struct mmap_arg_struct a;
1359
1360 if (copy_from_user(&a, arg, sizeof(a)))
1361 return -EFAULT;
1362 if (offset_in_page(a.offset))
1363 return -EINVAL;
1364
1365 return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1366 a.offset >> PAGE_SHIFT);
1367}
1368#endif

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events.  If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
1376int vma_wants_writenotify(struct vm_area_struct *vma)
1377{
1378 vm_flags_t vm_flags = vma->vm_flags;
1379 const struct vm_operations_struct *vm_ops = vma->vm_ops;
1380
1381
1382 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1383 return 0;
1384
1385
1386 if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
1387 return 1;
1388
1389
1390
1391 if (pgprot_val(vma->vm_page_prot) !=
1392 pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
1393 return 0;
1394
1395
1396 if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
1397 return 1;
1398
1399
1400 if (vm_flags & VM_PFNMAP)
1401 return 0;
1402
1403
1404 return vma->vm_file && vma->vm_file->f_mapping &&
1405 mapping_cap_account_dirty(vma->vm_file->f_mapping);
1406}

/*
 * We account the memory if it's a private writable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
1412static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1413{
1414
1415
1416
1417
1418 if (file && is_file_hugepages(file))
1419 return 0;
1420
1421 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1422}
1423
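/*
 * mmap_region() does the real work of do_mmap(): address-space limit
 * checks, clearing any old mappings in the range, charging accountable
 * memory, attempting a vma_merge(), and otherwise allocating and
 * linking a new vma (calling the file's ->mmap() or shmem_zero_setup()
 * as appropriate).
 */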
1424unsigned long mmap_region(struct file *file, unsigned long addr,
1425 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
1426{
1427 struct mm_struct *mm = current->mm;
1428 struct vm_area_struct *vma, *prev;
1429 int error;
1430 struct rb_node **rb_link, *rb_parent;
1431 unsigned long charged = 0;
1432
1433
1434 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
1435 unsigned long nr_pages;
1436
1437
1438
1439
1440
1441 nr_pages = count_vma_pages_range(mm, addr, addr + len);
1442
1443 if (!may_expand_vm(mm, vm_flags,
1444 (len >> PAGE_SHIFT) - nr_pages))
1445 return -ENOMEM;
1446 }
1447
1448
1449 while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
1450 &rb_parent)) {
1451 if (do_munmap(mm, addr, len))
1452 return -ENOMEM;
1453 }
1454
1455
1456
1457
1458 if (accountable_mapping(file, vm_flags)) {
1459 charged = len >> PAGE_SHIFT;
1460 if (security_vm_enough_memory_mm(mm, charged))
1461 return -ENOMEM;
1462 vm_flags |= VM_ACCOUNT;
1463 }
1464
1465
1466
1467
1468 vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
1469 NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
1470 if (vma)
1471 goto out;
1472
1473
1474
1475
1476
1477
1478 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1479 if (!vma) {
1480 error = -ENOMEM;
1481 goto unacct_error;
1482 }
1483
1484 vma->vm_mm = mm;
1485 vma->vm_start = addr;
1486 vma->vm_end = addr + len;
1487 vma->vm_flags = vm_flags;
1488 vma->vm_page_prot = vm_get_page_prot(vm_flags);
1489 vma->vm_pgoff = pgoff;
1490 INIT_LIST_HEAD(&vma->anon_vma_chain);
1491
1492 if (file) {
1493 if (vm_flags & VM_DENYWRITE) {
1494 error = deny_write_access(file);
1495 if (error)
1496 goto free_vma;
1497 }
1498 if (vm_flags & VM_SHARED) {
1499 error = mapping_map_writable(file->f_mapping);
1500 if (error)
1501 goto allow_write_and_free_vma;
1502 }
1503
1504
1505
1506
1507
1508
1509 vma->vm_file = get_file(file);
1510 error = file->f_op->mmap(file, vma);
1511 if (error)
1512 goto unmap_and_free_vma;
1513
1514
1515
1516
1517
1518
1519
1520
1521 WARN_ON_ONCE(addr != vma->vm_start);
1522
1523 addr = vma->vm_start;
1524 vm_flags = vma->vm_flags;
1525 } else if (vm_flags & VM_SHARED) {
1526 error = shmem_zero_setup(vma);
1527 if (error)
1528 goto free_vma;
1529 }
1530
1531 vma_link(mm, vma, prev, rb_link, rb_parent);
1532
1533 if (file) {
1534 if (vm_flags & VM_SHARED)
1535 mapping_unmap_writable(file->f_mapping);
1536 if (vm_flags & VM_DENYWRITE)
1537 allow_write_access(file);
1538 }
1539 file = vma->vm_file;
1540out:
1541 perf_event_mmap(vma);
1542
1543 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
1544 if (vm_flags & VM_LOCKED) {
1545 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
1546 vma == get_gate_vma(current->mm)))
1547 mm->locked_vm += (len >> PAGE_SHIFT);
1548 else
1549 vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
1550 }
1551
1552 if (file)
1553 uprobe_mmap(vma);

	/*
	 * New (or expanded) vma always gets soft dirty status.
	 * Otherwise user-space soft-dirty page tracker won't
	 * be able to distinguish the situation when a vma area is
	 * unmapped and then mapped again in place (which must be
	 * treated as a completely new data area).
	 */
1562 vma->vm_flags |= VM_SOFTDIRTY;
1563
1564 vma_set_page_prot(vma);
1565
1566 return addr;
1567
1568unmap_and_free_vma:
1569 vma->vm_file = NULL;
1570 fput(file);
1571
1572
1573 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1574 charged = 0;
1575 if (vm_flags & VM_SHARED)
1576 mapping_unmap_writable(file->f_mapping);
1577allow_write_and_free_vma:
1578 if (vm_flags & VM_DENYWRITE)
1579 allow_write_access(file);
1580free_vma:
1581 kmem_cache_free(vm_area_cachep, vma);
1582unacct_error:
1583 if (charged)
1584 vm_unacct_memory(charged);
1585 return error;
1586}
1587
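/*
 * unmapped_area() implements the bottom-up half of vm_unmapped_area():
 * it returns the lowest gap of at least info->length bytes between
 * info->low_limit and info->high_limit that also satisfies the
 * requested alignment (align_mask/align_offset), or -ENOMEM.
 */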
1588unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1589{
	/*
	 * We implement the search by looking for an rbtree node that
	 * records a useful gap of the desired length within its subtree,
	 * and then descending to the lowest-address such gap that
	 * satisfies the low/high limits.
	 */
1598 struct mm_struct *mm = current->mm;
1599 struct vm_area_struct *vma;
1600 unsigned long length, low_limit, high_limit, gap_start, gap_end;
1601
1602
1603 length = info->length + info->align_mask;
1604 if (length < info->length)
1605 return -ENOMEM;
1606
1607
1608 if (info->high_limit < length)
1609 return -ENOMEM;
1610 high_limit = info->high_limit - length;
1611
1612 if (info->low_limit > high_limit)
1613 return -ENOMEM;
1614 low_limit = info->low_limit + length;
1615
1616
1617 if (RB_EMPTY_ROOT(&mm->mm_rb))
1618 goto check_highest;
1619 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1620 if (vma->rb_subtree_gap < length)
1621 goto check_highest;
1622
1623 while (true) {
1624
1625 gap_end = vma->vm_start;
1626 if (gap_end >= low_limit && vma->vm_rb.rb_left) {
1627 struct vm_area_struct *left =
1628 rb_entry(vma->vm_rb.rb_left,
1629 struct vm_area_struct, vm_rb);
1630 if (left->rb_subtree_gap >= length) {
1631 vma = left;
1632 continue;
1633 }
1634 }
1635
1636 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1637check_current:
1638
1639 if (gap_start > high_limit)
1640 return -ENOMEM;
1641 if (gap_end >= low_limit && gap_end - gap_start >= length)
1642 goto found;
1643
1644
1645 if (vma->vm_rb.rb_right) {
1646 struct vm_area_struct *right =
1647 rb_entry(vma->vm_rb.rb_right,
1648 struct vm_area_struct, vm_rb);
1649 if (right->rb_subtree_gap >= length) {
1650 vma = right;
1651 continue;
1652 }
1653 }
1654
1655
1656 while (true) {
1657 struct rb_node *prev = &vma->vm_rb;
1658 if (!rb_parent(prev))
1659 goto check_highest;
1660 vma = rb_entry(rb_parent(prev),
1661 struct vm_area_struct, vm_rb);
1662 if (prev == vma->vm_rb.rb_left) {
1663 gap_start = vma->vm_prev->vm_end;
1664 gap_end = vma->vm_start;
1665 goto check_current;
1666 }
1667 }
1668 }
1669
1670check_highest:
1671
1672 gap_start = mm->highest_vm_end;
1673 gap_end = ULONG_MAX;
1674 if (gap_start > high_limit)
1675 return -ENOMEM;
1676
1677found:
1678
1679 if (gap_start < info->low_limit)
1680 gap_start = info->low_limit;
1681
1682
1683 gap_start += (info->align_offset - gap_start) & info->align_mask;
1684
1685 VM_BUG_ON(gap_start + info->length > info->high_limit);
1686 VM_BUG_ON(gap_start + info->length > gap_end);
1687 return gap_start;
1688}
1689
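/*
 * unmapped_area_topdown() is the mirror image of unmapped_area(): it
 * searches for the highest gap below info->high_limit that satisfies
 * the same length, limit and alignment constraints, walking the
 * rb_subtree_gap-augmented tree from the right.
 */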
1690unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1691{
1692 struct mm_struct *mm = current->mm;
1693 struct vm_area_struct *vma;
1694 unsigned long length, low_limit, high_limit, gap_start, gap_end;
1695
1696
1697 length = info->length + info->align_mask;
1698 if (length < info->length)
1699 return -ENOMEM;
1700
1701
1702
1703
1704
1705 gap_end = info->high_limit;
1706 if (gap_end < length)
1707 return -ENOMEM;
1708 high_limit = gap_end - length;
1709
1710 if (info->low_limit > high_limit)
1711 return -ENOMEM;
1712 low_limit = info->low_limit + length;
1713
1714
1715 gap_start = mm->highest_vm_end;
1716 if (gap_start <= high_limit)
1717 goto found_highest;
1718
1719
1720 if (RB_EMPTY_ROOT(&mm->mm_rb))
1721 return -ENOMEM;
1722 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1723 if (vma->rb_subtree_gap < length)
1724 return -ENOMEM;
1725
1726 while (true) {
1727
1728 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1729 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1730 struct vm_area_struct *right =
1731 rb_entry(vma->vm_rb.rb_right,
1732 struct vm_area_struct, vm_rb);
1733 if (right->rb_subtree_gap >= length) {
1734 vma = right;
1735 continue;
1736 }
1737 }
1738
1739check_current:
1740
1741 gap_end = vma->vm_start;
1742 if (gap_end < low_limit)
1743 return -ENOMEM;
1744 if (gap_start <= high_limit && gap_end - gap_start >= length)
1745 goto found;
1746
1747
1748 if (vma->vm_rb.rb_left) {
1749 struct vm_area_struct *left =
1750 rb_entry(vma->vm_rb.rb_left,
1751 struct vm_area_struct, vm_rb);
1752 if (left->rb_subtree_gap >= length) {
1753 vma = left;
1754 continue;
1755 }
1756 }
1757
1758
1759 while (true) {
1760 struct rb_node *prev = &vma->vm_rb;
1761 if (!rb_parent(prev))
1762 return -ENOMEM;
1763 vma = rb_entry(rb_parent(prev),
1764 struct vm_area_struct, vm_rb);
1765 if (prev == vma->vm_rb.rb_right) {
1766 gap_start = vma->vm_prev ?
1767 vma->vm_prev->vm_end : 0;
1768 goto check_current;
1769 }
1770 }
1771 }
1772
1773found:
1774
1775 if (gap_end > info->high_limit)
1776 gap_end = info->high_limit;
1777
1778found_highest:
1779
1780 gap_end -= info->length;
1781 gap_end -= (gap_end - info->align_offset) & info->align_mask;
1782
1783 VM_BUG_ON(gap_end < info->low_limit);
1784 VM_BUG_ON(gap_end < gap_start);
1785 return gap_end;
1786}

/*
 * Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
1799#ifndef HAVE_ARCH_UNMAPPED_AREA
1800unsigned long
1801arch_get_unmapped_area(struct file *filp, unsigned long addr,
1802 unsigned long len, unsigned long pgoff, unsigned long flags)
1803{
1804 struct mm_struct *mm = current->mm;
1805 struct vm_area_struct *vma;
1806 struct vm_unmapped_area_info info;
1807
1808 if (len > TASK_SIZE - mmap_min_addr)
1809 return -ENOMEM;
1810
1811 if (flags & MAP_FIXED)
1812 return addr;
1813
1814 if (addr) {
1815 addr = PAGE_ALIGN(addr);
1816 vma = find_vma(mm, addr);
1817 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
1818 (!vma || addr + len <= vma->vm_start))
1819 return addr;
1820 }
1821
1822 info.flags = 0;
1823 info.length = len;
1824 info.low_limit = mm->mmap_base;
1825 info.high_limit = TASK_SIZE;
1826 info.align_mask = 0;
1827 return vm_unmapped_area(&info);
1828}
1829#endif

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
1835#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1836unsigned long
1837arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1838 const unsigned long len, const unsigned long pgoff,
1839 const unsigned long flags)
1840{
1841 struct vm_area_struct *vma;
1842 struct mm_struct *mm = current->mm;
1843 unsigned long addr = addr0;
1844 struct vm_unmapped_area_info info;
1845
1846
1847 if (len > TASK_SIZE - mmap_min_addr)
1848 return -ENOMEM;
1849
1850 if (flags & MAP_FIXED)
1851 return addr;
1852
1853
1854 if (addr) {
1855 addr = PAGE_ALIGN(addr);
1856 vma = find_vma(mm, addr);
1857 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
1858 (!vma || addr + len <= vma->vm_start))
1859 return addr;
1860 }
1861
1862 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1863 info.length = len;
1864 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
1865 info.high_limit = mm->mmap_base;
1866 info.align_mask = 0;
1867 addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
1875 if (offset_in_page(addr)) {
1876 VM_BUG_ON(addr != -ENOMEM);
1877 info.flags = 0;
1878 info.low_limit = TASK_UNMAPPED_BASE;
1879 info.high_limit = TASK_SIZE;
1880 addr = vm_unmapped_area(&info);
1881 }
1882
1883 return addr;
1884}
1885#endif
1886
1887unsigned long
1888get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1889 unsigned long pgoff, unsigned long flags)
1890{
1891 unsigned long (*get_area)(struct file *, unsigned long,
1892 unsigned long, unsigned long, unsigned long);
1893
1894 unsigned long error = arch_mmap_check(addr, len, flags);
1895 if (error)
1896 return error;
1897
1898
1899 if (len > TASK_SIZE)
1900 return -ENOMEM;
1901
1902 get_area = current->mm->get_unmapped_area;
1903 if (file && file->f_op->get_unmapped_area)
1904 get_area = file->f_op->get_unmapped_area;
1905 addr = get_area(file, addr, len, pgoff, flags);
1906 if (IS_ERR_VALUE(addr))
1907 return addr;
1908
1909 if (addr > TASK_SIZE - len)
1910 return -ENOMEM;
1911 if (offset_in_page(addr))
1912 return -EINVAL;
1913
1914 addr = arch_rebalance_pgtables(addr, len);
1915 error = security_mmap_addr(addr);
1916 return error ? error : addr;
1917}
1918
1919EXPORT_SYMBOL(get_unmapped_area);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1922struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1923{
1924 struct rb_node *rb_node;
1925 struct vm_area_struct *vma;
1926
1927
1928 vma = vmacache_find(mm, addr);
1929 if (likely(vma))
1930 return vma;
1931
1932 rb_node = mm->mm_rb.rb_node;
1933
1934 while (rb_node) {
1935 struct vm_area_struct *tmp;
1936
1937 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1938
1939 if (tmp->vm_end > addr) {
1940 vma = tmp;
1941 if (tmp->vm_start <= addr)
1942 break;
1943 rb_node = rb_node->rb_left;
1944 } else
1945 rb_node = rb_node->rb_right;
1946 }
1947
1948 if (vma)
1949 vmacache_update(addr, vma);
1950 return vma;
1951}
1952
1953EXPORT_SYMBOL(find_vma);

/*
 * Same as find_vma, but also return a pointer to the previous VMA
 * in *pprev.
 */
1958struct vm_area_struct *
1959find_vma_prev(struct mm_struct *mm, unsigned long addr,
1960 struct vm_area_struct **pprev)
1961{
1962 struct vm_area_struct *vma;
1963
1964 vma = find_vma(mm, addr);
1965 if (vma) {
1966 *pprev = vma->vm_prev;
1967 } else {
1968 struct rb_node *rb_node = mm->mm_rb.rb_node;
1969 *pprev = NULL;
1970 while (rb_node) {
1971 *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1972 rb_node = rb_node->rb_right;
1973 }
1974 }
1975 return vma;
1976}
1977

/*
 * Verify that the expansion of the stack vma by 'grow' pages is
 * acceptable: check the address-space and stack rlimits, the mlock
 * limit for locked stacks and hugepage-only ranges, and charge the
 * memory.  Returns 0 if the stack may grow.
 */
1983static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
1984{
1985 struct mm_struct *mm = vma->vm_mm;
1986 struct rlimit *rlim = current->signal->rlim;
1987 unsigned long new_start, actual_size;
1988
1989
1990 if (!may_expand_vm(mm, vma->vm_flags, grow))
1991 return -ENOMEM;
1992
1993
1994 actual_size = size;
1995 if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
1996 actual_size -= PAGE_SIZE;
1997 if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
1998 return -ENOMEM;
1999
2000
2001 if (vma->vm_flags & VM_LOCKED) {
2002 unsigned long locked;
2003 unsigned long limit;
2004 locked = mm->locked_vm + grow;
2005 limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
2006 limit >>= PAGE_SHIFT;
2007 if (locked > limit && !capable(CAP_IPC_LOCK))
2008 return -ENOMEM;
2009 }
2010
2011
2012 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2013 vma->vm_end - size;
2014 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2015 return -EFAULT;
2016
2017
2018
2019
2020
2021 if (security_vm_enough_memory_mm(mm, grow))
2022 return -ENOMEM;
2023
2024 return 0;
2025}
2026
2027#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)

/*
 * vma is the last one with address > vma->vm_end.  Have to extend vma.
 * (Used by architectures with upward-growing stacks, and by IA-64 for
 * its register backing store.)
 */
2032int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2033{
2034 struct mm_struct *mm = vma->vm_mm;
2035 int error = 0;
2036
2037 if (!(vma->vm_flags & VM_GROWSUP))
2038 return -EFAULT;
2039
2040
2041 if (address < PAGE_ALIGN(address+4))
2042 address = PAGE_ALIGN(address+4);
2043 else
2044 return -ENOMEM;
2045
2046
2047 if (unlikely(anon_vma_prepare(vma)))
2048 return -ENOMEM;
2049
2050
2051
2052
2053
2054
2055 anon_vma_lock_write(vma->anon_vma);
2056
2057
2058 if (address > vma->vm_end) {
2059 unsigned long size, grow;
2060
2061 size = address - vma->vm_start;
2062 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2063
2064 error = -ENOMEM;
2065 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2066 error = acct_stack_growth(vma, size, grow);
2067 if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_sem
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
2079 spin_lock(&mm->page_table_lock);
2080 if (vma->vm_flags & VM_LOCKED)
2081 mm->locked_vm += grow;
2082 vm_stat_account(mm, vma->vm_flags, grow);
2083 anon_vma_interval_tree_pre_update_vma(vma);
2084 vma->vm_end = address;
2085 anon_vma_interval_tree_post_update_vma(vma);
2086 if (vma->vm_next)
2087 vma_gap_update(vma->vm_next);
2088 else
2089 mm->highest_vm_end = address;
2090 spin_unlock(&mm->page_table_lock);
2091
2092 perf_event_mmap(vma);
2093 }
2094 }
2095 }
2096 anon_vma_unlock_write(vma->anon_vma);
2097 khugepaged_enter_vma_merge(vma, vma->vm_flags);
2098 validate_mm(mm);
2099 return error;
2100}
2101#endif

/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 */
2106int expand_downwards(struct vm_area_struct *vma,
2107 unsigned long address)
2108{
2109 struct mm_struct *mm = vma->vm_mm;
2110 int error;
2111
2112 address &= PAGE_MASK;
2113 error = security_mmap_addr(address);
2114 if (error)
2115 return error;
2116
2117
2118 if (unlikely(anon_vma_prepare(vma)))
2119 return -ENOMEM;
2120
2121
2122
2123
2124
2125
2126 anon_vma_lock_write(vma->anon_vma);
2127
2128
2129 if (address < vma->vm_start) {
2130 unsigned long size, grow;
2131
2132 size = vma->vm_end - address;
2133 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2134
2135 error = -ENOMEM;
2136 if (grow <= vma->vm_pgoff) {
2137 error = acct_stack_growth(vma, size, grow);
2138 if (!error) {
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150 spin_lock(&mm->page_table_lock);
2151 if (vma->vm_flags & VM_LOCKED)
2152 mm->locked_vm += grow;
2153 vm_stat_account(mm, vma->vm_flags, grow);
2154 anon_vma_interval_tree_pre_update_vma(vma);
2155 vma->vm_start = address;
2156 vma->vm_pgoff -= grow;
2157 anon_vma_interval_tree_post_update_vma(vma);
2158 vma_gap_update(vma);
2159 spin_unlock(&mm->page_table_lock);
2160
2161 perf_event_mmap(vma);
2162 }
2163 }
2164 }
2165 anon_vma_unlock_write(vma->anon_vma);
2166 khugepaged_enter_vma_merge(vma, vma->vm_flags);
2167 validate_mm(mm);
2168 return error;
2169}

/*
 * Note how expand_stack() refuses to grow a stack right up against an
 * adjacent mapping: if the page being faulted in is directly next to a
 * neighbouring vma, the expansion is only allowed when that neighbour
 * is itself a stack growing in the same direction.  This keeps a guard
 * page between a stack and any foreign mapping.
 */
2182#ifdef CONFIG_STACK_GROWSUP
2183int expand_stack(struct vm_area_struct *vma, unsigned long address)
2184{
2185 struct vm_area_struct *next;
2186
2187 address &= PAGE_MASK;
2188 next = vma->vm_next;
2189 if (next && next->vm_start == address + PAGE_SIZE) {
2190 if (!(next->vm_flags & VM_GROWSUP))
2191 return -ENOMEM;
2192 }
2193 return expand_upwards(vma, address);
2194}
2195
2196struct vm_area_struct *
2197find_extend_vma(struct mm_struct *mm, unsigned long addr)
2198{
2199 struct vm_area_struct *vma, *prev;
2200
2201 addr &= PAGE_MASK;
2202 vma = find_vma_prev(mm, addr, &prev);
2203 if (vma && (vma->vm_start <= addr))
2204 return vma;
2205 if (!prev || expand_stack(prev, addr))
2206 return NULL;
2207 if (prev->vm_flags & VM_LOCKED)
2208 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2209 return prev;
2210}
2211#else
2212int expand_stack(struct vm_area_struct *vma, unsigned long address)
2213{
2214 struct vm_area_struct *prev;
2215
2216 address &= PAGE_MASK;
2217 prev = vma->vm_prev;
2218 if (prev && prev->vm_end == address) {
2219 if (!(prev->vm_flags & VM_GROWSDOWN))
2220 return -ENOMEM;
2221 }
2222 return expand_downwards(vma, address);
2223}
2224
2225struct vm_area_struct *
2226find_extend_vma(struct mm_struct *mm, unsigned long addr)
2227{
2228 struct vm_area_struct *vma;
2229 unsigned long start;
2230
2231 addr &= PAGE_MASK;
2232 vma = find_vma(mm, addr);
2233 if (!vma)
2234 return NULL;
2235 if (vma->vm_start <= addr)
2236 return vma;
2237 if (!(vma->vm_flags & VM_GROWSDOWN))
2238 return NULL;
2239 start = vma->vm_start;
2240 if (expand_stack(vma, addr))
2241 return NULL;
2242 if (vma->vm_flags & VM_LOCKED)
2243 populate_vma_page_range(vma, addr, start, NULL);
2244 return vma;
2245}
2246#endif
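
/*
 * find_extend_vma(), defined above for both stack-growth directions,
 * looks up the vma covering an address and, when the address falls in
 * the growth area of an adjacent stack vma, expands that stack (and
 * populates it if VM_LOCKED) before returning it.
 */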
2247
2248EXPORT_SYMBOL_GPL(find_extend_vma);

/*
 * Ok - we have the memory areas we should free on the vma list,
 * so release them, and do the vma updates.
 *
 * Called with the mm semaphore held.
 */
2256static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2257{
2258 unsigned long nr_accounted = 0;
2259
2260
2261 update_hiwater_vm(mm);
2262 do {
2263 long nrpages = vma_pages(vma);
2264
2265 if (vma->vm_flags & VM_ACCOUNT)
2266 nr_accounted += nrpages;
2267 vm_stat_account(mm, vma->vm_flags, -nrpages);
2268 vma = remove_vma(vma);
2269 } while (vma);
2270 vm_unacct_memory(nr_accounted);
2271 validate_mm(mm);
2272}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
2279static void unmap_region(struct mm_struct *mm,
2280 struct vm_area_struct *vma, struct vm_area_struct *prev,
2281 unsigned long start, unsigned long end)
2282{
2283 struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
2284 struct mmu_gather tlb;
2285
2286 lru_add_drain();
2287 tlb_gather_mmu(&tlb, mm, start, end);
2288 update_hiwater_rss(mm);
2289 unmap_vmas(&tlb, vma, start, end);
2290 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2291 next ? next->vm_start : USER_PGTABLES_CEILING);
2292 tlb_finish_mmu(&tlb, start, end);
2293}

/*
 * Create a list of vma's touched by the unmap, removing them from the
 * mm's vma list as we go.
 */
2299static void
2300detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2301 struct vm_area_struct *prev, unsigned long end)
2302{
2303 struct vm_area_struct **insertion_point;
2304 struct vm_area_struct *tail_vma = NULL;
2305
2306 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
2307 vma->vm_prev = NULL;
2308 do {
2309 vma_rb_erase(vma, &mm->mm_rb);
2310 mm->map_count--;
2311 tail_vma = vma;
2312 vma = vma->vm_next;
2313 } while (vma && vma->vm_start < end);
2314 *insertion_point = vma;
2315 if (vma) {
2316 vma->vm_prev = prev;
2317 vma_gap_update(vma);
2318 } else
2319 mm->highest_vm_end = prev ? prev->vm_end : 0;
2320 tail_vma->vm_next = NULL;
2321
2322
2323 vmacache_invalidate(mm);
2324}

/*
 * __split_vma() bypasses the sysctl_max_map_count check; callers that
 * can create an extra mapping must use split_vma() below instead.
 */
2330static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2331 unsigned long addr, int new_below)
2332{
2333 struct vm_area_struct *new;
2334 int err;
2335
2336 if (is_vm_hugetlb_page(vma) && (addr &
2337 ~(huge_page_mask(hstate_vma(vma)))))
2338 return -EINVAL;
2339
2340 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2341 if (!new)
2342 return -ENOMEM;
2343
2344
2345 *new = *vma;
2346
2347 INIT_LIST_HEAD(&new->anon_vma_chain);
2348
2349 if (new_below)
2350 new->vm_end = addr;
2351 else {
2352 new->vm_start = addr;
2353 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2354 }
2355
2356 err = vma_dup_policy(vma, new);
2357 if (err)
2358 goto out_free_vma;
2359
2360 err = anon_vma_clone(new, vma);
2361 if (err)
2362 goto out_free_mpol;
2363
2364 if (new->vm_file)
2365 get_file(new->vm_file);
2366
2367 if (new->vm_ops && new->vm_ops->open)
2368 new->vm_ops->open(new);
2369
2370 if (new_below)
2371 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
2372 ((addr - new->vm_start) >> PAGE_SHIFT), new);
2373 else
2374 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
2375
2376
2377 if (!err)
2378 return 0;
2379
2380
2381 if (new->vm_ops && new->vm_ops->close)
2382 new->vm_ops->close(new);
2383 if (new->vm_file)
2384 fput(new->vm_file);
2385 unlink_anon_vmas(new);
2386 out_free_mpol:
2387 mpol_put(vma_policy(new));
2388 out_free_vma:
2389 kmem_cache_free(vm_area_cachep, new);
2390 return err;
2391}

/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
2397int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2398 unsigned long addr, int new_below)
2399{
2400 if (mm->map_count >= sysctl_max_map_count)
2401 return -ENOMEM;
2402
2403 return __split_vma(mm, vma, addr, new_below);
2404}

/*
 * Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 */
2411int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2412{
2413 unsigned long end;
2414 struct vm_area_struct *vma, *prev, *last;
2415
2416 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2417 return -EINVAL;
2418
2419 len = PAGE_ALIGN(len);
2420 if (len == 0)
2421 return -EINVAL;
2422
2423
2424 vma = find_vma(mm, start);
2425 if (!vma)
2426 return 0;
2427 prev = vma->vm_prev;
2428
2429
2430
2431 end = start + len;
2432 if (vma->vm_start >= end)
2433 return 0;
2434
2435
2436
2437
2438
2439
2440
2441
2442 if (start > vma->vm_start) {
2443 int error;
2444
2445
2446
2447
2448
2449
2450 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2451 return -ENOMEM;
2452
2453 error = __split_vma(mm, vma, start, 0);
2454 if (error)
2455 return error;
2456 prev = vma;
2457 }
2458
2459
2460 last = find_vma(mm, end);
2461 if (last && end > last->vm_start) {
2462 int error = __split_vma(mm, last, end, 1);
2463 if (error)
2464 return error;
2465 }
2466 vma = prev ? prev->vm_next : mm->mmap;
2467
2468
2469
2470
2471 if (mm->locked_vm) {
2472 struct vm_area_struct *tmp = vma;
2473 while (tmp && tmp->vm_start < end) {
2474 if (tmp->vm_flags & VM_LOCKED) {
2475 mm->locked_vm -= vma_pages(tmp);
2476 munlock_vma_pages_all(tmp);
2477 }
2478 tmp = tmp->vm_next;
2479 }
2480 }
2481
2482
2483
2484
2485 detach_vmas_to_be_unmapped(mm, vma, prev, end);
2486 unmap_region(mm, vma, prev, start, end);
2487
2488 arch_unmap(mm, vma, start, end);
2489
2490
2491 remove_vma_list(mm, vma);
2492
2493 return 0;
2494}
2495
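/*
 * vm_munmap() is the locked wrapper around do_munmap(): it takes
 * mmap_sem for writing for the duration of the unmap.
 */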
2496int vm_munmap(unsigned long start, size_t len)
2497{
2498 int ret;
2499 struct mm_struct *mm = current->mm;
2500
2501 down_write(&mm->mmap_sem);
2502 ret = do_munmap(mm, start, len);
2503 up_write(&mm->mmap_sem);
2504 return ret;
2505}
2506EXPORT_SYMBOL(vm_munmap);
2507
2508SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2509{
2510 profile_munmap(addr);
2511 return vm_munmap(addr, len);
2512}

/*
 * Emulation of deprecated remap_file_pages() syscall.
 */
2518SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2519 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2520{
2521
2522 struct mm_struct *mm = current->mm;
2523 struct vm_area_struct *vma;
2524 unsigned long populate = 0;
2525 unsigned long ret = -EINVAL;
2526 struct file *file;
2527
2528 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
2529 current->comm, current->pid);
2530
2531 if (prot)
2532 return ret;
2533 start = start & PAGE_MASK;
2534 size = size & PAGE_MASK;
2535
2536 if (start + size <= start)
2537 return ret;
2538
2539
2540 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2541 return ret;
2542
2543 down_write(&mm->mmap_sem);
2544 vma = find_vma(mm, start);
2545
2546 if (!vma || !(vma->vm_flags & VM_SHARED))
2547 goto out;
2548
2549 if (start < vma->vm_start)
2550 goto out;
2551
2552 if (start + size > vma->vm_end) {
2553 struct vm_area_struct *next;
2554
2555 for (next = vma->vm_next; next; next = next->vm_next) {
2556
2557 if (next->vm_start != next->vm_prev->vm_end)
2558 goto out;
2559
2560 if (next->vm_file != vma->vm_file)
2561 goto out;
2562
2563 if (next->vm_flags != vma->vm_flags)
2564 goto out;
2565
2566 if (start + size <= next->vm_end)
2567 break;
2568 }
2569
2570 if (!next)
2571 goto out;
2572 }
2573
2574 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
2575 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
2576 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
2577
2578 flags &= MAP_NONBLOCK;
2579 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2580 if (vma->vm_flags & VM_LOCKED) {
2581 struct vm_area_struct *tmp;
2582 flags |= MAP_LOCKED;
2583
2584
2585 for (tmp = vma; tmp->vm_start >= start + size;
2586 tmp = tmp->vm_next) {
2587 munlock_vma_pages_range(tmp,
2588 max(tmp->vm_start, start),
2589 min(tmp->vm_end, start + size));
2590 }
2591 }
2592
2593 file = get_file(vma->vm_file);
2594 ret = do_mmap_pgoff(vma->vm_file, start, size,
2595 prot, flags, pgoff, &populate);
2596 fput(file);
2597out:
2598 up_write(&mm->mmap_sem);
2599 if (populate)
2600 mm_populate(ret, populate);
2601 if (!IS_ERR_VALUE(ret))
2602 ret = 0;
2603 return ret;
2604}
2605
2606static inline void verify_mm_writelocked(struct mm_struct *mm)
2607{
2608#ifdef CONFIG_DEBUG_VM
2609 if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2610 WARN_ON(1);
2611 up_read(&mm->mmap_sem);
2612 }
2613#endif
2614}
2615

/*
 * This is really a simplified "do_mmap": it only handles private
 * anonymous mappings.  The caller must hold mmap_sem for writing
 * (see verify_mm_writelocked() above).
 */
2621static unsigned long do_brk(unsigned long addr, unsigned long len)
2622{
2623 struct mm_struct *mm = current->mm;
2624 struct vm_area_struct *vma, *prev;
2625 unsigned long flags;
2626 struct rb_node **rb_link, *rb_parent;
2627 pgoff_t pgoff = addr >> PAGE_SHIFT;
2628 int error;
2629
2630 len = PAGE_ALIGN(len);
2631 if (!len)
2632 return addr;
2633
2634 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2635
2636 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2637 if (offset_in_page(error))
2638 return error;
2639
2640 error = mlock_future_check(mm, mm->def_flags, len);
2641 if (error)
2642 return error;

	/*
	 * mm->mmap_sem is required to protect against another thread
	 * changing the mappings in case we sleep.
	 */
2648 verify_mm_writelocked(mm);

	/*
	 * Clear old maps.  This also does some error checking for us.
	 */
2653 while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
2654 &rb_parent)) {
2655 if (do_munmap(mm, addr, len))
2656 return -ENOMEM;
2657 }

	/* Check against address space limits *and* RLIMIT_DATA */
2660 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
2661 return -ENOMEM;
2662
2663 if (mm->map_count > sysctl_max_map_count)
2664 return -ENOMEM;
2665
2666 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2667 return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */
2670 vma = vma_merge(mm, prev, addr, addr + len, flags,
2671 NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
2672 if (vma)
2673 goto out;

	/*
	 * Create a vma struct for an anonymous mapping.
	 */
2678 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2679 if (!vma) {
2680 vm_unacct_memory(len >> PAGE_SHIFT);
2681 return -ENOMEM;
2682 }
2683
2684 INIT_LIST_HEAD(&vma->anon_vma_chain);
2685 vma->vm_mm = mm;
2686 vma->vm_start = addr;
2687 vma->vm_end = addr + len;
2688 vma->vm_pgoff = pgoff;
2689 vma->vm_flags = flags;
2690 vma->vm_page_prot = vm_get_page_prot(flags);
2691 vma_link(mm, vma, prev, rb_link, rb_parent);
2692out:
2693 perf_event_mmap(vma);
2694 mm->total_vm += len >> PAGE_SHIFT;
2695 mm->data_vm += len >> PAGE_SHIFT;
2696 if (flags & VM_LOCKED)
2697 mm->locked_vm += (len >> PAGE_SHIFT);
2698 vma->vm_flags |= VM_SOFTDIRTY;
2699 return addr;
2700}
2701
2702unsigned long vm_brk(unsigned long addr, unsigned long len)
2703{
2704 struct mm_struct *mm = current->mm;
2705 unsigned long ret;
2706 bool populate;
2707
2708 down_write(&mm->mmap_sem);
2709 ret = do_brk(addr, len);
2710 populate = ((mm->def_flags & VM_LOCKED) != 0);
2711 up_write(&mm->mmap_sem);
2712 if (populate)
2713 mm_populate(addr, len);
2714 return ret;
2715}
2716EXPORT_SYMBOL(vm_brk);
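
/*
 * Example (a sketch; bss_start/bss_len are a caller's own bookkeeping, not
 * names used here): binary loaders use vm_brk() to set up anonymous,
 * zero-filled regions such as an executable's BSS, with bss_start already
 * page-aligned:
 *
 *	unsigned long ret = vm_brk(bss_start, bss_len);
 *	if (IS_ERR_VALUE(ret))
 *		return ret;
 *
 * vm_brk() takes mmap_sem itself and, as shown above, populates the range
 * immediately when mm->def_flags has VM_LOCKED set.
 */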
2717
/* Release all mmaps. */
2719void exit_mmap(struct mm_struct *mm)
2720{
2721 struct mmu_gather tlb;
2722 struct vm_area_struct *vma;
2723 unsigned long nr_accounted = 0;
2724
	/* mm's last user has gone, and it is about to be pulled down */
2726 mmu_notifier_release(mm);
2727
2728 if (mm->locked_vm) {
2729 vma = mm->mmap;
2730 while (vma) {
2731 if (vma->vm_flags & VM_LOCKED)
2732 munlock_vma_pages_all(vma);
2733 vma = vma->vm_next;
2734 }
2735 }
2736
2737 arch_exit_mmap(mm);
2738
2739 vma = mm->mmap;
2740 if (!vma)
2741 return;
2742
2743 lru_add_drain();
2744 flush_cache_mm(mm);
2745 tlb_gather_mmu(&tlb, mm, 0, -1);
	/* use 0 and -1 here so that every VMA in the mm gets unmapped */
2748 unmap_vmas(&tlb, vma, 0, -1);
2749
2750 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
2751 tlb_finish_mmu(&tlb, 0, -1);

	/*
	 * Walk the list again, actually closing and freeing it,
	 * with preemption enabled, without holding any MM locks.
	 */
2757 while (vma) {
2758 if (vma->vm_flags & VM_ACCOUNT)
2759 nr_accounted += vma_pages(vma);
2760 vma = remove_vma(vma);
2761 }
2762 vm_unacct_memory(nr_accounted);
2763}

/*
 * Insert vm structure into the process's address-sorted list/rbtree,
 * and into the file's i_mmap interval tree if it maps a file.
 */
2769int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
2770{
2771 struct vm_area_struct *prev;
2772 struct rb_node **rb_link, *rb_parent;
2773
2774 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
2775 &prev, &rb_link, &rb_parent))
2776 return -ENOMEM;
2777 if ((vma->vm_flags & VM_ACCOUNT) &&
2778 security_vm_enough_memory_mm(mm, vma_pages(vma)))
2779 return -ENOMEM;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first fault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap_pgoff and in do_brk.
	 */
2793 if (vma_is_anonymous(vma)) {
2794 BUG_ON(vma->anon_vma);
2795 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2796 }
2797
2798 vma_link(mm, vma, prev, rb_link, rb_parent);
2799 return 0;
2800}
2801
/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
2806struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2807 unsigned long addr, unsigned long len, pgoff_t pgoff,
2808 bool *need_rmap_locks)
2809{
2810 struct vm_area_struct *vma = *vmap;
2811 unsigned long vma_start = vma->vm_start;
2812 struct mm_struct *mm = vma->vm_mm;
2813 struct vm_area_struct *new_vma, *prev;
2814 struct rb_node **rb_link, *rb_parent;
2815 bool faulted_in_anon_vma = true;
2816
	/*
	 * If the anonymous vma has not yet been faulted in, update the new
	 * pgoff to match the new location, to increase its chance of merging.
	 */
2821 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
2822 pgoff = addr >> PAGE_SHIFT;
2823 faulted_in_anon_vma = false;
2824 }
2825
2826 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
2827 return NULL;
2828 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2829 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
2830 vma->vm_userfaultfd_ctx);
2831 if (new_vma) {
		/*
		 * The source vma may have been merged into new_vma
		 */
2835 if (unlikely(vma_start >= new_vma->vm_start &&
2836 vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge() with
			 * ourselves during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to reset
			 * the dst vma->vm_pgoff to the destination address
			 * of the mremap to allow the merge to happen.
			 * mremap must change the vm_pgoff linearity between
			 * src and dst vmas (in turn preventing a vma_merge)
			 * to be safe.  It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
2849 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
2850 *vmap = vma = new_vma;
2851 }
2852 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
2853 } else {
2854 new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2855 if (!new_vma)
2856 goto out;
2857 *new_vma = *vma;
2858 new_vma->vm_start = addr;
2859 new_vma->vm_end = addr + len;
2860 new_vma->vm_pgoff = pgoff;
2861 if (vma_dup_policy(vma, new_vma))
2862 goto out_free_vma;
2863 INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2864 if (anon_vma_clone(new_vma, vma))
2865 goto out_free_mempol;
2866 if (new_vma->vm_file)
2867 get_file(new_vma->vm_file);
2868 if (new_vma->vm_ops && new_vma->vm_ops->open)
2869 new_vma->vm_ops->open(new_vma);
2870 vma_link(mm, new_vma, prev, rb_link, rb_parent);
2871 *need_rmap_locks = false;
2872 }
2873 return new_vma;
2874
2875out_free_mempol:
2876 mpol_put(vma_policy(new_vma));
2877out_free_vma:
2878 kmem_cache_free(vm_area_cachep, new_vma);
2879out:
2880 return NULL;
2881}
2882

/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages.
 */
2887bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
2888{
2889 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
2890 return false;
2891
2892 if (is_data_mapping(flags) &&
2893 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
2894 if (ignore_rlimit_data)
			pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Will be forbidden soon.\n",
2896 current->comm, current->pid,
2897 (mm->data_vm + npages) << PAGE_SHIFT,
2898 rlimit(RLIMIT_DATA));
2899 else
2900 return false;
2901 }
2902
2903 return true;
2904}
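
/*
 * Worked example (for illustration, assuming 4 KiB pages): with
 * RLIMIT_DATA set to 64 MiB, rlimit(RLIMIT_DATA) >> PAGE_SHIFT is
 * 16384 pages.  A data mapping that would push mm->data_vm past 16384
 * pages makes may_expand_vm() return false (the caller then fails with
 * -ENOMEM), unless ignore_rlimit_data is set, in which case only the
 * warning above is printed and the mapping is allowed.
 */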
2905
2906void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
2907{
2908 mm->total_vm += npages;
2909
2910 if (is_exec_mapping(flags))
2911 mm->exec_vm += npages;
2912 else if (is_stack_mapping(flags))
2913 mm->stack_vm += npages;
2914 else if (is_data_mapping(flags))
2915 mm->data_vm += npages;
2916}
2917
2918static int special_mapping_fault(struct vm_area_struct *vma,
2919 struct vm_fault *vmf);
2920
/*
 * Having a close hook prevents vma merging regardless of flags.
 */
2924static void special_mapping_close(struct vm_area_struct *vma)
2925{
2926}
2927
2928static const char *special_mapping_name(struct vm_area_struct *vma)
2929{
2930 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
2931}
2932
2933static const struct vm_operations_struct special_mapping_vmops = {
2934 .close = special_mapping_close,
2935 .fault = special_mapping_fault,
2936 .name = special_mapping_name,
2937};
2938
2939static const struct vm_operations_struct legacy_special_mapping_vmops = {
2940 .close = special_mapping_close,
2941 .fault = special_mapping_fault,
2942};
2943
2944static int special_mapping_fault(struct vm_area_struct *vma,
2945 struct vm_fault *vmf)
2946{
2947 pgoff_t pgoff;
2948 struct page **pages;
2949
2950 if (vma->vm_ops == &legacy_special_mapping_vmops) {
2951 pages = vma->vm_private_data;
2952 } else {
2953 struct vm_special_mapping *sm = vma->vm_private_data;
2954
2955 if (sm->fault)
2956 return sm->fault(sm, vma, vmf);
2957
2958 pages = sm->pages;
2959 }
2960
2961 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
2962 pgoff--;
2963
2964 if (*pages) {
2965 struct page *page = *pages;
2966 get_page(page);
2967 vmf->page = page;
2968 return 0;
2969 }
2970
2971 return VM_FAULT_SIGBUS;
2972}
2973
2974static struct vm_area_struct *__install_special_mapping(
2975 struct mm_struct *mm,
2976 unsigned long addr, unsigned long len,
2977 unsigned long vm_flags, void *priv,
2978 const struct vm_operations_struct *ops)
2979{
2980 int ret;
2981 struct vm_area_struct *vma;
2982
2983 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2984 if (unlikely(vma == NULL))
2985 return ERR_PTR(-ENOMEM);
2986
2987 INIT_LIST_HEAD(&vma->anon_vma_chain);
2988 vma->vm_mm = mm;
2989 vma->vm_start = addr;
2990 vma->vm_end = addr + len;
2991
2992 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
2993 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2994
2995 vma->vm_ops = ops;
2996 vma->vm_private_data = priv;
2997
2998 ret = insert_vm_struct(mm, vma);
2999 if (ret)
3000 goto out;
3001
3002 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3003
3004 perf_event_mmap(vma);
3005
3006 return vma;
3007
3008out:
3009 kmem_cache_free(vm_area_cachep, vma);
3010 return ERR_PTR(ret);
3011}
3012
/*
 * Called with mm->mmap_sem held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */
3022struct vm_area_struct *_install_special_mapping(
3023 struct mm_struct *mm,
3024 unsigned long addr, unsigned long len,
3025 unsigned long vm_flags, const struct vm_special_mapping *spec)
3026{
3027 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3028 &special_mapping_vmops);
3029}
3030
3031int install_special_mapping(struct mm_struct *mm,
3032 unsigned long addr, unsigned long len,
3033 unsigned long vm_flags, struct page **pages)
3034{
3035 struct vm_area_struct *vma = __install_special_mapping(
3036 mm, addr, len, vm_flags, (void *)pages,
3037 &legacy_special_mapping_vmops);
3038
3039 return PTR_ERR_OR_ZERO(vma);
3040}
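
/*
 * Example (a sketch loosely modelled on how architectures map their vDSO;
 * the "foo" names are made up): describe the mapping once, then install it
 * into an mm while holding mmap_sem for writing:
 *
 *	static struct page *foo_pages[2];	// [0] filled at init, [1] stays NULL
 *	static struct vm_special_mapping foo_mapping = {
 *		.name	= "[foo]",
 *		.pages	= foo_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &foo_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * install_special_mapping() is the older variant that takes a bare
 * NULL-terminated page array instead of a struct vm_special_mapping.
 */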
3041
3042static DEFINE_MUTEX(mm_all_locks_mutex);
3043
3044static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3045{
3046 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
		/*
		 * The LSB of anon_vma->root->rb_root.rb_node can't change
		 * from under us because we hold the mm_all_locks_mutex.
		 */
3051 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
		/*
		 * We can safely modify the bit after taking the
		 * anon_vma->root->rwsem.  If some other vma in this mm
		 * shares the same root anon_vma, we won't take the lock
		 * again.
		 *
		 * No need for atomic instructions here: the bit can't
		 * change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
3061 if (__test_and_set_bit(0, (unsigned long *)
3062 &anon_vma->root->rb_root.rb_node))
3063 BUG();
3064 }
3065}
3066
3067static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3068{
3069 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
3079 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3080 BUG();
3081 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
3082 }
3083}
3084
/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm.  This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_sem in write mode before calling
 * mm_take_all_locks().  The caller isn't allowed to release the
 * mmap_sem until mm_drop_all_locks() returns.
 *
 * mmap_sem in write mode is required in order to block all operations
 * that could modify pagetables and free pages without the need of
 * altering the vma layout.  It's also needed in write mode to avoid new
 * anon_vmas being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock on mm_all_locks_mutex; that same mutex also
 * serializes concurrent callers on different mms so their i_mmap and
 * anon_vma locks are always taken in a consistent global order.
 *
 * The per-mapping AS_MM_ALL_LOCKS bit and the low bit of the anon_vma
 * root's rb_root pointer record which locks have already been taken, so
 * each i_mmap_rwsem and anon_vma rwsem is acquired at most once even
 * when many vmas in this mm share them.  Hugetlbfs mappings are locked
 * before other file mappings, and anon_vmas last.
 *
 * Returns 0 on success, or -EINTR (with all locks dropped again) if a
 * signal is pending while the locks are being taken.
 */
3122int mm_take_all_locks(struct mm_struct *mm)
3123{
3124 struct vm_area_struct *vma;
3125 struct anon_vma_chain *avc;
3126
3127 BUG_ON(down_read_trylock(&mm->mmap_sem));
3128
3129 mutex_lock(&mm_all_locks_mutex);
3130
3131 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3132 if (signal_pending(current))
3133 goto out_unlock;
3134 if (vma->vm_file && vma->vm_file->f_mapping &&
3135 is_vm_hugetlb_page(vma))
3136 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3137 }
3138
3139 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3140 if (signal_pending(current))
3141 goto out_unlock;
3142 if (vma->vm_file && vma->vm_file->f_mapping &&
3143 !is_vm_hugetlb_page(vma))
3144 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3145 }
3146
3147 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3148 if (signal_pending(current))
3149 goto out_unlock;
3150 if (vma->anon_vma)
3151 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3152 vm_lock_anon_vma(mm, avc->anon_vma);
3153 }
3154
3155 return 0;
3156
3157out_unlock:
3158 mm_drop_all_locks(mm);
3159 return -EINTR;
3160}
3161
3162static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3163{
3164 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
		/*
		 * The bit can't change to 0 from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bit before unlocking the
		 * anon_vma, so that users of anon_vma->rb_root never
		 * see our bitflag.
		 *
		 * No need for atomic instructions here: the bit can't
		 * change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
3177 if (!__test_and_clear_bit(0, (unsigned long *)
3178 &anon_vma->root->rb_root.rb_node))
3179 BUG();
3180 anon_vma_unlock_write(anon_vma);
3181 }
3182}
3183
3184static void vm_unlock_mapping(struct address_space *mapping)
3185{
3186 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
3191 i_mmap_unlock_write(mapping);
3192 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3193 &mapping->flags))
3194 BUG();
3195 }
3196}
3197
/*
 * The mmap_sem cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
3202void mm_drop_all_locks(struct mm_struct *mm)
3203{
3204 struct vm_area_struct *vma;
3205 struct anon_vma_chain *avc;
3206
3207 BUG_ON(down_read_trylock(&mm->mmap_sem));
3208 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3209
3210 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3211 if (vma->anon_vma)
3212 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3213 vm_unlock_anon_vma(avc->anon_vma);
3214 if (vma->vm_file && vma->vm_file->f_mapping)
3215 vm_unlock_mapping(vma->vm_file->f_mapping);
3216 }
3217
3218 mutex_unlock(&mm_all_locks_mutex);
3219}
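
/*
 * Usage sketch (illustrative; mmu notifier registration is the typical
 * in-tree user of this pair):
 *
 *	down_write(&mm->mmap_sem);
 *	if (mm_take_all_locks(mm))
 *		goto out;		// -EINTR: a signal was pending
 *	... inspect or update state that must not race with rmap or
 *	    page-fault activity on this mm ...
 *	mm_drop_all_locks(mm);
 * out:
 *	up_write(&mm->mmap_sem);
 *
 * mmap_sem must not be released between the two calls.
 */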
3220
/*
 * initialise the vm_committed_as percpu counter
 */
3224void __init mmap_init(void)
3225{
3226 int ret;
3227
3228 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3229 VM_BUG_ON(ret);
3230}
3231

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OOM_DISABLE mode.
 *
 * The default value is min(3% of free memory, 128MB).
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
3242static int init_user_reserve(void)
3243{
3244 unsigned long free_kbytes;
3245
3246 free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3247
3248 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3249 return 0;
3250}
3251subsys_initcall(init_user_reserve);
3252

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OOM_DISABLE mode.  Systems with less than
 * 256MB will reserve the minimum of 3% of free memory and 8MB.
 */
3263static int init_admin_reserve(void)
3264{
3265 unsigned long free_kbytes;
3266
3267 free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3268
3269 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3270 return 0;
3271}
3272subsys_initcall(init_admin_reserve);
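
/*
 * Worked example (for illustration): with about 8 GiB free at boot,
 * free_kbytes is roughly 8388608, so free_kbytes / 32 is about 262144 KiB
 * (256 MiB); the caps then apply, giving sysctl_user_reserve_kbytes =
 * 131072 KiB (128 MiB) and sysctl_admin_reserve_kbytes = 8192 KiB (8 MiB).
 * With only 1 GiB free, free_kbytes / 32 is about 32768 KiB (32 MiB), which
 * is below the user cap and is used as-is, while the admin reserve is still
 * clamped to 8 MiB.
 */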
3273

/*
 * Reinitialise the user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB.  These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top.  It may make sense to increase or even disable the
 * reserves depending on the existence of swap or variations in the recovery
 * tools, so the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
3292static int reserve_mem_notifier(struct notifier_block *nb,
3293 unsigned long action, void *data)
3294{
3295 unsigned long tmp, free_kbytes;
3296
3297 switch (action) {
3298 case MEM_ONLINE:
		/* Default max is 128MB.  Leave alone if modified by operator. */
3300 tmp = sysctl_user_reserve_kbytes;
3301 if (0 < tmp && tmp < (1UL << 17))
3302 init_user_reserve();
3303
		/* Default max is 8MB.  Leave alone if modified by operator. */
3305 tmp = sysctl_admin_reserve_kbytes;
3306 if (0 < tmp && tmp < (1UL << 13))
3307 init_admin_reserve();
3308
3309 break;
3310 case MEM_OFFLINE:
3311 free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3312
3313 if (sysctl_user_reserve_kbytes > free_kbytes) {
3314 init_user_reserve();
3315 pr_info("vm.user_reserve_kbytes reset to %lu\n",
3316 sysctl_user_reserve_kbytes);
3317 }
3318
3319 if (sysctl_admin_reserve_kbytes > free_kbytes) {
3320 init_admin_reserve();
3321 pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3322 sysctl_admin_reserve_kbytes);
3323 }
3324 break;
3325 default:
3326 break;
3327 }
3328 return NOTIFY_OK;
3329}
3330
3331static struct notifier_block reserve_mem_nb = {
3332 .notifier_call = reserve_mem_notifier,
3333};
3334
3335static int __meminit init_reserve_notifier(void)
3336{
3337 if (register_hotmemory_notifier(&reserve_mem_nb))
3338 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3339
3340 return 0;
3341}
3342subsys_initcall(init_reserve_notifier);
3343