1
2
3
4
5
6
7
8
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/slab.h>
14#include <linux/backing-dev.h>
15#include <linux/mm.h>
16#include <linux/vmacache.h>
17#include <linux/shm.h>
18#include <linux/mman.h>
19#include <linux/pagemap.h>
20#include <linux/swap.h>
21#include <linux/syscalls.h>
22#include <linux/capability.h>
23#include <linux/init.h>
24#include <linux/file.h>
25#include <linux/fs.h>
26#include <linux/personality.h>
27#include <linux/security.h>
28#include <linux/hugetlb.h>
29#include <linux/shmem_fs.h>
30#include <linux/profile.h>
31#include <linux/export.h>
32#include <linux/mount.h>
33#include <linux/mempolicy.h>
34#include <linux/rmap.h>
35#include <linux/mmu_notifier.h>
36#include <linux/mmdebug.h>
37#include <linux/perf_event.h>
38#include <linux/audit.h>
39#include <linux/khugepaged.h>
40#include <linux/uprobes.h>
41#include <linux/rbtree_augmented.h>
42#include <linux/notifier.h>
43#include <linux/memory.h>
44#include <linux/printk.h>
45#include <linux/userfaultfd_k.h>
46#include <linux/moduleparam.h>
47#include <linux/pkeys.h>
48#include <linux/oom.h>
49#include <linux/sched/mm.h>
50
51#include <linux/uaccess.h>
52#include <asm/cacheflush.h>
53#include <asm/tlb.h>
54#include <asm/mmu_context.h>
55
56#define CREATE_TRACE_POINTS
57#include <trace/events/mmap.h>
58
59#include "internal.h"
60
/* Arch hook to veto an mmap request; default accepts everything. */
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

/* Bounds and current value of mmap base-address randomization entropy. */
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

/* When set (boot/module param), RLIMIT_DATA checks only warn instead of failing. */
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
/*
 * Page protections for each combination of
 * (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED), indexed by those four flag bits.
 * First eight entries are private (__P...), last eight shared (__S...).
 */
pgprot_t protection_map[16] __ro_after_init = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
101
#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
/* Default no-op when the arch does not post-filter page protections. */
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return prot;
}
#endif
108
/*
 * Translate VMA access flags into a hardware page protection value:
 * base protection from protection_map[], OR'ed with any arch-specific
 * bits, then passed through the arch filter hook.
 */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));

	return arch_filter_pgprot(ret);
}
EXPORT_SYMBOL(vm_get_page_prot);
118
/* Recompute protection bits for vm_flags while preserving oldprot's sw bits. */
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
123
124
/*
 * Update vma->vm_page_prot to be consistent with vma->vm_flags.
 * If the mapping wants write-notification, recompute the protection
 * as if it were private (VM_SHARED cleared) so writes will fault.
 */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}

	/* remove_protection_ptes may race with us, hence the WRITE_ONCE */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
138
139
140
141
/*
 * Remove one vma from the file's address_space: undo the i_writecount /
 * writable-mapping accounting and unlink from the i_mmap interval tree.
 * Caller must hold the mapping's i_mmap lock for writing.
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file_inode(file)->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}
154
155
156
157
158
159void unlink_file_vma(struct vm_area_struct *vma)
160{
161 struct file *file = vma->vm_file;
162
163 if (file) {
164 struct address_space *mapping = file->f_mapping;
165 i_mmap_lock_write(mapping);
166 __remove_shared_vm_struct(vma, file, mapping);
167 i_mmap_unlock_write(mapping);
168 }
169}
170
171
172
173
/*
 * Close a vm structure and free it, returning the next vma in the list.
 * Drops the ->close() callback, file reference and mempolicy reference.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	vm_area_free(vma);
	return next;
}
187
static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
		struct list_head *uf);
/*
 * brk() system call: grow or shrink the process heap.
 *
 * On success mm->brk is updated and the new (byte-granular) break is
 * returned; on failure the *original* break is returned, which is how
 * userspace detects the error.  Shrinking may downgrade mmap_lock from
 * write to read inside __do_munmap().
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long retval;
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *next;
	unsigned long min_brk;
	bool populate;
	bool downgraded = false;	/* true once mmap_lock is read-mode */
	LIST_HEAD(uf);			/* userfaultfd unmap notifications */

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK: if the break is not randomized, legacy
	 * applications may rely on being able to shrink it down to
	 * end_data; otherwise start_brk is the floor.
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit(RLIMIT_DATA) for the whole span of the
	 * data segment plus the brk area.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		/* Same page: just record the new byte-granular break. */
		mm->brk = brk;
		goto success;
	}

	/*
	 * Always allow shrinking brk.
	 * __do_munmap() may downgrade mmap_lock to read (returns 1 then).
	 */
	if (brk <= mm->brk) {
		int ret;

		/*
		 * mm->brk must be updated while still holding the write
		 * lock; restore origbrk if the unmap fails.
		 */
		mm->brk = brk;
		ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
		if (ret < 0) {
			mm->brk = origbrk;
			goto out;
		} else if (ret == 1) {
			downgraded = true;
		}
		goto success;
	}

	/* Growing: refuse if the new area would run into the next mapping. */
	next = find_vma(mm, oldbrk);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
		goto out;
	mm->brk = brk;

success:
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	if (downgraded)
		mmap_read_unlock(mm);
	else
		mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	retval = origbrk;
	mmap_write_unlock(mm);
	return retval;
}
288
289static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
290{
291 unsigned long gap, prev_end;
292
293
294
295
296
297
298
299 gap = vm_start_gap(vma);
300 if (vma->vm_prev) {
301 prev_end = vm_end_gap(vma->vm_prev);
302 if (gap > prev_end)
303 gap -= prev_end;
304 else
305 gap = 0;
306 }
307 return gap;
308}
309
310#ifdef CONFIG_DEBUG_VM_RB
311static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
312{
313 unsigned long max = vma_compute_gap(vma), subtree_gap;
314 if (vma->vm_rb.rb_left) {
315 subtree_gap = rb_entry(vma->vm_rb.rb_left,
316 struct vm_area_struct, vm_rb)->rb_subtree_gap;
317 if (subtree_gap > max)
318 max = subtree_gap;
319 }
320 if (vma->vm_rb.rb_right) {
321 subtree_gap = rb_entry(vma->vm_rb.rb_right,
322 struct vm_area_struct, vm_rb)->rb_subtree_gap;
323 if (subtree_gap > max)
324 max = subtree_gap;
325 }
326 return max;
327}
328
/*
 * Debug walk of mm's vma rbtree: checks ordering, start<=end sanity and
 * the cached subtree gaps.  Returns the node count, or -1 on any bug.
 */
static int browse_rb(struct mm_struct *mm)
{
	struct rb_root *root = &mm->mm_rb;
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev) {
			pr_emerg("vm_start %lx < prev %lx\n",
				  vma->vm_start, prev);
			bug = 1;
		}
		if (vma->vm_start < pend) {
			pr_emerg("vm_start %lx < pend %lx\n",
				  vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			pr_emerg("vm_start %lx > vm_end %lx\n",
				  vma->vm_start, vma->vm_end);
			bug = 1;
		}
		/* page_table_lock keeps the gap stable while we verify it */
		spin_lock(&mm->page_table_lock);
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			pr_emerg("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		spin_unlock(&mm->page_table_lock);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	/* Walk backwards too; counts must agree. */
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		pr_emerg("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ? -1 : i;
}
376
/*
 * Verify every cached subtree gap in the tree, except for "ignore"
 * whose gap is expected to be stale (it is being erased/moved).
 */
static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
	struct rb_node *nd;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		VM_BUG_ON_VMA(vma != ignore &&
			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
			vma);
	}
}
389
/*
 * Full-consistency check of an mm: anon_vma interval trees, vma list
 * count vs map_count, cached highest_vm_end, and the vma rbtree.
 * BUGs (VM_BUG_ON_MM) on any inconsistency.
 */
static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;

	while (vma) {
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;

		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}

		/* ends up holding the last vma's guard-adjusted end */
		highest_address = vm_end_gap(vma);
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
			  mm->highest_vm_end, highest_address);
		bug = 1;
	}
	i = browse_rb(mm);
	if (i != mm->map_count) {
		if (i != -1)
			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
429#else
430#define validate_mm_rb(root, ignore) do { } while (0)
431#define validate_mm(mm) do { } while (0)
432#endif
433
/*
 * Generate the augmented-rbtree callbacks that keep ->rb_subtree_gap
 * (max gap anywhere in the subtree, per vma_compute_gap()) up to date
 * across insertions, erasures and rotations.
 */
RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
			 struct vm_area_struct, vm_rb,
			 unsigned long, rb_subtree_gap, vma_compute_gap)
437
438
439
440
441
442
/*
 * Propagate a changed gap for this vma up the rbtree so all cached
 * rb_subtree_gap values above it stay correct.  Callers must invoke
 * this after changing vm_start, vm_end or the neighbouring vmas.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	/*
	 * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
	 * a callback function that does exactly what we want.
	 */
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
451
/* Insert an already-linked node, keeping the augmented gaps correct. */
static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	/* All rb_subtree_gap values must be consistent prior to insertion */
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
460
/*
 * Erase a vma from the rbtree.  Note: for correctness of the cached
 * gaps, the caller is responsible for calling vma_gap_update() on the
 * surviving neighbour afterwards (the augmented erase only fixes up
 * the path it rebalances).
 */
static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
470
/*
 * Erase "vma" while tolerating a stale cached gap on "ignore" — used
 * by __vma_adjust() when a neighbouring vma's bounds were already
 * changed before the erase (so its rb_subtree_gap is knowingly wrong).
 */
static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
						struct rb_root *root,
						struct vm_area_struct *ignore)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of "ignore".
	 */
	validate_mm_rb(root, ignore);

	__vma_rb_erase(vma, root);
}
484
/* Erase a vma whose own cached gap may already be stale (it's leaving). */
static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
					 struct rb_root *root)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of the vma being erased.
	 */
	validate_mm_rb(root, vma);

	__vma_rb_erase(vma, root);
}
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
/*
 * Temporarily drop this vma's entries from every anon_vma interval
 * tree before changing vm_start/vm_end/vm_pgoff; pair with
 * anon_vma_interval_tree_post_update_vma().  Caller holds the
 * anon_vma lock (the chains all share a root lock).
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}
519
/* Re-insert the entries removed by the pre_update helper above. */
static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
528
529static int find_vma_links(struct mm_struct *mm, unsigned long addr,
530 unsigned long end, struct vm_area_struct **pprev,
531 struct rb_node ***rb_link, struct rb_node **rb_parent)
532{
533 struct rb_node **__rb_link, *__rb_parent, *rb_prev;
534
535 __rb_link = &mm->mm_rb.rb_node;
536 rb_prev = __rb_parent = NULL;
537
538 while (*__rb_link) {
539 struct vm_area_struct *vma_tmp;
540
541 __rb_parent = *__rb_link;
542 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
543
544 if (vma_tmp->vm_end > addr) {
545
546 if (vma_tmp->vm_start < end)
547 return -ENOMEM;
548 __rb_link = &__rb_parent->rb_left;
549 } else {
550 rb_prev = __rb_parent;
551 __rb_link = &__rb_parent->rb_right;
552 }
553 }
554
555 *pprev = NULL;
556 if (rb_prev)
557 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
558 *rb_link = __rb_link;
559 *rb_parent = __rb_parent;
560 return 0;
561}
562
/*
 * Count the pages in [addr, end) that are already covered by vmas.
 * Used to size accounting adjustments when overwriting mappings.
 */
static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	unsigned long nr_pages = 0;
	struct vm_area_struct *vma;

	/* Find first overlapping mapping */
	vma = find_vma_intersection(mm, addr, end);
	if (!vma)
		return 0;

	nr_pages = (min(end, vma->vm_end) -
		max(addr, vma->vm_start)) >> PAGE_SHIFT;

	/* Iterate over the rest of the overlaps */
	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
		unsigned long overlap_len;

		if (vma->vm_start > end)
			break;

		overlap_len = min(end, vma->vm_end) - vma->vm_start;
		nr_pages += overlap_len >> PAGE_SHIFT;
	}

	return nr_pages;
}
590
/*
 * Link a vma (already in the mm list) into the rbtree and update the
 * affected cached gaps.  Note the careful ordering: the successor's
 * gap (or highest_vm_end) is fixed before insertion, then the new
 * node's own gap is propagated, then the tree is rebalanced.
 */
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Update tracking information for the gap following the new vma. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vm_end_gap(vma);

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the
	 * correct insertion point for that vma. As a result, we could not
	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
	 * So, we first insert the vma with a zero rb_subtree_gap value
	 * (to be consistent with what we did on the way down), and then
	 * immediately update the gap value for the newly inserted vma,
	 * correcting the tree on the way up.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}
614
615static void __vma_link_file(struct vm_area_struct *vma)
616{
617 struct file *file;
618
619 file = vma->vm_file;
620 if (file) {
621 struct address_space *mapping = file->f_mapping;
622
623 if (vma->vm_flags & VM_DENYWRITE)
624 atomic_dec(&file_inode(file)->i_writecount);
625 if (vma->vm_flags & VM_SHARED)
626 atomic_inc(&mapping->i_mmap_writable);
627
628 flush_dcache_mmap_lock(mapping);
629 vma_interval_tree_insert(vma, &mapping->i_mmap);
630 flush_dcache_mmap_unlock(mapping);
631 }
632}
633
/* Link a vma into both the mm list and the rbtree (no file linkage). */
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}
642
/*
 * Fully link a new vma: list + rbtree + file mapping, under the
 * i_mmap lock when file-backed, then bump map_count and validate.
 */
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		i_mmap_lock_write(mapping);
	}

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		i_mmap_unlock_write(mapping);

	mm->map_count++;
	validate_mm(mm);
}
663
664
665
666
667
/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma
 * into the mm's list and rbtree.  The caller has already dealt with
 * file and anon_vma linkage, hence no vma_link() here.  BUGs if the
 * range is unexpectedly occupied.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}
679
/*
 * Unlink a vma from the rbtree (tolerating a stale gap on "ignore")
 * and the mm list, then invalidate the per-mm vma cache since the
 * removed vma may still be referenced there.
 */
static __always_inline void __vma_unlink_common(struct mm_struct *mm,
						struct vm_area_struct *vma,
						struct vm_area_struct *ignore)
{
	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
	__vma_unlink_list(mm, vma);
	/* Kill the cache */
	vmacache_invalidate(mm);
}
689
690
691
692
693
694
695
696
/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.  "expand", if set, is the vma
 * that is growing to absorb the adjusted range (vma_merge cases).
 */
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
	struct address_space *mapping = NULL;
	struct rb_root_cached *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL, *importer = NULL;

		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all of next and
			 * perhaps the vma after it too.
			 */
			if (next == expand) {
				/*
				 * "next" is the vma being expanded while
				 * "vma" is being removed: swap them so the
				 * rest of the function can treat "vma" as
				 * the survivor and "next" as the casualty.
				 */
				VM_WARN_ON(end != next->vm_end);
				/* remove_next == 3 records that a swap happened */
				remove_next = 3;
				VM_WARN_ON(file != next->vm_file);
				swap(vma, next);
			} else {
				VM_WARN_ON(expand != vma);
				/*
				 * remove_next == 2 means two vmas go away
				 * (second pass handled via "goto again");
				 * remove_next == 1 means only next goes.
				 */
				remove_next = 1 + (end > next->vm_end);
				VM_WARN_ON(remove_next == 2 &&
					   end != next->vm_next->vm_end);
				/* trim end to next, for the first pass */
				end = next->vm_end;
			}

			exporter = next;
			importer = vma;

			/*
			 * If next doesn't have anon_vma, import from the
			 * vma after next instead (the one that overlaps).
			 */
			if (remove_next == 2 && !next->anon_vma)
				exporter = next->vm_next;

		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of next:
			 * shift next's start boundary up.
			 */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			exporter = next;
			importer = vma;
			VM_WARN_ON(expand != importer);
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks and !insert tells it's not split_vma:
			 * next absorbs the vacated range, so shift next's
			 * start boundary down.
			 */
			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
			exporter = vma;
			importer = next;
			VM_WARN_ON(expand != importer);
		}

		/*
		 * Easily overlooked: the expanding vma must have an
		 * anon_vma covering any anon pages it imports from the
		 * shrinking vma.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			int error;

			importer->anon_vma = exporter->anon_vma;
			error = anon_vma_clone(importer, exporter);
			if (error)
				return error;
		}
	}
again:
	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);

	if (file) {
		mapping = file->f_mapping;
		root = &mapping->i_mmap;
		uprobe_munmap(vma, vma->vm_start, vma->vm_end);

		if (adjust_next)
			uprobe_munmap(next, next->vm_start, next->vm_end);

		i_mmap_lock_write(mapping);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible throughout; the address-space list
			 * insertion must wait until vma start/end are final.
			 */
			__vma_link_file(insert);
		}
	}

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_WARN_ON(adjust_next && next->anon_vma &&
			   anon_vma != next->anon_vma);
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (root) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	/* The actual boundary updates. */
	if (start != vma->vm_start) {
		vma->vm_start = start;
		start_changed = true;
	}
	if (end != vma->vm_end) {
		vma->vm_end = end;
		end_changed = true;
	}
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma; remove next
		 * before dropping the locks.
		 */
		if (remove_next != 3)
			__vma_unlink_common(mm, next, next);
		else
			/*
			 * The vmas were swapped above, so the vma whose
			 * cached gap is knowingly stale is the post-swap
			 * "vma" (pre-swap "next"): tell validate_mm_rb to
			 * ignore it.
			 */
			__vma_unlink_common(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma; insert it into
		 * the list/tree before dropping the locks (it may
		 * either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	} else {
		/* Plain boundary move: fix the affected cached gaps. */
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = vm_end_gap(vma);
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_write(mapping);

	if (root) {
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		vm_area_free(next);

		/*
		 * For remove_next == 2 a second vma must go too; it is
		 * handled by looping back (goto again) rather than doing
		 * both removals in one pass.
		 */
		if (remove_next != 3) {
			/*
			 * "next" was removed and vma->vm_end expanded over
			 * it, so the gap of the following vma changed.
			 */
			next = vma->vm_next;
		} else {
			/*
			 * After the swap(), the surviving vma (post-swap
			 * "vma") is the one whose gap changed; post-swap
			 * "next" is a dangling pointer to the freed vma.
			 */
			next = vma;
		}
		if (remove_next == 2) {
			remove_next = 1;
			end = next->vm_end;
			goto again;
		}
		else if (next)
			vma_gap_update(next);
		else {
			/*
			 * No vma follows: the cached highest_vm_end must
			 * already match, since the removed vma could not
			 * have extended past the surviving one's end.
			 */
			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
		}
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}
983
984
985
986
987
988static inline int is_mergeable_vma(struct vm_area_struct *vma,
989 struct file *file, unsigned long vm_flags,
990 struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
991{
992
993
994
995
996
997
998
999
1000 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
1001 return 0;
1002 if (vma->vm_file != file)
1003 return 0;
1004 if (vma->vm_ops && vma->vm_ops->close)
1005 return 0;
1006 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
1007 return 0;
1008 return 1;
1009}
1010
/*
 * Are the two anon_vmas compatible for merging?  NULL on either side
 * is compatible when the vma carries at most one anon_vma chain entry
 * (so adopting the other side's anon_vma is safe); otherwise they
 * must be the very same anon_vma.
 */
static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036static int
1037can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
1038 struct anon_vma *anon_vma, struct file *file,
1039 pgoff_t vm_pgoff,
1040 struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1041{
1042 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
1043 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1044 if (vma->vm_pgoff == vm_pgoff)
1045 return 1;
1046 }
1047 return 0;
1048}
1049
1050
1051
1052
1053
1054
1055
1056
1057static int
1058can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
1059 struct anon_vma *anon_vma, struct file *file,
1060 pgoff_t vm_pgoff,
1061 struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1062{
1063 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
1064 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1065 pgoff_t vm_pglen;
1066 vm_pglen = vma_pages(vma);
1067 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
1068 return 1;
1069 }
1070 return 0;
1071}
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
/*
 * Given a new mapping request [addr, end) with the given attributes,
 * figure out whether it can be merged with the predecessor vma
 * ("prev"), the successor vma ("next"), or both, and perform the
 * merge via __vma_adjust().  Returns the vma that now covers the
 * range, or NULL if no merge was possible (caller then creates a
 * fresh vma).  Mappings with VM_SPECIAL bits never merge.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	/* If the new range replaces "next" entirely, look one past it. */
	if (area && area->vm_end == end)
		next = next->vm_next;

	/* verify some invariant that must be enforced by the caller */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(area && end > area->vm_end);
	VM_WARN_ON(addr >= end);

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
					    anon_vma, file, pgoff,
					    vm_userfaultfd_ctx)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
						     anon_vma, file,
						     pgoff+pglen,
						     vm_userfaultfd_ctx) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			err = __vma_adjust(prev, prev->vm_start,
					 next->vm_end, prev->vm_pgoff, NULL,
					 prev);
		} else					/* cases 2, 5, 7 */
			err = __vma_adjust(prev, prev->vm_start,
					 end, prev->vm_pgoff, NULL, prev);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev, vm_flags);
		return prev;
	}

	/*
	 * Can this new request be merged in front of the successor?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					     anon_vma, file, pgoff+pglen,
					     vm_userfaultfd_ctx)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = __vma_adjust(prev, prev->vm_start,
					 addr, prev->vm_pgoff, NULL, next);
		else {					/* cases 3, 8 */
			err = __vma_adjust(area, addr, next->vm_end,
					 next->vm_pgoff - pglen, NULL, next);
			/*
			 * In case 3 area is already equal to next and
			 * this is a noop, but in case 8 "area" has
			 * been removed and next was expanded over it.
			 */
			area = next;
		}
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area, vm_flags);
		return area;
	}

	return NULL;
}
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
/*
 * Are two adjacent vmas "close enough" that sharing an anon_vma is
 * reasonable: b directly follows a, same mempolicy and file, the same
 * flags modulo access bits and soft-dirty, and contiguous file offsets.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1254{
1255 if (anon_vma_compatible(a, b)) {
1256 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1257
1258 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1259 return anon_vma;
1260 }
1261 return NULL;
1262}
1263
1264
1265
1266
1267
1268
1269
1270
1271
/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * whether a neighbouring vma's anon_vma can be reused for this vma,
 * preferring the successor, then the predecessor.  Returns NULL if
 * neither qualifies.  This is best-effort: the returned anon_vma may
 * disappear concurrently, which the caller must tolerate.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;

	/* Try next first. */
	if (vma->vm_next) {
		anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
		if (anon_vma)
			return anon_vma;
	}

	/* Try prev next. */
	if (vma->vm_prev)
		anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}
1299
1300
1301
1302
1303
1304static inline unsigned long round_hint_to_min(unsigned long hint)
1305{
1306 hint &= PAGE_MASK;
1307 if (((void *)hint != NULL) &&
1308 (hint < mmap_min_addr))
1309 return PAGE_ALIGN(mmap_min_addr);
1310 return hint;
1311}
1312
1313static inline int mlock_future_check(struct mm_struct *mm,
1314 unsigned long flags,
1315 unsigned long len)
1316{
1317 unsigned long locked, lock_limit;
1318
1319
1320 if (flags & VM_LOCKED) {
1321 locked = len >> PAGE_SHIFT;
1322 locked += mm->locked_vm;
1323 lock_limit = rlimit(RLIMIT_MEMLOCK);
1324 lock_limit >>= PAGE_SHIFT;
1325 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1326 return -EAGAIN;
1327 }
1328 return 0;
1329}
1330
1331static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1332{
1333 if (S_ISREG(inode->i_mode))
1334 return MAX_LFS_FILESIZE;
1335
1336 if (S_ISBLK(inode->i_mode))
1337 return MAX_LFS_FILESIZE;
1338
1339 if (S_ISSOCK(inode->i_mode))
1340 return MAX_LFS_FILESIZE;
1341
1342
1343 if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1344 return 0;
1345
1346
1347 return ULONG_MAX;
1348}
1349
/*
 * Check that mapping "len" bytes at page offset "pgoff" of the file
 * stays within file_mmap_size_max().  A maxsize of 0 means unlimited
 * (the subtraction then wraps to a huge u64, so the pgoff test always
 * passes — intentional, relies on unsigned arithmetic).
 */
static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	u64 maxsize = file_mmap_size_max(file, inode);

	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}
1362
1363
1364
1365
/*
 * The caller must write-lock current->mm->mmap_lock.
 *
 * Validate an mmap request and create the mapping via mmap_region().
 * Returns the mapped address, or a negative errno encoded in the
 * return value.  *populate is set to the length the caller should
 * prefault with mm_populate() (0 when no prefaulting is needed).
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff,
			unsigned long *populate, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we dont add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* force arch specific MAP_FIXED handling in get_unmapped_area */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	/* MAP_FIXED_NOREPLACE: fail instead of clobbering an existing vma. */
	if (flags & MAP_FIXED_NOREPLACE) {
		struct vm_area_struct *vma = find_vma(mm, addr);

		if (vma && vma->vm_start < addr + len)
			return -EEXIST;
	}

	/* Pure-execute mappings may get an execute-only protection key. */
	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (mlock_future_check(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);
		unsigned long flags_mask;

		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Force use of MAP_SHARED_VALIDATE with non-legacy
			 * flags. E.g. MAP_SYNC is dangerous to use with
			 * MAP_SHARED as you don't know which consistency model
			 * you will get. We silently ignore unsupported flags
			 * with MAP_SHARED to preserve backward compatibility.
			 */
			flags &= LEGACY_MAP_MASK;
			fallthrough;
		case MAP_SHARED_VALIDATE:
			if (flags & ~flags_mask)
				return -EOPNOTSUPP;
			if (prot & PROT_WRITE) {
				if (!(file->f_mode & FMODE_WRITE))
					return -EACCES;
				if (IS_SWAPFILE(file->f_mapping->host))
					return -ETXTBSY;
			}

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(file))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
			fallthrough;
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}
1552
/*
 * Common entry for the mmap_pgoff-style syscalls: resolve the fd (or
 * set up an anonymous hugetlbfs file for MAP_HUGETLB), align the
 * length for hugepages, and hand off to vm_mmap_pgoff().
 */
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file)) {
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		} else if (unlikely(flags & MAP_HUGETLB)) {
			/* MAP_HUGETLB is only valid with anon or hugetlbfs fds */
			retval = -EINVAL;
			goto out_fput;
		}
	} else if (flags & MAP_HUGETLB) {
		struct user_struct *user = NULL;
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 * A dummy user value is used because we are not locking
		 * memory so no accounting is necessary
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				&user, HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	/* These flags are handled via the file/inode, not as mmap flags. */
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}
1602
/* mmap_pgoff(2): thin syscall wrapper around ksys_mmap_pgoff(). */
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
1609
1610#ifdef __ARCH_WANT_SYS_OLD_MMAP
/*
 * Argument block for the legacy old_mmap(2) calling convention: all six
 * mmap arguments are passed in a single user-space structure.
 */
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;	/* byte offset; must be page-aligned */
};
1619
/*
 * old_mmap(2): legacy entry point taking the argument block from user
 * space.  Rejects non-page-aligned offsets, then forwards to
 * ksys_mmap_pgoff() with the offset converted to a page number.
 */
SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
1632#endif
1633
1634
1635
1636
1637
1638
1639
/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	vm_flags_t vm_flags = vma->vm_flags;
	const struct vm_operations_struct *vm_ops = vma->vm_ops;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
		return 1;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
		return 0;

	/* Do we need to track softdirty? */
	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
		return 1;

	/* Specialty mapping? */
	if (vm_flags & VM_PFNMAP)
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}
1671
1672
1673
1674
1675
1676static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1677{
1678
1679
1680
1681
1682 if (file && is_file_hugepages(file))
1683 return 0;
1684
1685 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1686}
1687
/*
 * mmap_region - complete the mapping of [addr, addr+len) with @vm_flags.
 * @addr has already been validated by the caller (do_mmap).  Returns the
 * start address of the new mapping or a negative errno.
 */
unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev, *merge;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;

	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
		unsigned long nr_pages;

		/*
		 * MAP_FIXED may remove pages of mappings that intersect with
		 * the requested mapping.  Account for the pages it would
		 * unmap before re-checking the limit.
		 */
		nr_pages = count_vma_pages_range(mm, addr, addr + len);

		if (!may_expand_vm(mm, vm_flags,
					(len >> PAGE_SHIFT) - nr_pages))
			return -ENOMEM;
	}

	/* Clear any old mappings overlapping the requested range. */
	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
			      &rb_parent)) {
		if (do_munmap(mm, addr, len, uf))
			return -ENOMEM;
	}

	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper.  The address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = vm_area_alloc(mm);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
		}
		if (vm_flags & VM_SHARED) {
			error = mapping_map_writable(file->f_mapping);
			if (error)
				goto allow_write_and_free_vma;
		}

		/*
		 * ->mmap() can change vma->vm_file, but must guarantee that
		 * vma_link() below can deny write-access if VM_DENYWRITE is
		 * set and map writably if VM_SHARED is set.  This usually
		 * means the new file must not have been exposed to
		 * user-space yet.
		 */
		vma->vm_file = get_file(file);
		error = call_mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;

		/*
		 * If vm_flags changed after call_mmap(), we should try merge
		 * vma again as we may succeed this time.
		 */
		if (unlikely(vm_flags != vma->vm_flags && prev)) {
			merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX);
			if (merge) {
				/*
				 * ->mmap() can change vma->vm_file and fput
				 * the original file.  So fput the vma->vm_file
				 * here or we would add an extra fput for file
				 * and cause general protection fault
				 * ultimately.
				 */
				fput(vma->vm_file);
				vm_area_free(vma);
				vma = merge;
				/* Update vm_flags to pick up the change. */
				addr = vma->vm_start;
				vm_flags = vma->vm_flags;
				goto unmap_writable;
			}
		}

		/*
		 * Can addr have changed??
		 *
		 * Answer: Yes, several device drivers can do it in their
		 *         f_op->mmap method. -DaveM
		 * Bug: If addr is changed, prev, rb_link, rb_parent should
		 *      be updated for vma_link()
		 */
		WARN_ON_ONCE(addr != vma->vm_start);

		addr = vma->vm_start;
		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	} else {
		vma_set_anonymous(vma);
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	/* Once vma is linked, undo our temporary denial counts. */
	if (file) {
unmap_writable:
		if (vm_flags & VM_SHARED)
			mapping_unmap_writable(file->f_mapping);
		if (vm_flags & VM_DENYWRITE)
			allow_write_access(file);
	}
	file = vma->vm_file;
out:
	perf_event_mmap(vma);

	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		/* mlock is not applicable to special/gate/hugetlb/DAX vmas */
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(current->mm))
			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
		else
			mm->locked_vm += (len >> PAGE_SHIFT);
	}

	if (file)
		uprobe_mmap(vma);

	/*
	 * New (or expanded) vma always get soft dirty status.
	 * Otherwise user-space soft-dirty page tracker won't
	 * be able to distinguish situation when vma area unmapped,
	 * then new mapped in-place (which must be aimed as
	 * a completely new data area).
	 */
	vma->vm_flags |= VM_SOFTDIRTY;

	vma_set_page_prot(vma);

	return addr;

unmap_and_free_vma:
	vma->vm_file = NULL;
	fput(file);

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	charged = 0;
	if (vm_flags & VM_SHARED)
		mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
	if (vm_flags & VM_DENYWRITE)
		allow_write_access(file);
free_vma:
	vm_area_free(vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}
1877
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	/*
	 * We implement the search by looking for an rbtree node that
	 * immediately follows a suitable gap. That is,
	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
	 * - gap_end   = vma->vm_start        >= info->low_limit  + length;
	 * - gap_end - gap_start >= length
	 */

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/* Adjust search limits by the desired length */
	if (info->high_limit < length)
		return -ENOMEM;
	high_limit = info->high_limit - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		goto check_highest;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		goto check_highest;

	while (true) {
		/* Visit left subtree if it looks promising */
		gap_end = vm_start_gap(vma);
		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
		/* Check if current node has a suitable gap */
		if (gap_start > high_limit)
			return -ENOMEM;
		if (gap_end >= low_limit &&
		    gap_end > gap_start && gap_end - gap_start >= length)
			goto found;

		/* Visit right subtree if it looks promising */
		if (vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				goto check_highest;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_left) {
				gap_start = vm_end_gap(vma->vm_prev);
				gap_end = vm_start_gap(vma);
				goto check_current;
			}
		}
	}

check_highest:
	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	gap_end = ULONG_MAX;	/* only for the VM_BUG_ON below */
	if (gap_start > high_limit)
		return -ENOMEM;

found:
	/* We found a suitable gap. Clip it with the original low_limit. */
	if (gap_start < info->low_limit)
		gap_start = info->low_limit;

	/* Adjust gap address to the desired alignment */
	gap_start += (info->align_offset - gap_start) & info->align_mask;

	VM_BUG_ON(gap_start + info->length > info->high_limit);
	VM_BUG_ON(gap_start + info->length > gap_end);
	return gap_start;
}
1980
/*
 * Mirror image of unmapped_area(): find the highest suitable gap at or
 * below info->high_limit, walking the augmented rbtree right-to-left.
 */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/*
	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
	 */
	gap_end = info->high_limit;
	if (gap_end < length)
		return -ENOMEM;
	high_limit = gap_end - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	if (gap_start <= high_limit)
		goto found_highest;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		return -ENOMEM;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		return -ENOMEM;

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vm_start_gap(vma);
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit &&
		    gap_end > gap_start && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */
		if (vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				return -ENOMEM;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vm_end_gap(vma->vm_prev) : 0;
				goto check_current;
			}
		}
	}

found:
	/* We found a suitable gap. Clip it with the original high_limit. */
	if (gap_end > info->high_limit)
		gap_end = info->high_limit;

found_highest:
	/* Compute highest gap address at the desired alignment */
	gap_end -= info->length;
	gap_end -= (gap_end - info->align_offset) & info->align_mask;

	VM_BUG_ON(gap_end < info->low_limit);
	VM_BUG_ON(gap_end < gap_start);
	return gap_end;
}
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
2090{
2091 unsigned long addr;
2092
2093 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
2094 addr = unmapped_area_topdown(info);
2095 else
2096 addr = unmapped_area(info);
2097
2098 trace_vm_unmapped_area(addr, info);
2099 return addr;
2100}
2101
2102#ifndef arch_get_mmap_end
2103#define arch_get_mmap_end(addr) (TASK_SIZE)
2104#endif
2105
2106#ifndef arch_get_mmap_base
2107#define arch_get_mmap_base(addr, base) (base)
2108#endif
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121#ifndef HAVE_ARCH_UNMAPPED_AREA
2122unsigned long
2123arch_get_unmapped_area(struct file *filp, unsigned long addr,
2124 unsigned long len, unsigned long pgoff, unsigned long flags)
2125{
2126 struct mm_struct *mm = current->mm;
2127 struct vm_area_struct *vma, *prev;
2128 struct vm_unmapped_area_info info;
2129 const unsigned long mmap_end = arch_get_mmap_end(addr);
2130
2131 if (len > mmap_end - mmap_min_addr)
2132 return -ENOMEM;
2133
2134 if (flags & MAP_FIXED)
2135 return addr;
2136
2137 if (addr) {
2138 addr = PAGE_ALIGN(addr);
2139 vma = find_vma_prev(mm, addr, &prev);
2140 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
2141 (!vma || addr + len <= vm_start_gap(vma)) &&
2142 (!prev || addr >= vm_end_gap(prev)))
2143 return addr;
2144 }
2145
2146 info.flags = 0;
2147 info.length = len;
2148 info.low_limit = mm->mmap_base;
2149 info.high_limit = mmap_end;
2150 info.align_mask = 0;
2151 info.align_offset = 0;
2152 return vm_unmapped_area(&info);
2153}
2154#endif
2155
2156
2157
2158
2159
2160#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base).
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;
	const unsigned long mmap_end = arch_get_mmap_end(addr);

	/* requested length too big for entire address space */
	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vm_start_gap(vma)) &&
				(!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
	info.align_mask = 0;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = mmap_end;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
2212#endif
2213
/*
 * get_unmapped_area - pick an address for a new mapping.
 *
 * Dispatches to the per-mm allocator, a file's own ->get_unmapped_area,
 * or shmem's for anonymous shared mappings, then validates the result
 * against the address space limits and security hooks.
 */
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	if (file) {
		if (file->f_op->get_unmapped_area)
			get_area = file->f_op->get_unmapped_area;
	} else if (flags & MAP_SHARED) {
		/*
		 * mmap_region() will call shmem_zero_setup() to create a
		 * file, so use shmem's get_unmapped_area in case it can be
		 * huge; and pgoff will be cleared later anyway, so match
		 * that alignment here.
		 */
		pgoff = 0;
		get_area = shmem_get_unmapped_area;
	}

	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (offset_in_page(addr))
		return -EINVAL;

	error = security_mmap_addr(addr);
	return error ? error : addr;
}
2255
2256EXPORT_SYMBOL(get_unmapped_area);
2257
2258
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct rb_node *rb_node;
	struct vm_area_struct *vma;

	/* Check the per-task vma cache first. */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	rb_node = mm->mm_rb.rb_node;

	while (rb_node) {
		struct vm_area_struct *tmp;

		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);

		if (tmp->vm_end > addr) {
			/* candidate; keep looking left for a tighter fit */
			vma = tmp;
			if (tmp->vm_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else
			rb_node = rb_node->rb_right;
	}

	if (vma)
		vmacache_update(addr, vma);
	return vma;
}
2289
2290EXPORT_SYMBOL(find_vma);
2291
2292
2293
2294
2295struct vm_area_struct *
2296find_vma_prev(struct mm_struct *mm, unsigned long addr,
2297 struct vm_area_struct **pprev)
2298{
2299 struct vm_area_struct *vma;
2300
2301 vma = find_vma(mm, addr);
2302 if (vma) {
2303 *pprev = vma->vm_prev;
2304 } else {
2305 struct rb_node *rb_node = rb_last(&mm->mm_rb);
2306
2307 *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
2308 }
2309 return vma;
2310}
2311
2312
2313
2314
2315
2316
/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma,
			     unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long new_start;

	/* address space limit tests */
	if (!may_expand_vm(mm, vma->vm_flags, grow))
		return -ENOMEM;

	/* Stack limit test */
	if (size > rlimit(RLIMIT_STACK))
		return -ENOMEM;

	/* mlock limit tests */
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked;
		unsigned long limit;
		locked = mm->locked_vm + grow;
		limit = rlimit(RLIMIT_MEMLOCK);
		limit >>= PAGE_SHIFT;
		if (locked > limit && !capable(CAP_IPC_LOCK))
			return -ENOMEM;
	}

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
			vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit..  This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory_mm(mm, grow))
		return -ENOMEM;

	return 0;
}
2357
2358#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
2359
2360
2361
2362
/*
 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
 * vma is the last one with address > vma->vm_end.  Have to extend vma.
 */
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next;
	unsigned long gap_addr;
	int error = 0;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/* Guard against exceeding limits of the address space. */
	address &= PAGE_MASK;
	if (address >= (TASK_SIZE & PAGE_MASK))
		return -ENOMEM;
	address += PAGE_SIZE;

	/* Enforce stack_guard_gap */
	gap_addr = address + stack_guard_gap;

	/* Guard against overflow */
	if (gap_addr < address || gap_addr > TASK_SIZE)
		gap_addr = TASK_SIZE;

	next = vma->vm_next;
	if (next && next->vm_start < gap_addr && vma_is_accessible(next)) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */
	}

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_lock in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_lock
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_end = address;
				anon_vma_interval_tree_post_update_vma(vma);
				if (vma->vm_next)
					vma_gap_update(vma->vm_next);
				else
					mm->highest_vm_end = vm_end_gap(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(mm);
	return error;
}
2448#endif
2449
2450
2451
2452
/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma
 * downwards to cover @address.
 */
int expand_downwards(struct vm_area_struct *vma,
				   unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *prev;
	int error = 0;

	address &= PAGE_MASK;
	if (address < mmap_min_addr)
		return -EPERM;

	/* Enforce stack_guard_gap */
	prev = vma->vm_prev;
	/* Check that both stack segments have the same anon_vma? */
	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
			vma_is_accessible(prev)) {
		if (address - prev->vm_end < stack_guard_gap)
			return -ENOMEM;
	}

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_lock in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (grow <= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_lock
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_start = address;
				vma->vm_pgoff -= grow;
				anon_vma_interval_tree_post_update_vma(vma);
				vma_gap_update(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(mm);
	return error;
}
2526
2527
/* enforced gap between an expanding stack and adjacent vmas: 256 pages */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2529
/* Parse the "stack_guard_gap=" kernel command-line option (value in pages). */
static int __init cmdline_parse_stack_guard_gap(char *p)
{
	unsigned long val;
	char *endptr;

	val = simple_strtoul(p, &endptr, 10);
	/* only accept a fully numeric argument; silently ignore anything else */
	if (!*endptr)
		stack_guard_gap = val << PAGE_SHIFT;

	return 0;
}
2541__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
2542
2543#ifdef CONFIG_STACK_GROWSUP
/* Stacks grow upward on this architecture (CONFIG_STACK_GROWSUP). */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_upwards(vma, address);
}
2548
/*
 * Find the VMA covering @addr; if none covers it but the preceding VMA
 * is a growable stack, expand it upward to reach @addr.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	/* don't expand while a coredump may be walking the mm */
	if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED)
		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
2565#else
/* Stacks grow downward on this architecture. */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}
2570
/*
 * Find the VMA covering @addr; if the next VMA above is a VM_GROWSDOWN
 * stack, expand it downward to cover @addr.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	/* don't expand while a coredump may be walking the mm */
	if (!mmget_still_valid(mm))
		return NULL;
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		populate_vma_page_range(vma, addr, start, NULL);
	return vma;
}
2595#endif
2596
2597EXPORT_SYMBOL_GPL(find_extend_vma);
2598
2599
2600
2601
2602
2603
2604
/*
 * Ok - we have the memory areas we should free on the vma list,
 * so release them, and do the vma updates.
 *
 * Called with the mm semaphore held.
 */
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	unsigned long nr_accounted = 0;

	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	do {
		long nrpages = vma_pages(vma);

		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += nrpages;
		vm_stat_account(mm, vma->vm_flags, -nrpages);
		vma = remove_vma(vma);
	} while (vma);
	vm_unacct_memory(nr_accounted);
	validate_mm(mm);
}
2622
2623
2624
2625
2626
2627
/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);
	/* free page tables between the surviving neighbours' boundaries */
	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
				 next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);
}
2643
2644
2645
2646
2647
/*
 * Create a list of vma's touched by the unmap, removing them from the mm's
 * vma list as we go.  Returns false when the mmap_lock must not be
 * downgraded afterwards (a neighbouring stack vma could grow into the
 * range under a read lock).
 */
static bool
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, unsigned long end)
{
	struct vm_area_struct **insertion_point;
	struct vm_area_struct *tail_vma = NULL;

	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
	vma->vm_prev = NULL;
	do {
		vma_rb_erase(vma, &mm->mm_rb);
		mm->map_count--;
		tail_vma = vma;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
	*insertion_point = vma;
	if (vma) {
		vma->vm_prev = prev;
		vma_gap_update(vma);
	} else
		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
	tail_vma->vm_next = NULL;

	/* Kill the cache */
	vmacache_invalidate(mm);

	/*
	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
	 * VM_GROWSUP VMA. Such VMAs can change their size under
	 * down_read(mmap_lock) and collide with the VMA we are about to
	 * unmap.
	 */
	if (vma && (vma->vm_flags & VM_GROWSDOWN))
		return false;
	if (prev && (prev->vm_flags & VM_GROWSUP))
		return false;
	return true;
}
2685
2686
2687
2688
2689
/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this where
 * it has already been checked or doesn't make sense to fail.
 */
int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	int err;

	/* give the backing object a chance to veto the split */
	if (vma->vm_ops && vma->vm_ops->split) {
		err = vma->vm_ops->split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below)
		new->vm_end = addr;
	else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vma;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	if (new_below)
		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
			((addr - new->vm_start) >> PAGE_SHIFT), new);
	else
		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);

	/* Success. */
	if (!err)
		return 0;

	/* Clean everything up if vma_adjust failed. */
	if (new->vm_ops && new->vm_ops->close)
		new->vm_ops->close(new);
	if (new->vm_file)
		fput(new->vm_file);
	unlink_anon_vmas(new);
 out_free_mpol:
	mpol_put(vma_policy(new));
 out_free_vma:
	vm_area_free(new);
	return err;
}
2749
2750
2751
2752
2753
2754int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2755 unsigned long addr, int new_below)
2756{
2757 if (mm->map_count >= sysctl_max_map_count)
2758 return -ENOMEM;
2759
2760 return __split_vma(mm, vma, addr, new_below);
2761}
2762
2763
2764
2765
2766
2767
/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 *
 * Returns 1 with the mmap_lock downgraded to read when @downgrade was
 * honoured, 0 on plain success, or a negative errno.
 */
int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
		struct list_head *uf, bool downgrade)
{
	unsigned long end;
	struct vm_area_struct *vma, *prev, *last;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (len == 0)
		return -EINVAL;

	/*
	 * arch_unmap() might do unmaps itself.  It must be called
	 * and finish any rbtree manipulation before this code
	 * runs and also starts to manipulate the rbtree.
	 */
	arch_unmap(mm, start, end);

	/* Find the first overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma)
		return 0;
	prev = vma->vm_prev;
	/* we have start < vma->vm_end  */

	/* if it doesn't overlap, we have nothing.. */
	if (vma->vm_start >= end)
		return 0;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */
	if (start > vma->vm_start) {
		int error;

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources on exit.
		 */
		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
			return -ENOMEM;

		error = __split_vma(mm, vma, start, 0);
		if (error)
			return error;
		prev = vma;
	}

	/* Does it split the last one? */
	last = find_vma(mm, end);
	if (last && end > last->vm_start) {
		int error = __split_vma(mm, last, end, 1);
		if (error)
			return error;
	}
	vma = prev ? prev->vm_next : mm->mmap;

	if (unlikely(uf)) {
		/*
		 * If userfaultfd_unmap_prep returns an error the vmas
		 * will remain splitted, but userland will get a
		 * highly unexpected error anyway. This is no
		 * different than the case where the first of the two
		 * __split_vma fails, but we don't undo the first
		 * split, despite we could. This is unlikely enough
		 * failure that it's not worth optimizing it for.
		 */
		int error = userfaultfd_unmap_prep(vma, start, end, uf);
		if (error)
			return error;
	}

	/*
	 * unlock any mlock()ed ranges before detaching vmas
	 */
	if (mm->locked_vm) {
		struct vm_area_struct *tmp = vma;
		while (tmp && tmp->vm_start < end) {
			if (tmp->vm_flags & VM_LOCKED) {
				mm->locked_vm -= vma_pages(tmp);
				munlock_vma_pages_all(tmp);
			}

			tmp = tmp->vm_next;
		}
	}

	/* Detach vmas from rbtree */
	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
		downgrade = false;

	if (downgrade)
		mmap_write_downgrade(mm);

	unmap_region(mm, vma, prev, start, end);

	/* Fix up all other VM information */
	remove_vma_list(mm, vma);

	return downgrade ? 1 : 0;
}
2877
/* Unmap [start, start+len) without downgrading the mmap lock. */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	      struct list_head *uf)
{
	return __do_munmap(mm, start, len, uf, false);
}
2883
/*
 * Common helper for vm_munmap() and munmap(2): take the mmap write lock,
 * unmap the range, and release whichever lock mode __do_munmap() left us
 * holding.
 */
static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
{
	int ret;
	struct mm_struct *mm = current->mm;
	LIST_HEAD(uf);

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __do_munmap(mm, start, len, &uf, downgrade);
	/*
	 * Returning 1 indicates mmap_lock is downgraded.
	 * But 1 is not legal return value of vm_munmap() and munmap(), reset
	 * it to 0 before return.
	 */
	if (ret == 1) {
		mmap_read_unlock(mm);
		ret = 0;
	} else
		mmap_write_unlock(mm);

	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}
2908
/* In-kernel munmap that never downgrades the mmap lock. */
int vm_munmap(unsigned long start, size_t len)
{
	return __vm_munmap(start, len, false);
}
2913EXPORT_SYMBOL(vm_munmap);
2914
/* munmap(2): untag the pointer, note the event for profiling, and unmap. */
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	addr = untagged_addr(addr);
	profile_munmap(addr);
	return __vm_munmap(addr, len, true);
}
2921
2922
2923
2924
2925
/*
 * Emulation of the deprecated remap_file_pages() syscall: re-mmap the
 * requested window of an existing shared mapping at a new file offset.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long populate = 0;
	unsigned long ret = -EINVAL;
	struct file *file;

	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
		     current->comm, current->pid);

	if (prot)
		return ret;
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* empty range, or start + size wrapped around? */
	if (start + size <= start)
		return ret;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vma = find_vma(mm, start);

	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (start < vma->vm_start)
		goto out;

	if (start + size > vma->vm_end) {
		struct vm_area_struct *next;

		/*
		 * The range spans several vmas: they must form a contiguous
		 * run backed by the same file with identical flags.
		 */
		for (next = vma->vm_next; next; next = next->vm_next) {
			/* hole between vmas? */
			if (next->vm_start != next->vm_prev->vm_end)
				goto out;

			if (next->vm_file != vma->vm_file)
				goto out;

			if (next->vm_flags != vma->vm_flags)
				goto out;

			if (start + size <= next->vm_end)
				break;
		}

		if (!next)
			goto out;
	}

	/* Reconstruct prot from the existing mapping's flags. */
	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;

	flags &= MAP_NONBLOCK;
	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
	if (vma->vm_flags & VM_LOCKED) {
		struct vm_area_struct *tmp;
		flags |= MAP_LOCKED;

		/*
		 * drop PG_Mlocked flag for over-mapped range
		 *
		 * NOTE(review): since start >= vma->vm_start at this point,
		 * the loop condition "tmp->vm_start >= start + size" appears
		 * to be false on the first iteration, making this loop a
		 * no-op.  It looks like it was meant to walk the vmas
		 * overlapping [start, start + size) -- verify against
		 * upstream before relying on this munlock happening.
		 */
		for (tmp = vma; tmp->vm_start >= start + size;
				tmp = tmp->vm_next) {
			/*
			 * Split pmd and munlock page on the border
			 * of the range.
			 */
			vma_adjust_trans_huge(tmp, start, start + size, 0);

			munlock_vma_pages_range(tmp,
					max(tmp->vm_start, start),
					min(tmp->vm_end, start + size));
		}
	}

	/* hold an extra reference across do_mmap() */
	file = get_file(vma->vm_file);
	ret = do_mmap(vma->vm_file, start, size,
			prot, flags, pgoff, &populate, NULL);
	fput(file);
out:
	mmap_write_unlock(mm);
	if (populate)
		mm_populate(ret, populate);
	if (!IS_ERR_VALUE(ret))
		ret = 0;
	return ret;
}
3021
3022
3023
3024
3025
3026
/*
 * do_brk_flags() - simplified do_mmap() for brk-style anonymous memory.
 * @addr:  page-aligned start of the range
 * @len:   length of the range
 * @flags: extra VM flags; only VM_EXEC is accepted, anything else -EINVAL
 * @uf:    list collecting userfaultfd unmap contexts for ranges removed
 *
 * Maps [addr, addr + len) as private anonymous memory, merging with an
 * adjacent compatible vma when possible.  Any old mappings in the range
 * are unmapped first.  Returns 0 on success or a negative errno.
 *
 * Caller must hold mmap_lock for writing.
 */
static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;
	unsigned long mapped_addr;

	/* Until we need other flags, refuse anything except VM_EXEC. */
	if ((flags & (~VM_EXEC)) != 0)
		return -EINVAL;
	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

	/* MAP_FIXED: validate the exact address rather than pick one. */
	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	error = mlock_future_check(mm, mm->def_flags, len);
	if (error)
		return error;

	/*
	 * Clear old maps covering the range; this also error-checks the
	 * range for us (find_vma_links() fails while an overlap remains).
	 */
	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
			      &rb_parent)) {
		if (do_munmap(mm, addr, len, uf))
			return -ENOMEM;
	}

	/* Check against address space limits AFTER clearing old maps. */
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Charge the commit accounting (VM_ACCOUNT was set above). */
	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */
	vma = vma_merge(mm, prev, addr, addr + len, flags,
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * Create a new vma for the anonymous mapping.  On failure the
	 * accounting charged above must be undone.
	 */
	vma = vm_area_alloc(mm);
	if (!vma) {
		vm_unacct_memory(len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	vma_set_anonymous(vma);
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;
	vma->vm_flags = flags;
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_link(mm, vma, prev, rb_link, rb_parent);
out:
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	mm->data_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vma->vm_flags |= VM_SOFTDIRTY;
	return 0;
}
3099
3100int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3101{
3102 struct mm_struct *mm = current->mm;
3103 unsigned long len;
3104 int ret;
3105 bool populate;
3106 LIST_HEAD(uf);
3107
3108 len = PAGE_ALIGN(request);
3109 if (len < request)
3110 return -ENOMEM;
3111 if (!len)
3112 return 0;
3113
3114 if (mmap_write_lock_killable(mm))
3115 return -EINTR;
3116
3117 ret = do_brk_flags(addr, len, flags, &uf);
3118 populate = ((mm->def_flags & VM_LOCKED) != 0);
3119 mmap_write_unlock(mm);
3120 userfaultfd_unmap_complete(mm, &uf);
3121 if (populate && !ret)
3122 mm_populate(addr, len);
3123 return ret;
3124}
3125EXPORT_SYMBOL(vm_brk_flags);
3126
/* vm_brk() - vm_brk_flags() with no extra VM flags. */
int vm_brk(unsigned long addr, unsigned long len)
{
	return vm_brk_flags(addr, len, 0);
}
EXPORT_SYMBOL(vm_brk);
3132
3133
/* Release all mmaps: tear down every vma and page table of a dying mm. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;

	/* mm's last user has gone, and it's about to be pulled down */
	mmu_notifier_release(mm);

	if (unlikely(mm_is_oom_victim(mm))) {
		/*
		 * Manually reap the mm to free as much memory as possible,
		 * then set MMF_OOM_SKIP so the OOM killer disregards this
		 * mm from now on.
		 *
		 * The empty write-lock/unlock pair after setting
		 * MMF_OOM_SKIP synchronizes with the oom reaper: once we
		 * pass this point the reaper cannot still be operating on
		 * this mm, so the teardown below cannot race with it.
		 */
		(void)__oom_reap_task_mm(mm);

		set_bit(MMF_OOM_SKIP, &mm->flags);
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}

	if (mm->locked_vm) {
		/* Undo mlock state on every VM_LOCKED vma before unmapping. */
		vma = mm->mmap;
		while (vma) {
			if (vma->vm_flags & VM_LOCKED)
				munlock_vma_pages_all(vma);
			vma = vma->vm_next;
		}
	}

	arch_exit_mmap(mm);

	vma = mm->mmap;
	if (!vma)	/* Can happen if dup_mmap() received an OOM */
		return;

	lru_add_drain();
	flush_cache_mm(mm);
	tlb_gather_mmu(&tlb, mm, 0, -1);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use 0/-1 to make sure every VMA in the mm gets unmapped. */
	unmap_vmas(&tlb, vma, 0, -1);
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, 0, -1);

	/*
	 * Walk the list again, actually closing and freeing the vmas,
	 * with preemption enabled and without holding any MM locks;
	 * settle the commit accounting at the end.
	 */
	while (vma) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma = remove_vma(vma);
		cond_resched();
	}
	vm_unacct_memory(nr_accounted);
}
3203
3204
3205
3206
3207
/*
 * Insert vm structure into the process's vma list / rb-tree (and, via
 * vma_link(), into the file's i_mmap tree when vm_file is set).
 *
 * Returns 0 on success, -ENOMEM if the range overlaps an existing
 * mapping or the commit-accounting charge fails.
 */
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		return -ENOMEM;
	if ((vma->vm_flags & VM_ACCOUNT) &&
	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
		return -ENOMEM;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But set vm_pgoff now to reflect the virtual start
	 * address, so that merges and splits can happen seamlessly
	 * using the existing file-pgoff checks and manipulations (the
	 * same convention used by do_mmap() and do_brk_flags()).
	 */
	if (vma_is_anonymous(vma)) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}
3240
3241
3242
3243
3244
/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 * Sets *need_rmap_locks when the rmap traversal order requires the
 * caller to hold the rmap locks while moving ptes.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	bool faulted_in_anon_vma = true;

	/*
	 * If the anonymous vma has not yet been faulted, update the new
	 * pgoff to match the new location, to increase its chance of
	 * merging with a neighbour.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
		return NULL;	/* destination range is still occupied */
	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			vma->vm_userfaultfd_ctx);
	if (new_vma) {
		/*
		 * The source vma may have been merged into new_vma.
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen.  mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe.  It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		/* No merge possible: duplicate the vma by hand. */
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		new_vma->vm_start = addr;
		new_vma->vm_end = addr + len;
		new_vma->vm_pgoff = pgoff;
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		vma_link(mm, new_vma, prev, rb_link, rb_parent);
		*need_rmap_locks = false;
	}
	return new_vma;

out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	vm_area_free(new_vma);
out:
	return NULL;
}
3319
3320
3321
3322
3323
3324bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3325{
3326 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3327 return false;
3328
3329 if (is_data_mapping(flags) &&
3330 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3331
3332 if (rlimit(RLIMIT_DATA) == 0 &&
3333 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3334 return true;
3335
3336 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3337 current->comm, current->pid,
3338 (mm->data_vm + npages) << PAGE_SHIFT,
3339 rlimit(RLIMIT_DATA),
3340 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3341
3342 if (!ignore_rlimit_data)
3343 return false;
3344 }
3345
3346 return true;
3347}
3348
3349void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3350{
3351 mm->total_vm += npages;
3352
3353 if (is_exec_mapping(flags))
3354 mm->exec_vm += npages;
3355 else if (is_stack_mapping(flags))
3356 mm->stack_vm += npages;
3357 else if (is_data_mapping(flags))
3358 mm->data_vm += npages;
3359}
3360
3361static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3362
3363
3364
3365
/*
 * Intentionally empty ->close.  NOTE(review): merely having a ->close
 * hook also prevents this vma from being merged with neighbours —
 * presumably intentional for special mappings; confirm.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
}
3369
3370static const char *special_mapping_name(struct vm_area_struct *vma)
3371{
3372 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3373}
3374
3375static int special_mapping_mremap(struct vm_area_struct *new_vma)
3376{
3377 struct vm_special_mapping *sm = new_vma->vm_private_data;
3378
3379 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3380 return -EFAULT;
3381
3382 if (sm->mremap)
3383 return sm->mremap(sm, new_vma);
3384
3385 return 0;
3386}
3387
/* vm_ops for mappings installed via _install_special_mapping(). */
static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.mremap = special_mapping_mremap,
	.name = special_mapping_name,
	/*
	 * Explicitly no ->access: NOTE(review) this blocks remote
	 * (access_process_vm-style) access to these pages — presumably
	 * to protect vDSO/VVAR contents; confirm.
	 */
	.access = NULL,
};
3396
/*
 * vm_ops for the older install_special_mapping() API, where
 * vm_private_data is a bare NULL-terminated page array rather than a
 * struct vm_special_mapping.
 */
static const struct vm_operations_struct legacy_special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
};
3401
3402static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3403{
3404 struct vm_area_struct *vma = vmf->vma;
3405 pgoff_t pgoff;
3406 struct page **pages;
3407
3408 if (vma->vm_ops == &legacy_special_mapping_vmops) {
3409 pages = vma->vm_private_data;
3410 } else {
3411 struct vm_special_mapping *sm = vma->vm_private_data;
3412
3413 if (sm->fault)
3414 return sm->fault(sm, vmf->vma, vmf);
3415
3416 pages = sm->pages;
3417 }
3418
3419 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3420 pgoff--;
3421
3422 if (*pages) {
3423 struct page *page = *pages;
3424 get_page(page);
3425 vmf->page = page;
3426 return 0;
3427 }
3428
3429 return VM_FAULT_SIGBUS;
3430}
3431
3432static struct vm_area_struct *__install_special_mapping(
3433 struct mm_struct *mm,
3434 unsigned long addr, unsigned long len,
3435 unsigned long vm_flags, void *priv,
3436 const struct vm_operations_struct *ops)
3437{
3438 int ret;
3439 struct vm_area_struct *vma;
3440
3441 vma = vm_area_alloc(mm);
3442 if (unlikely(vma == NULL))
3443 return ERR_PTR(-ENOMEM);
3444
3445 vma->vm_start = addr;
3446 vma->vm_end = addr + len;
3447
3448 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
3449 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3450
3451 vma->vm_ops = ops;
3452 vma->vm_private_data = priv;
3453
3454 ret = insert_vm_struct(mm, vma);
3455 if (ret)
3456 goto out;
3457
3458 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3459
3460 perf_event_mmap(vma);
3461
3462 return vma;
3463
3464out:
3465 vm_area_free(vma);
3466 return ERR_PTR(ret);
3467}
3468
3469bool vma_is_special_mapping(const struct vm_area_struct *vma,
3470 const struct vm_special_mapping *sm)
3471{
3472 return vma->vm_private_data == sm &&
3473 (vma->vm_ops == &special_mapping_vmops ||
3474 vma->vm_ops == &legacy_special_mapping_vmops);
3475}
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
/*
 * _install_special_mapping() - insert a special mapping described by
 * @spec (pages, name, optional fault/mremap hooks) into @mm at
 * [addr, addr + len).  Returns the new vma or an ERR_PTR on failure.
 */
struct vm_area_struct *_install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, const struct vm_special_mapping *spec)
{
	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
					&special_mapping_vmops);
}
3494
3495int install_special_mapping(struct mm_struct *mm,
3496 unsigned long addr, unsigned long len,
3497 unsigned long vm_flags, struct page **pages)
3498{
3499 struct vm_area_struct *vma = __install_special_mapping(
3500 mm, addr, len, vm_flags, (void *)pages,
3501 &legacy_special_mapping_vmops);
3502
3503 return PTR_ERR_OR_ZERO(vma);
3504}
3505
/* Serializes mm_take_all_locks() callers across all mms. */
static DEFINE_MUTEX(mm_all_locks_mutex);
3507
/*
 * Take the anon_vma root lock for mm_take_all_locks(), using the LSB of
 * the root's rb_root node pointer as an "already locked" marker so each
 * shared anon_vma root is taken only once.
 */
static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of the node pointer can't change from under us
		 * because we hold mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely set the marker bit after taking the
		 * anon_vma->root->rwsem.  If some other vma in this mm
		 * shares the same root we won't take the lock again.
		 *
		 * No atomic instruction is needed here: the bit can't
		 * change from under us thanks to the rwsem we just took
		 * (hence __test_and_set_bit).
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}
3530
/*
 * Take a file mapping's i_mmap_rwsem for mm_take_all_locks(), using the
 * AS_MM_ALL_LOCKS bit in mapping->flags to take each mapping only once.
 */
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because we
		 * hold mm_all_locks_mutex.
		 *
		 * Unlike vm_lock_anon_vma(), the bit is set BEFORE the
		 * lock is taken, and atomically: other CPUs may be
		 * flipping other bits in mapping->flags in parallel.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
/*
 * mm_take_all_locks() - lock against the VM for all pte/vma/mm related
 * operations that could ever happen on this mm.
 *
 * The caller must already hold mmap_lock for writing and must not drop
 * it until mm_drop_all_locks() returns.  All callers are serialized by
 * mm_all_locks_mutex, which gives the per-mapping / per-anon_vma locks
 * a stable, well-defined order (hugetlb file mappings, then other file
 * mappings, then anon_vmas — NOTE(review): this hugetlb-first ordering
 * is a lock-ordering requirement; see vm_lock_mapping()).
 *
 * Returns 0 on success, or -EINTR (with everything unlocked again) if a
 * signal arrived part-way through.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	/* mmap_lock must already be held for writing. */
	BUG_ON(mmap_read_trylock(mm));

	mutex_lock(&mm_all_locks_mutex);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
3625
/*
 * Undo vm_lock_anon_vma(): clear the LSB marker on the anon_vma root
 * and release its rwsem.  Roots without the marker were never locked
 * (or were already released via another vma sharing the root).
 */
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB can't change from under us because we hold
		 * mm_all_locks_mutex, and no atomic instruction is
		 * needed while we still hold anon_vma->root->rwsem
		 * (hence __test_and_clear_bit).  Clear the bit before
		 * dropping the lock.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}
3647
/*
 * Undo vm_lock_mapping(): drop i_mmap_rwsem and clear AS_MM_ALL_LOCKS.
 * The clear is atomic because other flag bits may change concurrently.
 */
static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}
3661
3662
3663
3664
3665
/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.  Releases every lock taken by
 * mm_take_all_locks() (including after a partial, signal-interrupted
 * attempt — the per-lock marker bits make the release idempotent).
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(mmap_read_trylock(mm));
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}
3684
3685
3686
3687
/*
 * initialise the percpu counter for VM committed-memory accounting.
 * Note: the init call must stay outside VM_BUG_ON(), which does not
 * evaluate its argument when CONFIG_DEBUG_VM is disabled.
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
}
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706static int init_user_reserve(void)
3707{
3708 unsigned long free_kbytes;
3709
3710 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3711
3712 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3713 return 0;
3714}
3715subsys_initcall(init_user_reserve);
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727static int init_admin_reserve(void)
3728{
3729 unsigned long free_kbytes;
3730
3731 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3732
3733 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3734 return 0;
3735}
3736subsys_initcall(init_admin_reserve);
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
/*
 * Memory-hotplug callback: keep the user/admin memory reserves sane as
 * memory is onlined or offlined.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/*
		 * Recompute only when the current value is below the
		 * default cap (1UL << 17 KiB): a larger value means the
		 * operator set it explicitly — leave it alone.
		 */
		tmp = sysctl_user_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 17))
			init_user_reserve();

		/* Same policy for the admin reserve (cap 1UL << 13 KiB). */
		tmp = sysctl_admin_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 13))
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

		/* Shrink reserves that no longer fit in free memory. */
		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
3794
/* Notifier block registered by init_reserve_notifier() below. */
static struct notifier_block reserve_mem_nb = {
	.notifier_call = reserve_mem_notifier,
};
3798
3799static int __meminit init_reserve_notifier(void)
3800{
3801 if (register_hotmemory_notifier(&reserve_mem_nb))
3802 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3803
3804 return 0;
3805}
3806subsys_initcall(init_reserve_notifier);
3807