// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

/* Classify the kind of remap operation being performed. */
enum mremap_type {
	MREMAP_INVALID,		/* Initial state. */
	MREMAP_NO_RESIZE,	/* old_len == new_len, if not moved, do nothing. */
	MREMAP_SHRINK,		/* old_len > new_len. */
	MREMAP_EXPAND,		/* old_len < new_len. */
};

/*
 * Describes a VMA mremap() operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the operation, however these values will
 * always accurately reflect the remap (for instance, we may adjust lengths and
 * delta to account for hugetlb alignment).
 */
struct vma_remap_struct {
	/* User-specified state. */
	unsigned long addr;		/* User-specified address from which we remap. */
	unsigned long old_len;		/* Length of range being remapped. */
	unsigned long new_len;		/* Desired new length of mapping. */
	const unsigned long flags;	/* User-specified MREMAP_* flags. */
	unsigned long new_addr;		/* Optionally, the desired new address. */

	/* uffd state. */
	struct vm_userfaultfd_ctx *uf;
	struct list_head *uf_unmap_early;
	struct list_head *uf_unmap;

	/* VMA state, determined in do_mremap(). */
	struct vm_area_struct *vma;

	/* Internal state, determined in do_mremap(). */
	unsigned long delta;		/* Absolute delta of old_len, new_len. */
	bool populate_expand;		/* mlock()'d expanded, must populate. */
	enum mremap_type remap_type;	/* expand, shrink, etc. */
	bool mmap_locked;		/* Is mm currently write-locked? */
	unsigned long charged;		/* If VM_ACCOUNT, # pages to account. */
	bool vmi_needs_invalidate;	/* Is the VMA iterator invalidated? */
};

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
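
/*
 * Note the asymmetry between the helpers above: the get_old_*() walks bail
 * out on empty entries, since an empty source level means there is nothing
 * to move, while the alloc_new_*() walks populate missing levels at the
 * destination. The destination PTE table itself is allocated later, via
 * pte_alloc() in move_page_tables().
 */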

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, pte_t pte, int max_nr)
{
	struct folio *folio;

	if (max_nr == 1)
		return 1;

	/* Avoid expensive folio lookup if we stand no chance of benefit. */
	if (pte_batch_hint(ptep, pte) == 1)
		return 1;

	folio = vm_normal_folio(vma, addr, pte);
	if (!folio || !folio_test_large(folio))
		return 1;

	return folio_pte_batch(folio, ptep, pte, max_nr);
}
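
/*
 * Illustrative example: on arm64 with contpte-mapped 64 KiB folios, a single
 * mremap_folio_pte_batch() call can report a batch of 16 PTEs, letting
 * move_ptes() below perform one get_and_clear_ptes()/set_ptes() pair rather
 * than sixteen per-PTE operations (figures are an example, not taken from
 * this file).
 */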

static int move_ptes(struct pagetable_move_control *pmc,
		unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
{
	struct vm_area_struct *vma = pmc->old;
	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_ptep, *new_ptep;
	pte_t old_pte, pte;
	pmd_t dummy_pmdval;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long old_addr = pmc->old_addr;
	unsigned long new_addr = pmc->new_addr;
	unsigned long old_end = old_addr + extent;
	unsigned long len = old_end - old_addr;
	int max_nr_ptes;
	int nr_ptes;
	int err = 0;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures that if the page is only
	 *   mapped in one vma, rmap will always observe it whether in the
	 *   old or the new address range, so no rmap walk can miss the move.
	 */
	if (pmc->need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptep = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_ptep) {
		err = -EAGAIN;
		goto out;
	}

	/*
	 * Now new_pte is none, so hpage_collapse_scan_file() path can not find
	 * this by traversing file->f_mapping, so there is no concurrency with
	 * retract_page_tables(). In addition, we already hold the exclusive
	 * mmap_lock, so this new_pte page is stable, so there is no need to
	 * get pmdval and do pmd_same() check.
	 */
	new_ptep = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
					    &new_ptl);
	if (!new_ptep) {
		pte_unmap_unlock(old_ptep, old_ptl);
		err = -EAGAIN;
		goto out;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
	     new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
		VM_WARN_ON_ONCE(!pte_none(*new_ptep));

		nr_ptes = 1;
		max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT;
		old_pte = ptep_get(old_ptep);
		if (pte_none(old_pte))
			continue;

		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with folio_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(old_pte)) {
			nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
							 old_pte, max_nr_ptes);
			force_flush = true;
		}
		pte = get_and_clear_ptes(mm, old_addr, old_ptep, nr_ptes);
		pte = move_pte(pte, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);

		if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
			pte_clear(mm, new_addr, new_ptep);
		else {
			if (need_clear_uffd_wp) {
				if (pte_present(pte))
					pte = pte_clear_uffd_wp(pte);
				else if (is_swap_pte(pte))
					pte = pte_swp_clear_uffd_wp(pte);
			}
			set_ptes(mm, new_addr, new_ptep, pte, nr_ptes);
		}
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_ptep - 1);
	pte_unmap_unlock(old_ptep - 1, old_ptl);
out:
	if (pmc->need_rmap_locks)
		drop_rmap_locks(vma);
	return err;
}

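/*
 * Architectures may provide their own arch_supports_page_table_move() (and
 * define the macro to suppress this fallback); the default below simply keys
 * off whether the PMD/PUD-level move optimisations were opted into at
 * configuration time.
 */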
#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
{
	/*
	 * If we are moving a VMA that has uffd-wp registered but with
	 * remap events disabled (new VMA will not be registered with uffd),
	 * we need to ensure that the uffd-wp state is cleared from all
	 * pgtables. This means recursing into lower page tables in
	 * move_page_tables().
	 *
	 * We might get called with VMAs reversed when recovering from a
	 * failed page table move. In that case, the
	 * "old"-but-actually-"originally new" VMA during recovery will not
	 * have a uffd context. Recursing into lower page tables during the
	 * original move but not during the recovery move will cause trouble,
	 * because we run into already-existing page tables. So check both
	 * VMAs.
	 */
	return !vma_has_uffd_without_event_remap(pmc->old) &&
		!vma_has_uffd_without_event_remap(pmc->new);
}

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct pagetable_move_control *pmc,
		pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	bool res = false;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	if (!uffd_supports_page_table_move(pmc))
		return false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't fix
	 * this".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	pmd = *old_pmd;

	/* Racing with collapse? */
	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
		goto out_unlock;
	/* Clear the pmd */
	pmd_clear(old_pmd);
	res = true;

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
out_unlock:
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return res;
}
#else
static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
		pmd_t *old_pmd, pmd_t *new_pmd)
{
	return false;
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	if (!uffd_supports_page_table_move(pmc))
		return false;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	return false;
}
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, pmc->new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
		struct pagetable_move_control *pmc)
{
	unsigned long next, extent, mask, size;
	unsigned long old_addr = pmc->old_addr;
	unsigned long old_end = pmc->old_end;
	unsigned long new_addr = pmc->new_addr;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
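
/*
 * Worked example (figures illustrative): with PMD_SIZE == 2 MiB, old_addr ==
 * 0x1ff000, old_end == 0x600000 and new_addr == 0x3ff000, the source hits a
 * PMD boundary at 0x200000, so get_extent(NORMAL_PMD, ...) returns 0x1000 -
 * only 4 KiB may be moved before a boundary is crossed, and the destination
 * imposes the same limit.
 */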

/*
 * Should move_pgt_entry() acquire the rmap locks? This is either expressed in
 * the PMC, or overridden in the case of normal, larger page tables.
 */
static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
				   enum pgt_entry entry)
{
	switch (entry) {
	case NORMAL_PMD:
	case NORMAL_PUD:
		return true;
	default:
		return pmc->need_rmap_locks;
	}
}

/*
 * Attempts to speedup the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(struct pagetable_move_control *pmc,
			   enum pgt_entry entry, void *old_entry, void *new_entry)
{
	bool moved = false;
	bool need_rmap_locks = should_take_rmap_locks(pmc, entry);

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(pmc->old);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(pmc, old_entry, new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(pmc, old_entry, new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(pmc, old_entry, new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(pmc->old);

	return moved;
}

/*
 * A helper to check if aligning down is OK. The aligned address should fall
 * on *no mapping*. For the stack moving down, that's a special move within
 * the VMA that is created to span the source and destination of the move,
 * so we have to check if the aligned address is within the VMA.
 */
static bool can_align_down(struct pagetable_move_control *pmc,
			   struct vm_area_struct *vma, unsigned long addr_to_align,
			   unsigned long mask)
{
	unsigned long addr_masked = addr_to_align & mask;

	/*
	 * If @addr_to_align of either source or destination is not the
	 * beginning of the corresponding VMA, we can't align down or we will
	 * destroy part of the current mapping.
	 */
	if (!pmc->for_stack && vma->vm_start != addr_to_align)
		return false;

	/* In the stack case we explicitly permit in-VMA alignment. */
	if (pmc->for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}

/*
 * Determine if we are in fact able to realign for efficiency to a higher page
 * table boundary.
 */
static bool can_realign_addr(struct pagetable_move_control *pmc,
			     unsigned long pagetable_mask)
{
	unsigned long align_mask = ~pagetable_mask;
	unsigned long old_align = pmc->old_addr & align_mask;
	unsigned long new_align = pmc->new_addr & align_mask;
	unsigned long pagetable_size = align_mask + 1;
	unsigned long old_align_next = pagetable_size - old_align;

	/*
	 * We don't want to have to go hunting for VMAs from the end of the old
	 * VMA to the next page table boundary, also we want to make sure the
	 * operation is worthwhile.
	 *
	 * So ensure that we only perform this realignment if the end of the
	 * range being copied reaches or crosses the page table boundary.
	 *
	 * boundary                        boundary
	 *    .<- old_align ->                .
	 *    .              |----------------.-----------|
	 *    .              |          vma   .           |
	 *    .              |----------------.-----------|
	 *    .         <----------------------------------->
	 *    .                          len_in
	 *    <------------------------------->
	 *    .         pagetable_size        .
	 *    .              <---------------->
	 *    .             old_align_next    .
	 */
	if (pmc->len_in < old_align_next)
		return false;

	/* Skip if the addresses are already aligned. */
	if (old_align == 0)
		return false;

	/* Only realign if the new and old addresses are mutually aligned. */
	if (old_align != new_align)
		return false;

	/* Ensure realignment doesn't cause overlap with existing mappings. */
	if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
	    !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
		return false;

	return true;
}

/*
 * Opportunistically realign to specified boundary for faster copy.
 *
 * Consider an mremap() of a VMA with page table boundaries as below, and no
 * preceding VMAs from the lower page table boundary to the start of the VMA,
 * with the end of the range reaching or crossing the page table boundary.
 *
 *   boundary                        boundary
 *      .              |----------------.-----------|
 *      .              |          vma   .           |
 *      .              |----------------.-----------|
 *      .
 *
 * In this case, we can expand the range to the preceding page table boundary:
 *
 *   boundary                        boundary
 *      |---------------------------------.-----------|
 *      |                            vma  .           |
 *      |---------------------------------.-----------|
 *      .
 *
 * The page table entries in the expanded portion are empty (there are no
 * preceding VMAs), so the move can now be performed at the PMD or PUD level
 * across the whole span, rather than falling back to per-PTE copies at the
 * unaligned head of the range.
 *
 * The same realignment is applied to the destination address, which must be
 * mutually aligned with the source for this to be possible.
 */
static void try_realign_addr(struct pagetable_move_control *pmc,
			     unsigned long pagetable_mask)
{
	if (!can_realign_addr(pmc, pagetable_mask))
		return;

	/*
	 * Simply align to page table boundaries. Note that we do NOT update
	 * pmc->old_end, so the length of the range being moved
	 * (pmc->old_end - pmc->old_addr) grows accordingly; the extra span
	 * covers only empty page table entries.
	 */
	pmc->old_addr &= pagetable_mask;
	pmc->new_addr &= pagetable_mask;
}

/* Is the page table move operation done? */
static bool pmc_done(struct pagetable_move_control *pmc)
{
	return pmc->old_addr >= pmc->old_end;
}

/* Advance to the next section of page tables to be moved. */
static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
{
	pmc->old_addr += extent;
	pmc->new_addr += extent;
}

/*
 * Determine how many bytes in the specified input range have had their page
 * tables moved so far.
 */
static unsigned long pmc_progress(struct pagetable_move_control *pmc)
{
	unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
	unsigned long old_addr = pmc->old_addr;

	/*
	 * Prevent negative return values when {old,new}_addr was realigned
	 * but we broke out of the loop in move_page_tables() for the first
	 * PMD itself.
	 */
	return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
}

unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	unsigned long extent;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;
	struct mm_struct *mm = pmc->old->vm_mm;

	if (!pmc->len_in)
		return 0;

	if (is_vm_hugetlb_page(pmc->old))
		return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
						pmc->new_addr, pmc->len_in);

	/*
	 * If possible, realign addresses to the PMD boundary so we can move
	 * whole PMDs rather than copying individual PTEs.
	 */
	try_realign_addr(pmc, PMD_MASK);

	flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
				pmc->old_addr, pmc->old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving
		 * at the PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, pmc);

		old_pud = get_old_pud(mm, pmc->old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(mm, pmc->new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
				continue;
		}

		extent = get_extent(NORMAL_PMD, pmc);
		old_pmd = get_old_pmd(mm, pmc->old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(mm, pmc->new_addr);
		if (!new_pmd)
			break;
again:
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
				continue;
			split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
				continue;
		}
		if (pmd_none(*old_pmd))
			continue;
		if (pte_alloc(pmc->new->vm_mm, new_pmd))
			break;
		if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
			goto again;
	}

	mmu_notifier_invalidate_range_end(&range);

	return pmc_progress(pmc);
}

/* Set vrm->delta to the difference in VMA size specified by user. */
static void vrm_set_delta(struct vma_remap_struct *vrm)
{
	vrm->delta = abs_diff(vrm->old_len, vrm->new_len);
}

/* Determine what kind of remap this is - shrink, expand or no resize at all. */
static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm)
{
	if (vrm->delta == 0)
		return MREMAP_NO_RESIZE;

	if (vrm->old_len > vrm->new_len)
		return MREMAP_SHRINK;

	return MREMAP_EXPAND;
}

/*
 * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs
 * overlapping?
 */
static bool vrm_overlaps(struct vma_remap_struct *vrm)
{
	unsigned long start_old = vrm->addr;
	unsigned long start_new = vrm->new_addr;
	unsigned long end_old = vrm->addr + vrm->old_len;
	unsigned long end_new = vrm->new_addr + vrm->new_len;

	/*
	 * start_old    end_old
	 *     |-----------|
	 *     |           |
	 *     |-----------|
	 *             |-------------|
	 *             |             |
	 *             |-------------|
	 *         start_new      end_new
	 *
	 * The two ranges overlap precisely when each begins before the other
	 * ends.
	 */
	if (end_old > start_new && end_new > start_old)
		return true;

	return false;
}
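
/*
 * Illustrative example: addr = 0x1000, old_len = 0x3000, new_addr = 0x3000,
 * new_len = 0x2000 yields [0x1000, 0x4000) versus [0x3000, 0x5000); both
 * conditions hold, so the ranges overlap and the move is disallowed.
 */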

/* Do the mremap() flags require that the new_addr parameter be specified? */
static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
{
	return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP);
}

/*
 * Find an unmapped area for the requested vrm->new_addr.
 *
 * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only
 * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to
 * mmap(), otherwise this is equivalent to mmap() specifying a NULL address.
 *
 * Returns 0 on success (with vrm->new_addr updated), or an error code upon
 * failure.
 */
static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
{
	struct vm_area_struct *vma = vrm->vma;
	unsigned long map_flags = 0;
	/* Page Offset _into_ the VMA. */
	pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff_t pgoff = vma->vm_pgoff + internal_pgoff;
	unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0;
	unsigned long res;

	if (vrm->flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff,
				map_flags);
	if (IS_ERR_VALUE(res))
		return res;

	vrm->new_addr = res;
	return 0;
}

/*
 * Keep track of pages which have been added to the memory mapping. If the VMA
 * is accounted, also check to see if there is sufficient memory.
 *
 * Returns true on success, false if insufficient memory to charge.
 */
static bool vrm_calc_charge(struct vma_remap_struct *vrm)
{
	unsigned long charged;

	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
		return true;

	/*
	 * If we don't unmap the old mapping, then we account the entirety of
	 * the length of the new one. Otherwise it is just the delta in size.
	 */
	if (vrm->flags & MREMAP_DONTUNMAP)
		charged = vrm->new_len >> PAGE_SHIFT;
	else
		charged = vrm->delta >> PAGE_SHIFT;

	/* This accounts 'charged' pages, but will fail if need be. */
	if (security_vm_enough_memory_mm(current->mm, charged))
		return false;

	vrm->charged = charged;
	return true;
}
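
/*
 * Illustrative example: growing an accounted private 16 KiB mapping to 64 KiB
 * charges only the 48 KiB delta (12 pages with 4 KiB pages), whereas an
 * MREMAP_DONTUNMAP move of an accounted mapping charges the full new length,
 * since the old mapping remains in place.
 */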

/*
 * An error has occurred so we will not be using vrm->charged memory. Unaccount
 * this memory if the VMA is accounted.
 */
static void vrm_uncharge(struct vma_remap_struct *vrm)
{
	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
		return;

	vm_unacct_memory(vrm->charged);
	vrm->charged = 0;
}

/*
 * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to
 * account for 'bytes' memory used, and if locked, indicate this in the VRM so
 * we can handle this correctly later.
 */
static void vrm_stat_account(struct vma_remap_struct *vrm,
			     unsigned long bytes)
{
	unsigned long pages = bytes >> PAGE_SHIFT;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = vrm->vma;

	vm_stat_account(mm, vma->vm_flags, pages);
	if (vma->vm_flags & VM_LOCKED)
		mm->locked_vm += pages;
}

/*
 * Perform checks before attempting to move a VMA, and update state as
 * required in preparation for the move.
 */
static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
{
	unsigned long err = 0;
	struct vm_area_struct *vma = vrm->vma;
	unsigned long old_addr = vrm->addr;
	unsigned long old_len = vrm->old_len;
	vm_flags_t dummy = vma->vm_flags;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (current->mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &dummy);
	if (err)
		return err;

	return 0;
}

/*
 * Unmap source VMA for VMA move, turning it from a copy to a move, being
 * careful to ensure we do not underflow memory account while doing so if an
 * accountable move.
 *
 * This is best effort, if we fail to unmap then we simply try to correct
 * accounting and exit.
 */
static void unmap_source_vma(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = vrm->addr;
	unsigned long len = vrm->old_len;
	struct vm_area_struct *vma = vrm->vma;
	VMA_ITERATOR(vmi, mm, addr);
	int err;
	unsigned long vm_start;
	unsigned long vm_end;
	/*
	 * It might seem odd that we check for MREMAP_DONTUNMAP here, given
	 * this function implies that we unmap the original VMA, which seems
	 * contradictory.
	 *
	 * However, this occurs when this operation was attempted and an error
	 * arose, in which case we _do_ wish to unmap the _new_ VMA, which
	 * means we actually _do_ want it to count as accountable.
	 */
	bool accountable_move = (vma->vm_flags & VM_ACCOUNT) &&
		!(vrm->flags & MREMAP_DONTUNMAP);

	/*
	 * copy_vma() and the merge/allocation it performs do not adjust
	 * accounting - the charge simply travels with the moved pages. Were
	 * we to unmap the source VMA with VM_ACCOUNT still set, its pages
	 * would be unaccounted a second time and the charge would underflow.
	 *
	 * So, for an accountable move, clear VM_ACCOUNT on the source VMA
	 * before unmapping it, and restore the flag afterwards on whatever
	 * portions of the original VMA the unmap left behind. Note the
	 * original start and end first, as the unmap invalidates (and may
	 * have split) the VMA.
	 */
	if (accountable_move) {
		vm_flags_clear(vma, VM_ACCOUNT);
		/* We are about to split vma. */
		vm_start = vma->vm_start;
		vm_end = vma->vm_end;
	}

	err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, false);
	vrm->vma = NULL; /* Invalidated. */
	vrm->vmi_needs_invalidate = true;
	if (err) {
		/* OOM: unable to split vma, just get accounts right */
		vm_acct_memory(len >> PAGE_SHIFT);
		return;
	}

	/*
	 * If we mremap() from a VMA like this:
	 *
	 *    addr  end
	 *     |     |
	 *     v     v
	 * |-------------|
	 * |             |
	 * |-------------|
	 *
	 * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above
	 * we'll end up with:
	 *
	 *    addr  end
	 *     |     |
	 *     v     v
	 * |---|     |---|
	 * | A |     | B |
	 * |---|     |---|
	 *
	 * The VMI is still pointing at addr, so vma_prev() will give us A,
	 * and vma_next() will give us B.
	 *
	 * A and B are remnants of the original VMA whose accounting was never
	 * released, so each must have VM_ACCOUNT restored.
	 */
	if (accountable_move) {
		unsigned long end = addr + len;

		if (vm_start < addr) {
			struct vm_area_struct *prev = vma_prev(&vmi);

			vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */
		}

		if (vm_end > end) {
			struct vm_area_struct *next = vma_next(&vmi);

			vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */
		}
	}
}

/*
 * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the
 * process. Additionally handle an error occurring on moving of page tables,
 * in which case we reset vrm state to cause the new VMA to be unmapped
 * instead of the old one.
 *
 * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an
 * error code.
 */
static int copy_vma_and_data(struct vma_remap_struct *vrm,
			     struct vm_area_struct **new_vma_ptr)
{
	unsigned long internal_offset = vrm->addr - vrm->vma->vm_start;
	unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT;
	unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff;
	unsigned long moved_len;
	struct vm_area_struct *vma = vrm->vma;
	struct vm_area_struct *new_vma;
	int err = 0;
	PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);

	new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff,
			   &pmc.need_rmap_locks);
	if (!new_vma) {
		vrm_uncharge(vrm);
		*new_vma_ptr = NULL;
		return -ENOMEM;
	}
	/* By merging, copy_vma() may have invalidated any iterator in use. */
	if (vma != vrm->vma)
		vrm->vmi_needs_invalidate = true;

	vrm->vma = vma;
	pmc.old = vma;
	pmc.new = new_vma;

	moved_len = move_page_tables(&pmc);
	if (moved_len < vrm->old_len)
		err = -ENOMEM;
	else if (vma->vm_ops && vma->vm_ops->mremap)
		err = vma->vm_ops->mremap(new_vma);

	if (unlikely(err)) {
		PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr,
			       vrm->addr, moved_len);

		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		pmc_revert.need_rmap_locks = true;
		move_page_tables(&pmc_revert);

		vrm->vma = new_vma;
		vrm->old_len = vrm->new_len;
		vrm->addr = vrm->new_addr;
	} else {
		mremap_userfaultfd_prep(new_vma, vrm->uf);
	}

	fixup_hugetlb_reservations(vma);

	*new_vma_ptr = new_vma;
	return err;
}

/*
 * Perform final tasks for MREMAP_DONTUNMAP operation, clearing mlock() and
 * account flags on remaining VMA by convention (it cannot be mlock()'d any
 * longer, as pages in range are no longer mapped), and removing anon_vma_chain
 * links from it (if the entire VMA was copied over).
 */
static void dontunmap_complete(struct vma_remap_struct *vrm,
			       struct vm_area_struct *new_vma)
{
	unsigned long start = vrm->addr;
	unsigned long end = vrm->addr + vrm->old_len;
	unsigned long old_start = vrm->vma->vm_start;
	unsigned long old_end = vrm->vma->vm_end;

	/*
	 * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
	 * vma.
	 */
	vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);

	/*
	 * anon_vma links of the old vma is no longer needed after its page
	 * table has been moved.
	 */
	if (new_vma != vrm->vma && start == old_start && end == old_end)
		unlink_anon_vmas(vrm->vma);

	/* Because we won't unmap we don't need to touch locked_vm. */
}

static unsigned long move_vma(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *new_vma;
	unsigned long hiwater_vm;
	int err;

	err = prep_move_vma(vrm);
	if (err)
		return err;

	/*
	 * If accounted, determine the number of bytes the operation will
	 * charge, and check that the charge can be satisfied.
	 */
	if (!vrm_calc_charge(vrm))
		return -ENOMEM;

	/* We don't want racing faults. */
	vma_start_write(vrm->vma);

	/* Perform copy step. */
	err = copy_vma_and_data(vrm, &new_vma);
	/*
	 * If we established the copied-to VMA, we attempt to recover from the
	 * error by setting the destination VMA to the source VMA and unmapping
	 * it below.
	 */
	if (err && !new_vma)
		return err;

	/*
	 * The accounting performed below transiently has both the old and new
	 * ranges mapped, which would spuriously bump the peak VM usage
	 * statistic. Save hiwater_vm here and restore it once the source has
	 * been unmapped (or, on error, the destination has been handled).
	 */
	hiwater_vm = mm->hiwater_vm;

	vrm_stat_account(vrm, vrm->new_len);
	if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP)))
		dontunmap_complete(vrm, new_vma);
	else
		unmap_source_vma(vrm);

	mm->hiwater_vm = hiwater_vm;

	return err ? (unsigned long)err : vrm->new_addr;
}

/*
 * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so
 * execute this, optionally dropping the mmap lock when we do so.
 *
 * This means unmapping the range [addr + new_len, addr + old_len), which
 * do_vmi_munmap() performs along with all required accounting.
 */
static unsigned long shrink_vma(struct vma_remap_struct *vrm,
				bool drop_lock)
{
	struct mm_struct *mm = current->mm;
	unsigned long unmap_start = vrm->addr + vrm->new_len;
	unsigned long unmap_bytes = vrm->delta;
	unsigned long res;
	VMA_ITERATOR(vmi, mm, unmap_start);

	VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK);

	res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes,
			    vrm->uf_unmap, drop_lock);
	vrm->vma = NULL; /* Invalidated. */
	if (res)
		return res;

	/*
	 * If we've not dropped the lock, then we should reload the VMA to
	 * replace the invalidated VMA with the one that may now have been
	 * split.
	 */
	if (drop_lock) {
		vrm->mmap_locked = false;
	} else {
		vrm->vma = vma_lookup(mm, vrm->addr);
		if (!vrm->vma)
			return -EFAULT;
	}

	return 0;
}

/*
 * mremap_to() - remap a vma to a new location.
 * Returns: The new address of the vma or an error.
 */
static unsigned long mremap_to(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	unsigned long err;

	if (vrm->flags & MREMAP_FIXED) {
		/*
		 * In mremap_to().
		 * VMA is moved to dst address, and munmap dst first.
		 * do_munmap will check if dst is sealed.
		 */
		err = do_munmap(mm, vrm->new_addr, vrm->new_len,
				vrm->uf_unmap_early);
		vrm->vma = NULL; /* Invalidated. */
		vrm->vmi_needs_invalidate = true;
		if (err)
			return err;

		/*
		 * If we remap a portion of a VMA elsewhere in the same VMA,
		 * this can invalidate the old VMA. Reset.
		 */
		vrm->vma = vma_lookup(mm, vrm->addr);
		if (!vrm->vma)
			return -EFAULT;
	}

	if (vrm->remap_type == MREMAP_SHRINK) {
		err = shrink_vma(vrm, /* drop_lock= */false);
		if (err)
			return err;

		/* Set up for the move now shrink has been executed. */
		vrm->old_len = vrm->new_len;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (vrm->flags & MREMAP_DONTUNMAP) {
		vm_flags_t vm_flags = vrm->vma->vm_flags;
		unsigned long pages = vrm->old_len >> PAGE_SHIFT;

		if (!may_expand_vm(mm, vm_flags, pages))
			return -ENOMEM;
	}

	err = vrm_set_new_addr(vrm);
	if (err)
		return err;

	return move_vma(vrm);
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
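
/*
 * Illustrative example: a VMA ending at 0x400000 with delta = 0x200000 is
 * expandable only if no other mapping intersects [0x400000, 0x600000) and
 * get_unmapped_area() raises no architecture- or policy-specific objection
 * to the enlarged range.
 */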

/* Determine whether we are able to expand the VMA in-place. */
static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm)
{
	/* Number of bytes from vrm->addr to end of VMA. */
	unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;

	/* If end of range aligns to end of VMA, we can just expand in-place. */
	if (suffix_bytes != vrm->old_len)
		return false;

	/* Check whether this is feasible. */
	if (!vma_expandable(vrm->vma, vrm->delta))
		return false;

	return true;
}

/*
 * We know we can expand the VMA in-place by delta pages, so do so.
 *
 * If we discover the VMA is locked, update mm_struct statistics accordingly
 * and indicate so to the caller.
 */
static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = vrm->vma;
	VMA_ITERATOR(vmi, mm, vma->vm_end);

	if (!vrm_calc_charge(vrm))
		return -ENOMEM;

	/*
	 * Function vma_merge_extend() is called on the
	 * extension we are adding to the already existing vma,
	 * vma_merge_extend() will merge this extension with the
	 * already existing vma (expand operation itself) and
	 * possibly also with the next vma if it becomes
	 * adjacent to the expanded vma and otherwise
	 * compatible.
	 */
	vma = vma_merge_extend(&vmi, vma, vrm->delta);
	if (!vma) {
		vrm_uncharge(vrm);
		return -ENOMEM;
	}
	vrm->vma = vma;

	vrm_stat_account(vrm, vrm->delta);

	return 0;
}

static bool align_hugetlb(struct vma_remap_struct *vrm)
{
	struct hstate *h __maybe_unused = hstate_vma(vrm->vma);

	vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h));
	vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h));

	/* addrs must be huge page aligned */
	if (vrm->addr & ~huge_page_mask(h))
		return false;
	if (vrm->new_addr & ~huge_page_mask(h))
		return false;

	/*
	 * Don't allow remap expansion, because the underlying hugetlb
	 * reservation is not yet capable to handle split reservation.
	 */
	if (vrm->new_len > vrm->old_len)
		return false;

	return true;
}
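
/*
 * Illustrative example: with 2 MiB huge pages, old_len = 3 MiB is rounded up
 * to 4 MiB, an addr of 0x40000000 passes the alignment check while
 * 0x40001000 does not, and any request whose (aligned) new length exceeds
 * the old length is rejected outright.
 */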

/*
 * We are mremap()'ing without specifying a fixed address to move to, but are
 * requesting that the VMA's size be increased.
 *
 * Try to do so in-place, if this fails, then move the VMA to a new location to
 * accommodate this.
 */
static unsigned long expand_vma(struct vma_remap_struct *vrm)
{
	unsigned long err;

	/*
	 * [addr, old_len) spans precisely to the end of the VMA, so try to
	 * expand it in-place.
	 */
	if (vrm_can_expand_in_place(vrm)) {
		err = expand_vma_in_place(vrm);
		if (err)
			return err;

		/* OK we're done! */
		return vrm->addr;
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it.
	 */

	/* We're not allowed to move the VMA, so error out. */
	if (!(vrm->flags & MREMAP_MAYMOVE))
		return -ENOMEM;

	/* Find a new location to move the VMA to. */
	err = vrm_set_new_addr(vrm);
	if (err)
		return err;

	return move_vma(vrm);
}

/*
 * Attempt to resize the VMA in-place, if we cannot, then move the VMA to the
 * first available address to perform the operation.
 */
static unsigned long mremap_at(struct vma_remap_struct *vrm)
{
	unsigned long res;

	switch (vrm->remap_type) {
	case MREMAP_INVALID:
		break;
	case MREMAP_NO_RESIZE:
		/* NO-OP CASE - resizing to the same size. */
		return vrm->addr;
	case MREMAP_SHRINK:
		/*
		 * SHRINK CASE. Can always be done in-place.
		 *
		 * Simply unmap the shrunken portion of the VMA. This does all
		 * the needed commit accounting, and we indicate that the mmap
		 * lock should be dropped.
		 */
		res = shrink_vma(vrm, /* drop_lock= */true);
		if (res)
			return res;

		return vrm->addr;
	case MREMAP_EXPAND:
		return expand_vma(vrm);
	}

	/* Should not be possible. */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/*
 * Will this operation result in the VMA being expanded or moved and thus need
 * to map a new portion of virtual address space?
 */
static bool vrm_will_map_new(struct vma_remap_struct *vrm)
{
	if (vrm->remap_type == MREMAP_EXPAND)
		return true;

	if (vrm_implies_new_addr(vrm))
		return true;

	return false;
}

/* Does this remap ONLY move mappings? */
static bool vrm_move_only(struct vma_remap_struct *vrm)
{
	if (!(vrm->flags & MREMAP_FIXED))
		return false;

	if (vrm->old_len != vrm->new_len)
		return false;

	return true;
}

static void notify_uffd(struct vma_remap_struct *vrm, bool failed)
{
	struct mm_struct *mm = current->mm;

	/* Regardless of success/failure, we always notify of any unmaps. */
	userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
	if (failed)
		mremap_userfaultfd_fail(vrm->uf);
	else
		mremap_userfaultfd_complete(vrm->uf, vrm->addr,
					    vrm->new_addr, vrm->old_len);
	userfaultfd_unmap_complete(mm, vrm->uf_unmap);
}

static bool vma_multi_allowed(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	/*
	 * We can't support moving multiple uffd VMAs as notify requires
	 * mmap lock to be dropped.
	 */
	if (userfaultfd_armed(vma))
		return false;

	/*
	 * Custom get unmapped area might result in MREMAP_FIXED not
	 * being obeyed.
	 */
	if (!file || !file->f_op->get_unmapped_area)
		return true;
	/* Known good. */
	if (vma_is_shmem(vma))
		return true;
	if (is_vm_hugetlb_page(vma))
		return true;
	if (file->f_op->get_unmapped_area == thp_get_unmapped_area)
		return true;

	return false;
}
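
/*
 * Illustrative example: an mremap(MREMAP_FIXED) spanning two adjacent
 * anonymous VMAs may move both in one call, but were either VMA a
 * uffd-registered region or a driver mapping with a bespoke
 * ->get_unmapped_area handler, the batched move would be refused and only a
 * move of a single VMA spanning the whole range would be permitted.
 */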

static int check_prep_vma(struct vma_remap_struct *vrm)
{
	struct vm_area_struct *vma = vrm->vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = vrm->addr;
	unsigned long old_len, new_len, pgoff;

	if (!vma)
		return -EFAULT;

	/* If mseal()'d, mremap() is prohibited. */
	if (vma_is_sealed(vma))
		return -EPERM;

	/* Align to hugetlb page size, if required. */
	if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
		return -EINVAL;

	vrm_set_delta(vrm);
	vrm->remap_type = vrm_remap_type(vrm);
	/* For convenience, we set new_addr even if VMA won't move. */
	if (!vrm_implies_new_addr(vrm))
		vrm->new_addr = addr;

	/* Below only meaningful if we expand or move a VMA. */
	if (!vrm_will_map_new(vrm))
		return 0;

	old_len = vrm->old_len;
	new_len = vrm->new_len;

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
			     current->comm, current->pid);
		return -EINVAL;
	}

	if ((vrm->flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return -EINVAL;

	/*
	 * We permit crossing of boundaries for the range being unmapped due to
	 * a shrink.
	 */
	if (vrm->remap_type == MREMAP_SHRINK)
		old_len = new_len;

	/*
	 * We can't remap across the end of VMAs, as another VMA may be
	 * adjacent:
	 *
	 *       addr   vma->vm_end
	 *  |-----.----------|
	 *  |     .          |
	 *  |-----.----------|
	 *        .<--------->xxx>
	 *            old_len
	 *
	 * We also require that vma->vm_start <= addr < vma->vm_end.
	 */
	if (old_len > vma->vm_end - addr)
		return -EFAULT;

	if (new_len == old_len)
		return 0;

	/* We are expanding and the VMA is mlock()'d so we need to populate. */
	if (vma->vm_flags & VM_LOCKED)
		vrm->populate_expand = true;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return -EINVAL;

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return -EFAULT;

	if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
		return -EAGAIN;

	if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
		return -ENOMEM;

	return 0;
}

/*
 * Are the parameters passed to mremap() valid? If so return 0, otherwise
 * return error.
 */
static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
{
	unsigned long addr = vrm->addr;
	unsigned long flags = vrm->flags;

	/* Ensure no unexpected flag values. */
	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return -EINVAL;

	/* Start address must be page-aligned. */
	if (offset_in_page(addr))
		return -EINVAL;

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!vrm->new_len)
		return -EINVAL;

	/* Is the new length silly? */
	if (vrm->new_len > TASK_SIZE)
		return -EINVAL;

	/* Remainder of checks are for cases with specific new_addr. */
	if (!vrm_implies_new_addr(vrm))
		return 0;

	/* The new address must fit within the address space limit... */
	if (vrm->new_addr > TASK_SIZE - vrm->new_len)
		return -EINVAL;

	/* ...and must be page-aligned. */
	if (offset_in_page(vrm->new_addr))
		return -EINVAL;

	/* A fixed address implies a move, so MREMAP_MAYMOVE is required. */
	if (!(flags & MREMAP_MAYMOVE))
		return -EINVAL;

	/* MREMAP_DONTUNMAP does not allow resizing in the process. */
	if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len)
		return -EINVAL;

	/* The destination must not overlap the source. */
	if (vrm_overlaps(vrm))
		return -EINVAL;

	/*
	 * move_vma() need us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such scenario we can pre-compute if the whole
	 * operation has high chances to success map-wise.
	 * Worst-scenario case is when both vma's (new_addr and old_addr) get
	 * split in 3 before unmapping it.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether current map count plus 2 still leads us to 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	return 0;
}
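
/*
 * Illustrative userspace calls admitted or rejected by the checks above
 * (assuming q does not overlap p and both are page-aligned):
 *
 *	mremap(p, len, 2 * len, MREMAP_MAYMOVE);                  OK
 *	mremap(p, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, q);    OK
 *	mremap(p, len, len, MREMAP_FIXED, q);                     -EINVAL
 *	mremap(p, len, 2 * len,
 *	       MREMAP_MAYMOVE | MREMAP_DONTUNMAP, q);             -EINVAL
 */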

static unsigned long remap_move(struct vma_remap_struct *vrm)
{
	struct vm_area_struct *vma;
	unsigned long start = vrm->addr;
	unsigned long end = vrm->addr + vrm->old_len;
	unsigned long new_addr = vrm->new_addr;
	unsigned long target_addr = new_addr;
	unsigned long res = -EFAULT;
	unsigned long last_end;
	bool seen_vma = false;

	VMA_ITERATOR(vmi, current->mm, start);

	/*
	 * When moving VMAs we allow for batched moves across multiple VMAs,
	 * with all VMAs in the input range [addr, addr + old_len) being moved
	 * (and split as necessary).
	 */
	for_each_vma_range(vmi, vma, end) {
		/* Clamp the VMA to the portion within the range being moved. */
		unsigned long addr = max(vma->vm_start, start);
		unsigned long len = min(end, vma->vm_end) - addr;
		unsigned long offset, res_vma;
		bool multi_allowed;

		/* No gap permitted at the start of the range. */
		if (!seen_vma && start < vma->vm_start)
			return -EFAULT;

		/*
		 * To sensibly move multiple VMAs, accounting for the fact that
		 * get_unmapped_area() may align even MAP_FIXED moves, we
		 * attempt to move such that the gaps between source VMAs
		 * remain consistent in the destination, e.g.:
		 *
		 * X        Y                        X'       Y'
		 * |-------|   |-----|               |-------|   |-----|
		 * |   A   |   |  B  |       ->      |   A'  |   |  B' |
		 * |-------|   |-----|               |-------|   |-----|
		 *
		 * So we map B' at A'->vm_end + (B->vm_start - A->vm_end).
		 */
		offset = seen_vma ? vma->vm_start - last_end : 0;
		last_end = vma->vm_end;

		vrm->vma = vma;
		vrm->addr = addr;
		vrm->new_addr = target_addr + offset;
		vrm->old_len = vrm->new_len = len;

		multi_allowed = vma_multi_allowed(vma);
		if (!multi_allowed) {
			/* This VMA must be the only one in the range... */
			if (seen_vma)
				return -EFAULT;
			/* ...and must span it entirely. */
			if (vma->vm_end < end)
				return -EFAULT;
		}

		res_vma = check_prep_vma(vrm);
		if (!res_vma)
			res_vma = mremap_to(vrm);
		if (IS_ERR_VALUE(res_vma))
			return res_vma;

		if (!seen_vma) {
			VM_WARN_ON_ONCE(multi_allowed && res_vma != new_addr);
			res = res_vma;
		}

		/* mmap lock is only dropped on shrink. */
		VM_WARN_ON_ONCE(!vrm->mmap_locked);
		/* This is a move, no expand should occur. */
		VM_WARN_ON_ONCE(vrm->populate_expand);

		if (vrm->vmi_needs_invalidate) {
			vma_iter_invalidate(&vmi);
			vrm->vmi_needs_invalidate = false;
		}
		seen_vma = true;
		target_addr = res_vma + vrm->new_len;
	}

	return res;
}

static unsigned long do_mremap(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	unsigned long res;
	bool failed;

	vrm->old_len = PAGE_ALIGN(vrm->old_len);
	vrm->new_len = PAGE_ALIGN(vrm->new_len);

	res = check_mremap_params(vrm);
	if (res)
		return res;

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vrm->mmap_locked = true;

	if (vrm_move_only(vrm)) {
		res = remap_move(vrm);
	} else {
		vrm->vma = vma_lookup(current->mm, vrm->addr);
		res = check_prep_vma(vrm);
		if (res)
			goto out;

		/* Actually execute mremap. */
		res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
	}

out:
	failed = IS_ERR_VALUE(res);

	if (vrm->mmap_locked)
		mmap_write_unlock(mm);

	/* The VMA was mlock()'d and expanded, so populate the new range. */
	if (!failed && vrm->populate_expand)
		mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);

	notify_uffd(vrm, failed);
	return res;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but we do not do this for new_addr. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
	 * information.
	 */
	struct vma_remap_struct vrm = {
		.addr = untagged_addr(addr),
		.old_len = old_len,
		.new_len = new_len,
		.flags = flags,
		.new_addr = new_addr,

		.uf = &uf,
		.uf_unmap_early = &uf_unmap_early,
		.uf_unmap = &uf_unmap,

		.remap_type = MREMAP_INVALID, /* We set this later. */
	};

	return do_mremap(&vrm);
}