// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * unexpected change in soft-dirty
	 * checkpoint/restore context
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and
	 * anon_vma locks to ensure that rmap will always observe either
	 * the old or the new ptes. This is the easiest way to avoid races
	 * with truncate_pagecache(), page migration, etc.
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures that a pte present in
	 *   only one of the two vmas is still found by rmap walkers that
	 *   scan the vmas in order.
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * overlap with the old pmd, moving those pages has already
	 * populated the destination pmd, so the whole-pmd move below
	 * would find it established.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

#ifdef CONFIG_HAVE_MOVE_PUD
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
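
/*
 * Worked example (illustrative numbers, not from the original source):
 * with 4kB pages, PMD_SIZE = 0x200000 and PMD_MASK = ~0x1fffff,
 * get_extent(NORMAL_PMD, old_addr = 0x2ff000, old_end = 0x600000,
 * new_addr = 0x500000) first clamps to the source side:
 * next = (0x2ff000 + 0x200000) & PMD_MASK = 0x400000, so extent is
 * 0x101000. The destination side only has 0x100000 left before its
 * next PMD boundary at 0x600000, which is smaller, so 0x100000 is
 * returned: the caller's copy loop advances by whichever boundary
 * comes first.
 */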

/*
 * Attempts to speedup the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving
		 * at the PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			pud_t *old_pud, *new_pud;

			old_pud = get_old_pud(vma->vm_mm, old_addr);
			if (!old_pud)
				continue;
			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
			if (!new_pud)
				break;
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, need_rmap_locks))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;
}
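
/*
 * Return-value note (worked example with illustrative numbers): the
 * function returns len + old_addr - old_end, i.e. how many bytes of
 * the old range were processed. If len = 0x300000 and the loop breaks
 * out on a failed page-table allocation with 0x100000 bytes still to
 * go, the result is 0x200000; move_vma() then sees moved_len < old_len
 * and moves the entries back.
 */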

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err = 0;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
		if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
			return -ENOMEM;
	}

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
			vm_unacct_memory(new_len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma, flags);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(new_len >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
			vma->vm_flags & VM_SHARED))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
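
/*
 * Worked example for the VM_LOCKED check in vma_to_resize() above
 * (illustrative numbers): with 4kB pages, RLIMIT_MEMLOCK = 64kB and
 * mm->locked_vm = 12 pages (48kB), growing a locked mapping from
 * old_len = 8kB to new_len = 32kB gives locked = 48kB + 24kB = 72kB,
 * which exceeds the 64kB limit, so a task without CAP_IPC_LOCK gets
 * -EAGAIN.
 */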

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() need us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmaped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the memory after an eventual failure.
	 * So, to avoid such scenario we can pre-compute if the whole
	 * operation has high chances to success map-wise.
	 * Worst-scenario case is when both vma's (new_addr and old_addr) get
	 * split in 3 before unmapping it.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether current map count plus 2 still leads us to 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end)
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end)
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
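
/*
 * Userspace usage sketch (illustrative, not part of this file): growing
 * an anonymous mapping, allowing the kernel to move it if it cannot be
 * expanded in place.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 *
 * Without MREMAP_MAYMOVE the same call fails with -ENOMEM whenever the
 * vma_expandable() path above cannot grow the mapping in place.
 */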