/*
 *	mm/mremap.c
 *
 *	Implementation of the mremap(2) system call: move and/or resize an
 *	existing memory mapping, copying page table entries from the old
 *	range to the new one and keeping the VM accounting consistent.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

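/*
 * Walk the existing page tables down to the PMD covering @addr, or
 * return NULL if any intermediate level is missing (or corrupt).
 */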
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

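/*
 * Allocate (if necessary) the p4d/pud/pmd levels needed to map @addr in
 * the destination area.  Returns the PMD, or NULL on allocation failure.
 */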
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

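/*
 * Take/drop the rmap locks (i_mmap_rwsem for file mappings, the anon_vma
 * lock for anonymous memory) so that rmap walkers see either the old or
 * the new PTEs while entries are being moved.
 */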
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

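/*
 * Copy one PMD's worth of PTEs from the old location to the new one,
 * clearing the old entries as we go.  Both PTE locks are held (nested)
 * for the duration; the caller holds mmap_sem for writing.
 */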
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and
	 * anon_vma locks to ensure that rmap will always observe either
	 * the old or the new ptes while they are being moved; this avoids
	 * races with truncation, page migration and other rmap walkers.
	 *
	 * When need_rmap_locks is false the caller has arranged things so
	 * that such races cannot happen (for example, new_vma is known to
	 * be visited after vma in rmap traversal order), so taking the
	 * locks can be skipped.
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * A present PTE may still be cached in a remote TLB, so the
		 * old range must be flushed before the old PTE lock is
		 * dropped; otherwise a racing write through the stale entry
		 * could dirty the page behind page_mkclean()'s back.
		 * Record that a flush is needed; it is done once for the
		 * whole range before move_ptes() returns.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

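/*
 * Move page table entries for @len bytes from @old_addr in @vma to
 * @new_addr in @new_vma, one PMD range at a time.  Returns the number of
 * bytes actually moved, which may be less than @len on allocation failure.
 */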
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	unsigned long mmun_start;
	unsigned long mmun_end;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE) {
				bool moved;

				if (need_rmap_locks)
					take_rmap_locks(vma);
				moved = move_huge_pmd(vma, old_addr, new_addr,
						      old_end, old_pmd, new_pmd);
				if (need_rmap_locks)
					drop_rmap_locks(vma);
				if (moved)
					continue;
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		}
		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;
}

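/*
 * Move a VMA: create the new mapping with copy_vma(), move the page
 * tables across, and then unmap the old range.  On failure the page
 * tables are moved back so the old mapping remains intact.
 */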
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err = 0;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}
	/*
	 * Account for the mapping that will remain (the new one, or the old
	 * one again if the move had to be undone) and preserve hiwater_vm
	 * across the do_munmap() below: while both ranges are mapped,
	 * total_vm is transiently inflated and would otherwise be latched
	 * into the high-water mark.
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

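/*
 * Look up and sanity-check the VMA that an mremap() request wants to
 * resize.  Returns the VMA, or an ERR_PTR() describing why the request
 * cannot be satisfied; *p is set to the number of pages charged against
 * the commit limit for VM_ACCOUNT mappings.
 */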
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
			   (new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

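/*
 * Handle MREMAP_FIXED: move the mapping to a caller-chosen address,
 * unmapping whatever currently lives there first.
 */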
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (offset_in_page(ret))
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
		       uf_unmap);
	if (!(offset_in_page(ret)))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

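/*
 * Can the VMA grow in place by @delta bytes, i.e. without overflowing,
 * without running into the next VMA and without violating the arch's
 * get_unmapped_area() constraints?
 */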
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end)
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end)
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space).
 *
 * MREMAP_FIXED lets the caller choose the new address; it implies
 * MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, &uf, &uf_unmap_early, &uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (offset_in_page(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = 0;
	}
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}