/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
	else if (pte_file(pte))
		pte = pte_file_mksoft_dirty(pte);
#endif
	return pte;
}

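/*
 * Move the ptes covering [old_addr, old_end) under old_pmd over to new_pmd
 * at new_addr.  Both ranges lie within a single pmd on each side; the caller
 * holds mmap_sem for writing and asks for the rmap locks when they are
 * needed to keep rmap walkers from missing the entries mid-move.
 */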
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_mutex and
	 * anon_vma locks to ensure that rmap will always observe either
	 * the old or the new ptes. This is the easiest way to avoid
	 * races with truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			mutex_lock(&mapping->i_mmap_mutex);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
}

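/*
 * Cap on how much is moved per move_ptes() call, so that the cond_resched()
 * in move_page_tables() gets a chance to run at a reasonable interval.
 */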
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

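/*
 * Walk the range pmd by pmd, moving entries from the old location to the
 * new one.  Returns the number of bytes actually relocated, which may be
 * less than len if a destination page table could not be allocated.
 */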
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE) {
				VM_BUG_ON(vma->vm_file || !vma->anon_vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
			}
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end - len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;
}

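/*
 * Relocate the vma itself: duplicate it at new_addr, move its page tables
 * across, fix up accounting and locked-memory counters, and unmap the old
 * range.  Returns the new start address on success or a negative error.
 */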
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		if (vm_flags & VM_FOP_EXTEND) {
			struct file_operations_extend *fop =
					to_fop_extend(vma->vm_file->f_op);
			if (fop->mremap)
				fop->mremap(vma->vm_file, new_vma);
		}
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * do_munmap() below may raise hiwater_vm while both the old and
	 * the new range are still counted in the vm statistics.  Save it
	 * here and restore it afterwards so the high-water mark does not
	 * record that transient double mapping.
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len >> PAGE_SHIFT);

	/* Tell pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

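/*
 * Look up the vma covering addr and check that it may be resized from
 * old_len to new_len: the range must stay inside one vma, and any growth
 * must respect VM_DONTEXPAND/VM_PFNMAP, mlock limits and memory accounting.
 * Returns the vma, with *p set to the newly charged pages, or an ERR_PTR.
 */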
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

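/*
 * MREMAP_FIXED path: validate the caller-supplied destination, unmap
 * whatever currently lives there, shrink the source if requested, and
 * then hand off to move_vma() to relocate the mapping.
 */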
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr + new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr + old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len, NULL);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr + new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
		       uf_unmap);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

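/*
 * Can this vma grow in place by delta bytes?  The extended range must not
 * wrap, must not run into the next vma, and get_unmapped_area() must agree
 * that the enlarged fixed mapping is still acceptable.
 */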
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end)
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end)
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
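/*
 * Userspace usage sketch (illustrative only, not part of the kernel code):
 *
 *	void *new = mremap(old, old_size, new_size, MREMAP_MAYMOVE);
 *	if (new == MAP_FAILED)
 *		perror("mremap");
 */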
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap);

	down_write(&current->mm->mmap_sem);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len,
					&locked, &uf, &uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr + new_len, old_len - new_len, &uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, &uf, &uf_unmap);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}