/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

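/*
 * Any behaviour which results in changes to the vma->vm_flags needs to be
 * taken under the write semaphore of the mmap_sem; everything else only
 * needs mmap_sem taken for reading.
 */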
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

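/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */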
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_SWAP
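/*
 * Walk the page tables of an anonymous vma and asynchronously read any
 * swapped-out pages back into the swap cache (MADV_WILLNEED readahead).
 */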
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
								vma, index);
		if (page)
			page_cache_release(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();
}

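/*
 * Once shmem pages are swapped out, the page cache keeps swap entries in
 * place of the pages; look those entries up and read the pages back in.
 */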
static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				page_cache_release(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
								NULL, 0);
		if (page)
			page_cache_release(page);
	}

	lru_add_drain();	/* push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

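/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */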
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
						file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

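/*
 * Page table walker for MADV_FREE: drop swap entries, clear the dirty and
 * young bits of present ptes and deactivate clean anonymous pages so that
 * reclaim can discard them lazily instead of swapping them out.
 */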
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table:
		 * freeing the entry is cheaper than swapping the page back
		 * in only to discard it.
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			put_page(page);
			unlock_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot clear
			 * its PG_dirty flag.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at and tlb_remove_tlb_entry, so for
			 * portability remap the pte as old and clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			if (PageActive(page))
				deactivate_page(page);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

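/*
 * MADV_FREE only works on anonymous memory: mark the pages of a single vma
 * as lazily freeable so reclaim can drop them without writing them out.
 */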
static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}

static long madvise_free(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	return madvise_free_single_vma(vma, start, end);
}

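/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.
 *
 * NB: this interface discards data rather than pushing it out to swap,
 * so there is no reason to write the data to the swap area if the
 * application is discarding it.
 */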
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	zap_page_range(vma, start, end - start, NULL);
	return 0;
}

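/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */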
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * The filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
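/*
 * Error injection support for memory error handling.
 */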
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
			page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif

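/*
 * Dispatch one behavior to its handler for a single vma.  *prev is updated
 * so the caller knows where to continue walking the vma list.
 */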
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
		/*
		 * XXX: In this implementation, MADV_FREE works like
		 * MADV_DONTNEED on a swapless system or when swap is full.
		 */
		if (get_nr_swap_pages() > 0)
			return madvise_free(vma, prev, start, end);
		/* passthrough */
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}

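/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * discard unneeded pages by telling it what the expected access pattern
 * for the range is, so readahead and memory management behaviour can be
 * adjusted accordingly.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start is not page-aligned, "behavior" is not a valid value,
 *	      or the advice cannot be applied to this kind of mapping
 *	      (e.g. locked or special vmas).
 *  -ENOMEM - addresses in the specified range are not currently mapped,
 *	      or are outside the address space of the process.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EACCES - MADV_REMOVE on a mapping that is not both shared and writable.
 *  -EPERM  - MADV_HWPOISON/MADV_SOFT_OFFLINE without CAP_SYS_ADMIN.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */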
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end). */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}