// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>

#include <asm/tlb.h>

#include "internal.h"

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to be
 * mmap_write_lock for writing; others need mmap_read_lock.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out_convert_errno;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out_convert_errno;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error)
			goto out_convert_errno;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error)
			goto out_convert_errno;
	}

success:
	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 */
	vma->vm_flags = new_flags;

out_convert_errno:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
out:
	return error;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry = swapin_walk_pmd_entry,
};

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!xa_is_value(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain();	/* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(current->mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(current->mm);
	return 0;
}
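
/*
 * Illustrative sketch (not part of this file): userspace reaches the
 * readahead path above through the madvise(2) wrapper on a file-backed
 * mapping.  The file name and the 4 MiB length are arbitrary example
 * values, not anything assumed by this code.
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	void *p = mmap(NULL, 4 << 20, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	madvise(p, 4 << 20, MADV_WILLNEED);	// schedule readahead, don't wait
 *
 * For a regular file this ends up in vfs_fadvise(..., POSIX_FADV_WILLNEED)
 * via madvise_willneed() above; for anonymous or shmem memory it triggers
 * the swap-in walks instead (CONFIG_SWAP).
 */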

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page = NULL;
	LIST_HEAD(page_list);

	if (fatal_signal_pending(current))
		return -EINTR;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&page_list);
		return 0;
	}

regular_page:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * Creating a THP page is expensive so split it only if we
		 * are sure it's worth. Split it if we are only owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * Clear the referenced/young markers so that reclaim will
		 * treat the page as a cold candidate instead of keeping it
		 * on the active list.
		 */
		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	if (pageout)
		reclaim_pages(&page_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_do_pageout(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * paging out pagecache only for non-anonymous mappings that correspond
	 * to the files the calling process could (if tried) open for writing;
	 * otherwise we'd be including shared non-exclusive mappings, which
	 * opens a side channel.
	 */
	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
}

static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!can_do_pageout(vma))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		/*
		 * If the pte has swp_entry, just clear page table to
		 * prevent swap-in which is more expensive rather than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If page is shared with others, we mustn't clear
			 * PG_dirty of the page.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some of architecture(ex, PPC) don't update TLB
			 * with set_pte_at and tlb_remove_tlb_entry so for
			 * the portability, remap the pte with old|clean
			 * after pte clearing.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry = madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, range.start, range.end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(current->mm);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_lru_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released the effect of the concurrent
			 * operation may not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}
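
/*
 * Illustrative sketch (not part of the kernel source): the two behaviours
 * dispatched above are requested from userspace via madvise(2).  The
 * mapping and the 1 MiB length are arbitrary example values.
 *
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	madvise(p, 1 << 20, MADV_DONTNEED);	// zap now; the next touch of a
 *						// private anonymous page reads zeroes
 *	madvise(p, 1 << 20, MADV_FREE);		// mark lazily freeable; contents may
 *						// survive until reclaim, and a write
 *						// cancels the free
 *
 * Both calls land in madvise_dontneed_free() above.
 */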

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(current->mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(current->mm);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += size) {
		unsigned long pfn;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				 pfn, start);

			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}

		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
			 pfn, start);

		/*
		 * Drop the reference taken by get_user_pages_fast(); the
		 * memory failure handler takes its own reference on the
		 * page when it needs one.
		 */
		put_page(page);
		ret = memory_failure(pfn, 0);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * decide whether to page out or read ahead pages in this region of memory.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from the child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger the memory error handler as if the given memory
 *		range were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VMPFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
int do_madvise(unsigned long start, size_t len_in, int behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return error;

	if (!PAGE_ALIGNED(start))
		return error;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(current->mm))
			return -EINTR;

		/*
		 * If another thread is in the middle of dumping core, the
		 * address space must not be modified from under it.
		 * mmget_still_valid() detects an in-progress core dump;
		 * bail out instead of changing the VMAs in that case.
		 */
		if (!mmget_still_valid(current->mm)) {
			mmap_write_unlock(current->mm);
			return -EINTR;
		}
	} else {
		mmap_read_lock(current->mm);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end). */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		mmap_write_unlock(current->mm);
	else
		mmap_read_unlock(current->mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(start, len_in, behavior);
}