// SPDX-License-Identifier: GPL-2.0
/*
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory
 * or cache failure ("hardware poison", HWPoison).
 *
 * Recovery means isolating the corrupted page so it is never handed
 * out again: clean page cache pages are dropped or truncated, dirty
 * pages force SIGBUS to the owning processes, and free pages are taken
 * off the allocator's lists. Pages we cannot recover are marked
 * PG_hwpoison and left in place.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

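/*
 * Take a free page out of circulation: dissolve it if it is a free
 * hugetlb page, otherwise pull it straight off the buddy freelists.
 * Per-cpu page lists are disabled around the attempt so the page
 * cannot be handed out from a pcp list while we work.
 */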
static bool __page_handle_poison(struct page *page)
{
	bool ret;

	zone_pcp_disable(page_zone(page));
	ret = dissolve_free_huge_page(page);
	if (!ret)
		ret = take_page_off_buddy(page);
	zone_pcp_enable(page_zone(page));

	return ret;
}

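/*
 * Mark a page hwpoisoned and take an extra reference so it stays out
 * of circulation. @hugepage_or_freepage means the page must first be
 * taken off the free lists (or dissolved, for hugetlb) via
 * __page_handle_poison(); @release drops the caller's reference once
 * the flag is set.
 */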
static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since
		 * dissolve_free_huge_page() returns 0 for non-hugetlb
		 * pages as well.
		 */
		if (!__page_handle_poison(page))
			/*
			 * We could fail to take the target page off the
			 * buddy lists, for example due to a racy page
			 * allocation. That's acceptable because a
			 * soft-offlined page is not actually broken; if
			 * someone really wants to use it, they may take it.
			 */
			return false;
	}

	SetPageHWPoison(page);
	if (release)
		put_page(page);
	page_ref_inc(page);
	num_poisoned_pages_inc();

	return true;
}

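/*
 * The hwpoison filter below exists purely for fault-injection testing:
 * it lets a test restrict error handling to pages of a particular
 * device, with particular page flags, or (with CONFIG_MEMCG) charged
 * to a particular memory cgroup. Injectors such as
 * madvise(MADV_HWPOISON) and the hwpoison-inject module run the real
 * error handling on chosen pages; this filter keeps the blast radius
 * confined to the pages the test intended to hit.
 */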
#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

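/*
 * Match the page's stable flags (the /proc/kpageflags encoding
 * produced by stable_page_flags()) against a user supplied mask/value
 * pair, so tests can target e.g. only dirty LRU pages.
 */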
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress testing of hwpoison with multiple processes
 * running in parallel: when hwpoison_filter_memcg is set to the
 * cgroup inode number of a memcg (as returned by page_cgroup_ino()),
 * only pages charged to that cgroup are poisoned, so an injection
 * test can be confined to its own cgroup without disturbing the rest
 * of the system.
 */
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet. Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * struct to_kill describes one victim: the task to signal, the user
 * virtual address at which it maps the poisoned page, and the mapping
 * size (as a page shift) reported in siginfo's si_addr_lsb.
 */
struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	short size_shift;
};

/*
 * Send all the processes who have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if error happened in current execution context
 */
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;
	int ret = 0;

	pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
			pfn, t->comm, t->pid);

	if (flags & MF_ACTION_REQUIRED) {
		if (t == current)
			ret = force_sig_mceerr(BUS_MCEERR_AR,
					 (void __user *)tk->addr, addr_lsb);
		else
			/* Signal other processes sharing the page if they have PF_MCE_EARLY set. */
			ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				addr_lsb, t);
	} else {
		/*
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				      addr_lsb, t);
	}
	if (ret < 0)
		pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}

/*
 * Unknown page type encountered. Try to drain the per-cpu LRU caches
 * in the hope of turning the page into an LRU or free page, which we
 * can handle.
 */
void shake_page(struct page *p, int access)
{
	if (PageHuge(p))
		return;

	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only shrink the node's slab caches here (which would also
	 * shrink other caches) if access is not potentially fatal.
	 */
	if (access)
		drop_slab_node(page_to_nid(p));
}
EXPORT_SYMBOL_GPL(shake_page);

static unsigned long dev_pagemap_mapping_shift(struct page *page,
		struct vm_area_struct *vma)
{
	unsigned long address = vma_address(page, vma);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return 0;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;
	if (pud_devmap(*pud))
		return PUD_SHIFT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;
	if (pmd_devmap(*pmd))
		return PMD_SHIFT;
	pte = pte_offset_map(pmd, address);
	if (!pte_present(*pte))
		return 0;
	if (pte_devmap(*pte))
		return PAGE_SHIFT;
	return 0;
}

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
		       struct vm_area_struct *vma,
		       struct list_head *to_kill)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
	if (!tk) {
		pr_err("Memory failure: Out of memory while machine check handling\n");
		return;
	}

	tk->addr = page_address_in_vma(p, vma);
	if (is_zone_device_page(p))
		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
	else
		tk->size_shift = page_shift(compound_head(p));

	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Also, as
	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
	 * "tk->size_shift == 0" effectively checks no mapping on
	 * ZONE_DEVICE. When a device page is mapped but the mapping
	 * could not be resolved, we cannot report a precise address,
	 * so dropping the entry here is the conservative thing to do.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Memory failure: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	} else if (tk->size_shift == 0) {
		kfree(tk);
		return;
	}

	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
		unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr == -EFAULT) {
				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process which
 * expects to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill", and NULL otherwise.
 *
 * Note that the above is true for the Action Optional case. For the Action
 * Required case, it's only meaningful to the current thread, which needs to
 * be signaled with SIGBUS. The error is Action Optional for other non-current
 * processes sharing the same error page; if such a process is "early kill",
 * the task_struct of its dedicated thread will also be returned.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = page_lock_anon_vma_read(page);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	pgoff = page_to_pgoff(page);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}

/*
 * Collect the processes who have the corrupted page mapped to kill.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	if (!page->mapping)
		return;

	if (PageAnon(page))
		collect_procs_anon(page, tokill, force_early);
	else
		collect_procs_file(page, tokill, force_early);
}

struct hwp_walk {
	struct to_kill tk;
	unsigned long pfn;
	int flags;
};

static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
{
	tk->addr = addr;
	tk->size_shift = shift;
}

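/*
 * Check whether a pte (present, or a hwpoison swap entry) refers to
 * the poisoned pfn; if so, record the virtual address and mapping size
 * in the walk's to_kill slot so the caller can deliver a precise
 * SIGBUS. Returns 1 on a hit, 0 otherwise.
 */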
static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
				unsigned long poisoned_pfn, struct to_kill *tk)
{
	unsigned long pfn = 0;

	if (pte_present(pte)) {
		pfn = pte_pfn(pte);
	} else {
		swp_entry_t swp = pte_to_swp_entry(pte);

		if (is_hwpoison_entry(swp))
			pfn = hwpoison_entry_to_pfn(swp);
	}

	if (!pfn || pfn != poisoned_pfn)
		return 0;

	set_to_kill(tk, addr, shift);
	return 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	pmd_t pmd = *pmdp;
	unsigned long pfn;
	unsigned long hwpoison_vaddr;

	if (!pmd_present(pmd))
		return 0;
	pfn = pmd_pfn(pmd);
	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
		return 1;
	}
	return 0;
}
#else
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	return 0;
}
#endif

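/*
 * Page-table walk callback for one pmd range: handle a huge pmd
 * directly, otherwise scan the pte level for an entry matching the
 * poisoned pfn. Returning 1 stops the walk early once a hit is found.
 */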
static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
	int ret = 0;
	pte_t *ptep;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
	if (ptl) {
		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmdp))
		goto out;

	ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
					     hwp->pfn, &hwp->tk);
		if (ret == 1)
			break;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	cond_resched();
	return ret;
}

#ifdef CONFIG_HUGETLB_PAGE
static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long end,
			    struct mm_walk *walk)
{
	struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(walk->vma);

	return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
				      hwp->pfn, &hwp->tk);
}
#else
#define hwpoison_hugetlb_range	NULL
#endif

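/*
 * Walk ops wiring the callbacks above into the generic page-table
 * walker; only the pmd and hugetlb levels need handling here.
 */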
static struct mm_walk_ops hwp_walk_ops = {
	.pmd_entry = hwpoison_pte_range,
	.hugetlb_entry = hwpoison_hugetlb_range,
};

/*
 * Sends SIGBUS to the current process if the error happened in its
 * address space.
 *
 * This function is intended to handle "Action Required" MCEs on already
 * hardware poisoned pages. They could happen, for example, when
 * memory_failure() failed to unmap the error page at the first call, or
 * when multiple local machine checks happened on different CPUs.
 *
 * The MCE handler currently has no easy access to the error virtual
 * address, so this function walks the page table to find it. The returned
 * virtual address is proper in most cases, but it could be wrong when the
 * process has multiple entries mapping the error page.
 */
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwp_walk priv = {
		.pfn = pfn,
	};
	priv.tk.tsk = p;

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
			      (void *)&priv);
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	mmap_read_unlock(p->mm);
	return ret ? -EFAULT : -EHWPOISON;
}

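/*
 * Human readable names for the recovery results and the page types we
 * act on; these strings show up verbatim in the "recovery action for
 * ...: ..." log lines emitted by action_result().
 */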
static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_POISONED_HUGE]		= "huge page already hardware poisoned",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_NON_PMD_HUGE]		= "non-pmd-sized huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_BUDDY_2ND]		= "free buddy page (2nd try)",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
	[MF_MSG_UNKNOWN]		= "unknown page",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(p);

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		put_page(p);
		return 0;
	}
	return -EIO;
}

static int truncate_error_page(struct page *p, unsigned long pfn,
				struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_page) {
		int err = mapping->a_ops->error_remove_page(mapping, p);

		if (err != 0) {
			pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
				pfn, err);
		} else if (page_has_private(p) &&
			   !try_to_release_page(p, GFP_NOIO)) {
			pr_info("Memory failure: %#lx: failed to release buffers\n",
				pfn);
		} else {
			ret = MF_RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it just invalidate.
		 * This fails on dirty pages or anything with private pages.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("Memory failure: %#lx: Failed to invalidate\n",
				pfn);
	}

	return ret;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be better off than just ignoring it (e.g. unused slab objects could
 * be dropped), but that needs more work.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	unlock_page(p);
	return MF_IGNORED;
}

/*
 * Page in unknown state. Punt.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
	unlock_page(p);
	return MF_FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int ret;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done: the only remaining reference
	 * should be the one held by the memory failure handler itself.
	 */
	if (PageAnon(p)) {
		ret = MF_RECOVERED;
		goto out;
	}

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meanwhile.
		 */
		ret = MF_FAILED;
		goto out;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 */
	ret = truncate_error_page(p, pfn, mapping);
out:
	unlock_page(p);
	return ret;
}

/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors:
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling, the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * Anything more sophisticated (like keeping the error
		 * sticky until the file is next opened) would need a
		 * lot more work for little potential win.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (ie. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	int ret;

	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
	unlock_page(p);
	return ret;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	int ret;

	delete_from_swap_cache(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
	unlock_page(p);
	return ret;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down kill region to one page, we need to break up pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res;
	struct page *hpage = compound_head(p);
	struct address_space *mapping;

	if (!PageHuge(hpage))
		return MF_DELAYED;

	mapping = page_mapping(hpage);
	if (mapping) {
		res = truncate_error_page(hpage, pfn, mapping);
		unlock_page(hpage);
	} else {
		res = MF_FAILED;
		unlock_page(hpage);
		/*
		 * migration entry prevents later access on the error
		 * anonymous hugepage, so we can free and dissolve it into
		 * buddy to save healthy subpages.
		 */
		if (PageAnon(hpage))
			put_page(hpage);
		if (__page_handle_poison(p)) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		}
	}

	return res;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access page at any time
 * in its live cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define lru		(1UL << PG_lru)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;

	/* The callback ->action() has to unlock the relevant page inside it. */
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if the slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};
1072
1073#undef dirty
1074#undef sc
1075#undef unevict
1076#undef mlock
1077#undef lru
1078#undef head
1079#undef slab
1080#undef reserved
1081
1082
1083
1084
1085
1086static void action_result(unsigned long pfn, enum mf_action_page_type type,
1087 enum mf_result result)
1088{
1089 trace_memory_failure_event(pfn, type, result);
1090
1091 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
1092 pfn, action_page_types[type], action_name[result]);
1093}
1094
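/*
 * Run the handler picked from error_states[] and judge the outcome.
 * After the handler has done its work, nobody else should hold a
 * reference to the page; a remaining elevated refcount means some user
 * still has it mapped or cached, so the recovery is reported as failed
 * even if the handler itself succeeded.
 */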
static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	/* page p should be unlocked after returning from ps->action().  */
	result = ps->action(p, pfn);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
		count--;
	if (count > 0) {
		pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
		       pfn, action_page_types[ps->type], count);
		result = MF_FAILED;
	}
	action_result(pfn, ps->type, result);

	/*
	 * Could do more checks here if the page looks ok, and could
	 * adjust zone counters to correct for the missing page.
	 */
	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}

/*
 * Return true if a page type of a given page is supported by hwpoison
 * mechanism (while handling could fail), otherwise false.  This function
 * does not return true for hugetlb or device memory pages, so it's assumed
 * to be called only in the context where we never have such pages.
 */
static inline bool HWPoisonHandlable(struct page *page)
{
	return PageLRU(page) || __PageMovable(page);
}

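/*
 * Try to grab a reference on the page (via its head page) so it cannot
 * be freed under us while we handle the error. Returns 1 if a
 * reference was taken, 0 if the page had refcount zero or is a tail we
 * cannot catch, and -EBUSY if the page is of a type we cannot handle.
 */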
static int __get_hwpoison_page(struct page *page)
{
	struct page *head = compound_head(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_huge_page(head, &hugetlb);
	if (hugetlb)
		return ret;

	/*
	 * This check prevents from calling get_page_unless_zero() for any
	 * unsupported type of page in order to reduce the risk of unexpected
	 * races caused by taking a page refcount.
	 */
	if (!HWPoisonHandlable(head))
		return -EBUSY;

	if (PageTransHuge(head)) {
		/*
		 * Non anonymous thp exists only in allocation/free time. We
		 * can't handle such a case correctly, so let's give it up.
		 * This should be better than triggering BUG_ON when kernel
		 * tries to touch the "partially handled" page.
		 */
		if (!PageAnon(head)) {
			pr_err("Memory failure: %#lx: non anonymous thp\n",
				page_to_pfn(page));
			return 0;
		}
	}

	if (get_page_unless_zero(head)) {
		if (head == compound_head(page))
			return 1;

		pr_info("Memory failure: %#lx cannot catch tail\n",
			page_to_pfn(page));
		put_page(head);
	}

	return 0;
}

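/*
 * Retry wrapper around __get_hwpoison_page(): page refcounts and page
 * types change under us (allocation, freeing, LRU cache draining), so
 * a failed grab is retried a few times, with shake_page() in between
 * to nudge the page into a state we can handle.
 */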
static int get_any_page(struct page *p, unsigned long flags)
{
	int ret = 0, pass = 0;
	bool count_increased = false;

	if (flags & MF_COUNT_INCREASED)
		count_increased = true;

try_again:
	if (!count_increased) {
		ret = __get_hwpoison_page(p);
		if (!ret) {
			if (page_count(p)) {
				/* We raced with an allocation, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EBUSY;
			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
				/* We raced with put_page, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EIO;
			}
			goto out;
		} else if (ret == -EBUSY) {
			/*
			 * We raced with a (possibly temporarily)
			 * unhandlable page, retry.
			 */
			if (pass++ < 3) {
				shake_page(p, 1);
				goto try_again;
			}
			ret = -EIO;
			goto out;
		}
	}

	if (PageHuge(p) || HWPoisonHandlable(p)) {
		ret = 1;
	} else {
		/*
		 * A page we cannot handle. Check whether we can turn
		 * it into something we can handle.
		 */
		if (pass++ < 3) {
			put_page(p);
			shake_page(p, 1);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	return ret;
}

/*
 * get_hwpoison_page() - Get refcount for memory error handling
 * @p:		Raw error page (hit by memory error)
 * @flags:	Flags controlling behavior of error handling
 *
 * get_hwpoison_page() takes a page refcount of an error page to handle
 * the memory error on it, after checking that the error page is in a
 * well-defined state (a page type we can successfully handle the memory
 * error on, such as LRU pages and hugetlb pages).
 *
 * Memory error handling could be triggered at any time on any type of
 * page, so it's prone to race with the typical memory management
 * lifecycle (like allocation and free). To avoid such races,
 * get_hwpoison_page() disables the zone's per-cpu page lists while
 * grabbing the refcount.
 *
 * Return: 0 on failure,
 *         1 on success for in-use pages in a well-defined state,
 *         -EIO for pages on which we can not handle memory errors,
 *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
 *         operations like allocation and free.
 */
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	zone_pcp_disable(page_zone(p));
	ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int flags, struct page **hpagep)
{
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	bool unmap_success;
	int kill = 1, forcekill;
	struct page *hpage = *hpagep;
	bool mlocked = PageMlocked(hpage);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return true;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return true;

	if (PageKsm(p)) {
		pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
		return false;
	}

	if (PageSwapCache(p)) {
		pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
			pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_can_writeback(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form.  This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	if (!PageHuge(hpage)) {
		try_to_unmap(hpage, ttu);
	} else {
		if (!PageAnon(hpage)) {
			/*
			 * For hugetlb pages in shared mappings, try_to_unmap
			 * could potentially call huge_pmd_unshare.  Because of
			 * this, take semaphore in write mode here and set
			 * TTU_RMAP_LOCKED to indicate we have taken the lock
			 * at this higher level.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (mapping) {
				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
				i_mmap_unlock_write(mapping);
			} else
				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
		} else {
			try_to_unmap(hpage, ttu);
		}
	}

	unmap_success = !page_mapped(hpage);
	if (!unmap_success)
		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
	 * shake_page() again to ensure that it's flushed.
	 */
	if (mlocked)
		shake_page(hpage, 0);

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps done we can decide if
	 * killing is needed or not.  Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed.  When there was a problem unmapping earlier
	 * use a more force-full uncertain kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);

	return unmap_success;
}

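/*
 * Match the page's flags against error_states[]. The current flags may
 * already have been scrambled by the containment work, so if they only
 * match the catchall entry we retry with the snapshot of the flags the
 * caller took before unmapping (plus the live dirty bit).
 */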
static int identify_page_state(unsigned long pfn, struct page *p,
				unsigned long page_flags)
{
	struct page_state *ps;

	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	return page_action(ps, p, pfn);
}

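/*
 * A raw error page inside a THP cannot be handled as such; try to
 * split the huge page so the error is confined to a single base page.
 * Only anonymous THPs are split: file-backed ones are rejected, and a
 * failed split leaves us with nothing better than giving up.
 */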
static int try_to_split_thp_page(struct page *page, const char *msg)
{
	lock_page(page);
	if (!PageAnon(page) || unlikely(split_huge_page(page))) {
		unsigned long pfn = page_to_pfn(page);

		unlock_page(page);
		if (!PageAnon(page))
			pr_info("%s: %#lx: non anonymous thp\n", msg, pfn);
		else
			pr_info("%s: %#lx: thp split failed\n", msg, pfn);
		put_page(page);
		return -EBUSY;
	}
	unlock_page(page);

	return 0;
}

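/*
 * Hugetlb pages take this dedicated path: the poison flag lives on the
 * head page, free huge pages are dissolved so their healthy subpages
 * go back to the buddy allocator, and in-use ones are unmapped and
 * then handled through the usual state table.
 */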
static int memory_failure_hugetlb(unsigned long pfn, int flags)
{
	struct page *p = pfn_to_page(pfn);
	struct page *head = compound_head(p);
	int res;
	unsigned long page_flags;

	if (TestSetPageHWPoison(head)) {
		pr_err("Memory failure: %#lx: already hardware poisoned\n",
		       pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, page_to_pfn(head), flags);
		return res;
	}

	num_poisoned_pages_inc();

	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			/*
			 * Check "filter hit" and "race with other subpage."
			 */
			lock_page(head);
			if (PageHWPoison(head)) {
				if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
				    || (p != head && TestSetPageHWPoison(head))) {
					num_poisoned_pages_dec();
					unlock_page(head);
					return 0;
				}
			}
			unlock_page(head);
			res = MF_FAILED;
			if (__page_handle_poison(p)) {
				page_ref_inc(p);
				res = MF_RECOVERED;
			}
			action_result(pfn, MF_MSG_FREE_HUGE, res);
			return res == MF_RECOVERED ? 0 : -EBUSY;
		} else if (res < 0) {
			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			return -EBUSY;
		}
	}

	lock_page(head);
	page_flags = head->flags;

	if (!PageHWPoison(head)) {
		pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
		num_poisoned_pages_dec();
		unlock_page(head);
		put_page(head);
		return 0;
	}

	/*
	 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
	 * simply disable it. In order to make it work properly, we need to
	 * make sure that:
	 *  - conversion of a pud that maps an error hugetlb into hwpoison
	 *    entry properly works, and
	 *  - other mm code walking over page table is aware of pud-aligned
	 *    hwpoison entries.
	 */
	if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
		action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	return identify_page_state(pfn, p, page_flags);
out:
	unlock_page(head);
	return res;
}

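/*
 * Memory errors on device (ZONE_DEVICE) memory, e.g. fsdax/devdax.
 * There is no page cache to drop and no way to remap a different
 * physical page at the same file offset, so recovery is limited to
 * unmapping the pfn and signalling every process that uses it.
 */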
static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct page *page = pfn_to_page(pfn);
	const bool unmap_success = true;
	unsigned long size = 0;
	struct to_kill *tk;
	LIST_HEAD(tokill);
	int rc = -EBUSY;
	loff_t start;
	dax_entry_t cookie;

	if (flags & MF_COUNT_INCREASED)
		/*
		 * Drop the extra refcount in case we come from madvise().
		 */
		put_page(page);

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn)) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by
	 * lock_page(), but dax pages do not use the page lock. This
	 * also prevents changes to the mapping of this pfn until
	 * poison signaling is complete.
	 */
	cookie = dax_lock_page(page);
	if (!cookie)
		goto out;

	if (hwpoison_filter(page)) {
		rc = 0;
		goto unlock;
	}

	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		/*
		 * TODO: Handle HMM pages which may need coordination
		 * with device-side memory.
		 */
		goto unlock;
	}

	/*
	 * Use this flag as an indication that the dax page has been
	 * remapped UC to prevent speculative consumption of poison.
	 */
	SetPageHWPoison(page);

	/*
	 * Unlike System-RAM there is no possibility to swap in a
	 * different physical page at a given virtual address, so all
	 * userspace consumption of ZONE_DEVICE memory necessitates
	 * SIGBUS (i.e. MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
	collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);

	list_for_each_entry(tk, &tokill, nd)
		if (tk->size_shift)
			size = max(size, 1UL << tk->size_shift);
	if (size) {
		/*
		 * Unmap the largest mapping to avoid breaking up
		 * device-dax mappings which are constant size. The
		 * actual size of the mapping being torn down is
		 * communicated in siginfo, see kill_proc()
		 */
		start = (page->index << PAGE_SHIFT) & ~(size - 1);
		unmap_mapping_range(page->mapping, start, size, 0);
	}
	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
	rc = 0;
unlock:
	dax_unlock_page(page, cookie);
out:
	/* drop pgmap ref acquired in caller */
	put_dev_pagemap(pgmap);
	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct page *orig_head;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	static DEFINE_MUTEX(mf_mutex);

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	p = pfn_to_online_page(pfn);
	if (!p) {
		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			if (pgmap)
				return memory_failure_dev_pagemap(pfn, flags,
								  pgmap);
		}
		pr_err("Memory failure: %#lx: memory outside kernel control\n",
			pfn);
		return -ENXIO;
	}

	mutex_lock(&mf_mutex);

try_again:
	if (PageHuge(p)) {
		res = memory_failure_hugetlb(pfn, flags);
		goto unlock_mutex;
	}

	if (TestSetPageHWPoison(p)) {
		pr_err("Memory failure: %#lx: already hardware poisoned\n",
			pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		goto unlock_mutex;
	}

	orig_head = hpage = compound_head(p);
	num_poisoned_pages_inc();

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hand:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						num_poisoned_pages_dec();
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				action_result(pfn, MF_MSG_BUDDY, res);
				res = res == MF_RECOVERED ? 0 : -EBUSY;
			} else {
				action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
				res = -EBUSY;
			}
			goto unlock_mutex;
		} else if (res < 0) {
			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
	}

	if (PageTransHuge(hpage)) {
		if (try_to_split_thp_page(p, "Memory Failure") < 0) {
			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p, 0);

	lock_page(p);

	/*
	 * The page could have changed compound pages during the locking.
	 * If this happens just bail out.
	 */
	if (PageCompound(p) && compound_head(p) != orig_head) {
		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action.  One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one(). So to determine page
	 * status correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	/*
	 * unpoison always clear PG_hwpoison inside page lock
	 */
	if (!PageHWPoison(p)) {
		pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
		num_poisoned_pages_dec();
		unlock_page(p);
		put_page(p);
		goto unlock_mutex;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unlock_page(p);
		put_page(p);
		goto unlock_mutex;
	}

	/*
	 * __munlock_pagevec may clear a writeback page's LRU flag without
	 * page_lock. We need wait writeback completion for this page or it
	 * may trigger vfs BUG while evict inode.
	 */
	if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
	 */
	if (!hwpoison_user_mappings(p, pfn, flags, &p)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

identify_page_state:
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	unlock_page(p);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);

#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovering of error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);

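/*
 * Worker that drains one CPU's fifo of queued errors and runs the full
 * (process context) handler on each entry; soft offline requests are
 * dispatched to soft_offline_page() instead.
 */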
static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
}

/*
 * Process memory_failure work queued on the specified CPU.
 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
 */
void memory_failure_queue_kick(int cpu)
{
	struct memory_failure_cpu *mf_cpu;

	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
	cancel_work_sync(&mf_cpu->work);
	memory_failure_work_func(&mf_cpu->work);
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);

#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software-level, so it only works
 * for linux injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned long flags = 0;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	/*
	 * unpoison_memory() can encounter thp only when the thp is being
	 * worked by memory_failure() and the page lock is not held yet.
	 * In such case, we yield to memory_failure() and make unpoison fail.
	 */
	if (!PageHuge(page) && PageTransHuge(page)) {
		unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (!get_hwpoison_page(p, flags)) {
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	lock_page(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of page lock.
	 * That's acceptable because that won't trigger kernel panic. Instead,
	 * the PG_hwpoison page will be caught and isolated on the entrance to
	 * the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 pfn, &unpoison_rs);
		num_poisoned_pages_dec();
		freeit = 1;
	}
	unlock_page(page);

	put_page(page);
	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);

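/*
 * Take the page off the LRU (or movable/hugetlb lists) in preparation
 * for migration during soft offline. On success the page sits on
 * @pagelist holding the isolation reference; either way the reference
 * the caller obtained earlier is dropped here.
 */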
static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;
	bool lru = PageLRU(page);

	if (PageHuge(page)) {
		isolated = isolate_huge_page(page, pagelist);
	} else {
		if (lru)
			isolated = !isolate_lru_page(page);
		else
			isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);

		if (isolated)
			list_add(&page->lru, pagelist);
	}

	if (isolated && lru)
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_lru(page));

	/*
	 * If we succeed to isolate the page, we grabbed another refcount on
	 * the page, so we can safely drop the one we got from get_any_page().
	 * If we failed to isolate the page, it means that we cannot go further
	 * and we will return an error, so drop the reference we got from
	 * get_any_page() as well.
	 */
	put_page(page);
	return isolated;
}

/*
 * __soft_offline_page handles hugetlb-pages and non-hugetlb pages.
 * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
 * If the page is mapped, it migrates the contents over.
 */
static int __soft_offline_page(struct page *page)
{
	int ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = PageHuge(page);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	/*
	 * Check PageHWPoison again inside page lock because PageHWPoison
	 * is set by memory_failure() outside page lock. Note that
	 * memory_failure() also double-checks PageHWPoison inside page lock,
	 * so there's no race between soft_offline_page() and memory_failure().
	 */
	lock_page(page);
	if (!PageHuge(page))
		wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!PageHuge(page))
		/*
		 * Try to invalidate first. This should work for
		 * non dirty unmapped page cache pages.
		 */
		ret = invalidate_inode_page(page);
	unlock_page(page);

	/*
	 * RED-PEN would be better to keep it isolated here, but we
	 * would need to fix isolation locking first.
	 */
	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (isolate_page(hpage, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n",
				pfn, msg_page[huge], ret, page->flags, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %lx (%pGp)\n",
			pfn, msg_page[huge], page_count(page), page->flags, &page->flags);
		ret = -EBUSY;
	}
	return ret;
}

static int soft_offline_in_use_page(struct page *page)
{
	struct page *hpage = compound_head(page);

	if (!PageHuge(page) && PageTransHuge(hpage))
		if (try_to_split_thp_page(page, "soft offline") < 0)
			return -EBUSY;
	return __soft_offline_page(page);
}

static int soft_offline_free_page(struct page *page)
{
	int rc = 0;

	if (!page_handle_poison(page, true, false))
		rc = -EBUSY;

	return rc;
}

static void put_ref_page(struct page *page)
{
	if (page)
		put_page(page);
}


/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page, *ref_page = NULL;

	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));

	if (!pfn_valid(pfn))
		return -ENXIO;
	if (flags & MF_COUNT_INCREASED)
		ref_page = pfn_to_page(pfn);

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(ref_page);
		return -EIO;
	}

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(ref_page);
		return 0;
	}

retry:
	get_online_mems();
	ret = get_hwpoison_page(page, flags);
	put_online_mems();

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (soft_offline_free_page(page) && try_again) {
			try_again = false;
			goto retry;
		}
	} else if (ret == -EIO) {
		pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
			__func__, pfn, page->flags, &page->flags);
	}

	return ret;
}