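/*
 * High-level handling of hardware-poisoned ("HWPoison") memory pages:
 * pages reported by the hardware as corrupted are unmapped, dropped from
 * the page or swap cache, or otherwise taken out of use, and processes
 * that map them are signalled or killed as needed.
 */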
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

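/*
 * Restrict error injection to pages backed by the configured device,
 * matched by major/minor of the mapping's superblock device.
 */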
static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

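/*
 * Restrict error injection to pages whose stable page flags match the
 * configured mask/value pair.
 */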
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

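/*
 * When hwpoison_filter_memcg is set, only poison pages charged to the
 * memory cgroup whose inode number matches it.  This allows confining
 * hwpoison stress testing to a single cgroup.
 */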
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

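/*
 * Combined filter used by the error injection code: a non-zero return
 * means the page should not be poisoned.
 */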
int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

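/*
 * Send a SIGBUS to a process that maps the corrupted page.  When the error
 * is "action required" and hit the current process, force a BUS_MCEERR_AR
 * signal; otherwise deliver an advisory BUS_MCEERR_AO signal to the task.
 */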
static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn, struct page *page, int flags)
{
	struct siginfo si;
	int ret;

	pr_err("MCE %#lx: Killing %s:%d due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;

	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
		si.si_code = BUS_MCEERR_AR;
		ret = force_sig_info(SIGBUS, &si, current);
	} else {
		/*
		 * Don't use force here: it's convenient if the signal
		 * can be temporarily blocked by the receiving task.
		 */
		si.si_code = BUS_MCEERR_AO;
		ret = send_sig_info(SIGBUS, &si, t);
	}
	if (ret < 0)
		pr_info("MCE: Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}

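/*
 * When an unknown page type is encountered, try hard to turn it into an
 * LRU or free page by draining the per-cpu LRU and page allocator caches
 * (and, when requested, the slab caches) so that it can be handled.
 */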
void shake_page(struct page *p, int access)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages(page_zone(p));
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	if (access)
		drop_slab_node(page_to_nid(p));
}
EXPORT_SYMBOL_GPL(shake_page);

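/*
 * Processes that have the poisoned page mapped are not killed directly
 * while the rmap locks are held; instead they are collected on a list of
 * struct to_kill entries and signalled later.  The caller pre-allocates
 * one entry (passed around as *tkc) so that at least one task can be
 * recorded even if GFP_ATOMIC allocations fail.
 */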
struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	char addr_valid;
};

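/*
 * Schedule a process for later kill.  Uses GFP_ATOMIC allocations so it
 * can be called from the rmap walk; if allocation fails the error is
 * logged and the task is skipped.
 */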
static void add_to_kill(struct task_struct *tsk, struct page *p,
		       struct vm_area_struct *vma,
		       struct list_head *to_kill,
		       struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			pr_err("MCE: Out of memory while handling machine check\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * The virtual address for this page may not be found in the vma
	 * (e.g. it was unmapped or remapped in the meantime).  Record the
	 * task anyway; kill_procs() will send SIGKILL instead of a precise
	 * SIGBUS when no valid address is available and a kill is forced.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

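/*
 * Kill the processes collected earlier.  When forcekill is not set the
 * list is simply freed (used for clean pages that did not need killing).
 * With forcekill set, tasks whose mapping could not be resolved or
 * unmapped get SIGKILL; the rest get a SIGBUS via kill_proc().
 */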
static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
			  int fail, struct page *page, unsigned long pfn,
			  int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with unmapping,
			 * make sure the process doesn't catch the signal
			 * and then access the memory.  Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				pr_err("MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped something
			 * else on the address in the meantime.  We could
			 * check for that, but we need to tell the process
			 * anyway.
			 */
			else if (kill_proc(tk->tsk, tk->addr, trapno,
					      pfn, page, flags) < 0)
				pr_err("MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

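/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group: the first thread that set both
 * PF_MCE_PROCESS and PF_MCE_EARLY via prctl(PR_MCE_KILL).
 */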
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t)
		if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
			return t;
	return NULL;
}

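/*
 * Determine whether a given process should be signalled early ("early
 * kill"), and if so which of its threads should receive the signal.
 */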
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	struct task_struct *t;
	if (!tsk->mm)
		return NULL;
	if (force_early)
		return tsk;
	t = find_early_kill_thread(tsk);
	if (t)
		return t;
	if (sysctl_memory_failure_early_kill)
		return tsk;
	return NULL;
}

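/*
 * Collect the processes which map the page when the error hit an
 * anonymous page.
 */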
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc, int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = page_lock_anon_vma_read(page);
	if (av == NULL)
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill, tkc);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}

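/*
 * Collect the processes which map the page when the error hit a
 * file-backed (page cache) page.
 */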
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc, int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page_to_pgoff(page);
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill, tkc);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}

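/*
 * Collect all processes that have the corrupted page mapped, using the
 * anon or file variant as appropriate.  The single pre-allocated to_kill
 * entry guarantees that at least one task can be recorded even under
 * memory pressure.
 */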
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk, force_early);
	else
		collect_procs_file(page, tokill, &tk, force_early);
	kfree(tk);
}

static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_POISONED_HUGE]		= "huge page already hardware poisoned",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_BUDDY_2ND]		= "free buddy page (2nd try)",
	[MF_MSG_UNKNOWN]		= "unknown page",
};

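/*
 * Take the page off the LRU so that further users cannot pick it up from
 * the LRU lists.  Returns 0 on success, -EIO if the page could not be
 * isolated.
 */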
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoisoned and freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);
		/*
		 * Drop the page count elevated by isolate_lru_page().
		 */
		put_page(p);
		return 0;
	}
	return -EIO;
}

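/*
 * Error hit a reserved kernel page.  Do nothing and hope the kernel user
 * never touches the corrupted data; there is no safe recovery action here.
 */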
static int me_kernel(struct page *p, unsigned long pfn)
{
	return MF_IGNORED;
}

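/*
 * Page in an unknown state.  Nothing can be done safely here.
 */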
static int me_unknown(struct page *p, unsigned long pfn)
{
	pr_err("MCE %#lx: Unknown page state\n", pfn);
	return MF_FAILED;
}

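/*
 * Clean (or cleaned) page cache page: try to remove it from the page
 * cache so that nobody can map the corrupted data again.
 */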
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = MF_FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done: the only reference left
	 * should be the one the memory failure handler holds.
	 */
	if (PageAnon(p))
		return MF_RECOVERED;

	/*
	 * Now truncate the corrupted page from the page cache.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meantime.
		 */
		return MF_FAILED;
	}

	/*
	 * Prefer the filesystem's ->error_remove_page() when available,
	 * and fall back to a plain invalidation otherwise.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			pr_info("MCE %#lx: Failed to punch page: %d\n",
				pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_info("MCE %#lx: failed to release buffers\n", pfn);
		} else {
			ret = MF_RECOVERED;
		}
	} else {
		/*
		 * If the filesystem doesn't support it just invalidate.
		 * This fails if the page is dirty or still mapped.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("MCE %#lx: Failed to invalidate\n", pfn);
	}
	return ret;
}

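/*
 * Dirty pagecache page: the data that should have been written back is
 * lost, so the error must be reported to the owning mapping before the
 * page is handled like a clean page cache page.
 */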
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);

	if (mapping) {
		/*
		 * Report the error through the mapping: the next
		 * write()/fsync()/close() on the file will see -EIO.
		 * AS_EIO is cleared when it is reported, so only one
		 * caller gets to see the error and silent data loss is
		 * still possible for others.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}

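/*
 * Dirty swap cache page: the data is lost, so make sure it is never read
 * back as valid (clear PG_uptodate) and keep it out of circulation.
 * A clean swap cache page can simply be dropped from the swap cache.
 */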
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return MF_DELAYED;
	else
		return MF_FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return MF_RECOVERED;
	else
		return MF_FAILED;
}

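/*
 * Huge pages: errors are handled in hugepage units.  A free hugepage is
 * dequeued from the free list (recovered); an in-use hugepage is left
 * alone (delayed) because it cannot be dissolved here.
 */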
static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res = 0;
	struct page *hpage = compound_head(p);

	if (!PageHuge(hpage))
		return MF_DELAYED;

	/*
	 * We can safely recover from an error on a free or reserved (i.e.
	 * not in-use) hugepage by dequeuing it from the free list.
	 * An in-use hugepage cannot be dissolved here, so the error is
	 * only contained by leaving PG_hwpoison set on it.
	 */
	if (!(page_mapping(hpage) || PageAnon(hpage))) {
		res = dequeue_hwpoisoned_huge_page(hpage);
		if (!res)
			return MF_RECOVERED;
	}
	return MF_DELAYED;
}

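/*
 * Various page states we can handle, keyed by page->flags bits.  The table
 * is matched in order and the first entry whose mask/value pair fits the
 * page decides which handler runs.  The catch-all MF_MSG_UNKNOWN entry
 * must stay last.
 */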
#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * Free pages are specially detected outside this table.
	 */

	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,	me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef slab
#undef reserved

static void action_result(unsigned long pfn, enum mf_action_page_type type,
			  enum mf_result result)
{
	trace_memory_failure_event(pfn, type, result);

	pr_err("MCE %#lx: recovery action for %s: %s\n",
		pfn, action_page_types[type], action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	result = ps->action(p, pfn);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
		count--;
	if (count != 0) {
		pr_err("MCE %#lx: %s still referenced by %d users\n",
		       pfn, action_page_types[ps->type], count);
		result = MF_FAILED;
	}
	action_result(pfn, ps->type, result);

	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}

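/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @page:	raw error page (hit by memory error)
 *
 * Return: 0 when the page could not be pinned, 1 when a reference to the
 * head page was successfully taken.
 */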
int get_hwpoison_page(struct page *page)
{
	struct page *head = compound_head(page);

	if (!PageHuge(head) && PageTransHuge(head)) {
		/*
		 * Non anonymous thp exists only in allocation/free time.  We
		 * can't handle such a case correctly, so let's give it up.
		 * This should be better than triggering BUG_ON when the
		 * kernel tries to touch the "partially handled" page.
		 */
		if (!PageAnon(head)) {
			pr_err("MCE: %#lx: non anonymous thp\n",
				page_to_pfn(page));
			return 0;
		}
	}

	if (get_page_unless_zero(head)) {
		if (head == compound_head(page))
			return 1;

		pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
		put_page(head);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_hwpoison_page);

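/*
 * Do all that is necessary to remove user space mappings of the poisoned
 * page: propagate the dirty bit to the struct page, unmap the page and
 * signal the affected processes.  Returns the try_to_unmap() result.
 */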
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno, int flags, struct page **hpagep)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int kill = 1, forcekill;
	struct page *hpage = *hpagep;

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early.  Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return SWAP_SUCCESS;

	if (PageKsm(p)) {
		pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
		return SWAP_FAIL;
	}

	if (PageSwapCache(p)) {
		pr_err("MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide whether to kill the processes or just drop
	 * the page.
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			pr_info("MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page mapped in dirty
	 * form.  This has to be done before try_to_unmap, because
	 * try_to_unmap clears the mapping information.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	ret = try_to_unmap(hpage, ttu);
	if (ret != SWAP_SUCCESS)
		pr_err("MCE %#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * Now that the mappings are gone, kill the processes.  The kill is
	 * forced for dirty pages (the data is lost) and when the caller
	 * insists on it; otherwise the tasks only receive an advisory signal.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, trapno,
		      ret != SWAP_SUCCESS, p, pfn, flags);

	return ret;
}

static void set_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		SetPageHWPoison(hpage + i);
}

static void clear_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		ClearPageHWPoison(hpage + i);
}

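/**
 * memory_failure() - Handle memory failure of a page
 * @pfn:	page frame number of the corrupted page
 * @trapno:	trap number reported in the signal to user space
 * @flags:	MF_* flags that fine-tune the action taken
 *
 * Called when the hardware (or an injector) reports an uncorrected memory
 * error on a page.  The page is marked PG_hwpoison, taken out of normal
 * use where possible, and processes mapping it are signalled or killed.
 * Must be called from a context that can sleep.
 */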
int memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	struct page *hpage;
	struct page *orig_head;
	int res;
	unsigned int nr_pages;
	unsigned long page_flags;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		pr_err("MCE %#lx: memory outside kernel control\n", pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	orig_head = hpage = compound_head(p);
	if (TestSetPageHWPoison(p)) {
		pr_err("MCE %#lx: already hardware poisoned\n", pfn);
		return 0;
	}

	/*
	 * Errors on hugetlbfs pages are accounted in hugepage units, so
	 * nr_pages is 1 << compound_order there.  Transparent hugepages are
	 * split before being handled, so they count as a single page.
	 */
	if (PageHuge(p))
		nr_pages = 1 << compound_order(hpage);
	else
		nr_pages = 1;
	num_poisoned_pages_add(nr_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's a free hugepage, which is also safe:
	 *    an affected hugepage will be dequeued from the hugepage
	 *    freelist, so there's no concern about reusing it ever after.
	 * 3) it's part of a non-compound high order page.
	 *    Implies some kernel user: we cannot stop them from reading or
	 *    writing the page; hope that it is freed some time later.
	 */
	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
			return 0;
		} else if (PageHuge(hpage)) {
			/*
			 * Check "filter hit" and "race with other subpage."
			 */
			lock_page(hpage);
			if (PageHWPoison(hpage)) {
				if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
				    || (p != hpage && TestSetPageHWPoison(hpage))) {
					num_poisoned_pages_sub(nr_pages);
					unlock_page(hpage);
					return 0;
				}
			}
			set_page_hwpoison_huge_page(hpage);
			res = dequeue_hwpoisoned_huge_page(hpage);
			action_result(pfn, MF_MSG_FREE_HUGE,
				      res ? MF_IGNORED : MF_DELAYED);
			unlock_page(hpage);
			return res;
		} else {
			action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
			return -EBUSY;
		}
	}

	if (!PageHuge(p) && PageTransHuge(hpage)) {
		lock_page(hpage);
		if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
			unlock_page(hpage);
			if (!PageAnon(hpage))
				pr_err("MCE: %#lx: non anonymous thp\n", pfn);
			else
				pr_err("MCE: %#lx: thp split failed\n", pfn);
			if (TestClearPageHWPoison(p))
				num_poisoned_pages_sub(nr_pages);
			put_hwpoison_page(p);
			return -EBUSY;
		}
		unlock_page(hpage);
		get_hwpoison_page(p);
		put_hwpoison_page(hpage);
		VM_BUG_ON_PAGE(!page_count(p), p);
		hpage = compound_head(p);
	}

	/*
	 * Non-LRU pages are not handled by the state table below, so first
	 * try to move the page onto the LRU (or free it) by draining the
	 * per-cpu caches.
	 */
	if (!PageHuge(p)) {
		if (!PageLRU(p))
			shake_page(p, 0);
		if (!PageLRU(p)) {
			/*
			 * shake_page() could have turned it free.
			 */
			if (is_free_buddy_page(p)) {
				if (flags & MF_COUNT_INCREASED)
					action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
				else
					action_result(pfn, MF_MSG_BUDDY_2ND,
						      MF_DELAYED);
				return 0;
			}
		}
	}

	lock_page(hpage);

	/*
	 * The page could have changed compound pages during the locking.
	 * If this happens just bail out.
	 */
	if (PageCompound(p) && compound_head(p) != orig_head) {
		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action.  One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one().  So to determine page
	 * status correctly, we save a copy of the page flags now.
	 */
	page_flags = p->flags;

	/*
	 * unpoison always clears PG_hwpoison inside the page lock
	 */
	if (!PageHWPoison(p)) {
		pr_err("MCE %#lx: just unpoisoned\n", pfn);
		num_poisoned_pages_sub(nr_pages);
		unlock_page(hpage);
		put_hwpoison_page(hpage);
		return 0;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_sub(nr_pages);
		unlock_page(hpage);
		put_hwpoison_page(hpage);
		return 0;
	}

	if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
		goto identify_page_state;

	/*
	 * For an error on a tail page of a hugepage, also mark the head
	 * page, because containment is done in hugepage units.  If the head
	 * page was already marked, another subpage error is handling this
	 * hugepage.
	 */
	if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
		action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED);
		unlock_page(hpage);
		put_hwpoison_page(hpage);
		return 0;
	}

	/*
	 * Set PG_hwpoison on all pages in an error hugepage, because
	 * containment is done in hugepage units.
	 */
	if (PageHuge(p))
		set_page_hwpoison_huge_page(hpage);

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
	 */
	if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
	    != SWAP_SUCCESS) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

identify_page_state:
	res = -EBUSY;
	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information.  The second check with the saved page flags
	 * is carried out only if the first check can't determine the page
	 * status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	res = page_action(ps, p, pfn);
out:
	unlock_page(hpage);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);

#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int trapno;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

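/**
 * memory_failure_queue() - Schedule handling of a memory failure
 * @pfn:	page frame number of the corrupted page
 * @trapno:	trap number reported in the signal to user space
 * @flags:	MF_* flags, as for memory_failure()
 *
 * Can be called from interrupt context: the request is queued on a
 * per-cpu kfifo and processed later from a work queue, where
 * memory_failure() (or soft_offline_page()) can sleep safely.
 */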
void memory_failure_queue(unsigned long pfn, int trapno, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.trapno =	trapno,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);

static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
		else
			memory_failure(entry.pfn, entry.trapno, entry.flags);
	}
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);

#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

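/**
 * unpoison_memory() - Undo memory failure handling for a page
 * @pfn:	page frame number of the previously poisoned page
 *
 * Software-undo the effects of memory_failure() on a page that was
 * poisoned by error injection and is not referenced, mapped or backed by
 * real hardware errors.  Only used for testing.
 */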
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned int nr_pages;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		unpoison_pr_info("MCE: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("MCE: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("MCE: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("MCE: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	/*
	 * unpoison_memory() can encounter thp only when the thp is being
	 * worked on by memory_failure() and the page lock is not held yet.
	 * In such a case, yield to memory_failure() and make unpoison fail.
	 */
	if (!PageHuge(page) && PageTransHuge(page)) {
		unpoison_pr_info("MCE: Memory failure is now running on %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	nr_pages = 1 << compound_order(page);

	if (!get_hwpoison_page(p)) {
		/*
		 * Since a hwpoisoned hugepage should have a non-zero
		 * refcount, reaching here means a race with memory failure:
		 * unpoison fails and memory failure runs to the end.
		 */
		if (PageHuge(page)) {
			unpoison_pr_info("MCE: Memory failure is now running on free hugepage %#lx\n",
					 pfn, &unpoison_rs);
			return 0;
		}
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unpoison_pr_info("MCE: Software-unpoisoned free page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	lock_page(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of the page
	 * lock.  That's acceptable because it won't trigger a kernel panic:
	 * a PG_hwpoison page will be caught and isolated on the entrance to
	 * the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		unpoison_pr_info("MCE: Software-unpoisoned page %#lx\n",
				 pfn, &unpoison_rs);
		num_poisoned_pages_sub(nr_pages);
		freeit = 1;
		if (PageHuge(page))
			clear_page_hwpoison_huge_page(page);
	}
	unlock_page(page);

	put_hwpoison_page(page);
	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
		put_hwpoison_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);

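/*
 * Allocation callback for migrate_pages(): allocate the replacement page
 * on the same node as the poisoned one.
 */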
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
	int nid = page_to_nid(p);
	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
						   nid);
	else
		return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

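/*
 * Safely get a reference count on an arbitrary page.  Returns 0 for a
 * free page, -EIO for a zero-refcount page that is not free, and 1 for
 * any other page type; only in the last case is a reference taken.
 */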
static int __get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * A free hugepage or free buddy page is reported as such; any other
	 * page with a zero refcount is an error.
	 */
	if (!get_hwpoison_page(p)) {
		if (PageHuge(p)) {
			pr_info("%s: %#lx free huge page\n", __func__, pfn);
			ret = 0;
		} else if (is_free_buddy_page(p)) {
			pr_info("%s: %#lx free buddy page\n", __func__, pfn);
			ret = 0;
		} else {
			pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
				__func__, pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* The page is in use and now pinned. */
		ret = 1;
	}
	return ret;
}

static int get_any_page(struct page *page, unsigned long pfn, int flags)
{
	int ret = __get_any_page(page, pfn, flags);

	if (ret == 1 && !PageHuge(page) && !PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_hwpoison_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = __get_any_page(page, pfn, 0);
		if (ret == 1 && !PageLRU(page)) {
			/* Drop the page reference taken by __get_any_page() */
			put_hwpoison_page(page);
			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
				pfn, page->flags);
			return -EIO;
		}
	}
	return ret;
}

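/*
 * Soft-offline a hugepage: migrate its contents to a fresh hugepage and
 * then keep the old one out of use by marking it hwpoisoned and removing
 * it from the free list.
 */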
static int soft_offline_huge_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	LIST_HEAD(pagelist);

	/*
	 * Re-check PageHWPoison under the page lock to close the race with
	 * a concurrent memory_failure() marking this hugepage.
	 */
	lock_page(hpage);
	if (PageHWPoison(hpage)) {
		unlock_page(hpage);
		put_hwpoison_page(hpage);
		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
		return -EBUSY;
	}
	unlock_page(hpage);

	ret = isolate_huge_page(hpage, &pagelist);
	/*
	 * get_any_page() and isolate_huge_page() take a refcount each,
	 * so we need to drop one here.
	 */
	put_hwpoison_page(hpage);
	if (!ret) {
		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
		return -EBUSY;
	}

	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
				MIGRATE_SYNC, MR_MEMORY_FAILURE);
	if (ret) {
		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
			pfn, ret, page->flags);
		/*
		 * Only one hugepage (pointed to by hpage) was queued for
		 * migration, so there is no need to walk the pagelist here.
		 */
		putback_active_hugepage(hpage);
		if (ret > 0)
			ret = -EIO;
	} else {
		/* An overcommitted hugetlb page will be freed to buddy. */
		if (PageHuge(page)) {
			set_page_hwpoison_huge_page(hpage);
			dequeue_hwpoisoned_huge_page(hpage);
			num_poisoned_pages_add(1 << compound_order(hpage));
		} else {
			SetPageHWPoison(page);
			num_poisoned_pages_inc();
		}
	}
	return ret;
}

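/*
 * Soft-offline a base page that is in use: first try a simple page cache
 * invalidation, and fall back to migrating the contents to a new page.
 */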
static int __soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	/*
	 * Check PageHWPoison again inside the page lock because PageHWPoison
	 * is set by memory_failure() outside the page lock.  Note that
	 * memory_failure() also double-checks PageHWPoison inside the page
	 * lock, so there's no race between soft_offline_page() and
	 * memory_failure().
	 */
	lock_page(page);
	wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_hwpoison_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}

	/*
	 * Try to invalidate first.  This should work for
	 * non-dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);

	/*
	 * RED-PEN: it would be better to keep the page isolated here, but
	 * that would require fixing the isolation locking first.
	 */
	if (ret == 1) {
		put_hwpoison_page(page);
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		SetPageHWPoison(page);
		num_poisoned_pages_inc();
		return 0;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead.  migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	/*
	 * Drop the page reference that came from get_any_page();
	 * a successful isolate_lru_page() already took another one.
	 */
	put_hwpoison_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
					MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (ret) {
			if (!list_empty(&pagelist)) {
				list_del(&page->lru);
				dec_zone_page_state(page, NR_ISOLATED_ANON +
						page_is_file_cache(page));
				putback_lru_page(page);
			}

			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
			pfn, ret, page_count(page), page->flags);
	}
	return ret;
}

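/*
 * Soft-offline an in-use page: split a transparent hugepage first (only
 * anonymous THP can be split here), then offline the base page or the
 * hugetlb page as appropriate.
 */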
static int soft_offline_in_use_page(struct page *page, int flags)
{
	int ret;
	struct page *hpage = compound_head(page);

	if (!PageHuge(page) && PageTransHuge(hpage)) {
		lock_page(hpage);
		if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
			unlock_page(hpage);
			if (!PageAnon(hpage))
				pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
			else
				pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
			put_hwpoison_page(hpage);
			return -EBUSY;
		}
		unlock_page(hpage);
		get_hwpoison_page(page);
		put_hwpoison_page(hpage);
	}

	if (PageHuge(page))
		ret = soft_offline_huge_page(page, flags);
	else
		ret = __soft_offline_page(page, flags);

	return ret;
}

static void soft_offline_free_page(struct page *page)
{
	if (PageHuge(page)) {
		struct page *hpage = compound_head(page);

		set_page_hwpoison_huge_page(hpage);
		if (!dequeue_hwpoisoned_huge_page(hpage))
			num_poisoned_pages_add(1 << compound_order(hpage));
	} else {
		if (!TestSetPageHWPoison(page))
			num_poisoned_pages_inc();
	}
}

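/**
 * soft_offline_page() - Take a page out of use without killing anything
 * @page:	page to offline
 * @flags:	MF_* flags (MF_COUNT_INCREASED means the caller already
 *		holds a reference on the page)
 *
 * Try to remove the page from use, either by invalidating it or by
 * migrating its contents elsewhere, and then mark it hwpoisoned so it is
 * not used again.  Unlike memory_failure(), the page contents are still
 * valid, so no process is killed.  Returns 0 on success, a negative errno
 * on failure.
 */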
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	if (PageHWPoison(page)) {
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		if (flags & MF_COUNT_INCREASED)
			put_hwpoison_page(page);
		return -EBUSY;
	}

	get_online_mems();
	ret = get_any_page(page, pfn, flags);
	put_online_mems();

	if (ret > 0)
		ret = soft_offline_in_use_page(page, flags);
	else if (ret == 0)
		soft_offline_free_page(page);

	return ret;
}