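/*
 * Memory failure (HWPoison) handling.
 *
 * This file implements the kernel's response to hardware-reported memory
 * corruption: mark the affected page as poisoned, isolate it from further
 * use (drop it from the page cache, take it off the buddy or hugetlb free
 * lists, or migrate its contents away), and notify or kill the user
 * processes that still have it mapped.  The main entry points are
 * memory_failure(), memory_failure_queue(), soft_offline_page() and
 * unpoison_memory().
 */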
36#include <linux/kernel.h>
37#include <linux/mm.h>
38#include <linux/page-flags.h>
39#include <linux/kernel-page-flags.h>
40#include <linux/sched/signal.h>
41#include <linux/sched/task.h>
42#include <linux/ksm.h>
43#include <linux/rmap.h>
44#include <linux/export.h>
45#include <linux/pagemap.h>
46#include <linux/swap.h>
47#include <linux/backing-dev.h>
48#include <linux/migrate.h>
49#include <linux/suspend.h>
50#include <linux/slab.h>
51#include <linux/swapops.h>
52#include <linux/hugetlb.h>
53#include <linux/memory_hotplug.h>
54#include <linux/mm_inline.h>
55#include <linux/memremap.h>
56#include <linux/kfifo.h>
57#include <linux/ratelimit.h>
58#include <linux/page-isolation.h>
59#include "internal.h"
60#include "ras/ras_event.h"
61
62int sysctl_memory_failure_early_kill __read_mostly = 0;
63
64int sysctl_memory_failure_recovery __read_mostly = 1;
65
66atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
67
68static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
69{
70 if (hugepage_or_freepage) {
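 /*
  * The page may be a free hugetlb page or a free buddy page.
  * dissolve_free_huge_page() returns 0 for non-hugetlb pages, so it is
  * safe to call it unconditionally here; if dissolving fails, or the
  * page cannot then be taken off the buddy free lists, report failure
  * to the caller.
  */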
75 if (dissolve_free_huge_page(page) || !take_page_off_buddy(page))
83 return false;
84 }
85
86 SetPageHWPoison(page);
87 if (release)
88 put_page(page);
89 page_ref_inc(page);
90 num_poisoned_pages_inc();
91
92 return true;
93}
94
95#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
96
97u32 hwpoison_filter_enable = 0;
98u32 hwpoison_filter_dev_major = ~0U;
99u32 hwpoison_filter_dev_minor = ~0U;
100u64 hwpoison_filter_flags_mask;
101u64 hwpoison_filter_flags_value;
102EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
103EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
104EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
105EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
106EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
107
108static int hwpoison_filter_dev(struct page *p)
109{
110 struct address_space *mapping;
111 dev_t dev;
112
113 if (hwpoison_filter_dev_major == ~0U &&
114 hwpoison_filter_dev_minor == ~0U)
115 return 0;
116
117
118
119
120 if (PageSlab(p))
121 return -EINVAL;
122
123 mapping = page_mapping(p);
124 if (mapping == NULL || mapping->host == NULL)
125 return -EINVAL;
126
127 dev = mapping->host->i_sb->s_dev;
128 if (hwpoison_filter_dev_major != ~0U &&
129 hwpoison_filter_dev_major != MAJOR(dev))
130 return -EINVAL;
131 if (hwpoison_filter_dev_minor != ~0U &&
132 hwpoison_filter_dev_minor != MINOR(dev))
133 return -EINVAL;
134
135 return 0;
136}
137
138static int hwpoison_filter_flags(struct page *p)
139{
140 if (!hwpoison_filter_flags_mask)
141 return 0;
142
143 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
144 hwpoison_filter_flags_value)
145 return 0;
146 else
147 return -EINVAL;
148}
149
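/*
 * When hwpoison_filter_memcg is set, only pages charged to the memory
 * cgroup whose inode number matches the filter are poisoned.  This lets
 * error injection be restricted to a test workload running inside a
 * dedicated memcg.
 */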
160#ifdef CONFIG_MEMCG
161u64 hwpoison_filter_memcg;
162EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
163static int hwpoison_filter_task(struct page *p)
164{
165 if (!hwpoison_filter_memcg)
166 return 0;
167
168 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
169 return -EINVAL;
170
171 return 0;
172}
173#else
174static int hwpoison_filter_task(struct page *p) { return 0; }
175#endif
176
177int hwpoison_filter(struct page *p)
178{
179 if (!hwpoison_filter_enable)
180 return 0;
181
182 if (hwpoison_filter_dev(p))
183 return -EINVAL;
184
185 if (hwpoison_filter_flags(p))
186 return -EINVAL;
187
188 if (hwpoison_filter_task(p))
189 return -EINVAL;
190
191 return 0;
192}
193#else
194int hwpoison_filter(struct page *p)
195{
196 return 0;
197}
198#endif
199
200EXPORT_SYMBOL_GPL(hwpoison_filter);
201
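/*
 * Kill all processes that have the poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually removed until all error handling is done.
 *
 * The processes to be killed are collected into a list of struct to_kill
 * entries under the tasklist lock and signalled later by kill_procs().
 */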
224struct to_kill {
225 struct list_head nd;
226 struct task_struct *tsk;
227 unsigned long addr;
228 short size_shift;
229};
230
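/*
 * Send a SIGBUS to one task.  For "action required" errors the signal is
 * forced to the current task (the one that consumed the poison); for
 * "action optional" errors a normal, blockable BUS_MCEERR_AO signal is
 * sent to the task that has the page mapped.
 */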
236static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
237{
238 struct task_struct *t = tk->tsk;
239 short addr_lsb = tk->size_shift;
240 int ret = 0;
241
242 pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
243 pfn, t->comm, t->pid);
244
245 if (flags & MF_ACTION_REQUIRED) {
246 WARN_ON_ONCE(t != current);
247 ret = force_sig_mceerr(BUS_MCEERR_AR,
248 (void __user *)tk->addr, addr_lsb);
249 } else {
250
251
252
253
254
255
256 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
257 addr_lsb, t);
258 }
259 if (ret < 0)
260 pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
261 t->comm, t->pid, ret);
262 return ret;
263}
264
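/*
 * Try to turn a page of unknown type into something the error handler
 * can deal with: drain the per-cpu LRU caches, and optionally (@access)
 * shrink the slab caches on the page's node, in the hope that the page
 * ends up on the LRU or in the buddy allocator.
 */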
269void shake_page(struct page *p, int access)
270{
271 if (PageHuge(p))
272 return;
273
274 if (!PageSlab(p)) {
275 lru_add_drain_all();
276 if (PageLRU(p) || is_free_buddy_page(p))
277 return;
278 }
279
280
281
282
283
284 if (access)
285 drop_slab_node(page_to_nid(p));
286}
287EXPORT_SYMBOL_GPL(shake_page);
288
289static unsigned long dev_pagemap_mapping_shift(struct page *page,
290 struct vm_area_struct *vma)
291{
292 unsigned long address = vma_address(page, vma);
293 pgd_t *pgd;
294 p4d_t *p4d;
295 pud_t *pud;
296 pmd_t *pmd;
297 pte_t *pte;
298
299 pgd = pgd_offset(vma->vm_mm, address);
300 if (!pgd_present(*pgd))
301 return 0;
302 p4d = p4d_offset(pgd, address);
303 if (!p4d_present(*p4d))
304 return 0;
305 pud = pud_offset(p4d, address);
306 if (!pud_present(*pud))
307 return 0;
308 if (pud_devmap(*pud))
309 return PUD_SHIFT;
310 pmd = pmd_offset(pud, address);
311 if (!pmd_present(*pmd))
312 return 0;
313 if (pmd_devmap(*pmd))
314 return PMD_SHIFT;
315 pte = pte_offset_map(pmd, address);
316 if (!pte_present(*pte))
317 return 0;
318 if (pte_devmap(*pte))
319 return PAGE_SHIFT;
320 return 0;
321}
322
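/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 */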
332static void add_to_kill(struct task_struct *tsk, struct page *p,
333 struct vm_area_struct *vma,
334 struct list_head *to_kill)
335{
336 struct to_kill *tk;
337
338 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
339 if (!tk) {
340 pr_err("Memory failure: Out of memory while machine check handling\n");
341 return;
342 }
343
344 tk->addr = page_address_in_vma(p, vma);
345 if (is_zone_device_page(p))
346 tk->size_shift = dev_pagemap_mapping_shift(p, vma);
347 else
348 tk->size_shift = page_shift(compound_head(p));
349
360 if (tk->addr == -EFAULT) {
361 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
362 page_to_pfn(p), tsk->comm);
363 } else if (tk->size_shift == 0) {
364 kfree(tk);
365 return;
366 }
367
368 get_task_struct(tsk);
369 tk->tsk = tsk;
370 list_add_tail(&tk->nd, to_kill);
371}
372
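/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when @forcekill is set, otherwise just free the list
 * (this is used for clean pages, which do not need killing).  When @fail
 * is set, or when no user address could be resolved for a task, fall
 * back to SIGKILL because the task cannot be given precise information
 * about the error.
 */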
381static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
382 unsigned long pfn, int flags)
383{
384 struct to_kill *tk, *next;
385
386 list_for_each_entry_safe (tk, next, to_kill, nd) {
387 if (forcekill) {
388
389
390
391
392
393 if (fail || tk->addr == -EFAULT) {
394 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
395 pfn, tk->tsk->comm, tk->tsk->pid);
396 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
397 tk->tsk, PIDTYPE_PID);
398 }
399
400
401
402
403
404
405
406 else if (kill_proc(tk, pfn, flags) < 0)
407 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
408 pfn, tk->tsk->comm, tk->tsk->pid);
409 }
410 put_task_struct(tk->tsk);
411 kfree(tk);
412 }
413}
414
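/*
 * Find a thread in @tsk's thread group that should receive the early
 * SIGBUS: a thread that opted in via PF_MCE_PROCESS and PF_MCE_EARLY,
 * or, when the memory_failure_early_kill sysctl is enabled, the first
 * thread that has not opted out via PF_MCE_PROCESS.  Returns NULL if no
 * early kill should happen for this group.  Called with tasklist_lock
 * held for read.
 */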
423static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
424{
425 struct task_struct *t;
426
427 for_each_thread(tsk, t) {
428 if (t->flags & PF_MCE_PROCESS) {
429 if (t->flags & PF_MCE_EARLY)
430 return t;
431 } else {
432 if (sysctl_memory_failure_early_kill)
433 return t;
434 }
435 }
436 return NULL;
437}
438
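/*
 * Determine whether a given process is an "early kill" process that
 * should be sent a SIGBUS now.  Returns the thread to signal, or NULL if
 * this process should not be signalled at this point.  When @force_early
 * is set (action required), only the current process (matched by mm) is
 * considered, since the error was consumed in its context.
 */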
448static struct task_struct *task_early_kill(struct task_struct *tsk,
449 int force_early)
450{
451 if (!tsk->mm)
452 return NULL;
453 if (force_early) {
454
455
456
457
458 if (tsk->mm == current->mm)
459 return current;
460 else
461 return NULL;
462 }
463 return find_early_kill_thread(tsk);
464}
465
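/*
 * Collect processes when the error hit an anonymous page.
 */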
469static void collect_procs_anon(struct page *page, struct list_head *to_kill,
470 int force_early)
471{
472 struct vm_area_struct *vma;
473 struct task_struct *tsk;
474 struct anon_vma *av;
475 pgoff_t pgoff;
476
477 av = page_lock_anon_vma_read(page);
478 if (av == NULL)
479 return;
480
481 pgoff = page_to_pgoff(page);
482 read_lock(&tasklist_lock);
483 for_each_process (tsk) {
484 struct anon_vma_chain *vmac;
485 struct task_struct *t = task_early_kill(tsk, force_early);
486
487 if (!t)
488 continue;
489 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
490 pgoff, pgoff) {
491 vma = vmac->vma;
492 if (!page_mapped_in_vma(page, vma))
493 continue;
494 if (vma->vm_mm == t->mm)
495 add_to_kill(t, page, vma, to_kill);
496 }
497 }
498 read_unlock(&tasklist_lock);
499 page_unlock_anon_vma_read(av);
500}
501
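/*
 * Collect processes when the error hit a file mapped page.
 */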
505static void collect_procs_file(struct page *page, struct list_head *to_kill,
506 int force_early)
507{
508 struct vm_area_struct *vma;
509 struct task_struct *tsk;
510 struct address_space *mapping = page->mapping;
511 pgoff_t pgoff;
512
513 i_mmap_lock_read(mapping);
514 read_lock(&tasklist_lock);
515 pgoff = page_to_pgoff(page);
516 for_each_process(tsk) {
517 struct task_struct *t = task_early_kill(tsk, force_early);
518
519 if (!t)
520 continue;
521 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
522 pgoff) {
523
524
525
526
527
528
529
530 if (vma->vm_mm == t->mm)
531 add_to_kill(t, page, vma, to_kill);
532 }
533 }
534 read_unlock(&tasklist_lock);
535 i_mmap_unlock_read(mapping);
536}
537
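/*
 * Collect the processes who have the corrupted page mapped to kill.
 */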
541static void collect_procs(struct page *page, struct list_head *tokill,
542 int force_early)
543{
544 if (!page->mapping)
545 return;
546
547 if (PageAnon(page))
548 collect_procs_anon(page, tokill, force_early);
549 else
550 collect_procs_file(page, tokill, force_early);
551}
552
553static const char *action_name[] = {
554 [MF_IGNORED] = "Ignored",
555 [MF_FAILED] = "Failed",
556 [MF_DELAYED] = "Delayed",
557 [MF_RECOVERED] = "Recovered",
558};
559
560static const char * const action_page_types[] = {
561 [MF_MSG_KERNEL] = "reserved kernel page",
562 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
563 [MF_MSG_SLAB] = "kernel slab page",
564 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
565 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
566 [MF_MSG_HUGE] = "huge page",
567 [MF_MSG_FREE_HUGE] = "free huge page",
568 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
569 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
570 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
571 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
572 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
573 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
574 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
575 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
576 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
577 [MF_MSG_CLEAN_LRU] = "clean LRU page",
578 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
579 [MF_MSG_BUDDY] = "free buddy page",
580 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
581 [MF_MSG_DAX] = "dax page",
582 [MF_MSG_UNSPLIT_THP] = "unsplit thp",
583 [MF_MSG_UNKNOWN] = "unknown page",
584};
585
592static int delete_from_lru_cache(struct page *p)
593{
594 if (!isolate_lru_page(p)) {
595
596
597
598
599 ClearPageActive(p);
600 ClearPageUnevictable(p);
601
602
603
604
605
606 mem_cgroup_uncharge(p);
607
608
609
610
611 put_page(p);
612 return 0;
613 }
614 return -EIO;
615}
616
617static int truncate_error_page(struct page *p, unsigned long pfn,
618 struct address_space *mapping)
619{
620 int ret = MF_FAILED;
621
622 if (mapping->a_ops->error_remove_page) {
623 int err = mapping->a_ops->error_remove_page(mapping, p);
624
625 if (err != 0) {
626 pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
627 pfn, err);
628 } else if (page_has_private(p) &&
629 !try_to_release_page(p, GFP_NOIO)) {
630 pr_info("Memory failure: %#lx: failed to release buffers\n",
631 pfn);
632 } else {
633 ret = MF_RECOVERED;
634 }
635 } else {
636
637
638
639
640 if (invalidate_inode_page(p))
641 ret = MF_RECOVERED;
642 else
643 pr_info("Memory failure: %#lx: Failed to invalidate\n",
644 pfn);
645 }
646
647 return ret;
648}
649
650
651
652
653
654
655static int me_kernel(struct page *p, unsigned long pfn)
656{
657 return MF_IGNORED;
658}
659
660
661
662
663static int me_unknown(struct page *p, unsigned long pfn)
664{
665 pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
666 return MF_FAILED;
667}
668
669
670
671
672static int me_pagecache_clean(struct page *p, unsigned long pfn)
673{
674 struct address_space *mapping;
675
676 delete_from_lru_cache(p);
677
678
679
680
681
682 if (PageAnon(p))
683 return MF_RECOVERED;
684
685
686
687
688
689
690
691
692 mapping = page_mapping(p);
693 if (!mapping) {
694
695
696
697 return MF_FAILED;
698 }
699
700
701
702
703
704
705 return truncate_error_page(p, pfn, mapping);
706}
707
708
709
710
711
712
713static int me_pagecache_dirty(struct page *p, unsigned long pfn)
714{
715 struct address_space *mapping = page_mapping(p);
716
717 SetPageError(p);
718
719 if (mapping) {
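 /*
  * The dirty data has been lost and there is no reliable way to report
  * that back to the specific writer, so record the error on the
  * address_space.  A later fsync() on this mapping will then return
  * EIO, which is the closest the kernel can get to saying "your
  * recently written data was lost".
  */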
754 mapping_set_error(mapping, -EIO);
755 }
756
757 return me_pagecache_clean(p, pfn);
758}
759
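/*
 * Dirty swap cache page: the only up-to-date copy of the data was in the
 * corrupted page, so it cannot be recovered.  Clear PG_dirty so the
 * corrupt contents are never written out to swap, and clear PG_uptodate
 * so nobody who later finds the page in the swap cache treats its
 * contents as valid.
 */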
779static int me_swapcache_dirty(struct page *p, unsigned long pfn)
780{
781 ClearPageDirty(p);
782
783 ClearPageUptodate(p);
784
785 if (!delete_from_lru_cache(p))
786 return MF_DELAYED;
787 else
788 return MF_FAILED;
789}
790
791static int me_swapcache_clean(struct page *p, unsigned long pfn)
792{
793 delete_from_swap_cache(p);
794
795 if (!delete_from_lru_cache(p))
796 return MF_RECOVERED;
797 else
798 return MF_FAILED;
799}
800
807static int me_huge_page(struct page *p, unsigned long pfn)
808{
809 int res;
810 struct page *hpage = compound_head(p);
811 struct address_space *mapping;
812
813 if (!PageHuge(hpage))
814 return MF_DELAYED;
815
816 mapping = page_mapping(hpage);
817 if (mapping) {
818 res = truncate_error_page(hpage, pfn, mapping);
819 } else {
820 res = MF_FAILED;
821 unlock_page(hpage);
822
823
824
825
826
827 if (PageAnon(hpage))
828 put_page(hpage);
829 if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
830 page_ref_inc(p);
831 res = MF_RECOVERED;
832 }
833 lock_page(hpage);
834 }
835
836 return res;
837}
838
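/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.  The table
 * below matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access a page at any time in its
 * life cycle, so all accesses have to be extremely careful.
 */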
852#define dirty (1UL << PG_dirty)
853#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
854#define unevict (1UL << PG_unevictable)
855#define mlock (1UL << PG_mlocked)
856#define lru (1UL << PG_lru)
857#define head (1UL << PG_head)
858#define slab (1UL << PG_slab)
859#define reserved (1UL << PG_reserved)
860
861static struct page_state {
862 unsigned long mask;
863 unsigned long res;
864 enum mf_action_page_type type;
865 int (*action)(struct page *p, unsigned long pfn);
866} error_states[] = {
867 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
878 { slab, slab, MF_MSG_SLAB, me_kernel },
879
880 { head, head, MF_MSG_HUGE, me_huge_page },
881
882 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
883 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
884
885 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
886 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
887
888 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
889 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
890
891 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
892 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
893
894
895
896
897 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
898};
899
900#undef dirty
901#undef sc
902#undef unevict
903#undef mlock
904#undef lru
905#undef head
906#undef slab
907#undef reserved
908
909
910
911
912
913static void action_result(unsigned long pfn, enum mf_action_page_type type,
914 enum mf_result result)
915{
916 trace_memory_failure_event(pfn, type, result);
917
918 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
919 pfn, action_page_types[type], action_name[result]);
920}
921
922static int page_action(struct page_state *ps, struct page *p,
923 unsigned long pfn)
924{
925 int result;
926 int count;
927
928 result = ps->action(p, pfn);
929
930 count = page_count(p) - 1;
931 if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
932 count--;
933 if (count > 0) {
934 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
935 pfn, action_page_types[ps->type], count);
936 result = MF_FAILED;
937 }
938 action_result(pfn, ps->type, result);
939
940
941
942
943
944
945 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
946}
947
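/*
 * Try to grab a reference on the page for the error handling that
 * follows.  Returns 1 if a stable reference was taken on the head page,
 * 0 otherwise (refcount already zero, a non-anonymous THP that cannot be
 * handled, or a race with the compound page being split up).
 */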
955static int __get_hwpoison_page(struct page *page)
956{
957 struct page *head = compound_head(page);
958
959 if (!PageHuge(head) && PageTransHuge(head)) {
960
961
962
963
964
965
966 if (!PageAnon(head)) {
967 pr_err("Memory failure: %#lx: non anonymous thp\n",
968 page_to_pfn(page));
969 return 0;
970 }
971 }
972
973 if (get_page_unless_zero(head)) {
974 if (head == compound_head(page))
975 return 1;
976
977 pr_info("Memory failure: %#lx cannot catch tail\n",
978 page_to_pfn(page));
979 put_page(head);
980 }
981
982 return 0;
983}
984
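/*
 * get_any_page() is the soft-offline variant of taking a page reference.
 * It retries a few times (shaking the page when it is in an unexpected
 * state) to give pages in transient states, e.g. sitting in per-cpu
 * lists or in the middle of being freed, a chance to settle into
 * something we can handle.  Returns 1 when a reference was taken on a
 * hugetlb, LRU or movable page, 0 when the page appears to be free, and
 * -EBUSY or -EIO on failure.
 */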
994static int get_any_page(struct page *p, unsigned long flags)
995{
996 int ret = 0, pass = 0;
997 bool count_increased = false;
998
999 if (flags & MF_COUNT_INCREASED)
1000 count_increased = true;
1001
1002try_again:
1003 if (!count_increased && !__get_hwpoison_page(p)) {
1004 if (page_count(p)) {
1005
1006 if (pass++ < 3)
1007 goto try_again;
1008 ret = -EBUSY;
1009 } else if (!PageHuge(p) && !is_free_buddy_page(p)) {
1010
1011 if (pass++ < 3)
1012 goto try_again;
1013 ret = -EIO;
1014 }
1015 } else {
1016 if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
1017 ret = 1;
1018 } else {
1019
1020
1021
1022
1023 if (pass++ < 3) {
1024 put_page(p);
1025 shake_page(p, 1);
1026 count_increased = false;
1027 goto try_again;
1028 }
1029 put_page(p);
1030 ret = -EIO;
1031 }
1032 }
1033
1034 return ret;
1035}
1036
1037static int get_hwpoison_page(struct page *p, unsigned long flags,
1038 enum mf_flags ctxt)
1039{
1040 int ret;
1041
1042 zone_pcp_disable(page_zone(p));
1043 if (ctxt == MF_SOFT_OFFLINE)
1044 ret = get_any_page(p, flags);
1045 else
1046 ret = __get_hwpoison_page(p);
1047 zone_pcp_enable(page_zone(p));
1048
1049 return ret;
1050}
1051
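/*
 * Do all that is necessary to remove user space mappings of the poisoned
 * page: propagate the pte dirty bits into the struct page, collect the
 * tasks that need to be signalled, unmap the page from every process and
 * finally deliver the signals.  Returns true if the page was
 * successfully unmapped (or did not need unmapping).
 */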
1056static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1057 int flags, struct page **hpagep)
1058{
1059 enum ttu_flags ttu = TTU_IGNORE_MLOCK;
1060 struct address_space *mapping;
1061 LIST_HEAD(tokill);
1062 bool unmap_success = true;
1063 int kill = 1, forcekill;
1064 struct page *hpage = *hpagep;
1065 bool mlocked = PageMlocked(hpage);
1066
1067
1068
1069
1070
1071 if (PageReserved(p) || PageSlab(p))
1072 return true;
1073 if (!(PageLRU(hpage) || PageHuge(p)))
1074 return true;
1075
1076
1077
1078
1079
1080 if (!page_mapped(hpage))
1081 return true;
1082
1083 if (PageKsm(p)) {
1084 pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
1085 return false;
1086 }
1087
1088 if (PageSwapCache(p)) {
1089 pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
1090 pfn);
1091 ttu |= TTU_IGNORE_HWPOISON;
1092 }
1093
1094
1095
1096
1097
1098
1099
1100 mapping = page_mapping(hpage);
1101 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1102 mapping_can_writeback(mapping)) {
1103 if (page_mkclean(hpage)) {
1104 SetPageDirty(hpage);
1105 } else {
1106 kill = 0;
1107 ttu |= TTU_IGNORE_HWPOISON;
1108 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
1109 pfn);
1110 }
1111 }
1112
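 /*
  * First collect all the processes that have the page mapped in dirty
  * form.  This has to be done before try_to_unmap, because try_to_unmap
  * takes the rmap data structures down.
  *
  * Errors are ignored here because there is nothing useful that could be
  * done about them.
  */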
1121 if (kill)
1122 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
1123
1124 if (!PageHuge(hpage)) {
1125 unmap_success = try_to_unmap(hpage, ttu);
1126 } else {
1127 if (!PageAnon(hpage)) {
1128
1129
1130
1131
1132
1133
1134
1135 mapping = hugetlb_page_mapping_lock_write(hpage);
1136 if (mapping) {
1137 unmap_success = try_to_unmap(hpage,
1138 ttu|TTU_RMAP_LOCKED);
1139 i_mmap_unlock_write(mapping);
1140 } else {
1141 pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
1142 unmap_success = false;
1143 }
1144 } else {
1145 unmap_success = try_to_unmap(hpage, ttu);
1146 }
1147 }
1148 if (!unmap_success)
1149 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
1150 pfn, page_mapcount(hpage));
1151
1152
1153
1154
1155
1156 if (mlocked)
1157 shake_page(hpage, 0);
1158
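 /*
  * Now that the pte dirty bits have been transferred to the struct page
  * and the mappings are gone, signal the processes collected above.  Do
  * so only when the page was dirty or the caller insists (MF_MUST_KILL);
  * tasks whose mapping could not be resolved or torn down are sent
  * SIGKILL, the rest get a SIGBUS describing the error.
  */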
1169 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
1170 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1171
1172 return unmap_success;
1173}
1174
1175static int identify_page_state(unsigned long pfn, struct page *p,
1176 unsigned long page_flags)
1177{
1178 struct page_state *ps;
1179
1180
1181
1182
1183
1184
1185 for (ps = error_states;; ps++)
1186 if ((p->flags & ps->mask) == ps->res)
1187 break;
1188
1189 page_flags |= (p->flags & (1UL << PG_dirty));
1190
1191 if (!ps->mask)
1192 for (ps = error_states;; ps++)
1193 if ((page_flags & ps->mask) == ps->res)
1194 break;
1195 return page_action(ps, p, pfn);
1196}
1197
1198static int try_to_split_thp_page(struct page *page, const char *msg)
1199{
1200 lock_page(page);
1201 if (!PageAnon(page) || unlikely(split_huge_page(page))) {
1202 unsigned long pfn = page_to_pfn(page);
1203
1204 unlock_page(page);
1205 if (!PageAnon(page))
1206 pr_info("%s: %#lx: non anonymous thp\n", msg, pfn);
1207 else
1208 pr_info("%s: %#lx: thp split failed\n", msg, pfn);
1209 put_page(page);
1210 return -EBUSY;
1211 }
1212 unlock_page(page);
1213
1214 return 0;
1215}
1216
1217static int memory_failure_hugetlb(unsigned long pfn, int flags)
1218{
1219 struct page *p = pfn_to_page(pfn);
1220 struct page *head = compound_head(p);
1221 int res;
1222 unsigned long page_flags;
1223
1224 if (TestSetPageHWPoison(head)) {
1225 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1226 pfn);
1227 return 0;
1228 }
1229
1230 num_poisoned_pages_inc();
1231
1232 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p, flags, 0)) {
1233
1234
1235
1236 lock_page(head);
1237 if (PageHWPoison(head)) {
1238 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1239 || (p != head && TestSetPageHWPoison(head))) {
1240 num_poisoned_pages_dec();
1241 unlock_page(head);
1242 return 0;
1243 }
1244 }
1245 unlock_page(head);
1246 res = MF_FAILED;
1247 if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
1248 page_ref_inc(p);
1249 res = MF_RECOVERED;
1250 }
1251 action_result(pfn, MF_MSG_FREE_HUGE, res);
1252 return res == MF_RECOVERED ? 0 : -EBUSY;
1253 }
1254
1255 lock_page(head);
1256 page_flags = head->flags;
1257
1258 if (!PageHWPoison(head)) {
1259 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1260 num_poisoned_pages_dec();
1261 unlock_page(head);
1262 put_page(head);
1263 return 0;
1264 }
1265
1275 if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
1276 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
1277 res = -EBUSY;
1278 goto out;
1279 }
1280
1281 if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
1282 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1283 res = -EBUSY;
1284 goto out;
1285 }
1286
1287 res = identify_page_state(pfn, p, page_flags);
1288out:
1289 unlock_page(head);
1290 return res;
1291}
1292
1293static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
1294 struct dev_pagemap *pgmap)
1295{
1296 struct page *page = pfn_to_page(pfn);
1297 const bool unmap_success = true;
1298 unsigned long size = 0;
1299 struct to_kill *tk;
1300 LIST_HEAD(tokill);
1301 int rc = -EBUSY;
1302 loff_t start;
1303 dax_entry_t cookie;
1304
1305 if (flags & MF_COUNT_INCREASED)
1306
1307
1308
1309 put_page(page);
1310
1311
1312
1313
1314
1315
1316
1317
1318 cookie = dax_lock_page(page);
1319 if (!cookie)
1320 goto out;
1321
1322 if (hwpoison_filter(page)) {
1323 rc = 0;
1324 goto unlock;
1325 }
1326
1327 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
1328
1329
1330
1331
1332 goto unlock;
1333 }
1334
1335
1336
1337
1338
1339 SetPageHWPoison(page);
1340
1341
1342
1343
1344
1345
1346
1347 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1348 collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
1349
1350 list_for_each_entry(tk, &tokill, nd)
1351 if (tk->size_shift)
1352 size = max(size, 1UL << tk->size_shift);
1353 if (size) {
1354
1355
1356
1357
1358
1359
1360 start = (page->index << PAGE_SHIFT) & ~(size - 1);
1361 unmap_mapping_range(page->mapping, start, start + size, 0);
1362 }
1363 kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
1364 rc = 0;
1365unlock:
1366 dax_unlock_page(page, cookie);
1367out:
1368
1369 put_dev_pagemap(pgmap);
1370 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
1371 return rc;
1372}
1373
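/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */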
1391int memory_failure(unsigned long pfn, int flags)
1392{
1393 struct page *p;
1394 struct page *hpage;
1395 struct page *orig_head;
1396 struct dev_pagemap *pgmap;
1397 int res;
1398 unsigned long page_flags;
1399 bool retry = true;
1400
1401 if (!sysctl_memory_failure_recovery)
1402 panic("Memory failure on page %lx", pfn);
1403
1404 p = pfn_to_online_page(pfn);
1405 if (!p) {
1406 if (pfn_valid(pfn)) {
1407 pgmap = get_dev_pagemap(pfn, NULL);
1408 if (pgmap)
1409 return memory_failure_dev_pagemap(pfn, flags,
1410 pgmap);
1411 }
1412 pr_err("Memory failure: %#lx: memory outside kernel control\n",
1413 pfn);
1414 return -ENXIO;
1415 }
1416
1417try_again:
1418 if (PageHuge(p))
1419 return memory_failure_hugetlb(pfn, flags);
1420 if (TestSetPageHWPoison(p)) {
1421 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1422 pfn);
1423 return 0;
1424 }
1425
1426 orig_head = hpage = compound_head(p);
1427 num_poisoned_pages_inc();
1428
1440 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p, flags, 0)) {
1441 if (is_free_buddy_page(p)) {
1442 if (take_page_off_buddy(p)) {
1443 page_ref_inc(p);
1444 res = MF_RECOVERED;
1445 } else {
1446
1447 if (retry) {
1448 ClearPageHWPoison(p);
1449 num_poisoned_pages_dec();
1450 retry = false;
1451 goto try_again;
1452 }
1453 res = MF_FAILED;
1454 }
1455 action_result(pfn, MF_MSG_BUDDY, res);
1456 return res == MF_RECOVERED ? 0 : -EBUSY;
1457 } else {
1458 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
1459 return -EBUSY;
1460 }
1461 }
1462
1463 if (PageTransHuge(hpage)) {
1464 if (try_to_split_thp_page(p, "Memory Failure") < 0) {
1465 action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
1466 return -EBUSY;
1467 }
1468 VM_BUG_ON_PAGE(!page_count(p), p);
1469 }
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479 shake_page(p, 0);
1480
1481 lock_page(p);
1482
1483
1484
1485
1486
1487 if (PageCompound(p) && compound_head(p) != orig_head) {
1488 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
1489 res = -EBUSY;
1490 goto out;
1491 }
1492
1493
1494
1495
1496
1497
1498
1499
1500 page_flags = p->flags;
1501
1502
1503
1504
1505 if (!PageHWPoison(p)) {
1506 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1507 num_poisoned_pages_dec();
1508 unlock_page(p);
1509 put_page(p);
1510 return 0;
1511 }
1512 if (hwpoison_filter(p)) {
1513 if (TestClearPageHWPoison(p))
1514 num_poisoned_pages_dec();
1515 unlock_page(p);
1516 put_page(p);
1517 return 0;
1518 }
1519
1520 if (!PageTransTail(p) && !PageLRU(p))
1521 goto identify_page_state;
1522
1523
1524
1525
1526
1527 wait_on_page_writeback(p);
1528
1529
1530
1531
1532
1533 if (!hwpoison_user_mappings(p, pfn, flags, &p)) {
1534 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1535 res = -EBUSY;
1536 goto out;
1537 }
1538
1539
1540
1541
1542 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
1543 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
1544 res = -EBUSY;
1545 goto out;
1546 }
1547
1548identify_page_state:
1549 res = identify_page_state(pfn, p, page_flags);
1550out:
1551 unlock_page(p);
1552 return res;
1553}
1554EXPORT_SYMBOL_GPL(memory_failure);
1555
1556#define MEMORY_FAILURE_FIFO_ORDER 4
1557#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
1558
1559struct memory_failure_entry {
1560 unsigned long pfn;
1561 int flags;
1562};
1563
1564struct memory_failure_cpu {
1565 DECLARE_KFIFO(fifo, struct memory_failure_entry,
1566 MEMORY_FAILURE_FIFO_SIZE);
1567 spinlock_t lock;
1568 struct work_struct work;
1569};
1570
1571static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
1572
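/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page.  Recovery is
 * not possible in the context that detects the error (which may be NMI
 * or IRQ context), so the request is queued on a per-cpu kfifo and
 * handled later from a work queue.
 *
 * Can run in IRQ context.
 */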
1589void memory_failure_queue(unsigned long pfn, int flags)
1590{
1591 struct memory_failure_cpu *mf_cpu;
1592 unsigned long proc_flags;
1593 struct memory_failure_entry entry = {
1594 .pfn = pfn,
1595 .flags = flags,
1596 };
1597
1598 mf_cpu = &get_cpu_var(memory_failure_cpu);
1599 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1600 if (kfifo_put(&mf_cpu->fifo, entry))
1601 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1602 else
1603 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
1604 pfn);
1605 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1606 put_cpu_var(memory_failure_cpu);
1607}
1608EXPORT_SYMBOL_GPL(memory_failure_queue);
1609
1610static void memory_failure_work_func(struct work_struct *work)
1611{
1612 struct memory_failure_cpu *mf_cpu;
1613 struct memory_failure_entry entry = { 0, };
1614 unsigned long proc_flags;
1615 int gotten;
1616
1617 mf_cpu = container_of(work, struct memory_failure_cpu, work);
1618 for (;;) {
1619 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1620 gotten = kfifo_get(&mf_cpu->fifo, &entry);
1621 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1622 if (!gotten)
1623 break;
1624 if (entry.flags & MF_SOFT_OFFLINE)
1625 soft_offline_page(entry.pfn, entry.flags);
1626 else
1627 memory_failure(entry.pfn, entry.flags);
1628 }
1629}
1630
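/*
 * Process memory_failure work queued on the specified CPU.  Used to make
 * sure queued work is handled synchronously, instead of racing with the
 * regular workqueue execution.
 */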
1635void memory_failure_queue_kick(int cpu)
1636{
1637 struct memory_failure_cpu *mf_cpu;
1638
1639 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1640 cancel_work_sync(&mf_cpu->work);
1641 memory_failure_work_func(&mf_cpu->work);
1642}
1643
1644static int __init memory_failure_init(void)
1645{
1646 struct memory_failure_cpu *mf_cpu;
1647 int cpu;
1648
1649 for_each_possible_cpu(cpu) {
1650 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1651 spin_lock_init(&mf_cpu->lock);
1652 INIT_KFIFO(mf_cpu->fifo);
1653 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
1654 }
1655
1656 return 0;
1657}
1658core_initcall(memory_failure_init);
1659
1660#define unpoison_pr_info(fmt, pfn, rs) \
1661({ \
1662 if (__ratelimit(rs)) \
1663 pr_info(fmt, pfn); \
1664})
1665
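/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */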
1678int unpoison_memory(unsigned long pfn)
1679{
1680 struct page *page;
1681 struct page *p;
1682 int freeit = 0;
1683 unsigned long flags = 0;
1684 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1685 DEFAULT_RATELIMIT_BURST);
1686
1687 if (!pfn_valid(pfn))
1688 return -ENXIO;
1689
1690 p = pfn_to_page(pfn);
1691 page = compound_head(p);
1692
1693 if (!PageHWPoison(p)) {
1694 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
1695 pfn, &unpoison_rs);
1696 return 0;
1697 }
1698
1699 if (page_count(page) > 1) {
1700 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
1701 pfn, &unpoison_rs);
1702 return 0;
1703 }
1704
1705 if (page_mapped(page)) {
1706 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
1707 pfn, &unpoison_rs);
1708 return 0;
1709 }
1710
1711 if (page_mapping(page)) {
1712 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
1713 pfn, &unpoison_rs);
1714 return 0;
1715 }
1716
1717
1718
1719
1720
1721
1722 if (!PageHuge(page) && PageTransHuge(page)) {
1723 unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
1724 pfn, &unpoison_rs);
1725 return 0;
1726 }
1727
1728 if (!get_hwpoison_page(p, flags, 0)) {
1729 if (TestClearPageHWPoison(p))
1730 num_poisoned_pages_dec();
1731 unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
1732 pfn, &unpoison_rs);
1733 return 0;
1734 }
1735
1736 lock_page(page);
1737
1738
1739
1740
1741
1742
1743 if (TestClearPageHWPoison(page)) {
1744 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
1745 pfn, &unpoison_rs);
1746 num_poisoned_pages_dec();
1747 freeit = 1;
1748 }
1749 unlock_page(page);
1750
1751 put_page(page);
1752 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
1753 put_page(page);
1754
1755 return 0;
1756}
1757EXPORT_SYMBOL(unpoison_memory);
1758
1759static bool isolate_page(struct page *page, struct list_head *pagelist)
1760{
1761 bool isolated = false;
1762 bool lru = PageLRU(page);
1763
1764 if (PageHuge(page)) {
1765 isolated = isolate_huge_page(page, pagelist);
1766 } else {
1767 if (lru)
1768 isolated = !isolate_lru_page(page);
1769 else
1770 isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1771
1772 if (isolated)
1773 list_add(&page->lru, pagelist);
1774 }
1775
1776 if (isolated && lru)
1777 inc_node_page_state(page, NR_ISOLATED_ANON +
1778 page_is_file_lru(page));
1779
1780
1781
1782
1783
1784
1785
1786
1787 put_page(page);
1788 return isolated;
1789}
1790
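/*
 * __soft_offline_page handles both hugetlb pages and ordinary pages.  A
 * clean, unmapped page cache page is simply invalidated; anything else
 * is isolated and migrated away, and the now unused source page is
 * marked as poisoned so it is never handed out again.
 */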
1796static int __soft_offline_page(struct page *page)
1797{
1798 int ret = 0;
1799 unsigned long pfn = page_to_pfn(page);
1800 struct page *hpage = compound_head(page);
1801 char const *msg_page[] = {"page", "hugepage"};
1802 bool huge = PageHuge(page);
1803 LIST_HEAD(pagelist);
1804 struct migration_target_control mtc = {
1805 .nid = NUMA_NO_NODE,
1806 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
1807 };
1808
1809
1810
1811
1812
1813
1814
1815 lock_page(page);
1816 if (!PageHuge(page))
1817 wait_on_page_writeback(page);
1818 if (PageHWPoison(page)) {
1819 unlock_page(page);
1820 put_page(page);
1821 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1822 return 0;
1823 }
1824
1825 if (!PageHuge(page))
1826
1827
1828
1829
1830 ret = invalidate_inode_page(page);
1831 unlock_page(page);
1832
1833
1834
1835
1836
1837 if (ret) {
1838 pr_info("soft_offline: %#lx: invalidated\n", pfn);
1839 page_handle_poison(page, false, true);
1840 return 0;
1841 }
1842
1843 if (isolate_page(hpage, &pagelist)) {
1844 ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
1845 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE);
1846 if (!ret) {
1847 bool release = !huge;
1848
1849 if (!page_handle_poison(page, huge, release))
1850 ret = -EBUSY;
1851 } else {
1852 if (!list_empty(&pagelist))
1853 putback_movable_pages(&pagelist);
1854
1855 pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n",
1856 pfn, msg_page[huge], ret, page->flags, &page->flags);
1857 if (ret > 0)
1858 ret = -EBUSY;
1859 }
1860 } else {
1861 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %lx (%pGp)\n",
1862 pfn, msg_page[huge], page_count(page), page->flags, &page->flags);
1863 ret = -EBUSY;
1864 }
1865 return ret;
1866}
1867
1868static int soft_offline_in_use_page(struct page *page)
1869{
1870 struct page *hpage = compound_head(page);
1871
1872 if (!PageHuge(page) && PageTransHuge(hpage))
1873 if (try_to_split_thp_page(page, "soft offline") < 0)
1874 return -EBUSY;
1875 return __soft_offline_page(page);
1876}
1877
1878static int soft_offline_free_page(struct page *page)
1879{
1880 int rc = 0;
1881
1882 if (!page_handle_poison(page, true, false))
1883 rc = -EBUSY;
1884
1885 return rc;
1886}
1887
1888static void put_ref_page(struct page *page)
1889{
1890 if (page)
1891 put_page(page);
1892}
1893
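/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation, without killing
 * anything.  This is for the case when a page is not corrupted yet (so
 * it is still valid to access), but has had a number of corrected errors
 * and is better taken out.
 *
 * The actual policy on when to do that is maintained by user space.
 */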
1916int soft_offline_page(unsigned long pfn, int flags)
1917{
1918 int ret;
1919 bool try_again = true;
1920 struct page *page, *ref_page = NULL;
1921
1922 WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
1923
1924 if (!pfn_valid(pfn))
1925 return -ENXIO;
1926 if (flags & MF_COUNT_INCREASED)
1927 ref_page = pfn_to_page(pfn);
1928
1929
1930 page = pfn_to_online_page(pfn);
1931 if (!page) {
1932 put_ref_page(ref_page);
1933 return -EIO;
1934 }
1935
1936 if (PageHWPoison(page)) {
1937 pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
1938 put_ref_page(ref_page);
1939 return 0;
1940 }
1941
1942retry:
1943 get_online_mems();
1944 ret = get_hwpoison_page(page, flags, MF_SOFT_OFFLINE);
1945 put_online_mems();
1946
1947 if (ret > 0) {
1948 ret = soft_offline_in_use_page(page);
1949 } else if (ret == 0) {
1950 if (soft_offline_free_page(page) && try_again) {
1951 try_again = false;
1952 goto retry;
1953 }
1954 } else if (ret == -EIO) {
1955 pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
1956 __func__, pfn, page->flags, &page->flags);
1957 }
1958
1959 return ret;
1960}
1961