linux/mm/memory-failure.c
   1/*
   2 * Copyright (C) 2008, 2009 Intel Corporation
   3 * Authors: Andi Kleen, Fengguang Wu
   4 *
   5 * This software may be redistributed and/or modified under the terms of
   6 * the GNU General Public License ("GPL") version 2 only as published by the
   7 * Free Software Foundation.
   8 *
   9 * High level machine check handler. Handles pages reported by the
  10 * hardware as being corrupted, usually due to a 2-bit ECC memory or cache
  11 * failure.
  12 *
  13 * Handles page cache pages in various states.  The tricky part
  14 * here is that we can access any page asynchronously to other VM
  15 * users, because memory failures could happen anytime and anywhere,
  16 * possibly violating some of their assumptions. This is why this code
  17 * has to be extremely careful. Generally it tries to use normal locking
  18 * rules, as in get the standard locks, even if that means the
  19 * error handling takes potentially a long time.
  20 *
  21 * The operation to map back from RMAP chains to processes has to walk
  22 * the complete process list and has non-linear complexity in the number of
  23 * mappings. In short it can be quite slow. But since memory corruptions
  24 * are rare we hope to get away with this.
  25 */
  26
  27/*
  28 * Notebook:
  29 * - hugetlb needs more code
  30 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
  31 * - pass bad pages to kdump next kernel
  32 */
  33#define DEBUG 1         /* remove me in 2.6.34 */
  34#include <linux/kernel.h>
  35#include <linux/mm.h>
  36#include <linux/page-flags.h>
  37#include <linux/sched.h>
  38#include <linux/ksm.h>
  39#include <linux/rmap.h>
  40#include <linux/pagemap.h>
  41#include <linux/swap.h>
  42#include <linux/backing-dev.h>
  43#include "internal.h"
  44
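    /*
     * Policy knob: when set, processes that have the corrupted page mapped
     * are signalled as soon as the corruption is detected rather than when
     * they actually touch the bad data.  Individual tasks can override this
     * via PF_MCE_PROCESS/PF_MCE_EARLY (see task_early_kill() below).
     */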
  45int sysctl_memory_failure_early_kill __read_mostly = 0;
  46
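    /*
     * When cleared the kernel panics on every memory failure instead of
     * attempting recovery (see __memory_failure()).
     */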
  47int sysctl_memory_failure_recovery __read_mostly = 1;
  48
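    /* Number of pages poisoned (taken out of service) so far. */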
  49atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
  50
  51/*
  52 * Send all the processes who have the page mapped an ``action optional''
  53 * signal.
  54 */
  55static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
  56                        unsigned long pfn)
  57{
  58        struct siginfo si;
  59        int ret;
  60
  61        printk(KERN_ERR
  62                "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
  63                pfn, t->comm, t->pid);
  64        si.si_signo = SIGBUS;
  65        si.si_errno = 0;
  66        si.si_code = BUS_MCEERR_AO;
  67        si.si_addr = (void *)addr;
  68#ifdef __ARCH_SI_TRAPNO
  69        si.si_trapno = trapno;
  70#endif
  71        si.si_addr_lsb = PAGE_SHIFT;
  72        /*
  73         * Don't use force here, it's convenient if the signal
  74         * can be temporarily blocked.
  75         * This could cause a loop when the user sets SIGBUS
  76         * to SIG_IGN, but hopefully no one will do that?
  77         */
  78        ret = send_sig_info(SIGBUS, &si, t);  /* synchronous? */
  79        if (ret < 0)
  80                printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
  81                       t->comm, t->pid, ret);
  82        return ret;
  83}
  84
  85/*
  86 * Kill all processes that have a poisoned page mapped and then isolate
  87 * the page.
  88 *
  89 * General strategy:
  90 * Find all processes having the page mapped and kill them.
  91 * But we keep a page reference around so that the page is not
  92 * actually freed yet.
  93 * Then stash the page away
  94 *
  95 * There's no convenient way to get back to mapped processes
  96 * from the VMAs. So do a brute-force search over all
  97 * running processes.
  98 *
  99 * Remember that machine checks are not common (or rather
 100 * if they are common you have other problems), so this shouldn't
 101 * be a performance issue.
 102 *
 103 * Also there are some races possible while we get from the
 105 * error detection to actually handling it.
 105 */
 106
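    /*
     * One entry per task that may have to be signalled, collected on a list
     * before the page is unmapped.  addr is the user virtual address of the
     * corrupted page in that task; addr_valid is cleared when no mapping
     * could be found.
     */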
 107struct to_kill {
 108        struct list_head nd;
 109        struct task_struct *tsk;
 110        unsigned long addr;
 111        unsigned addr_valid:1;
 112};
 113
 114/*
 115 * Failure handling: if we can't find or can't kill a process there's
 116 * not much we can do.  We just print a message and otherwise ignore the error.
 117 */
 118
 119/*
 120 * Schedule a process for later kill.
 121 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 122 * TBD would GFP_NOIO be enough?
 123 */
 124static void add_to_kill(struct task_struct *tsk, struct page *p,
 125                       struct vm_area_struct *vma,
 126                       struct list_head *to_kill,
 127                       struct to_kill **tkc)
 128{
 129        struct to_kill *tk;
 130
 131        if (*tkc) {
 132                tk = *tkc;
 133                *tkc = NULL;
 134        } else {
 135                tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
 136                if (!tk) {
 137                        printk(KERN_ERR
 138                "MCE: Out of memory while handling machine check\n");
 139                        return;
 140                }
 141        }
 142        tk->addr = page_address_in_vma(p, vma);
 143        tk->addr_valid = 1;
 144
 145        /*
 146         * In theory we don't have to kill when the page was
 147         * munmapped. But it could also be a mremap. Since that's
 148         * likely very rare, kill anyway just out of paranoia, but use
 149         * a SIGKILL because the error is not contained anymore.
 150         */
 151        if (tk->addr == -EFAULT) {
 152                pr_debug("MCE: Unable to find user space address %lx in %s\n",
 153                        page_to_pfn(p), tsk->comm);
 154                tk->addr_valid = 0;
 155        }
 156        get_task_struct(tsk);
 157        tk->tsk = tsk;
 158        list_add_tail(&tk->nd, to_kill);
 159}
 160
 161/*
 162 * Kill the processes that have been collected earlier.
 163 *
 164 * Only do anything when DOIT is set, otherwise just free the list
 165 * (this is used for clean pages which do not need killing)
 166 * Also when FAIL is set do a force kill because something went
 167 * wrong earlier.
 168 */
 169static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
 170                          int fail, unsigned long pfn)
 171{
 172        struct to_kill *tk, *next;
 173
 174        list_for_each_entry_safe (tk, next, to_kill, nd) {
 175                if (doit) {
 176                        /*
 177                         * In case something went wrong with unmapping,
 178                         * make sure the process doesn't catch the
 179                         * signal in a handler and then access the
 180                         * memory anyway. Just kill it.
 181                         */
 182                        if (fail || tk->addr_valid == 0) {
 183                                printk(KERN_ERR
 184                "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
 185                                        pfn, tk->tsk->comm, tk->tsk->pid);
 186                                force_sig(SIGKILL, tk->tsk);
 187                        }
 188
 189                        /*
 190                         * In theory the process could have mapped
 191                         * something else on the address in-between. We could
 192                         * check for that, but we need to tell the
 193                         * process anyways.
 194                         */
 195                        else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
 196                                              pfn) < 0)
 197                                printk(KERN_ERR
 198                "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
 199                                        pfn, tk->tsk->comm, tk->tsk->pid);
 200                }
 201                put_task_struct(tk->tsk);
 202                kfree(tk);
 203        }
 204}
 205
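    /*
     * Decide whether a task wants an early kill: the per-task
     * PF_MCE_PROCESS/PF_MCE_EARLY flags take precedence, otherwise fall back
     * to the global sysctl.  Kernel threads (no mm) are never killed.
     */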
 206static int task_early_kill(struct task_struct *tsk)
 207{
 208        if (!tsk->mm)
 209                return 0;
 210        if (tsk->flags & PF_MCE_PROCESS)
 211                return !!(tsk->flags & PF_MCE_EARLY);
 212        return sysctl_memory_failure_early_kill;
 213}
 214
 215/*
 216 * Collect processes when the error hit an anonymous page.
 217 */
 218static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 219                              struct to_kill **tkc)
 220{
 221        struct vm_area_struct *vma;
 222        struct task_struct *tsk;
 223        struct anon_vma *av;
 224
 225        read_lock(&tasklist_lock);
 226        av = page_lock_anon_vma(page);
 227        if (av == NULL) /* Not actually mapped anymore */
 228                goto out;
 229        for_each_process (tsk) {
 230                if (!task_early_kill(tsk))
 231                        continue;
 232                list_for_each_entry (vma, &av->head, anon_vma_node) {
 233                        if (!page_mapped_in_vma(page, vma))
 234                                continue;
 235                        if (vma->vm_mm == tsk->mm)
 236                                add_to_kill(tsk, page, vma, to_kill, tkc);
 237                }
 238        }
 239        page_unlock_anon_vma(av);
 240out:
 241        read_unlock(&tasklist_lock);
 242}
 243
 244/*
 245 * Collect processes when the error hit a file mapped page.
 246 */
 247static void collect_procs_file(struct page *page, struct list_head *to_kill,
 248                              struct to_kill **tkc)
 249{
 250        struct vm_area_struct *vma;
 251        struct task_struct *tsk;
 252        struct prio_tree_iter iter;
 253        struct address_space *mapping = page->mapping;
 254
 255        /*
 256         * A note on the locking order between the two locks.
 257         * We don't rely on this particular order.
 258         * If you have some other code that needs a different order
 259         * feel free to switch them around. Or add a reverse link
 260         * from mm_struct to task_struct, then this could be all
 261         * done without taking tasklist_lock and looping over all tasks.
 262         */
 263
 264        read_lock(&tasklist_lock);
 265        spin_lock(&mapping->i_mmap_lock);
 266        for_each_process(tsk) {
 267                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 268
 269                if (!task_early_kill(tsk))
 270                        continue;
 271
 272                vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
 273                                      pgoff) {
 274                        /*
 275                         * Send early kill signal to tasks where a vma covers
 276                         * the page but the corrupted page is not necessarily
 277                         * mapped in its pte.
 278                         * Assume applications that requested early kill want
 279                         * to be informed of all such data corruptions.
 280                         */
 281                        if (vma->vm_mm == tsk->mm)
 282                                add_to_kill(tsk, page, vma, to_kill, tkc);
 283                }
 284        }
 285        spin_unlock(&mapping->i_mmap_lock);
 286        read_unlock(&tasklist_lock);
 287}
 288
 289/*
 290 * Collect the processes that have the corrupted page mapped, to kill them.
 291 * This is done in two steps for locking reasons.
 292 * First preallocate one tokill structure outside the spin locks,
 293 * so that we can kill at least one process reasonably reliably.
 294 */
 295static void collect_procs(struct page *page, struct list_head *tokill)
 296{
 297        struct to_kill *tk;
 298
 299        if (!page->mapping)
 300                return;
 301
 302        tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
 303        if (!tk)
 304                return;
 305        if (PageAnon(page))
 306                collect_procs_anon(page, tokill, &tk);
 307        else
 308                collect_procs_file(page, tokill, &tk);
 309        kfree(tk);
 310}
 311
 312/*
 313 * Error handlers for various types of pages.
 314 */
 315
 316enum outcome {
 317        FAILED,         /* Error handling failed */
 318        DELAYED,        /* Will be handled later */
 319        IGNORED,        /* Error safely ignored */
 320        RECOVERED,      /* Successfully recovered */
 321};
 322
 323static const char *action_name[] = {
 324        [FAILED] = "Failed",
 325        [DELAYED] = "Delayed",
 326        [IGNORED] = "Ignored",
 327        [RECOVERED] = "Recovered",
 328};
 329
 330/*
 331 * Error hit kernel page.
 332 * Do nothing, try to be lucky and hope nothing touches the page again.
 333 * For a few cases we could be more sophisticated.
 334 */
 335static int me_kernel(struct page *p, unsigned long pfn)
 336{
 337        return DELAYED;
 338}
 339
 340/*
 341 * Already poisoned page.
 342 */
 343static int me_ignore(struct page *p, unsigned long pfn)
 344{
 345        return IGNORED;
 346}
 347
 348/*
 349 * Page in unknown state. Do nothing.
 350 */
 351static int me_unknown(struct page *p, unsigned long pfn)
 352{
 353        printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
 354        return FAILED;
 355}
 356
 357/*
 358 * Free memory
 359 */
 360static int me_free(struct page *p, unsigned long pfn)
 361{
 362        return DELAYED;
 363}
 364
 365/*
 366 * Clean (or cleaned) page cache page.
 367 */
 368static int me_pagecache_clean(struct page *p, unsigned long pfn)
 369{
 370        int err;
 371        int ret = FAILED;
 372        struct address_space *mapping;
 373
 374        /*
 375         * For anonymous pages we're done; the only reference left
 376         * should be the one m_f() holds.
 377         */
 378        if (PageAnon(p))
 379                return RECOVERED;
 380
 381        /*
 382         * Now truncate the page in the page cache. This is really
 383         * more like a "temporary hole punch"
 384         * Don't do this for block devices when someone else
 385         * has a reference, because it could be file system metadata
 386         * and that's not safe to truncate.
 387         */
 388        mapping = page_mapping(p);
 389        if (!mapping) {
 390                /*
 391                 * Page has been torn down in the meantime
 392                 */
 393                return FAILED;
 394        }
 395
 396        /*
 397         * Truncation is a bit tricky. Enable it per file system for now.
 398         *
 399         * Open: to take i_mutex or not for this? Right now we don't.
 400         */
 401        if (mapping->a_ops->error_remove_page) {
 402                err = mapping->a_ops->error_remove_page(mapping, p);
 403                if (err != 0) {
 404                        printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
 405                                        pfn, err);
 406                } else if (page_has_private(p) &&
 407                                !try_to_release_page(p, GFP_NOIO)) {
 408                        pr_debug("MCE %#lx: failed to release buffers\n", pfn);
 409                } else {
 410                        ret = RECOVERED;
 411                }
 412        } else {
 413                /*
 414                 * If the file system doesn't support it just invalidate
 415                 * This fails on dirty or anything with private pages
 416                 */
 417                if (invalidate_inode_page(p))
 418                        ret = RECOVERED;
 419                else
 420                        printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
 421                                pfn);
 422        }
 423        return ret;
 424}
 425
 426/*
 427 * Dirty page cache page.
 428 * Issues: when the error hits a hole page the error is not properly
 429 * propagated.
 430 */
 431static int me_pagecache_dirty(struct page *p, unsigned long pfn)
 432{
 433        struct address_space *mapping = page_mapping(p);
 434
 435        SetPageError(p);
 436        /* TBD: print more information about the file. */
 437        if (mapping) {
 438                /*
 439                 * IO error will be reported by write(), fsync(), etc.
 440                 * who check the mapping.
 441                 * This way the application knows that something went
 442                 * wrong with its dirty file data.
 443                 *
 444                 * There's one open issue:
 445                 *
 446                 * The EIO will only be reported on the next IO
 447                 * operation and then cleared through the IO map.
 448                 * Normally Linux has two mechanisms to pass IO errors:
 449                 * first through the AS_EIO flag in the address space
 450                 * and then through the PageError flag in the page.
 451                 * Since we drop pages on memory failure handling the
 452                 * only mechanism open to use is through AS_EIO.
 453                 *
 454                 * This has the disadvantage that it gets cleared on
 455                 * the first operation that returns an error, while
 456                 * the PageError bit is more sticky and only cleared
 457                 * when the page is reread or dropped.  If an
 458                 * application assumes it will always get error on
 459                 * fsync, but does other operations on the fd before
 460                 * and the page is dropped in between, then the error
 461                 * will not be properly reported.
 462                 *
 463                 * This can already happen even without hwpoisoned
 464                 * pages: first on metadata IO errors (which only
 465                 * report through AS_EIO) or when the page is dropped
 466                 * at the wrong time.
 467                 *
 468                 * So right now we assume that the application DTRT on
 469                 * the first EIO, but we're not worse than other parts
 470                 * of the kernel.
 471                 */
 472                mapping_set_error(mapping, EIO);
 473        }
 474
 475        return me_pagecache_clean(p, pfn);
 476}
 477
 478/*
 479 * Clean and dirty swap cache.
 480 *
 481 * Dirty swap cache page is tricky to handle. The page could live both in page
 482 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
 483 * referenced concurrently by 2 types of PTEs:
 484 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 485 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 486 * and then
 487 *      - clear dirty bit to prevent IO
 488 *      - remove from LRU
 489 *      - but keep in the swap cache, so that when we return to it on
 490 *        a later page fault, we know the application is accessing
 491 *        corrupted data and shall be killed (we installed simple
 492 *        interception code in do_swap_page to catch it).
 493 *
 494 * Clean swap cache pages can be directly isolated. A later page fault will
 495 * bring in the known good data from disk.
 496 */
 497static int me_swapcache_dirty(struct page *p, unsigned long pfn)
 498{
 499        ClearPageDirty(p);
 500        /* Trigger EIO in shmem: */
 501        ClearPageUptodate(p);
 502
 503        return DELAYED;
 504}
 505
 506static int me_swapcache_clean(struct page *p, unsigned long pfn)
 507{
 508        delete_from_swap_cache(p);
 509
 510        return RECOVERED;
 511}
 512
 513/*
 514 * Huge pages. Needs work.
 515 * Issues:
 516 * No rmap support so we cannot find the original mapper. In theory we could
 517 * walk all MMs and look for the mappings, but that would be non-atomic and racy.
 518 * Need rmap for hugepages for this. Alternatively we could employ a heuristic,
 519 * like just walking the current process and hoping it has it mapped (that
 520 * should usually be true for the common "shared database cache" case).
 521 * Should handle free huge pages and dequeue them too, but this needs to
 522 * handle huge page accounting correctly.
 523 */
 524static int me_huge_page(struct page *p, unsigned long pfn)
 525{
 526        return FAILED;
 527}
 528
 529/*
 530 * Various page states we can handle.
 531 *
 532 * A page state is defined by its current page->flags bits.
 533 * The table matches them in order and calls the right handler.
 534 *
 535 * This is quite tricky because we can access the page at any time
 536 * in its life cycle, so all accesses have to be extremely careful.
 537 *
 538 * This is not complete. More states could be added.
 539 * For any missing state don't attempt recovery.
 540 */
 541
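    /* Shorthand for the page flag bits tested in the error_states table below. */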
 542#define dirty           (1UL << PG_dirty)
 543#define sc              (1UL << PG_swapcache)
 544#define unevict         (1UL << PG_unevictable)
 545#define mlock           (1UL << PG_mlocked)
 546#define writeback       (1UL << PG_writeback)
 547#define lru             (1UL << PG_lru)
 548#define swapbacked      (1UL << PG_swapbacked)
 549#define head            (1UL << PG_head)
 550#define tail            (1UL << PG_tail)
 551#define compound        (1UL << PG_compound)
 552#define slab            (1UL << PG_slab)
 553#define buddy           (1UL << PG_buddy)
 554#define reserved        (1UL << PG_reserved)
 555
 556static struct page_state {
 557        unsigned long mask;
 558        unsigned long res;
 559        char *msg;
 560        int (*action)(struct page *p, unsigned long pfn);
 561} error_states[] = {
 562        { reserved,     reserved,       "reserved kernel",      me_ignore },
 563        { buddy,        buddy,          "free kernel",  me_free },
 564
 565        /*
 566         * Could in theory check if slab page is free or if we can drop
 567         * currently unused objects without touching them. But just
 568         * treat it as standard kernel for now.
 569         */
 570        { slab,         slab,           "kernel slab",  me_kernel },
 571
 572#ifdef CONFIG_PAGEFLAGS_EXTENDED
 573        { head,         head,           "huge",         me_huge_page },
 574        { tail,         tail,           "huge",         me_huge_page },
 575#else
 576        { compound,     compound,       "huge",         me_huge_page },
 577#endif
 578
 579        { sc|dirty,     sc|dirty,       "swapcache",    me_swapcache_dirty },
 580        { sc|dirty,     sc,             "swapcache",    me_swapcache_clean },
 581
 582        { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty},
 583        { unevict,      unevict,        "unevictable LRU", me_pagecache_clean},
 584
 585#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 586        { mlock|dirty,  mlock|dirty,    "mlocked LRU",  me_pagecache_dirty },
 587        { mlock,        mlock,          "mlocked LRU",  me_pagecache_clean },
 588#endif
 589
 590        { lru|dirty,    lru|dirty,      "LRU",          me_pagecache_dirty },
 591        { lru|dirty,    lru,            "clean LRU",    me_pagecache_clean },
 592        { swapbacked,   swapbacked,     "anonymous",    me_pagecache_clean },
 593
 594        /*
 595         * Catchall entry: must be at end.
 596         */
 597        { 0,            0,              "unknown page state",   me_unknown },
 598};
 599
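    /*
     * Report the outcome of handling one corrupted page, noting whether the
     * page was dirty at the time.
     */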
 600static void action_result(unsigned long pfn, char *msg, int result)
 601{
 602        struct page *page = NULL;
 603        if (pfn_valid(pfn))
 604                page = pfn_to_page(pfn);
 605
 606        printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
 607                pfn,
 608                page && PageDirty(page) ? "dirty " : "",
 609                msg, action_name[result]);
 610}
 611
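    /*
     * Run the handler for the matched page state, report the result and
     * complain when someone other than us (and the ref extra references the
     * caller already holds) still has the page referenced.
     */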
 612static int page_action(struct page_state *ps, struct page *p,
 613                        unsigned long pfn, int ref)
 614{
 615        int result;
 616        int count;
 617
 618        result = ps->action(p, pfn);
 619        action_result(pfn, ps->msg, result);
 620
 621        count = page_count(p) - 1 - ref;
 622        if (count != 0)
 623                printk(KERN_ERR
 624                       "MCE %#lx: %s page still referenced by %d users\n",
 625                       pfn, ps->msg, count);
 626
 627        /* Could do more checks here if page looks ok */
 628        /*
 629         * Could adjust zone counters here to correct for the missing page.
 630         */
 631
 632        return result == RECOVERED ? 0 : -EBUSY;
 633}
 634
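    /* How often try_to_unmap() is retried before giving up. */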
 635#define N_UNMAP_TRIES 5
 636
 637/*
 638 * Do all that is necessary to remove user space mappings. Unmap
 639 * the pages and send SIGBUS to the processes if the data was dirty.
 640 */
 641static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
 642                                  int trapno)
 643{
 644        enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
 645        struct address_space *mapping;
 646        LIST_HEAD(tokill);
 647        int ret;
 648        int i;
 649        int kill = 1;
 650
 651        if (PageReserved(p) || PageCompound(p) || PageSlab(p) || PageKsm(p))
 652                return;
 653
 654        /*
 655         * This check implies we don't kill processes early if their
 656         * pages are only in the swap cache. Those are always late kills.
 657         */
 658        if (!page_mapped(p))
 659                return;
 660
 661        if (PageSwapCache(p)) {
 662                printk(KERN_ERR
 663                       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
 664                ttu |= TTU_IGNORE_HWPOISON;
 665        }
 666
 667        /*
 668         * Propagate the dirty bit from PTEs to struct page first, because we
 669         * need this to decide if we should kill or just drop the page.
 670         */
 671        mapping = page_mapping(p);
 672        if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
 673                if (page_mkclean(p)) {
 674                        SetPageDirty(p);
 675                } else {
 676                        kill = 0;
 677                        ttu |= TTU_IGNORE_HWPOISON;
 678                        printk(KERN_INFO
 679        "MCE %#lx: corrupted page was clean: dropped without side effects\n",
 680                                pfn);
 681                }
 682        }
 683
 684        /*
 685         * First collect all the processes that have the page
 686         * mapped in dirty form.  This has to be done before try_to_unmap,
 687         * because ttu takes the rmap data structures down.
 688         *
 689         * Error handling: We ignore errors here because
 690         * there's nothing that can be done.
 691         */
 692        if (kill)
 693                collect_procs(p, &tokill);
 694
 695        /*
 696         * try_to_unmap can fail temporarily due to races.
 697         * Try a few times (RED-PEN better strategy?)
 698         */
 699        for (i = 0; i < N_UNMAP_TRIES; i++) {
 700                ret = try_to_unmap(p, ttu);
 701                if (ret == SWAP_SUCCESS)
 702                        break;
 703                pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn,  ret);
 704        }
 705
 706        if (ret != SWAP_SUCCESS)
 707                printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
 708                                pfn, page_mapcount(p));
 709
 710        /*
 711         * Now that the dirty bit has been propagated to the
 712         * struct page and all unmaps done we can decide if
 713         * killing is needed or not.  Only kill when the page
 714         * was dirty, otherwise the tokill list is merely
 715         * freed.  When there was a problem unmapping earlier
 716         * use a more forceful uncatchable kill to prevent
 717         * any accesses to the poisoned memory.
 718         */
 719        kill_procs_ao(&tokill, !!PageDirty(p), trapno,
 720                      ret != SWAP_SUCCESS, pfn);
 721}
 722
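    /*
     * __memory_failure() does the real work.  ref is the number of extra
     * references the caller already holds on the page; it is accounted for
     * when checking for leftover references in page_action().
     */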
 723int __memory_failure(unsigned long pfn, int trapno, int ref)
 724{
 725        unsigned long lru_flag;
 726        struct page_state *ps;
 727        struct page *p;
 728        int res;
 729
 730        if (!sysctl_memory_failure_recovery)
 731                panic("Memory failure from trap %d on page %lx", trapno, pfn);
 732
 733        if (!pfn_valid(pfn)) {
 734                action_result(pfn, "memory outside kernel control", IGNORED);
 735                return -EIO;
 736        }
 737
 738        p = pfn_to_page(pfn);
 739        if (TestSetPageHWPoison(p)) {
 740                action_result(pfn, "already hardware poisoned", IGNORED);
 741                return 0;
 742        }
 743
 744        atomic_long_add(1, &mce_bad_pages);
 745
 746        /*
 747         * We need/can do nothing about count=0 pages.
 748         * 1) it's a free page, and therefore in safe hands:
 749         *    prep_new_page() will be the gatekeeper.
 750         * 2) it's part of a non-compound high order page.
 751         *    Implies some kernel user: we cannot stop them from
 752         *    reading/writing the page; let's pray that the page has been
 753         *    used and will be freed some time later.
 754         * In fact it's dangerous to directly bump up page count from 0,
 755         * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
 756         */
 757        if (!get_page_unless_zero(compound_head(p))) {
 758                action_result(pfn, "free or high order kernel", IGNORED);
 759                return PageBuddy(compound_head(p)) ? 0 : -EBUSY;
 760        }
 761
 762        /*
 763         * We ignore non-LRU pages for good reasons.
 764         * - PG_locked is only well defined for LRU pages and a few others
 765         * - to avoid races with __set_page_locked()
 766         * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
 767         * The check (unnecessarily) ignores LRU pages being isolated and
 768         * walked by the page reclaim code, however that's not a big loss.
 769         */
 770        if (!PageLRU(p))
 771                lru_add_drain_all();
 772        lru_flag = p->flags & lru;
 773        if (isolate_lru_page(p)) {
 774                action_result(pfn, "non LRU", IGNORED);
 775                put_page(p);
 776                return -EBUSY;
 777        }
 778        page_cache_release(p);
 779
 780        /*
 781         * Lock the page and wait for writeback to finish.
 782         * It's very difficult to mess with pages currently under IO
 783         * and in many cases impossible, so we just avoid it here.
 784         */
 785        lock_page_nosync(p);
 786        wait_on_page_writeback(p);
 787
 788        /*
 789         * Now take care of user space mappings.
 790         */
 791        hwpoison_user_mappings(p, pfn, trapno);
 792
 793        /*
 794         * Torn down by someone else?
 795         */
 796        if ((lru_flag & lru) && !PageSwapCache(p) && p->mapping == NULL) {
 797                action_result(pfn, "already truncated LRU", IGNORED);
 798                res = 0;
 799                goto out;
 800        }
 801
 802        res = -EBUSY;
 803        for (ps = error_states;; ps++) {
 804                if (((p->flags | lru_flag) & ps->mask) == ps->res) {
 805                        res = page_action(ps, p, pfn, ref);
 806                        break;
 807                }
 808        }
 809out:
 810        unlock_page(p);
 811        return res;
 812}
 813EXPORT_SYMBOL_GPL(__memory_failure);
 814
 815/**
 816 * memory_failure - Handle memory failure of a page.
 817 * @pfn: Page frame number of the corrupted page
 818 * @trapno: Trap number reported in the signal to user space.
 819 *
 820 * This function is called by the low level machine check code
 821 * of an architecture when it detects hardware memory corruption
 822 * of a page. It tries its best to recover, which includes
 823 * dropping pages, killing processes etc.
 824 *
 825 * The function is primarily of use for corruptions that
 826 * happen outside the current execution context (e.g. when
 827 * detected by a background scrubber)
 828 *
 829 * Must run in process context (e.g. a work queue) with interrupts
 830 * enabled and no spinlocks held.
 831 */
 832void memory_failure(unsigned long pfn, int trapno)
 833{
 834        __memory_failure(pfn, trapno, 0);
 835}
 836