linux/arch/x86/kernel/cpu/mce/core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Machine check handler.
   4 *
   5 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
   6 * Rest from unknown author(s).
   7 * 2004 Andi Kleen. Rewrote most of it.
   8 * Copyright 2008 Intel Corporation
   9 * Author: Andi Kleen
  10 */
  11
  12#include <linux/thread_info.h>
  13#include <linux/capability.h>
  14#include <linux/miscdevice.h>
  15#include <linux/ratelimit.h>
  16#include <linux/rcupdate.h>
  17#include <linux/kobject.h>
  18#include <linux/uaccess.h>
  19#include <linux/kdebug.h>
  20#include <linux/kernel.h>
  21#include <linux/percpu.h>
  22#include <linux/string.h>
  23#include <linux/device.h>
  24#include <linux/syscore_ops.h>
  25#include <linux/delay.h>
  26#include <linux/ctype.h>
  27#include <linux/sched.h>
  28#include <linux/sysfs.h>
  29#include <linux/types.h>
  30#include <linux/slab.h>
  31#include <linux/init.h>
  32#include <linux/kmod.h>
  33#include <linux/poll.h>
  34#include <linux/nmi.h>
  35#include <linux/cpu.h>
  36#include <linux/ras.h>
  37#include <linux/smp.h>
  38#include <linux/fs.h>
  39#include <linux/mm.h>
  40#include <linux/debugfs.h>
  41#include <linux/irq_work.h>
  42#include <linux/export.h>
  43#include <linux/set_memory.h>
  44#include <linux/sync_core.h>
  45#include <linux/task_work.h>
  46#include <linux/hardirq.h>
  47
  48#include <asm/intel-family.h>
  49#include <asm/processor.h>
  50#include <asm/traps.h>
  51#include <asm/tlbflush.h>
  52#include <asm/mce.h>
  53#include <asm/msr.h>
  54#include <asm/reboot.h>
  55
  56#include "internal.h"
  57
  58/* sysfs synchronization */
  59static DEFINE_MUTEX(mce_sysfs_mutex);
  60
  61#define CREATE_TRACE_POINTS
  62#include <trace/events/mce.h>
  63
  64#define SPINUNIT                100     /* 100ns */
  65
  66DEFINE_PER_CPU(unsigned, mce_exception_count);
  67
  68DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
  69
  70struct mce_bank {
  71        u64                     ctl;                    /* subevents to enable */
  72        bool                    init;                   /* initialise bank? */
  73};
  74static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
  75
  76#define ATTR_LEN               16
  77/* One object for each MCE bank, shared by all CPUs */
  78struct mce_bank_dev {
  79        struct device_attribute attr;                   /* device attribute */
  80        char                    attrname[ATTR_LEN];     /* attribute name */
  81        u8                      bank;                   /* bank number */
  82};
  83static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];
  84
  85struct mce_vendor_flags mce_flags __read_mostly;
  86
  87struct mca_config mca_cfg __read_mostly = {
  88        .bootlog  = -1,
  89        /*
  90         * Tolerant levels:
  91         * 0: always panic on uncorrected errors, log corrected errors
  92         * 1: panic or SIGBUS on uncorrected errors, log corrected errors
  93         * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
  94         * 3: never panic or SIGBUS, log all errors (for testing only)
  95         */
  96        .tolerant = 1,
  97        .monarch_timeout = -1
  98};
  99
 100static DEFINE_PER_CPU(struct mce, mces_seen);
 101static unsigned long mce_need_notify;
 102static int cpu_missing;
 103
 104/*
 105 * MCA banks polled by the periodic polling timer for corrected events.
 106 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 107 */
 108DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 109        [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
 110};
 111
 112/*
 113 * MCA banks controlled through firmware first for corrected errors.
 114 * This is a global list of banks for which we won't enable CMCI and we
 115 * won't poll. Firmware controls these banks and is responsible for
 116 * reporting corrected errors through GHES. Uncorrected/recoverable
 117 * errors are still notified through a machine check.
 118 */
 119mce_banks_t mce_banks_ce_disabled;
 120
 121static struct work_struct mce_work;
 122static struct irq_work mce_irq_work;
 123
 124static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 125
 126/*
 127 * CPU/chipset specific EDAC code can register a notifier call here to print
 128 * MCE errors in a human-readable form.
 129 */
 130BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 131
 132/* Do initial initialization of a struct mce */
 133noinstr void mce_setup(struct mce *m)
 134{
 135        memset(m, 0, sizeof(struct mce));
 136        m->cpu = m->extcpu = smp_processor_id();
 137        /* need the internal __ version to avoid deadlocks */
 138        m->time = __ktime_get_real_seconds();
 139        m->cpuvendor = boot_cpu_data.x86_vendor;
 140        m->cpuid = cpuid_eax(1);
 141        m->socketid = cpu_data(m->extcpu).phys_proc_id;
 142        m->apicid = cpu_data(m->extcpu).initial_apicid;
 143        m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
 144
 145        if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 146                m->ppin = __rdmsr(MSR_PPIN);
 147        else if (this_cpu_has(X86_FEATURE_AMD_PPIN))
 148                m->ppin = __rdmsr(MSR_AMD_PPIN);
 149
 150        m->microcode = boot_cpu_data.microcode;
 151}
 152
 153DEFINE_PER_CPU(struct mce, injectm);
 154EXPORT_PER_CPU_SYMBOL_GPL(injectm);
 155
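    /*
     * Hand the record to the lockless gen pool; if it was added successfully,
     * kick irq_work so the record is processed by the notifier chain outside
     * of exception context.
     */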
 156void mce_log(struct mce *m)
 157{
 158        if (!mce_gen_pool_add(m))
 159                irq_work_queue(&mce_irq_work);
 160}
 161EXPORT_SYMBOL_GPL(mce_log);
 162
 163void mce_register_decode_chain(struct notifier_block *nb)
 164{
 165        if (WARN_ON(nb->priority < MCE_PRIO_LOWEST ||
 166                    nb->priority > MCE_PRIO_HIGHEST))
 167                return;
 168
 169        blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 170}
 171EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 172
 173void mce_unregister_decode_chain(struct notifier_block *nb)
 174{
 175        blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 176}
 177EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
 178
 179static inline u32 ctl_reg(int bank)
 180{
 181        return MSR_IA32_MCx_CTL(bank);
 182}
 183
 184static inline u32 status_reg(int bank)
 185{
 186        return MSR_IA32_MCx_STATUS(bank);
 187}
 188
 189static inline u32 addr_reg(int bank)
 190{
 191        return MSR_IA32_MCx_ADDR(bank);
 192}
 193
 194static inline u32 misc_reg(int bank)
 195{
 196        return MSR_IA32_MCx_MISC(bank);
 197}
 198
 199static inline u32 smca_ctl_reg(int bank)
 200{
 201        return MSR_AMD64_SMCA_MCx_CTL(bank);
 202}
 203
 204static inline u32 smca_status_reg(int bank)
 205{
 206        return MSR_AMD64_SMCA_MCx_STATUS(bank);
 207}
 208
 209static inline u32 smca_addr_reg(int bank)
 210{
 211        return MSR_AMD64_SMCA_MCx_ADDR(bank);
 212}
 213
 214static inline u32 smca_misc_reg(int bank)
 215{
 216        return MSR_AMD64_SMCA_MCx_MISC(bank);
 217}
 218
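    /*
     * Default (legacy) MCA register layout. CPUs with Scalable MCA switch
     * these callbacks to the smca_*_reg() variants above during init.
     */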
 219struct mca_msr_regs msr_ops = {
 220        .ctl    = ctl_reg,
 221        .status = status_reg,
 222        .addr   = addr_reg,
 223        .misc   = misc_reg
 224};
 225
 226static void __print_mce(struct mce *m)
 227{
 228        pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
 229                 m->extcpu,
 230                 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
 231                 m->mcgstatus, m->bank, m->status);
 232
 233        if (m->ip) {
 234                pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
 235                        !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
 236                        m->cs, m->ip);
 237
 238                if (m->cs == __KERNEL_CS)
 239                        pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 240                pr_cont("\n");
 241        }
 242
 243        pr_emerg(HW_ERR "TSC %llx ", m->tsc);
 244        if (m->addr)
 245                pr_cont("ADDR %llx ", m->addr);
 246        if (m->misc)
 247                pr_cont("MISC %llx ", m->misc);
 248        if (m->ppin)
 249                pr_cont("PPIN %llx ", m->ppin);
 250
 251        if (mce_flags.smca) {
 252                if (m->synd)
 253                        pr_cont("SYND %llx ", m->synd);
 254                if (m->ipid)
 255                        pr_cont("IPID %llx ", m->ipid);
 256        }
 257
 258        pr_cont("\n");
 259
 260        /*
 261         * Note this output is parsed by external tools and old fields
 262         * should not be changed.
 263         */
 264        pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 265                m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
 266                m->microcode);
 267}
 268
 269static void print_mce(struct mce *m)
 270{
 271        __print_mce(m);
 272
 273        if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 274                pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 275}
 276
 277#define PANIC_TIMEOUT 5 /* 5 seconds */
 278
 279static atomic_t mce_panicked;
 280
 281static int fake_panic;
 282static atomic_t mce_fake_panicked;
 283
 284/* Panic in progress. Enable interrupts and wait for final IPI */
 285static void wait_for_panic(void)
 286{
 287        long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
 288
 289        preempt_disable();
 290        local_irq_enable();
 291        while (timeout-- > 0)
 292                udelay(1);
 293        if (panic_timeout == 0)
 294                panic_timeout = mca_cfg.panic_timeout;
  295        panic("Panicking machine check CPU died");
 296}
 297
 298static void mce_panic(const char *msg, struct mce *final, char *exp)
 299{
 300        int apei_err = 0;
 301        struct llist_node *pending;
 302        struct mce_evt_llist *l;
 303
 304        if (!fake_panic) {
 305                /*
 306                 * Make sure only one CPU runs in machine check panic
 307                 */
 308                if (atomic_inc_return(&mce_panicked) > 1)
 309                        wait_for_panic();
 310                barrier();
 311
 312                bust_spinlocks(1);
 313                console_verbose();
 314        } else {
 315                /* Don't log too much for fake panic */
 316                if (atomic_inc_return(&mce_fake_panicked) > 1)
 317                        return;
 318        }
 319        pending = mce_gen_pool_prepare_records();
 320        /* First print corrected ones that are still unlogged */
 321        llist_for_each_entry(l, pending, llnode) {
 322                struct mce *m = &l->mce;
 323                if (!(m->status & MCI_STATUS_UC)) {
 324                        print_mce(m);
 325                        if (!apei_err)
 326                                apei_err = apei_write_mce(m);
 327                }
 328        }
 329        /* Now print uncorrected but with the final one last */
 330        llist_for_each_entry(l, pending, llnode) {
 331                struct mce *m = &l->mce;
 332                if (!(m->status & MCI_STATUS_UC))
 333                        continue;
 334                if (!final || mce_cmp(m, final)) {
 335                        print_mce(m);
 336                        if (!apei_err)
 337                                apei_err = apei_write_mce(m);
 338                }
 339        }
 340        if (final) {
 341                print_mce(final);
 342                if (!apei_err)
 343                        apei_err = apei_write_mce(final);
 344        }
 345        if (cpu_missing)
 346                pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
 347        if (exp)
 348                pr_emerg(HW_ERR "Machine check: %s\n", exp);
 349        if (!fake_panic) {
 350                if (panic_timeout == 0)
 351                        panic_timeout = mca_cfg.panic_timeout;
 352                panic(msg);
 353        } else
 354                pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
 355}
 356
 357/* Support code for software error injection */
 358
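    /*
     * Map an MCA MSR to the offset of the mirrored field in the per-CPU
     * injectm record; returns -1 for MSRs that are not mirrored.
     */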
 359static int msr_to_offset(u32 msr)
 360{
 361        unsigned bank = __this_cpu_read(injectm.bank);
 362
 363        if (msr == mca_cfg.rip_msr)
 364                return offsetof(struct mce, ip);
 365        if (msr == msr_ops.status(bank))
 366                return offsetof(struct mce, status);
 367        if (msr == msr_ops.addr(bank))
 368                return offsetof(struct mce, addr);
 369        if (msr == msr_ops.misc(bank))
 370                return offsetof(struct mce, misc);
 371        if (msr == MSR_IA32_MCG_STATUS)
 372                return offsetof(struct mce, mcgstatus);
 373        return -1;
 374}
 375
 376__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
 377                                      struct pt_regs *regs, int trapnr,
 378                                      unsigned long error_code,
 379                                      unsigned long fault_addr)
 380{
 381        pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
 382                 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
 383
 384        show_stack_regs(regs);
 385
 386        panic("MCA architectural violation!\n");
 387
 388        while (true)
 389                cpu_relax();
 390
 391        return true;
 392}
 393
 394/* MSR access wrappers used for error injection */
 395static noinstr u64 mce_rdmsrl(u32 msr)
 396{
 397        DECLARE_ARGS(val, low, high);
 398
 399        if (__this_cpu_read(injectm.finished)) {
 400                int offset;
 401                u64 ret;
 402
 403                instrumentation_begin();
 404
 405                offset = msr_to_offset(msr);
 406                if (offset < 0)
 407                        ret = 0;
 408                else
 409                        ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 410
 411                instrumentation_end();
 412
 413                return ret;
 414        }
 415
 416        /*
 417         * RDMSR on MCA MSRs should not fault. If they do, this is very much an
 418         * architectural violation and needs to be reported to hw vendor. Panic
 419         * the box to not allow any further progress.
 420         */
 421        asm volatile("1: rdmsr\n"
 422                     "2:\n"
 423                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
 424                     : EAX_EDX_RET(val, low, high) : "c" (msr));
 425
 426
 427        return EAX_EDX_VAL(val, low, high);
 428}
 429
 430__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
 431                                      struct pt_regs *regs, int trapnr,
 432                                      unsigned long error_code,
 433                                      unsigned long fault_addr)
 434{
 435        pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
 436                 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
 437                  regs->ip, (void *)regs->ip);
 438
 439        show_stack_regs(regs);
 440
 441        panic("MCA architectural violation!\n");
 442
 443        while (true)
 444                cpu_relax();
 445
 446        return true;
 447}
 448
 449static noinstr void mce_wrmsrl(u32 msr, u64 v)
 450{
 451        u32 low, high;
 452
 453        if (__this_cpu_read(injectm.finished)) {
 454                int offset;
 455
 456                instrumentation_begin();
 457
 458                offset = msr_to_offset(msr);
 459                if (offset >= 0)
 460                        *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 461
 462                instrumentation_end();
 463
 464                return;
 465        }
 466
 467        low  = (u32)v;
 468        high = (u32)(v >> 32);
 469
 470        /* See comment in mce_rdmsrl() */
 471        asm volatile("1: wrmsr\n"
 472                     "2:\n"
 473                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
 474                     : : "c" (msr), "a"(low), "d" (high) : "memory");
 475}
 476
 477/*
 478 * Collect all global (w.r.t. this processor) status about this machine
 479 * check into our "mce" struct so that we can use it later to assess
 480 * the severity of the problem as we read per-bank specific details.
 481 */
 482static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
 483{
 484        mce_setup(m);
 485
 486        m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
 487        if (regs) {
 488                /*
 489                 * Get the address of the instruction at the time of
 490                 * the machine check error.
 491                 */
 492                if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
 493                        m->ip = regs->ip;
 494                        m->cs = regs->cs;
 495
 496                        /*
 497                         * When in VM86 mode make the cs look like ring 3
 498                         * always. This is a lie, but it's better than passing
 499                         * the additional vm86 bit around everywhere.
 500                         */
 501                        if (v8086_mode(regs))
 502                                m->cs |= 3;
 503                }
 504                /* Use accurate RIP reporting if available. */
 505                if (mca_cfg.rip_msr)
 506                        m->ip = mce_rdmsrl(mca_cfg.rip_msr);
 507        }
 508}
 509
 510int mce_available(struct cpuinfo_x86 *c)
 511{
 512        if (mca_cfg.disabled)
 513                return 0;
 514        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 515}
 516
 517static void mce_schedule_work(void)
 518{
 519        if (!mce_gen_pool_empty())
 520                schedule_work(&mce_work);
 521}
 522
 523static void mce_irq_work_cb(struct irq_work *entry)
 524{
 525        mce_schedule_work();
 526}
 527
 528/*
 529 * Check if the address reported by the CPU is in a format we can parse.
 530 * It would be possible to add code for most other cases, but all would
 531 * be somewhat complicated (e.g. segment offset would require an instruction
 532 * parser). So only support physical addresses up to page granularity for now.
 533 */
 534int mce_usable_address(struct mce *m)
 535{
 536        if (!(m->status & MCI_STATUS_ADDRV))
 537                return 0;
 538
 539        /* Checks after this one are Intel/Zhaoxin-specific: */
 540        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
 541            boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
 542                return 1;
 543
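            /*
             * On Intel/Zhaoxin the address is only usable if MCi_MISC is valid
             * and reports a physical address at page granularity or finer.
             */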
 544        if (!(m->status & MCI_STATUS_MISCV))
 545                return 0;
 546
 547        if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
 548                return 0;
 549
 550        if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
 551                return 0;
 552
 553        return 1;
 554}
 555EXPORT_SYMBOL_GPL(mce_usable_address);
 556
 557bool mce_is_memory_error(struct mce *m)
 558{
 559        switch (m->cpuvendor) {
 560        case X86_VENDOR_AMD:
 561        case X86_VENDOR_HYGON:
 562                return amd_mce_is_memory_error(m);
 563
 564        case X86_VENDOR_INTEL:
 565        case X86_VENDOR_ZHAOXIN:
 566                /*
 567                 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
 568                 *
 569                 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
 570                 * indicating a memory error. Bit 8 is used for indicating a
 571                 * cache hierarchy error. The combination of bit 2 and bit 3
  572                 * is used for indicating a `generic' cache hierarchy error.
  573                 * But we can't just blindly check the above bits, because if
  574                 * bit 11 is set, then it is a bus/interconnect error - and
  575                 * either way the above bits just give more detail on what
 576                 * bus/interconnect error happened. Note that bit 12 can be
 577                 * ignored, as it's the "filter" bit.
 578                 */
 579                return (m->status & 0xef80) == BIT(7) ||
 580                       (m->status & 0xef00) == BIT(8) ||
 581                       (m->status & 0xeffc) == 0xc;
 582
 583        default:
 584                return false;
 585        }
 586}
 587EXPORT_SYMBOL_GPL(mce_is_memory_error);
 588
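    /*
     * Without software error recovery (SER) or a valid MISC register the
     * blast radius is unknown, so conservatively assume the whole page.
     */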
 589static bool whole_page(struct mce *m)
 590{
 591        if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
 592                return true;
 593
 594        return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
 595}
 596
 597bool mce_is_correctable(struct mce *m)
 598{
 599        if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 600                return false;
 601
 602        if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
 603                return false;
 604
 605        if (m->status & MCI_STATUS_UC)
 606                return false;
 607
 608        return true;
 609}
 610EXPORT_SYMBOL_GPL(mce_is_correctable);
 611
 612static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
 613                              void *data)
 614{
 615        struct mce *m = (struct mce *)data;
 616
 617        if (!m)
 618                return NOTIFY_DONE;
 619
 620        /* Emit the trace record: */
 621        trace_mce_record(m);
 622
 623        set_bit(0, &mce_need_notify);
 624
 625        mce_notify_irq();
 626
 627        return NOTIFY_DONE;
 628}
 629
 630static struct notifier_block early_nb = {
 631        .notifier_call  = mce_early_notifier,
 632        .priority       = MCE_PRIO_EARLY,
 633};
 634
 635static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
 636                              void *data)
 637{
 638        struct mce *mce = (struct mce *)data;
 639        unsigned long pfn;
 640
 641        if (!mce || !mce_usable_address(mce))
 642                return NOTIFY_DONE;
 643
 644        if (mce->severity != MCE_AO_SEVERITY &&
 645            mce->severity != MCE_DEFERRED_SEVERITY)
 646                return NOTIFY_DONE;
 647
 648        pfn = mce->addr >> PAGE_SHIFT;
 649        if (!memory_failure(pfn, 0)) {
 650                set_mce_nospec(pfn, whole_page(mce));
 651                mce->kflags |= MCE_HANDLED_UC;
 652        }
 653
 654        return NOTIFY_OK;
 655}
 656
 657static struct notifier_block mce_uc_nb = {
 658        .notifier_call  = uc_decode_notifier,
 659        .priority       = MCE_PRIO_UC,
 660};
 661
 662static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
 663                                void *data)
 664{
 665        struct mce *m = (struct mce *)data;
 666
 667        if (!m)
 668                return NOTIFY_DONE;
 669
 670        if (mca_cfg.print_all || !m->kflags)
 671                __print_mce(m);
 672
 673        return NOTIFY_DONE;
 674}
 675
 676static struct notifier_block mce_default_nb = {
 677        .notifier_call  = mce_default_notifier,
 678        /* lowest prio, we want it to run last. */
 679        .priority       = MCE_PRIO_LOWEST,
 680};
 681
 682/*
 683 * Read ADDR and MISC registers.
 684 */
 685static void mce_read_aux(struct mce *m, int i)
 686{
 687        if (m->status & MCI_STATUS_MISCV)
 688                m->misc = mce_rdmsrl(msr_ops.misc(i));
 689
 690        if (m->status & MCI_STATUS_ADDRV) {
 691                m->addr = mce_rdmsrl(msr_ops.addr(i));
 692
 693                /*
 694                 * Mask the reported address by the reported granularity.
 695                 */
 696                if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
 697                        u8 shift = MCI_MISC_ADDR_LSB(m->misc);
 698                        m->addr >>= shift;
 699                        m->addr <<= shift;
 700                }
 701
 702                /*
 703                 * Extract [55:<lsb>] where lsb is the least significant
 704                 * *valid* bit of the address bits.
 705                 */
 706                if (mce_flags.smca) {
 707                        u8 lsb = (m->addr >> 56) & 0x3f;
 708
 709                        m->addr &= GENMASK_ULL(55, lsb);
 710                }
 711        }
 712
 713        if (mce_flags.smca) {
 714                m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
 715
 716                if (m->status & MCI_STATUS_SYNDV)
 717                        m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
 718        }
 719}
 720
 721DEFINE_PER_CPU(unsigned, mce_poll_count);
 722
 723/*
 724 * Poll for corrected events or events that happened before reset.
 725 * Those are just logged through /dev/mcelog.
 726 *
 727 * This is executed in standard interrupt context.
 728 *
  729 * Note: the spec recommends panicking for fatal unsignalled
  730 * errors here. However this would be quite problematic --
  731 * we would need to reimplement the Monarch handling and
  732 * it would mess up the exclusion between exception handler
  733 * and poll handler -- so we skip this for now.
  734 * These cases should not happen anyway, or only when the CPU
  735 * is already totally confused. In this case it's likely it will
  736 * not fully execute the machine check handler either.
 737 */
 738bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 739{
 740        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
 741        bool error_seen = false;
 742        struct mce m;
 743        int i;
 744
 745        this_cpu_inc(mce_poll_count);
 746
 747        mce_gather_info(&m, NULL);
 748
 749        if (flags & MCP_TIMESTAMP)
 750                m.tsc = rdtsc();
 751
 752        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 753                if (!mce_banks[i].ctl || !test_bit(i, *b))
 754                        continue;
 755
 756                m.misc = 0;
 757                m.addr = 0;
 758                m.bank = i;
 759
 760                barrier();
 761                m.status = mce_rdmsrl(msr_ops.status(i));
 762
 763                /* If this entry is not valid, ignore it */
 764                if (!(m.status & MCI_STATUS_VAL))
 765                        continue;
 766
 767                /*
 768                 * If we are logging everything (at CPU online) or this
 769                 * is a corrected error, then we must log it.
 770                 */
 771                if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
 772                        goto log_it;
 773
 774                /*
 775                 * Newer Intel systems that support software error
 776                 * recovery need to make additional checks. Other
 777                 * CPUs should skip over uncorrected errors, but log
 778                 * everything else.
 779                 */
 780                if (!mca_cfg.ser) {
 781                        if (m.status & MCI_STATUS_UC)
 782                                continue;
 783                        goto log_it;
 784                }
 785
 786                /* Log "not enabled" (speculative) errors */
 787                if (!(m.status & MCI_STATUS_EN))
 788                        goto log_it;
 789
 790                /*
 791                 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
 792                 * UC == 1 && PCC == 0 && S == 0
 793                 */
 794                if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
 795                        goto log_it;
 796
 797                /*
 798                 * Skip anything else. Presumption is that our read of this
 799                 * bank is racing with a machine check. Leave the log alone
 800                 * for do_machine_check() to deal with it.
 801                 */
 802                continue;
 803
 804log_it:
 805                error_seen = true;
 806
 807                if (flags & MCP_DONTLOG)
 808                        goto clear_it;
 809
 810                mce_read_aux(&m, i);
 811                m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false);
 812                /*
 813                 * Don't get the IP here because it's unlikely to
 814                 * have anything to do with the actual error location.
 815                 */
 816
 817                if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
 818                        goto clear_it;
 819
 820                mce_log(&m);
 821
 822clear_it:
 823                /*
 824                 * Clear state for this bank.
 825                 */
 826                mce_wrmsrl(msr_ops.status(i), 0);
 827        }
 828
 829        /*
 830         * Don't clear MCG_STATUS here because it's only defined for
 831         * exceptions.
 832         */
 833
 834        sync_core();
 835
 836        return error_seen;
 837}
 838EXPORT_SYMBOL_GPL(machine_check_poll);
 839
 840/*
 841 * Do a quick check if any of the events requires a panic.
 842 * This decides if we keep the events around or clear them.
 843 */
 844static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 845                          struct pt_regs *regs)
 846{
 847        char *tmp = *msg;
 848        int i;
 849
 850        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 851                m->status = mce_rdmsrl(msr_ops.status(i));
 852                if (!(m->status & MCI_STATUS_VAL))
 853                        continue;
 854
 855                __set_bit(i, validp);
 856                if (quirk_no_way_out)
 857                        quirk_no_way_out(i, m, regs);
 858
 859                m->bank = i;
 860                if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
 861                        mce_read_aux(m, i);
 862                        *msg = tmp;
 863                        return 1;
 864                }
 865        }
 866        return 0;
 867}
 868
 869/*
 870 * Variable to establish order between CPUs while scanning.
  871 * Each CPU spins initially until mce_executing equals its number.
 872 */
 873static atomic_t mce_executing;
 874
 875/*
 876 * Defines order of CPUs on entry. First CPU becomes Monarch.
 877 */
 878static atomic_t mce_callin;
 879
 880/*
  881 * Track which CPUs entered the MCA broadcast synchronization and which did
  882 * not, in order to print holdouts.
 883 */
 884static cpumask_t mce_missing_cpus = CPU_MASK_ALL;
 885
 886/*
 887 * Check if a timeout waiting for other CPUs happened.
 888 */
 889static int mce_timed_out(u64 *t, const char *msg)
 890{
 891        /*
 892         * The others already did panic for some reason.
 893         * Bail out like in a timeout.
 894         * rmb() to tell the compiler that system_state
 895         * might have been modified by someone else.
 896         */
 897        rmb();
 898        if (atomic_read(&mce_panicked))
 899                wait_for_panic();
 900        if (!mca_cfg.monarch_timeout)
 901                goto out;
 902        if ((s64)*t < SPINUNIT) {
 903                if (mca_cfg.tolerant <= 1) {
 904                        if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
 905                                pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
 906                                         cpumask_pr_args(&mce_missing_cpus));
 907                        mce_panic(msg, NULL, NULL);
 908                }
 909                cpu_missing = 1;
 910                return 1;
 911        }
 912        *t -= SPINUNIT;
 913out:
 914        touch_nmi_watchdog();
 915        return 0;
 916}
 917
 918/*
 919 * The Monarch's reign.  The Monarch is the CPU who entered
 920 * the machine check handler first. It waits for the others to
 921 * raise the exception too and then grades them. When any
  922 * error is fatal, panic. Only then let the others continue.
 923 *
 924 * The other CPUs entering the MCE handler will be controlled by the
 925 * Monarch. They are called Subjects.
 926 *
  927 * This way we prevent any potential data corruption in an unrecoverable case
  928 * and also make sure all CPUs' errors are always examined.
 929 *
 930 * Also this detects the case of a machine check event coming from outer
  931 * space (not detected by any CPU). In this case some external agent wants
 932 * us to shut down, so panic too.
 933 *
 934 * The other CPUs might still decide to panic if the handler happens
  935 * in an unrecoverable place, but in this case the system is in a semi-stable
 936 * state and won't corrupt anything by itself. It's ok to let the others
 937 * continue for a bit first.
 938 *
 939 * All the spin loops have timeouts; when a timeout happens a CPU
 940 * typically elects itself to be Monarch.
 941 */
 942static void mce_reign(void)
 943{
 944        int cpu;
 945        struct mce *m = NULL;
 946        int global_worst = 0;
 947        char *msg = NULL;
 948
 949        /*
 950         * This CPU is the Monarch and the other CPUs have run
 951         * through their handlers.
 952         * Grade the severity of the errors of all the CPUs.
 953         */
 954        for_each_possible_cpu(cpu) {
 955                struct mce *mtmp = &per_cpu(mces_seen, cpu);
 956
 957                if (mtmp->severity > global_worst) {
 958                        global_worst = mtmp->severity;
 959                        m = &per_cpu(mces_seen, cpu);
 960                }
 961        }
 962
 963        /*
 964         * Cannot recover? Panic here then.
 965         * This dumps all the mces in the log buffer and stops the
 966         * other CPUs.
 967         */
 968        if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
 969                /* call mce_severity() to get "msg" for panic */
 970                mce_severity(m, NULL, mca_cfg.tolerant, &msg, true);
 971                mce_panic("Fatal machine check", m, msg);
 972        }
 973
 974        /*
  975         * For a UC error somewhere we let the CPU that detects it handle it.
  976         * We also must let the others continue, otherwise the handling
  977         * CPU could deadlock on a lock.
 978         */
 979
 980        /*
 981         * No machine check event found. Must be some external
 982         * source or one CPU is hung. Panic.
 983         */
 984        if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
 985                mce_panic("Fatal machine check from unknown source", NULL, NULL);
 986
 987        /*
 988         * Now clear all the mces_seen so that they don't reappear on
 989         * the next mce.
 990         */
 991        for_each_possible_cpu(cpu)
 992                memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
 993}
 994
 995static atomic_t global_nwo;
 996
 997/*
 998 * Start of Monarch synchronization. This waits until all CPUs have
 999 * entered the exception handler and then determines if any of them
1000 * saw a fatal event that requires panic. Then it executes them
1001 * in the entry order.
1002 * TBD double check parallel CPU hotunplug
1003 */
1004static int mce_start(int *no_way_out)
1005{
1006        int order;
1007        int cpus = num_online_cpus();
1008        u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1009
1010        if (!timeout)
1011                return -1;
1012
1013        atomic_add(*no_way_out, &global_nwo);
1014        /*
1015         * Rely on the implied barrier below, such that global_nwo
1016         * is updated before mce_callin.
1017         */
1018        order = atomic_inc_return(&mce_callin);
1019        cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
1020
1021        /*
1022         * Wait for everyone.
1023         */
1024        while (atomic_read(&mce_callin) != cpus) {
1025                if (mce_timed_out(&timeout,
1026                                  "Timeout: Not all CPUs entered broadcast exception handler")) {
1027                        atomic_set(&global_nwo, 0);
1028                        return -1;
1029                }
1030                ndelay(SPINUNIT);
1031        }
1032
1033        /*
1034         * mce_callin should be read before global_nwo
1035         */
1036        smp_rmb();
1037
1038        if (order == 1) {
1039                /*
1040                 * Monarch: Starts executing now, the others wait.
1041                 */
1042                atomic_set(&mce_executing, 1);
1043        } else {
1044                /*
1045                 * Subject: Now start the scanning loop one by one in
1046                 * the original callin order.
1047                 * This way when there are any shared banks it will be
1048                 * only seen by one CPU before cleared, avoiding duplicates.
1049                 */
1050                while (atomic_read(&mce_executing) < order) {
1051                        if (mce_timed_out(&timeout,
1052                                          "Timeout: Subject CPUs unable to finish machine check processing")) {
1053                                atomic_set(&global_nwo, 0);
1054                                return -1;
1055                        }
1056                        ndelay(SPINUNIT);
1057                }
1058        }
1059
1060        /*
1061         * Cache the global no_way_out state.
1062         */
1063        *no_way_out = atomic_read(&global_nwo);
1064
1065        return order;
1066}
1067
1068/*
1069 * Synchronize between CPUs after main scanning loop.
1070 * This invokes the bulk of the Monarch processing.
1071 */
1072static int mce_end(int order)
1073{
1074        int ret = -1;
1075        u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1076
1077        if (!timeout)
1078                goto reset;
1079        if (order < 0)
1080                goto reset;
1081
1082        /*
1083         * Allow others to run.
1084         */
1085        atomic_inc(&mce_executing);
1086
1087        if (order == 1) {
1088                /* CHECKME: Can this race with a parallel hotplug? */
1089                int cpus = num_online_cpus();
1090
1091                /*
1092                 * Monarch: Wait for everyone to go through their scanning
1093                 * loops.
1094                 */
1095                while (atomic_read(&mce_executing) <= cpus) {
1096                        if (mce_timed_out(&timeout,
1097                                          "Timeout: Monarch CPU unable to finish machine check processing"))
1098                                goto reset;
1099                        ndelay(SPINUNIT);
1100                }
1101
1102                mce_reign();
1103                barrier();
1104                ret = 0;
1105        } else {
1106                /*
1107                 * Subject: Wait for Monarch to finish.
1108                 */
1109                while (atomic_read(&mce_executing) != 0) {
1110                        if (mce_timed_out(&timeout,
1111                                          "Timeout: Monarch CPU did not finish machine check processing"))
1112                                goto reset;
1113                        ndelay(SPINUNIT);
1114                }
1115
1116                /*
1117                 * Don't reset anything. That's done by the Monarch.
1118                 */
1119                return 0;
1120        }
1121
1122        /*
1123         * Reset all global state.
1124         */
1125reset:
1126        atomic_set(&global_nwo, 0);
1127        atomic_set(&mce_callin, 0);
1128        cpumask_setall(&mce_missing_cpus);
1129        barrier();
1130
1131        /*
1132         * Let others run again.
1133         */
1134        atomic_set(&mce_executing, 0);
1135        return ret;
1136}
1137
1138static void mce_clear_state(unsigned long *toclear)
1139{
1140        int i;
1141
1142        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1143                if (test_bit(i, toclear))
1144                        mce_wrmsrl(msr_ops.status(i), 0);
1145        }
1146}
1147
1148/*
1149 * Cases where we avoid rendezvous handler timeout:
1150 * 1) If this CPU is offline.
1151 *
1152 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1153 *  skip those CPUs which remain looping in the 1st kernel - see
1154 *  crash_nmi_callback().
1155 *
1156 * Note: there still is a small window between kexec-ing and the new,
1157 * kdump kernel establishing a new #MC handler where a broadcasted MCE
1158 * might not get handled properly.
1159 */
1160static noinstr bool mce_check_crashing_cpu(void)
1161{
1162        unsigned int cpu = smp_processor_id();
1163
1164        if (arch_cpu_is_offline(cpu) ||
1165            (crashing_cpu != -1 && crashing_cpu != cpu)) {
1166                u64 mcgstatus;
1167
1168                mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
1169
1170                if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
1171                        if (mcgstatus & MCG_STATUS_LMCES)
1172                                return false;
1173                }
1174
1175                if (mcgstatus & MCG_STATUS_RIPV) {
1176                        __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
1177                        return true;
1178                }
1179        }
1180        return false;
1181}
1182
1183static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
1184                            unsigned long *toclear, unsigned long *valid_banks,
1185                            int no_way_out, int *worst)
1186{
1187        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1188        struct mca_config *cfg = &mca_cfg;
1189        int severity, i;
1190
1191        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1192                __clear_bit(i, toclear);
1193                if (!test_bit(i, valid_banks))
1194                        continue;
1195
1196                if (!mce_banks[i].ctl)
1197                        continue;
1198
1199                m->misc = 0;
1200                m->addr = 0;
1201                m->bank = i;
1202
1203                m->status = mce_rdmsrl(msr_ops.status(i));
1204                if (!(m->status & MCI_STATUS_VAL))
1205                        continue;
1206
1207                /*
1208                 * Corrected or non-signaled errors are handled by
1209                 * machine_check_poll(). Leave them alone, unless this panics.
1210                 */
1211                if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1212                        !no_way_out)
1213                        continue;
1214
1215                /* Set taint even when machine check was not enabled. */
1216                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1217
1218                severity = mce_severity(m, regs, cfg->tolerant, NULL, true);
1219
1220                /*
 1221                 * When the machine check was for the corrected/deferred handler,
 1222                 * don't touch it, unless we're panicking.
1223                 */
1224                if ((severity == MCE_KEEP_SEVERITY ||
1225                     severity == MCE_UCNA_SEVERITY) && !no_way_out)
1226                        continue;
1227
1228                __set_bit(i, toclear);
1229
1230                /* Machine check event was not enabled. Clear, but ignore. */
1231                if (severity == MCE_NO_SEVERITY)
1232                        continue;
1233
1234                mce_read_aux(m, i);
1235
1236                /* assuming valid severity level != 0 */
1237                m->severity = severity;
1238
1239                mce_log(m);
1240
1241                if (severity > *worst) {
1242                        *final = *m;
1243                        *worst = severity;
1244                }
1245        }
1246
1247        /* mce_clear_state will clear *final, save locally for use later */
1248        *m = *final;
1249}
1250
1251static void kill_me_now(struct callback_head *ch)
1252{
1253        force_sig(SIGBUS);
1254}
1255
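    /*
     * task_work callback for a task which consumed poisoned memory: try to
     * recover via memory_failure(); send SIGBUS when recovery fails or when
     * the poison was hit during a kernel copy from user memory.
     */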
1256static void kill_me_maybe(struct callback_head *cb)
1257{
1258        struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
1259        int flags = MF_ACTION_REQUIRED;
1260
1261        pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
1262
1263        if (!p->mce_ripv)
1264                flags |= MF_MUST_KILL;
1265
1266        if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
1267            !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
1268                set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
1269                sync_core();
1270                return;
1271        }
1272
1273        if (p->mce_vaddr != (void __user *)-1l) {
1274                force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
1275        } else {
1276                pr_err("Memory error not recovered");
1277                kill_me_now(cb);
1278        }
1279}
1280
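    /*
     * Stash the error details in the current task and schedule the recovery
     * callback to run via task_work on return to user space.
     */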
1281static void queue_task_work(struct mce *m, int kill_current_task)
1282{
1283        current->mce_addr = m->addr;
1284        current->mce_kflags = m->kflags;
1285        current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
1286        current->mce_whole_page = whole_page(m);
1287
1288        if (kill_current_task)
1289                current->mce_kill_me.func = kill_me_now;
1290        else
1291                current->mce_kill_me.func = kill_me_maybe;
1292
1293        task_work_add(current, &current->mce_kill_me, TWA_RESUME);
1294}
1295
1296/*
1297 * The actual machine check handler. This only handles real
1298 * exceptions when something got corrupted coming in through int 18.
1299 *
1300 * This is executed in NMI context not subject to normal locking rules. This
1301 * implies that most kernel services cannot be safely used. Don't even
1302 * think about putting a printk in there!
1303 *
1304 * On Intel systems this is entered on all CPUs in parallel through
1305 * MCE broadcast. However some CPUs might be broken beyond repair,
 1306 * so always be careful when synchronizing with others.
1307 *
1308 * Tracing and kprobes are disabled: if we interrupted a kernel context
1309 * with IF=1, we need to minimize stack usage.  There are also recursion
1310 * issues: if the machine check was due to a failure of the memory
1311 * backing the user stack, tracing that reads the user stack will cause
1312 * potentially infinite recursion.
1313 */
1314noinstr void do_machine_check(struct pt_regs *regs)
1315{
1316        DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1317        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1318        struct mca_config *cfg = &mca_cfg;
1319        struct mce m, *final;
1320        char *msg = NULL;
1321        int worst = 0;
1322
1323        /*
1324         * Establish sequential order between the CPUs entering the machine
1325         * check handler.
1326         */
1327        int order = -1;
1328
1329        /*
1330         * If no_way_out gets set, there is no safe way to recover from this
1331         * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
1332         */
1333        int no_way_out = 0;
1334
1335        /*
1336         * If kill_current_task is not set, there might be a way to recover from this
1337         * error.
1338         */
1339        int kill_current_task = 0;
1340
1341        /*
1342         * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
1343         * on Intel.
1344         */
1345        int lmce = 1;
1346
1347        this_cpu_inc(mce_exception_count);
1348
1349        mce_gather_info(&m, regs);
1350        m.tsc = rdtsc();
1351
1352        final = this_cpu_ptr(&mces_seen);
1353        *final = m;
1354
1355        memset(valid_banks, 0, sizeof(valid_banks));
1356        no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1357
1358        barrier();
1359
1360        /*
 1361         * When there is no restart IP we might need to kill the task or panic.
1362         * Assume the worst for now, but if we find the
1363         * severity is MCE_AR_SEVERITY we have other options.
1364         */
1365        if (!(m.mcgstatus & MCG_STATUS_RIPV))
1366                kill_current_task = (cfg->tolerant == 3) ? 0 : 1;
1367        /*
1368         * Check if this MCE is signaled to only this logical processor,
1369         * on Intel, Zhaoxin only.
1370         */
1371        if (m.cpuvendor == X86_VENDOR_INTEL ||
1372            m.cpuvendor == X86_VENDOR_ZHAOXIN)
1373                lmce = m.mcgstatus & MCG_STATUS_LMCES;
1374
1375        /*
1376         * Local machine check may already know that we have to panic.
 1377         * Broadcast machine check begins rendezvous in mce_start().
1378         * Go through all banks in exclusion of the other CPUs. This way we
1379         * don't report duplicated events on shared banks because the first one
1380         * to see it will clear it.
1381         */
1382        if (lmce) {
1383                if (no_way_out && cfg->tolerant < 3)
1384                        mce_panic("Fatal local machine check", &m, msg);
1385        } else {
1386                order = mce_start(&no_way_out);
1387        }
1388
1389        __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst);
1390
1391        if (!no_way_out)
1392                mce_clear_state(toclear);
1393
1394        /*
1395         * Do most of the synchronization with other CPUs.
1396         * When there's any problem use only local no_way_out state.
1397         */
1398        if (!lmce) {
1399                if (mce_end(order) < 0) {
1400                        if (!no_way_out)
1401                                no_way_out = worst >= MCE_PANIC_SEVERITY;
1402
1403                        if (no_way_out && cfg->tolerant < 3)
1404                                mce_panic("Fatal machine check on current CPU", &m, msg);
1405                }
1406        } else {
1407                /*
1408                 * If there was a fatal machine check we should have
1409                 * already called mce_panic earlier in this function.
1410                 * Since we re-read the banks, we might have found
1411                 * something new. Check again to see if we found a
1412                 * fatal error. We call "mce_severity()" again to
1413                 * make sure we have the right "msg".
1414                 */
1415                if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
1416                        mce_severity(&m, regs, cfg->tolerant, &msg, true);
1417                        mce_panic("Local fatal machine check!", &m, msg);
1418                }
1419        }
1420
1421        if (worst != MCE_AR_SEVERITY && !kill_current_task)
1422                goto out;
1423
1424        /* Fault was in user mode and we need to take some action */
1425        if ((m.cs & 3) == 3) {
1426                /* If this triggers there is no way to recover. Die hard. */
1427                BUG_ON(!on_thread_stack() || !user_mode(regs));
1428
1429                queue_task_work(&m, kill_current_task);
1430
1431        } else {
1432                /*
1433                 * Handle an MCE which has happened in kernel space but from
1434                 * which the kernel can recover: ex_has_fault_handler() has
1435                 * already verified that the rIP at which the error happened is
1436                 * a rIP from which the kernel can recover (by jumping to
1437                 * recovery code specified in _ASM_EXTABLE_FAULT()) and the
1438                 * corresponding exception handler which would do that is the
1439                 * proper one.
1440                 */
1441                if (m.kflags & MCE_IN_KERNEL_RECOV) {
1442                        if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
1443                                mce_panic("Failed kernel mode recovery", &m, msg);
1444                }
1445
1446                if (m.kflags & MCE_IN_KERNEL_COPYIN)
1447                        queue_task_work(&m, kill_current_task);
1448        }
1449out:
1450        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1451}
1452EXPORT_SYMBOL_GPL(do_machine_check);
1453
1454#ifndef CONFIG_MEMORY_FAILURE
1455int memory_failure(unsigned long pfn, int flags)
1456{
1457        /* mce_severity() should not hand us an ACTION_REQUIRED error */
1458        BUG_ON(flags & MF_ACTION_REQUIRED);
1459        pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1460               "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1461               pfn);
1462
1463        return 0;
1464}
1465#endif
1466
1467/*
1468 * Periodic polling timer for "silent" machine check errors.  If the
1469 * poller finds an MCE, poll 2x faster.  When the poller finds no more
1470 * errors, poll 2x slower (up to check_interval seconds).
1471 */
1472static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1473
1474static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1475static DEFINE_PER_CPU(struct timer_list, mce_timer);
1476
1477static unsigned long mce_adjust_timer_default(unsigned long interval)
1478{
1479        return interval;
1480}
1481
1482static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1483
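    /* Arm the polling timer, but never delay an already-pending earlier expiry. */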
1484static void __start_timer(struct timer_list *t, unsigned long interval)
1485{
1486        unsigned long when = jiffies + interval;
1487        unsigned long flags;
1488
1489        local_irq_save(flags);
1490
1491        if (!timer_pending(t) || time_before(when, t->expires))
1492                mod_timer(t, round_jiffies(when));
1493
1494        local_irq_restore(flags);
1495}
1496
1497static void mce_timer_fn(struct timer_list *t)
1498{
1499        struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
1500        unsigned long iv;
1501
1502        WARN_ON(cpu_t != t);
1503
1504        iv = __this_cpu_read(mce_next_interval);
1505
1506        if (mce_available(this_cpu_ptr(&cpu_info))) {
1507                machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1508
1509                if (mce_intel_cmci_poll()) {
1510                        iv = mce_adjust_timer(iv);
1511                        goto done;
1512                }
1513        }
1514
1515        /*
1516         * Alert userspace if needed. If we logged an MCE, reduce the polling
1517         * interval, otherwise increase the polling interval.
1518         */
1519        if (mce_notify_irq())
1520                iv = max(iv / 2, (unsigned long) HZ/100);
1521        else
1522                iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1523
1524done:
1525        __this_cpu_write(mce_next_interval, iv);
1526        __start_timer(t, iv);
1527}
1528
1529/*
1530 * Ensure that the timer is firing in @interval from now.
1531 */
1532void mce_timer_kick(unsigned long interval)
1533{
1534        struct timer_list *t = this_cpu_ptr(&mce_timer);
1535        unsigned long iv = __this_cpu_read(mce_next_interval);
1536
1537        __start_timer(t, interval);
1538
1539        if (interval < iv)
1540                __this_cpu_write(mce_next_interval, interval);
1541}
1542
1543/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1544static void mce_timer_delete_all(void)
1545{
1546        int cpu;
1547
1548        for_each_online_cpu(cpu)
1549                del_timer_sync(&per_cpu(mce_timer, cpu));
1550}
1551
1552/*
1553 * Notify the user(s) about new machine check events.
1554 * Can be called from interrupt context, but not from machine check/NMI
1555 * context.
1556 */
1557int mce_notify_irq(void)
1558{
1559        /* Not more than two messages every minute */
1560        static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1561
1562        if (test_and_clear_bit(0, &mce_need_notify)) {
1563                mce_work_trigger();
1564
1565                if (__ratelimit(&ratelimit))
1566                        pr_info(HW_ERR "Machine check events logged\n");
1567
1568                return 1;
1569        }
1570        return 0;
1571}
1572EXPORT_SYMBOL_GPL(mce_notify_irq);
1573
1574static void __mcheck_cpu_mce_banks_init(void)
1575{
1576        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1577        u8 n_banks = this_cpu_read(mce_num_banks);
1578        int i;
1579
1580        for (i = 0; i < n_banks; i++) {
1581                struct mce_bank *b = &mce_banks[i];
1582
1583                /*
1584                 * Init them all, __mcheck_cpu_apply_quirks() is going to apply
1585                 * the required vendor quirks before
1586                 * __mcheck_cpu_init_clear_banks() does the final bank setup.
1587                 */
1588                b->ctl = -1ULL;
1589                b->init = true;
1590        }
1591}
1592
1593/*
1594 * Initialize Machine Checks for a CPU.
1595 */
1596static void __mcheck_cpu_cap_init(void)
1597{
1598        u64 cap;
1599        u8 b;
1600
1601        rdmsrl(MSR_IA32_MCG_CAP, cap);
1602
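        /*
         * MCG_CAP fields consumed here (see <asm/mce.h> and the SDM):
         * bits 7:0   MCG_BANKCNT_MASK - number of reporting banks,
         * bit  9     MCG_EXT_P, with bits 23:16 MCG_EXT_CNT - extended
         *            register block including MCG_EIP for accurate RIP,
         * bit  24    MCG_SER_P - software error recovery supported.
         * A hypothetical cap of 0x000000000100000a would thus mean 10 banks
         * and a SER-capable part.
         */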
1603        b = cap & MCG_BANKCNT_MASK;
1604
1605        if (b > MAX_NR_BANKS) {
1606                pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
1607                        smp_processor_id(), MAX_NR_BANKS, b);
1608                b = MAX_NR_BANKS;
1609        }
1610
1611        this_cpu_write(mce_num_banks, b);
1612
1613        __mcheck_cpu_mce_banks_init();
1614
1615        /* Use accurate RIP reporting if available. */
1616        if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1617                mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1618
1619        if (cap & MCG_SER_P)
1620                mca_cfg.ser = 1;
1621}
1622
1623static void __mcheck_cpu_init_generic(void)
1624{
1625        enum mcp_flags m_fl = 0;
1626        mce_banks_t all_banks;
1627        u64 cap;
1628
1629        if (!mca_cfg.bootlog)
1630                m_fl = MCP_DONTLOG;
1631
1632        /*
1633         * Log the machine checks left over from the previous reset.
1634         */
1635        bitmap_fill(all_banks, MAX_NR_BANKS);
1636        machine_check_poll(MCP_UC | m_fl, &all_banks);
1637
1638        cr4_set_bits(X86_CR4_MCE);
1639
1640        rdmsrl(MSR_IA32_MCG_CAP, cap);
1641        if (cap & MCG_CTL_P)
1642                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1643}
1644
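/*
 * Program the per-bank control/status registers through msr_ops, so the same
 * loop works for both register layouts: the legacy IA32_MCi_CTL/IA32_MCi_STATUS
 * pairs (MSR 0x400 + 4*i and 0x401 + 4*i), and the MSR_AMD64_SMCA_MCx_* block
 * selected in __mcheck_cpu_init_early() on SMCA-capable parts. Clearing
 * MCi_STATUS drops anything still latched from before this (re)initialization.
 */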
1645static void __mcheck_cpu_init_clear_banks(void)
1646{
1647        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1648        int i;
1649
1650        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1651                struct mce_bank *b = &mce_banks[i];
1652
1653                if (!b->init)
1654                        continue;
1655                wrmsrl(msr_ops.ctl(i), b->ctl);
1656                wrmsrl(msr_ops.status(i), 0);
1657        }
1658}
1659
1660/*
1661 * Do a final check to see if there are any unused/RAZ banks.
1662 *
1663 * This must be done after the banks have been initialized and any quirks have
1664 * been applied.
1665 *
1666 * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs.
1667 * Otherwise, a user who disables a bank will not be able to re-enable it
1668 * without a system reboot.
1669 */
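/*
 * Sketch of the effect: __mcheck_cpu_init_clear_banks() wrote b->ctl (set to
 * -1ULL, minus quirks) into MCi_CTL; if that register now reads back as 0 the
 * bank is treated as read-as-zero/unused, b->init is cleared, and its sysfs
 * bankN attribute (see show_bank()/set_bank() below) reports -ENODEV.
 */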
1670static void __mcheck_cpu_check_banks(void)
1671{
1672        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1673        u64 msrval;
1674        int i;
1675
1676        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1677                struct mce_bank *b = &mce_banks[i];
1678
1679                if (!b->init)
1680                        continue;
1681
1682                rdmsrl(msr_ops.ctl(i), msrval);
1683                b->init = !!msrval;
1684        }
1685}
1686
1687/*
1688 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1689 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1690 * Vol 3B Table 15-20). But this confuses both the code that determines
1691 * whether the machine check occurred in kernel or user mode, and also
1692 * the severity assessment code. Pretend that EIPV was set, and take the
1693 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1694 */
1695static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1696{
1697        if (bank != 0)
1698                return;
1699        if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1700                return;
1701        if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1702                          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1703                          MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1704                          MCACOD)) !=
1705                         (MCI_STATUS_UC|MCI_STATUS_EN|
1706                          MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1707                          MCI_STATUS_AR|MCACOD_INSTR))
1708                return;
1709
1710        m->mcgstatus |= MCG_STATUS_EIPV;
1711        m->ip = regs->ip;
1712        m->cs = regs->cs;
1713}
1714
1715/* Add per CPU specific workarounds here */
1716static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1717{
1718        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1719        struct mca_config *cfg = &mca_cfg;
1720
1721        if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1722                pr_info("unknown CPU type - not enabling MCE support\n");
1723                return -EOPNOTSUPP;
1724        }
1725
1726        /* This should be disabled by the BIOS, but isn't always */
1727        if (c->x86_vendor == X86_VENDOR_AMD) {
1728                if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
1729                        /*
1730                         * disable GART TBL walk error reporting, which
1731                         * trips off incorrectly with the IOMMU & 3ware
1732                         * & Cerberus:
1733                         */
1734                        clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1735                }
1736                if (c->x86 < 0x11 && cfg->bootlog < 0) {
1737                        /*
1738                         * Lots of broken BIOSes around that don't clear them
1739                         * by default and leave crap in there. Don't log:
1740                         */
1741                        cfg->bootlog = 0;
1742                }
1743                /*
1744                 * Various K7s with broken bank 0 around. Always disable
1745                 * by default.
1746                 */
1747                if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
1748                        mce_banks[0].ctl = 0;
1749
1750                /*
1751                 * overflow_recov is supported for F15h Models 00h-0fh
1752                 * even though we don't have a CPUID bit for it.
1753                 */
1754                if (c->x86 == 0x15 && c->x86_model <= 0xf)
1755                        mce_flags.overflow_recov = 1;
1757        }
1758
1759        if (c->x86_vendor == X86_VENDOR_INTEL) {
1760                /*
1761                 * SDM documents that on family 6 bank 0 should not be written
1762                 * because it aliases to another special BIOS controlled
1763                 * register.
1764                 * But it's not aliased anymore on model 0x1a+.
1765                 * Don't ignore bank 0 completely because there could be a
1766                 * valid event later, merely don't write CTL0.
1767                 */
1768
1769                if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
1770                        mce_banks[0].init = false;
1771
1772                /*
1773                 * All newer Intel systems support MCE broadcasting. Enable
1774                 * synchronization with a one second timeout.
1775                 */
1776                if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1777                        cfg->monarch_timeout < 0)
1778                        cfg->monarch_timeout = USEC_PER_SEC;
1779
1780                /*
1781                 * There are also broken BIOSes on some Pentium M and
1782                 * earlier systems:
1783                 */
1784                if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1785                        cfg->bootlog = 0;
1786
1787                if (c->x86 == 6 && c->x86_model == 45)
1788                        quirk_no_way_out = quirk_sandybridge_ifu;
1789        }
1790
1791        if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
1792                /*
1793                 * All newer Zhaoxin CPUs support MCE broadcasting. Enable
1794                 * synchronization with a one second timeout.
1795                 */
1796                if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1797                        if (cfg->monarch_timeout < 0)
1798                                cfg->monarch_timeout = USEC_PER_SEC;
1799                }
1800        }
1801
1802        if (cfg->monarch_timeout < 0)
1803                cfg->monarch_timeout = 0;
1804        if (cfg->bootlog != 0)
1805                cfg->panic_timeout = 30;
1806
1807        return 0;
1808}
1809
1810static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1811{
1812        if (c->x86 != 5)
1813                return 0;
1814
1815        switch (c->x86_vendor) {
1816        case X86_VENDOR_INTEL:
1817                intel_p5_mcheck_init(c);
1818                return 1;
1819        case X86_VENDOR_CENTAUR:
1820                winchip_mcheck_init(c);
1821                return 1;
1822        default:
1823                return 0;
1824        }
1827}
1828
1829/*
1830 * Init basic CPU features needed for early decoding of MCEs.
1831 */
1832static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
1833{
1834        if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
1835                mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
1836                mce_flags.succor         = !!cpu_has(c, X86_FEATURE_SUCCOR);
1837                mce_flags.smca           = !!cpu_has(c, X86_FEATURE_SMCA);
1838                mce_flags.amd_threshold  = 1;
1839
1840                if (mce_flags.smca) {
1841                        msr_ops.ctl     = smca_ctl_reg;
1842                        msr_ops.status  = smca_status_reg;
1843                        msr_ops.addr    = smca_addr_reg;
1844                        msr_ops.misc    = smca_misc_reg;
1845                }
1846        }
1847}
1848
1849static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
1850{
1851        struct mca_config *cfg = &mca_cfg;
1852
1853        /*
1854         * All newer Centaur CPUs support MCE broadcasting. Enable
1855         * synchronization with a one second timeout.
1856         */
1857        if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
1858             c->x86 > 6) {
1859                if (cfg->monarch_timeout < 0)
1860                        cfg->monarch_timeout = USEC_PER_SEC;
1861        }
1862}
1863
1864static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
1865{
1866        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1867
1868        /*
1869         * These CPUs have MCA bank 8 which reports only one error type called
1870         * SVAD (System View Address Decoder). The reporting of that error is
1871         * controlled by IA32_MC8.CTL.0.
1872         *
1873         * If enabled, prefetching on these CPUs will cause SVAD MCE when
1874         * virtual machines start and result in a system panic. Always disable
1875         * bank 8 SVAD error by default.
1876         */
1877        if ((c->x86 == 7 && c->x86_model == 0x1b) ||
1878            (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1879                if (this_cpu_read(mce_num_banks) > 8)
1880                        mce_banks[8].ctl = 0;
1881        }
1882
1883        intel_init_cmci();
1884        intel_init_lmce();
1885        mce_adjust_timer = cmci_intel_adjust_timer;
1886}
1887
1888static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
1889{
1890        intel_clear_lmce();
1891}
1892
1893static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1894{
1895        switch (c->x86_vendor) {
1896        case X86_VENDOR_INTEL:
1897                mce_intel_feature_init(c);
1898                mce_adjust_timer = cmci_intel_adjust_timer;
1899                break;
1900
1901        case X86_VENDOR_AMD: {
1902                mce_amd_feature_init(c);
1903                break;
1904                }
1905
1906        case X86_VENDOR_HYGON:
1907                mce_hygon_feature_init(c);
1908                break;
1909
1910        case X86_VENDOR_CENTAUR:
1911                mce_centaur_feature_init(c);
1912                break;
1913
1914        case X86_VENDOR_ZHAOXIN:
1915                mce_zhaoxin_feature_init(c);
1916                break;
1917
1918        default:
1919                break;
1920        }
1921}
1922
1923static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1924{
1925        switch (c->x86_vendor) {
1926        case X86_VENDOR_INTEL:
1927                mce_intel_feature_clear(c);
1928                break;
1929
1930        case X86_VENDOR_ZHAOXIN:
1931                mce_zhaoxin_feature_clear(c);
1932                break;
1933
1934        default:
1935                break;
1936        }
1937}
1938
1939static void mce_start_timer(struct timer_list *t)
1940{
1941        unsigned long iv = check_interval * HZ;
1942
1943        if (mca_cfg.ignore_ce || !iv)
1944                return;
1945
1946        this_cpu_write(mce_next_interval, iv);
1947        __start_timer(t, iv);
1948}
1949
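/*
 * Two timer-init helpers: __mcheck_cpu_setup_timer() only prepares the
 * per-CPU timer on the boot path (it is armed later from mce_cpu_online()
 * via mce_start_timer()), while __mcheck_cpu_init_timer() also starts it and
 * is used on restart paths such as mce_cpu_restart() and mce_enable_ce().
 */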
1950static void __mcheck_cpu_setup_timer(void)
1951{
1952        struct timer_list *t = this_cpu_ptr(&mce_timer);
1953
1954        timer_setup(t, mce_timer_fn, TIMER_PINNED);
1955}
1956
1957static void __mcheck_cpu_init_timer(void)
1958{
1959        struct timer_list *t = this_cpu_ptr(&mce_timer);
1960
1961        timer_setup(t, mce_timer_fn, TIMER_PINNED);
1962        mce_start_timer(t);
1963}
1964
1965bool filter_mce(struct mce *m)
1966{
1967        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1968                return amd_filter_mce(m);
1969        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1970                return intel_filter_mce(m);
1971
1972        return false;
1973}
1974
1975/* Handle unconfigured int18 (should never happen) */
1976static noinstr void unexpected_machine_check(struct pt_regs *regs)
1977{
1978        instrumentation_begin();
1979        pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1980               smp_processor_id());
1981        instrumentation_end();
1982}
1983
1984/* Call the installed machine check handler for this CPU setup. */
1985void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
1986
1987static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
1988{
1989        irqentry_state_t irq_state;
1990
1991        WARN_ON_ONCE(user_mode(regs));
1992
1993        /*
1994         * Only required when from kernel mode. See
1995         * mce_check_crashing_cpu() for details.
1996         */
1997        if (machine_check_vector == do_machine_check &&
1998            mce_check_crashing_cpu())
1999                return;
2000
2001        irq_state = irqentry_nmi_enter(regs);
2002        /*
2003         * The call targets are marked noinstr, but objtool can't figure
2004         * that out because it's an indirect call. Annotate it.
2005         */
2006        instrumentation_begin();
2007
2008        machine_check_vector(regs);
2009
2010        instrumentation_end();
2011        irqentry_nmi_exit(regs, irq_state);
2012}
2013
2014static __always_inline void exc_machine_check_user(struct pt_regs *regs)
2015{
2016        irqentry_enter_from_user_mode(regs);
2017        instrumentation_begin();
2018
2019        machine_check_vector(regs);
2020
2021        instrumentation_end();
2022        irqentry_exit_to_user_mode(regs);
2023}
2024
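/*
 * #MC entry points: 64-bit kernels use separate IDT entries for machine
 * checks raised in kernel and in user mode, 32-bit has a single raw entry
 * that dispatches on user_mode(regs). All of them save and clear DR7 via
 * local_db_save() first, presumably so that a hardware breakpoint cannot
 * raise #DB in the middle of machine check handling.
 */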
2025#ifdef CONFIG_X86_64
2026/* MCE hit kernel mode */
2027DEFINE_IDTENTRY_MCE(exc_machine_check)
2028{
2029        unsigned long dr7;
2030
2031        dr7 = local_db_save();
2032        exc_machine_check_kernel(regs);
2033        local_db_restore(dr7);
2034}
2035
2036/* The user mode variant. */
2037DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
2038{
2039        unsigned long dr7;
2040
2041        dr7 = local_db_save();
2042        exc_machine_check_user(regs);
2043        local_db_restore(dr7);
2044}
2045#else
2046/* 32bit unified entry point */
2047DEFINE_IDTENTRY_RAW(exc_machine_check)
2048{
2049        unsigned long dr7;
2050
2051        dr7 = local_db_save();
2052        if (user_mode(regs))
2053                exc_machine_check_user(regs);
2054        else
2055                exc_machine_check_kernel(regs);
2056        local_db_restore(dr7);
2057}
2058#endif
2059
2060/*
2061 * Called for each booted CPU to set up machine checks.
2062 * Must be called with preempt off:
2063 */
2064void mcheck_cpu_init(struct cpuinfo_x86 *c)
2065{
2066        if (mca_cfg.disabled)
2067                return;
2068
2069        if (__mcheck_cpu_ancient_init(c))
2070                return;
2071
2072        if (!mce_available(c))
2073                return;
2074
2075        __mcheck_cpu_cap_init();
2076
2077        if (__mcheck_cpu_apply_quirks(c) < 0) {
2078                mca_cfg.disabled = 1;
2079                return;
2080        }
2081
2082        if (mce_gen_pool_init()) {
2083                mca_cfg.disabled = 1;
2084                pr_emerg("Couldn't allocate MCE records pool!\n");
2085                return;
2086        }
2087
2088        machine_check_vector = do_machine_check;
2089
2090        __mcheck_cpu_init_early(c);
2091        __mcheck_cpu_init_generic();
2092        __mcheck_cpu_init_vendor(c);
2093        __mcheck_cpu_init_clear_banks();
2094        __mcheck_cpu_check_banks();
2095        __mcheck_cpu_setup_timer();
2096}
2097
2098/*
2099 * Called for each booted CPU to clear some machine checks opt-ins
2100 */
2101void mcheck_cpu_clear(struct cpuinfo_x86 *c)
2102{
2103        if (mca_cfg.disabled)
2104                return;
2105
2106        if (!mce_available(c))
2107                return;
2108
2109        /*
2110         * Possibly to clear general settings generic to x86
2111         * __mcheck_cpu_clear_generic(c);
2112         */
2113        __mcheck_cpu_clear_vendor(c);
2115}
2116
2117static void __mce_disable_bank(void *arg)
2118{
2119        int bank = *((int *)arg);
2120        __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
2121        cmci_disable_bank(bank);
2122}
2123
2124void mce_disable_bank(int bank)
2125{
2126        if (bank >= this_cpu_read(mce_num_banks)) {
2127                pr_warn(FW_BUG
2128                        "Ignoring request to disable invalid MCA bank %d.\n",
2129                        bank);
2130                return;
2131        }
2132        set_bit(bank, mce_banks_ce_disabled);
2133        on_each_cpu(__mce_disable_bank, &bank, 1);
2134}
2135
2136/*
2137 * mce=off Disables machine check
2138 * mce=no_cmci Disables CMCI
2139 * mce=no_lmce Disables LMCE
2140 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2141 * mce=print_all Print all machine check logs to console
2142 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2143 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
2144 *      monarchtimeout is how long to wait for other CPUs on machine
2145 *      check, or 0 to not wait
2146 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
2147 *      and older.
2148 * mce=nobootlog Don't log MCEs from before booting.
2149 * mce=bios_cmci_threshold Don't program the CMCI threshold
2150 * mce=recovery force enable copy_mc_fragile()
2151 */
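/*
 * A few illustrative command lines (each passed as its own mce= parameter):
 *
 *   mce=off
 *   mce=no_cmci
 *   mce=2,500000      tolerant=2, monarch_timeout=500000 usecs
 *   mce=nobootlog
 */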
2152static int __init mcheck_enable(char *str)
2153{
2154        struct mca_config *cfg = &mca_cfg;
2155
2156        if (*str == 0) {
2157                enable_p5_mce();
2158                return 1;
2159        }
2160        if (*str == '=')
2161                str++;
2162        if (!strcmp(str, "off"))
2163                cfg->disabled = 1;
2164        else if (!strcmp(str, "no_cmci"))
2165                cfg->cmci_disabled = true;
2166        else if (!strcmp(str, "no_lmce"))
2167                cfg->lmce_disabled = 1;
2168        else if (!strcmp(str, "dont_log_ce"))
2169                cfg->dont_log_ce = true;
2170        else if (!strcmp(str, "print_all"))
2171                cfg->print_all = true;
2172        else if (!strcmp(str, "ignore_ce"))
2173                cfg->ignore_ce = true;
2174        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2175                cfg->bootlog = (str[0] == 'b');
2176        else if (!strcmp(str, "bios_cmci_threshold"))
2177                cfg->bios_cmci_threshold = 1;
2178        else if (!strcmp(str, "recovery"))
2179                cfg->recovery = 1;
2180        else if (isdigit(str[0])) {
2181                if (get_option(&str, &cfg->tolerant) == 2)
2182                        get_option(&str, &(cfg->monarch_timeout));
2183        } else {
2184                pr_info("mce argument %s ignored. Please use /sys\n", str);
2185                return 0;
2186        }
2187        return 1;
2188}
2189__setup("mce", mcheck_enable);
2190
2191int __init mcheck_init(void)
2192{
2193        mce_register_decode_chain(&early_nb);
2194        mce_register_decode_chain(&mce_uc_nb);
2195        mce_register_decode_chain(&mce_default_nb);
2196        mcheck_vendor_init_severity();
2197
2198        INIT_WORK(&mce_work, mce_gen_pool_process);
2199        init_irq_work(&mce_irq_work, mce_irq_work_cb);
2200
2201        return 0;
2202}
2203
2204/*
2205 * mce_syscore: PM support
2206 */
2207
2208/*
2209 * Disable machine checks on suspend and shutdown. We can't really handle
2210 * them later.
2211 */
2212static void mce_disable_error_reporting(void)
2213{
2214        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2215        int i;
2216
2217        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2218                struct mce_bank *b = &mce_banks[i];
2219
2220                if (b->init)
2221                        wrmsrl(msr_ops.ctl(i), 0);
2222        }
2224}
2225
2226static void vendor_disable_error_reporting(void)
2227{
2228        /*
2229         * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these
2230         * MSRs are socket-wide. Disabling them for just a single offlined CPU
2231         * is bad, since it will inhibit reporting for all shared resources on
2232         * the socket like the last level cache (LLC), the integrated memory
2233         * controller (iMC), etc.
2234         */
2235        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
2236            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
2237            boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2238            boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
2239                return;
2240
2241        mce_disable_error_reporting();
2242}
2243
2244static int mce_syscore_suspend(void)
2245{
2246        vendor_disable_error_reporting();
2247        return 0;
2248}
2249
2250static void mce_syscore_shutdown(void)
2251{
2252        vendor_disable_error_reporting();
2253}
2254
2255/*
2256 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2257 * Only one CPU is active at this time, the others get re-added later using
2258 * CPU hotplug:
2259 */
2260static void mce_syscore_resume(void)
2261{
2262        __mcheck_cpu_init_generic();
2263        __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2264        __mcheck_cpu_init_clear_banks();
2265}
2266
2267static struct syscore_ops mce_syscore_ops = {
2268        .suspend        = mce_syscore_suspend,
2269        .shutdown       = mce_syscore_shutdown,
2270        .resume         = mce_syscore_resume,
2271};
2272
2273/*
2274 * mce_device: Sysfs support
2275 */
2276
2277static void mce_cpu_restart(void *data)
2278{
2279        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2280                return;
2281        __mcheck_cpu_init_generic();
2282        __mcheck_cpu_init_clear_banks();
2283        __mcheck_cpu_init_timer();
2284}
2285
2286/* Reinit MCEs after user configuration changes */
2287static void mce_restart(void)
2288{
2289        mce_timer_delete_all();
2290        on_each_cpu(mce_cpu_restart, NULL, 1);
2291}
2292
2293/* Toggle features for corrected errors */
2294static void mce_disable_cmci(void *data)
2295{
2296        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2297                return;
2298        cmci_clear();
2299}
2300
2301static void mce_enable_ce(void *all)
2302{
2303        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2304                return;
2305        cmci_reenable();
2306        cmci_recheck();
2307        if (all)
2308                __mcheck_cpu_init_timer();
2309}
2310
2311static struct bus_type mce_subsys = {
2312        .name           = "machinecheck",
2313        .dev_name       = "machinecheck",
2314};
2315
2316DEFINE_PER_CPU(struct device *, mce_device);
2317
2318static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
2319{
2320        return container_of(attr, struct mce_bank_dev, attr);
2321}
2322
2323static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2324                         char *buf)
2325{
2326        u8 bank = attr_to_bank(attr)->bank;
2327        struct mce_bank *b;
2328
2329        if (bank >= per_cpu(mce_num_banks, s->id))
2330                return -EINVAL;
2331
2332        b = &per_cpu(mce_banks_array, s->id)[bank];
2333
2334        if (!b->init)
2335                return -ENODEV;
2336
2337        return sprintf(buf, "%llx\n", b->ctl);
2338}
2339
2340static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2341                        const char *buf, size_t size)
2342{
2343        u8 bank = attr_to_bank(attr)->bank;
2344        struct mce_bank *b;
2345        u64 new;
2346
2347        if (kstrtou64(buf, 0, &new) < 0)
2348                return -EINVAL;
2349
2350        if (bank >= per_cpu(mce_num_banks, s->id))
2351                return -EINVAL;
2352
2353        b = &per_cpu(mce_banks_array, s->id)[bank];
2354
2355        if (!b->init)
2356                return -ENODEV;
2357
2358        b->ctl = new;
2359        mce_restart();
2360
2361        return size;
2362}
2363
2364static ssize_t set_ignore_ce(struct device *s,
2365                             struct device_attribute *attr,
2366                             const char *buf, size_t size)
2367{
2368        u64 new;
2369
2370        if (kstrtou64(buf, 0, &new) < 0)
2371                return -EINVAL;
2372
2373        mutex_lock(&mce_sysfs_mutex);
2374        if (mca_cfg.ignore_ce ^ !!new) {
2375                if (new) {
2376                        /* disable ce features */
2377                        mce_timer_delete_all();
2378                        on_each_cpu(mce_disable_cmci, NULL, 1);
2379                        mca_cfg.ignore_ce = true;
2380                } else {
2381                        /* enable ce features */
2382                        mca_cfg.ignore_ce = false;
2383                        on_each_cpu(mce_enable_ce, (void *)1, 1);
2384                }
2385        }
2386        mutex_unlock(&mce_sysfs_mutex);
2387
2388        return size;
2389}
2390
2391static ssize_t set_cmci_disabled(struct device *s,
2392                                 struct device_attribute *attr,
2393                                 const char *buf, size_t size)
2394{
2395        u64 new;
2396
2397        if (kstrtou64(buf, 0, &new) < 0)
2398                return -EINVAL;
2399
2400        mutex_lock(&mce_sysfs_mutex);
2401        if (mca_cfg.cmci_disabled ^ !!new) {
2402                if (new) {
2403                        /* disable cmci */
2404                        on_each_cpu(mce_disable_cmci, NULL, 1);
2405                        mca_cfg.cmci_disabled = true;
2406                } else {
2407                        /* enable cmci */
2408                        mca_cfg.cmci_disabled = false;
2409                        on_each_cpu(mce_enable_ce, NULL, 1);
2410                }
2411        }
2412        mutex_unlock(&mce_sysfs_mutex);
2413
2414        return size;
2415}
2416
2417static ssize_t store_int_with_restart(struct device *s,
2418                                      struct device_attribute *attr,
2419                                      const char *buf, size_t size)
2420{
2421        unsigned long old_check_interval = check_interval;
2422        ssize_t ret = device_store_ulong(s, attr, buf, size);
2423
2424        if (check_interval == old_check_interval)
2425                return ret;
2426
2427        mutex_lock(&mce_sysfs_mutex);
2428        mce_restart();
2429        mutex_unlock(&mce_sysfs_mutex);
2430
2431        return ret;
2432}
2433
2434static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2435static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2436static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2437static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);
2438
2439static struct dev_ext_attribute dev_attr_check_interval = {
2440        __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2441        &check_interval
2442};
2443
2444static struct dev_ext_attribute dev_attr_ignore_ce = {
2445        __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2446        &mca_cfg.ignore_ce
2447};
2448
2449static struct dev_ext_attribute dev_attr_cmci_disabled = {
2450        __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2451        &mca_cfg.cmci_disabled
2452};
2453
2454static struct device_attribute *mce_device_attrs[] = {
2455        &dev_attr_tolerant.attr,
2456        &dev_attr_check_interval.attr,
2457#ifdef CONFIG_X86_MCELOG_LEGACY
2458        &dev_attr_trigger,
2459#endif
2460        &dev_attr_monarch_timeout.attr,
2461        &dev_attr_dont_log_ce.attr,
2462        &dev_attr_print_all.attr,
2463        &dev_attr_ignore_ce.attr,
2464        &dev_attr_cmci_disabled.attr,
2465        NULL
2466};
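/*
 * Together with the per-bank bankN files added in mce_device_create(), these
 * attributes appear under the "machinecheck" subsystem, e.g. (assuming the
 * usual sysfs mount point):
 *
 *   /sys/devices/system/machinecheck/machinecheck0/check_interval
 *   /sys/devices/system/machinecheck/machinecheck0/bank4
 *
 * Writing a bankN file updates b->ctl and triggers mce_restart(); writing
 * check_interval goes through store_int_with_restart() above.
 */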
2467
2468static cpumask_var_t mce_device_initialized;
2469
2470static void mce_device_release(struct device *dev)
2471{
2472        kfree(dev);
2473}
2474
2475/* Per CPU device init. All of the CPUs still share the same bank device: */
2476static int mce_device_create(unsigned int cpu)
2477{
2478        struct device *dev;
2479        int err;
2480        int i, j;
2481
2482        if (!mce_available(&boot_cpu_data))
2483                return -EIO;
2484
2485        dev = per_cpu(mce_device, cpu);
2486        if (dev)
2487                return 0;
2488
2489        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2490        if (!dev)
2491                return -ENOMEM;
2492        dev->id  = cpu;
2493        dev->bus = &mce_subsys;
2494        dev->release = &mce_device_release;
2495
2496        err = device_register(dev);
2497        if (err) {
2498                put_device(dev);
2499                return err;
2500        }
2501
2502        for (i = 0; mce_device_attrs[i]; i++) {
2503                err = device_create_file(dev, mce_device_attrs[i]);
2504                if (err)
2505                        goto error;
2506        }
2507        for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
2508                err = device_create_file(dev, &mce_bank_devs[j].attr);
2509                if (err)
2510                        goto error2;
2511        }
2512        cpumask_set_cpu(cpu, mce_device_initialized);
2513        per_cpu(mce_device, cpu) = dev;
2514
2515        return 0;
2516error2:
2517        while (--j >= 0)
2518                device_remove_file(dev, &mce_bank_devs[j].attr);
2519error:
2520        while (--i >= 0)
2521                device_remove_file(dev, mce_device_attrs[i]);
2522
2523        device_unregister(dev);
2524
2525        return err;
2526}
2527
2528static void mce_device_remove(unsigned int cpu)
2529{
2530        struct device *dev = per_cpu(mce_device, cpu);
2531        int i;
2532
2533        if (!cpumask_test_cpu(cpu, mce_device_initialized))
2534                return;
2535
2536        for (i = 0; mce_device_attrs[i]; i++)
2537                device_remove_file(dev, mce_device_attrs[i]);
2538
2539        for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
2540                device_remove_file(dev, &mce_bank_devs[i].attr);
2541
2542        device_unregister(dev);
2543        cpumask_clear_cpu(cpu, mce_device_initialized);
2544        per_cpu(mce_device, cpu) = NULL;
2545}
2546
2547/* Make sure there are no machine checks on offlined CPUs. */
2548static void mce_disable_cpu(void)
2549{
2550        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2551                return;
2552
2553        if (!cpuhp_tasks_frozen)
2554                cmci_clear();
2555
2556        vendor_disable_error_reporting();
2557}
2558
2559static void mce_reenable_cpu(void)
2560{
2561        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2562        int i;
2563
2564        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2565                return;
2566
2567        if (!cpuhp_tasks_frozen)
2568                cmci_reenable();
2569        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2570                struct mce_bank *b = &mce_banks[i];
2571
2572                if (b->init)
2573                        wrmsrl(msr_ops.ctl(i), b->ctl);
2574        }
2575}
2576
2577static int mce_cpu_dead(unsigned int cpu)
2578{
2579        mce_intel_hcpu_update(cpu);
2580
2581        /* intentionally ignoring frozen here */
2582        if (!cpuhp_tasks_frozen)
2583                cmci_rediscover();
2584        return 0;
2585}
2586
2587static int mce_cpu_online(unsigned int cpu)
2588{
2589        struct timer_list *t = this_cpu_ptr(&mce_timer);
2590        int ret;
2591
2592        mce_device_create(cpu);
2593
2594        ret = mce_threshold_create_device(cpu);
2595        if (ret) {
2596                mce_device_remove(cpu);
2597                return ret;
2598        }
2599        mce_reenable_cpu();
2600        mce_start_timer(t);
2601        return 0;
2602}
2603
2604static int mce_cpu_pre_down(unsigned int cpu)
2605{
2606        struct timer_list *t = this_cpu_ptr(&mce_timer);
2607
2608        mce_disable_cpu();
2609        del_timer_sync(t);
2610        mce_threshold_remove_device(cpu);
2611        mce_device_remove(cpu);
2612        return 0;
2613}
2614
2615static __init void mce_init_banks(void)
2616{
2617        int i;
2618
2619        for (i = 0; i < MAX_NR_BANKS; i++) {
2620                struct mce_bank_dev *b = &mce_bank_devs[i];
2621                struct device_attribute *a = &b->attr;
2622
2623                b->bank = i;
2624
2625                sysfs_attr_init(&a->attr);
2626                a->attr.name    = b->attrname;
2627                snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2628
2629                a->attr.mode    = 0644;
2630                a->show         = show_bank;
2631                a->store        = set_bank;
2632        }
2633}
2634
2635/*
2636 * When running on XEN, this initcall is ordered against the XEN mcelog
2637 * initcall:
2638 *
2639 *   device_initcall(xen_late_init_mcelog);
2640 *   device_initcall_sync(mcheck_init_device);
2641 */
2642static __init int mcheck_init_device(void)
2643{
2644        int err;
2645
2646        /*
2647         * Check if we have a spare virtual bit. This will only become
2648         * a problem if/when we move beyond 5-level page tables.
2649         */
2650        MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2651
2652        if (!mce_available(&boot_cpu_data)) {
2653                err = -EIO;
2654                goto err_out;
2655        }
2656
2657        if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2658                err = -ENOMEM;
2659                goto err_out;
2660        }
2661
2662        mce_init_banks();
2663
2664        err = subsys_system_register(&mce_subsys, NULL);
2665        if (err)
2666                goto err_out_mem;
2667
2668        err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2669                                mce_cpu_dead);
2670        if (err)
2671                goto err_out_mem;
2672
2673        /*
2674         * Invokes mce_cpu_online() on all CPUs which are online when
2675         * the state is installed.
2676         */
2677        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2678                                mce_cpu_online, mce_cpu_pre_down);
2679        if (err < 0)
2680                goto err_out_online;
2681
2682        register_syscore_ops(&mce_syscore_ops);
2683
2684        return 0;
2685
2686err_out_online:
2687        cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2688
2689err_out_mem:
2690        free_cpumask_var(mce_device_initialized);
2691
2692err_out:
2693        pr_err("Unable to init MCE device (rc: %d)\n", err);
2694
2695        return err;
2696}
2697device_initcall_sync(mcheck_init_device);
2698
2699/*
2700 * Old style boot options parsing. Only for compatibility.
2701 */
2702static int __init mcheck_disable(char *str)
2703{
2704        mca_cfg.disabled = 1;
2705        return 1;
2706}
2707__setup("nomce", mcheck_disable);
2708
2709#ifdef CONFIG_DEBUG_FS
2710struct dentry *mce_get_debugfs_dir(void)
2711{
2712        static struct dentry *dmce;
2713
2714        if (!dmce)
2715                dmce = debugfs_create_dir("mce", NULL);
2716
2717        return dmce;
2718}
2719
2720static void mce_reset(void)
2721{
2722        cpu_missing = 0;
2723        atomic_set(&mce_fake_panicked, 0);
2724        atomic_set(&mce_executing, 0);
2725        atomic_set(&mce_callin, 0);
2726        atomic_set(&global_nwo, 0);
2727        cpumask_setall(&mce_missing_cpus);
2728}
2729
2730static int fake_panic_get(void *data, u64 *val)
2731{
2732        *val = fake_panic;
2733        return 0;
2734}
2735
2736static int fake_panic_set(void *data, u64 val)
2737{
2738        mce_reset();
2739        fake_panic = val;
2740        return 0;
2741}
2742
2743DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
2744                         "%llu\n");
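/*
 * With debugfs mounted in the usual place this is exposed as
 * /sys/kernel/debug/mce/fake_panic. Writing it resets the rendezvous state
 * via mce_reset(); a non-zero value then makes mce_panic() print a
 * "Fake kernel panic" message instead of actually panicking, which is
 * intended for testing the panic paths.
 */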
2745
2746static void __init mcheck_debugfs_init(void)
2747{
2748        struct dentry *dmce;
2749
2750        dmce = mce_get_debugfs_dir();
2751        debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL,
2752                                   &fake_panic_fops);
2753}
2754#else
2755static void __init mcheck_debugfs_init(void) { }
2756#endif
2757
2758static int __init mcheck_late_init(void)
2759{
2760        if (mca_cfg.recovery)
2761                enable_copy_mc_fragile();
2762
2763        mcheck_debugfs_init();
2764
2765        /*
2766         * Flush out everything that has been logged during early boot, now that
2767         * everything has been initialized (workqueues, decoders, ...).
2768         */
2769        mce_schedule_work();
2770
2771        return 0;
2772}
2773late_initcall(mcheck_late_init);
2774