linux/arch/x86/kernel/cpu/mce/core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Machine check handler.
   4 *
   5 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
   6 * Rest from unknown author(s).
   7 * 2004 Andi Kleen. Rewrote most of it.
   8 * Copyright 2008 Intel Corporation
   9 * Author: Andi Kleen
  10 */
  11
  12#include <linux/thread_info.h>
  13#include <linux/capability.h>
  14#include <linux/miscdevice.h>
  15#include <linux/ratelimit.h>
  16#include <linux/rcupdate.h>
  17#include <linux/kobject.h>
  18#include <linux/uaccess.h>
  19#include <linux/kdebug.h>
  20#include <linux/kernel.h>
  21#include <linux/percpu.h>
  22#include <linux/string.h>
  23#include <linux/device.h>
  24#include <linux/syscore_ops.h>
  25#include <linux/delay.h>
  26#include <linux/ctype.h>
  27#include <linux/sched.h>
  28#include <linux/sysfs.h>
  29#include <linux/types.h>
  30#include <linux/slab.h>
  31#include <linux/init.h>
  32#include <linux/kmod.h>
  33#include <linux/poll.h>
  34#include <linux/nmi.h>
  35#include <linux/cpu.h>
  36#include <linux/ras.h>
  37#include <linux/smp.h>
  38#include <linux/fs.h>
  39#include <linux/mm.h>
  40#include <linux/debugfs.h>
  41#include <linux/irq_work.h>
  42#include <linux/export.h>
  43#include <linux/set_memory.h>
  44#include <linux/sync_core.h>
  45#include <linux/task_work.h>
  46#include <linux/hardirq.h>
  47
  48#include <asm/intel-family.h>
  49#include <asm/processor.h>
  50#include <asm/traps.h>
  51#include <asm/tlbflush.h>
  52#include <asm/mce.h>
  53#include <asm/msr.h>
  54#include <asm/reboot.h>
  55
  56#include "internal.h"
  57
  58/* sysfs synchronization */
  59static DEFINE_MUTEX(mce_sysfs_mutex);
  60
  61#define CREATE_TRACE_POINTS
  62#include <trace/events/mce.h>
  63
  64#define SPINUNIT                100     /* 100ns */
  65
  66DEFINE_PER_CPU(unsigned, mce_exception_count);
  67
  68DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
  69
  70struct mce_bank {
  71        u64                     ctl;                    /* subevents to enable */
  72        bool                    init;                   /* initialise bank? */
  73};
  74static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
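     /*
      * Bank state is per-CPU because the bank count and CTL values are not
      * necessarily uniform across the system: vendor quirks may disable
      * individual banks on some CPUs and a few banks are shared between the
      * threads of a core, so each CPU keeps its own copy instead of one
      * global array.
      */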
  75
  76#define ATTR_LEN               16
  77/* One object for each MCE bank, shared by all CPUs */
  78struct mce_bank_dev {
  79        struct device_attribute attr;                   /* device attribute */
  80        char                    attrname[ATTR_LEN];     /* attribute name */
  81        u8                      bank;                   /* bank number */
  82};
  83static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];
  84
  85struct mce_vendor_flags mce_flags __read_mostly;
  86
  87struct mca_config mca_cfg __read_mostly = {
  88        .bootlog  = -1,
  89        /*
  90         * Tolerant levels:
  91         * 0: always panic on uncorrected errors, log corrected errors
  92         * 1: panic or SIGBUS on uncorrected errors, log corrected errors
  93         * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
  94         * 3: never panic or SIGBUS, log all errors (for testing only)
  95         */
  96        .tolerant = 1,
  97        .monarch_timeout = -1
  98};
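     /*
      * bootlog == -1 and monarch_timeout == -1 mean "no value given on the
      * kernel command line"; they are replaced with vendor-specific defaults
      * later during init (see __mcheck_cpu_apply_quirks()).
      */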
  99
 100static DEFINE_PER_CPU(struct mce, mces_seen);
 101static unsigned long mce_need_notify;
 102static int cpu_missing;
 103
 104/*
  105 * MCA banks polled by the periodic polling timer for corrected events.
 106 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 107 */
 108DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 109        [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
 110};
 111
 112/*
 113 * MCA banks controlled through firmware first for corrected errors.
 114 * This is a global list of banks for which we won't enable CMCI and we
 115 * won't poll. Firmware controls these banks and is responsible for
 116 * reporting corrected errors through GHES. Uncorrected/recoverable
 117 * errors are still notified through a machine check.
 118 */
 119mce_banks_t mce_banks_ce_disabled;
 120
 121static struct work_struct mce_work;
 122static struct irq_work mce_irq_work;
 123
 124static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 125
 126/*
 127 * CPU/chipset specific EDAC code can register a notifier call here to print
 128 * MCE errors in a human-readable form.
 129 */
 130BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 131
 132/* Do initial initialization of a struct mce */
 133noinstr void mce_setup(struct mce *m)
 134{
 135        memset(m, 0, sizeof(struct mce));
 136        m->cpu = m->extcpu = smp_processor_id();
 137        /* need the internal __ version to avoid deadlocks */
 138        m->time = __ktime_get_real_seconds();
 139        m->cpuvendor = boot_cpu_data.x86_vendor;
 140        m->cpuid = cpuid_eax(1);
 141        m->socketid = cpu_data(m->extcpu).phys_proc_id;
 142        m->apicid = cpu_data(m->extcpu).initial_apicid;
 143        m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
 144
 145        if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 146                m->ppin = __rdmsr(MSR_PPIN);
 147        else if (this_cpu_has(X86_FEATURE_AMD_PPIN))
 148                m->ppin = __rdmsr(MSR_AMD_PPIN);
 149
 150        m->microcode = boot_cpu_data.microcode;
 151}
 152
 153DEFINE_PER_CPU(struct mce, injectm);
 154EXPORT_PER_CPU_SYMBOL_GPL(injectm);
 155
 156void mce_log(struct mce *m)
 157{
 158        if (!mce_gen_pool_add(m))
 159                irq_work_queue(&mce_irq_work);
 160}
 161EXPORT_SYMBOL_GPL(mce_log);
 162
 163void mce_register_decode_chain(struct notifier_block *nb)
 164{
 165        if (WARN_ON(nb->priority < MCE_PRIO_LOWEST ||
 166                    nb->priority > MCE_PRIO_HIGHEST))
 167                return;
 168
 169        blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 170}
 171EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 172
 173void mce_unregister_decode_chain(struct notifier_block *nb)
 174{
 175        blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 176}
 177EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
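     /*
      * Illustrative sketch of a chain user (not code from this file; the
      * callback and notifier names are made up, the notifier API and
      * MCE_PRIO_EDAC are real).  A decoder such as an EDAC driver would do
      * roughly:
      *
      *	static int foo_decode_mce(struct notifier_block *nb, unsigned long val,
      *				  void *data)
      *	{
      *		struct mce *m = (struct mce *)data;
      *
      *		if (!m)
      *			return NOTIFY_DONE;
      *
      *		decode m->status, m->addr, ... then mark the record handled:
      *		m->kflags |= MCE_HANDLED_EDAC;
      *		return NOTIFY_OK;
      *	}
      *
      *	static struct notifier_block foo_mce_dec = {
      *		.notifier_call	= foo_decode_mce,
      *		.priority	= MCE_PRIO_EDAC,
      *	};
      *
      * registered with mce_register_decode_chain(&foo_mce_dec) and removed
      * again with mce_unregister_decode_chain(&foo_mce_dec).
      */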
 178
 179static inline u32 ctl_reg(int bank)
 180{
 181        return MSR_IA32_MCx_CTL(bank);
 182}
 183
 184static inline u32 status_reg(int bank)
 185{
 186        return MSR_IA32_MCx_STATUS(bank);
 187}
 188
 189static inline u32 addr_reg(int bank)
 190{
 191        return MSR_IA32_MCx_ADDR(bank);
 192}
 193
 194static inline u32 misc_reg(int bank)
 195{
 196        return MSR_IA32_MCx_MISC(bank);
 197}
 198
 199static inline u32 smca_ctl_reg(int bank)
 200{
 201        return MSR_AMD64_SMCA_MCx_CTL(bank);
 202}
 203
 204static inline u32 smca_status_reg(int bank)
 205{
 206        return MSR_AMD64_SMCA_MCx_STATUS(bank);
 207}
 208
 209static inline u32 smca_addr_reg(int bank)
 210{
 211        return MSR_AMD64_SMCA_MCx_ADDR(bank);
 212}
 213
 214static inline u32 smca_misc_reg(int bank)
 215{
 216        return MSR_AMD64_SMCA_MCx_MISC(bank);
 217}
 218
 219struct mca_msr_regs msr_ops = {
 220        .ctl    = ctl_reg,
 221        .status = status_reg,
 222        .addr   = addr_reg,
 223        .misc   = misc_reg
 224};
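     /*
      * These defaults address the legacy MCA MSR space.  On AMD Scalable MCA
      * (SMCA) systems the early vendor setup re-points these function
      * pointers at the smca_*_reg() helpers above, so the rest of this file
      * stays agnostic of the register layout.
      */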
 225
 226static void __print_mce(struct mce *m)
 227{
 228        pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
 229                 m->extcpu,
 230                 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
 231                 m->mcgstatus, m->bank, m->status);
 232
 233        if (m->ip) {
 234                pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
 235                        !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
 236                        m->cs, m->ip);
 237
 238                if (m->cs == __KERNEL_CS)
 239                        pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 240                pr_cont("\n");
 241        }
 242
 243        pr_emerg(HW_ERR "TSC %llx ", m->tsc);
 244        if (m->addr)
 245                pr_cont("ADDR %llx ", m->addr);
 246        if (m->misc)
 247                pr_cont("MISC %llx ", m->misc);
 248        if (m->ppin)
 249                pr_cont("PPIN %llx ", m->ppin);
 250
 251        if (mce_flags.smca) {
 252                if (m->synd)
 253                        pr_cont("SYND %llx ", m->synd);
 254                if (m->ipid)
 255                        pr_cont("IPID %llx ", m->ipid);
 256        }
 257
 258        pr_cont("\n");
 259
 260        /*
 261         * Note this output is parsed by external tools and old fields
 262         * should not be changed.
 263         */
 264        pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 265                m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
 266                m->microcode);
 267}
 268
 269static void print_mce(struct mce *m)
 270{
 271        __print_mce(m);
 272
 273        if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 274                pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 275}
 276
 277#define PANIC_TIMEOUT 5 /* 5 seconds */
 278
 279static atomic_t mce_panicked;
 280
 281static int fake_panic;
 282static atomic_t mce_fake_panicked;
 283
 284/* Panic in progress. Enable interrupts and wait for final IPI */
 285static void wait_for_panic(void)
 286{
 287        long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
 288
 289        preempt_disable();
 290        local_irq_enable();
 291        while (timeout-- > 0)
 292                udelay(1);
 293        if (panic_timeout == 0)
 294                panic_timeout = mca_cfg.panic_timeout;
  295        panic("Panicking machine check CPU died");
 296}
 297
 298static void mce_panic(const char *msg, struct mce *final, char *exp)
 299{
 300        int apei_err = 0;
 301        struct llist_node *pending;
 302        struct mce_evt_llist *l;
 303
 304        if (!fake_panic) {
 305                /*
 306                 * Make sure only one CPU runs in machine check panic
 307                 */
 308                if (atomic_inc_return(&mce_panicked) > 1)
 309                        wait_for_panic();
 310                barrier();
 311
 312                bust_spinlocks(1);
 313                console_verbose();
 314        } else {
 315                /* Don't log too much for fake panic */
 316                if (atomic_inc_return(&mce_fake_panicked) > 1)
 317                        return;
 318        }
 319        pending = mce_gen_pool_prepare_records();
 320        /* First print corrected ones that are still unlogged */
 321        llist_for_each_entry(l, pending, llnode) {
 322                struct mce *m = &l->mce;
 323                if (!(m->status & MCI_STATUS_UC)) {
 324                        print_mce(m);
 325                        if (!apei_err)
 326                                apei_err = apei_write_mce(m);
 327                }
 328        }
 329        /* Now print uncorrected but with the final one last */
 330        llist_for_each_entry(l, pending, llnode) {
 331                struct mce *m = &l->mce;
 332                if (!(m->status & MCI_STATUS_UC))
 333                        continue;
 334                if (!final || mce_cmp(m, final)) {
 335                        print_mce(m);
 336                        if (!apei_err)
 337                                apei_err = apei_write_mce(m);
 338                }
 339        }
 340        if (final) {
 341                print_mce(final);
 342                if (!apei_err)
 343                        apei_err = apei_write_mce(final);
 344        }
 345        if (cpu_missing)
 346                pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
 347        if (exp)
 348                pr_emerg(HW_ERR "Machine check: %s\n", exp);
 349        if (!fake_panic) {
 350                if (panic_timeout == 0)
 351                        panic_timeout = mca_cfg.panic_timeout;
 352                panic(msg);
 353        } else
 354                pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
 355}
 356
 357/* Support code for software error injection */
 358
 359static int msr_to_offset(u32 msr)
 360{
 361        unsigned bank = __this_cpu_read(injectm.bank);
 362
 363        if (msr == mca_cfg.rip_msr)
 364                return offsetof(struct mce, ip);
 365        if (msr == msr_ops.status(bank))
 366                return offsetof(struct mce, status);
 367        if (msr == msr_ops.addr(bank))
 368                return offsetof(struct mce, addr);
 369        if (msr == msr_ops.misc(bank))
 370                return offsetof(struct mce, misc);
 371        if (msr == MSR_IA32_MCG_STATUS)
 372                return offsetof(struct mce, mcgstatus);
 373        return -1;
 374}
 375
 376__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
 377                                      struct pt_regs *regs, int trapnr,
 378                                      unsigned long error_code,
 379                                      unsigned long fault_addr)
 380{
 381        pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
 382                 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
 383
 384        show_stack_regs(regs);
 385
 386        panic("MCA architectural violation!\n");
 387
 388        while (true)
 389                cpu_relax();
 390
 391        return true;
 392}
 393
 394/* MSR access wrappers used for error injection */
 395static noinstr u64 mce_rdmsrl(u32 msr)
 396{
 397        DECLARE_ARGS(val, low, high);
 398
 399        if (__this_cpu_read(injectm.finished)) {
 400                int offset;
 401                u64 ret;
 402
 403                instrumentation_begin();
 404
 405                offset = msr_to_offset(msr);
 406                if (offset < 0)
 407                        ret = 0;
 408                else
 409                        ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 410
 411                instrumentation_end();
 412
 413                return ret;
 414        }
 415
 416        /*
 417         * RDMSR on MCA MSRs should not fault. If they do, this is very much an
 418         * architectural violation and needs to be reported to hw vendor. Panic
 419         * the box to not allow any further progress.
 420         */
 421        asm volatile("1: rdmsr\n"
 422                     "2:\n"
 423                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
 424                     : EAX_EDX_RET(val, low, high) : "c" (msr));
 425
 426
 427        return EAX_EDX_VAL(val, low, high);
 428}
 429
 430__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
 431                                      struct pt_regs *regs, int trapnr,
 432                                      unsigned long error_code,
 433                                      unsigned long fault_addr)
 434{
 435        pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
 436                 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
 437                  regs->ip, (void *)regs->ip);
 438
 439        show_stack_regs(regs);
 440
 441        panic("MCA architectural violation!\n");
 442
 443        while (true)
 444                cpu_relax();
 445
 446        return true;
 447}
 448
 449static noinstr void mce_wrmsrl(u32 msr, u64 v)
 450{
 451        u32 low, high;
 452
 453        if (__this_cpu_read(injectm.finished)) {
 454                int offset;
 455
 456                instrumentation_begin();
 457
 458                offset = msr_to_offset(msr);
 459                if (offset >= 0)
 460                        *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 461
 462                instrumentation_end();
 463
 464                return;
 465        }
 466
 467        low  = (u32)v;
 468        high = (u32)(v >> 32);
 469
 470        /* See comment in mce_rdmsrl() */
 471        asm volatile("1: wrmsr\n"
 472                     "2:\n"
 473                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
 474                     : : "c" (msr), "a"(low), "d" (high) : "memory");
 475}
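     /*
      * In other words: once the injector has marked the per-CPU "injectm"
      * record as finished, mce_rdmsrl()/mce_wrmsrl() no longer touch hardware
      * at all but are redirected to the corresponding fields of that fake
      * record.  This is what lets mce-inject feed synthetic error records
      * through the normal handling paths.
      */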
 476
 477/*
 478 * Collect all global (w.r.t. this processor) status about this machine
 479 * check into our "mce" struct so that we can use it later to assess
 480 * the severity of the problem as we read per-bank specific details.
 481 */
 482static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
 483{
 484        mce_setup(m);
 485
 486        m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
 487        if (regs) {
 488                /*
 489                 * Get the address of the instruction at the time of
 490                 * the machine check error.
 491                 */
 492                if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
 493                        m->ip = regs->ip;
 494                        m->cs = regs->cs;
 495
 496                        /*
 497                         * When in VM86 mode make the cs look like ring 3
 498                         * always. This is a lie, but it's better than passing
 499                         * the additional vm86 bit around everywhere.
 500                         */
 501                        if (v8086_mode(regs))
 502                                m->cs |= 3;
 503                }
 504                /* Use accurate RIP reporting if available. */
 505                if (mca_cfg.rip_msr)
 506                        m->ip = mce_rdmsrl(mca_cfg.rip_msr);
 507        }
 508}
 509
 510int mce_available(struct cpuinfo_x86 *c)
 511{
 512        if (mca_cfg.disabled)
 513                return 0;
 514        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 515}
 516
 517static void mce_schedule_work(void)
 518{
 519        if (!mce_gen_pool_empty())
 520                schedule_work(&mce_work);
 521}
 522
 523static void mce_irq_work_cb(struct irq_work *entry)
 524{
 525        mce_schedule_work();
 526}
 527
 528/*
 529 * Check if the address reported by the CPU is in a format we can parse.
 530 * It would be possible to add code for most other cases, but all would
 531 * be somewhat complicated (e.g. segment offset would require an instruction
 532 * parser). So only support physical addresses up to page granularity for now.
 533 */
 534int mce_usable_address(struct mce *m)
 535{
 536        if (!(m->status & MCI_STATUS_ADDRV))
 537                return 0;
 538
 539        /* Checks after this one are Intel/Zhaoxin-specific: */
 540        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
 541            boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
 542                return 1;
 543
 544        if (!(m->status & MCI_STATUS_MISCV))
 545                return 0;
 546
 547        if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
 548                return 0;
 549
 550        if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
 551                return 0;
 552
 553        return 1;
 554}
 555EXPORT_SYMBOL_GPL(mce_usable_address);
 556
 557bool mce_is_memory_error(struct mce *m)
 558{
 559        switch (m->cpuvendor) {
 560        case X86_VENDOR_AMD:
 561        case X86_VENDOR_HYGON:
 562                return amd_mce_is_memory_error(m);
 563
 564        case X86_VENDOR_INTEL:
 565        case X86_VENDOR_ZHAOXIN:
 566                /*
 567                 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
 568                 *
 569                 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
 570                 * indicating a memory error. Bit 8 is used for indicating a
 571                 * cache hierarchy error. The combination of bit 2 and bit 3
  572                 * is used for indicating a `generic' cache hierarchy error.
  573                 * But we can't just blindly check the above bits, because if
  574                 * bit 11 is set, then it is a bus/interconnect error - and
  575                 * either way the above bits just give more detail on what
 576                 * bus/interconnect error happened. Note that bit 12 can be
 577                 * ignored, as it's the "filter" bit.
 578                 */
 579                return (m->status & 0xef80) == BIT(7) ||
 580                       (m->status & 0xef00) == BIT(8) ||
 581                       (m->status & 0xeffc) == 0xc;
 582
 583        default:
 584                return false;
 585        }
 586}
 587EXPORT_SYMBOL_GPL(mce_is_memory_error);
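     /*
      * Worked example for the Intel/Zhaoxin masks above: a memory controller
      * read error encodes MCACOD as 1MMM CCCC, e.g. 0x0091 for a read error
      * on channel 1, and 0x0091 & 0xef80 == 0x0080 == BIT(7).  A
      * bus/interconnect error additionally has bit 11 set, so the same
      * comparison fails for it and it is not reported as a memory error.
      */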
 588
 589static bool whole_page(struct mce *m)
 590{
 591        if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
 592                return true;
 593
 594        return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
 595}
 596
 597bool mce_is_correctable(struct mce *m)
 598{
 599        if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 600                return false;
 601
 602        if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
 603                return false;
 604
 605        if (m->status & MCI_STATUS_UC)
 606                return false;
 607
 608        return true;
 609}
 610EXPORT_SYMBOL_GPL(mce_is_correctable);
 611
 612static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
 613                              void *data)
 614{
 615        struct mce *m = (struct mce *)data;
 616
 617        if (!m)
 618                return NOTIFY_DONE;
 619
 620        /* Emit the trace record: */
 621        trace_mce_record(m);
 622
 623        set_bit(0, &mce_need_notify);
 624
 625        mce_notify_irq();
 626
 627        return NOTIFY_DONE;
 628}
 629
 630static struct notifier_block early_nb = {
 631        .notifier_call  = mce_early_notifier,
 632        .priority       = MCE_PRIO_EARLY,
 633};
 634
 635static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
 636                              void *data)
 637{
 638        struct mce *mce = (struct mce *)data;
 639        unsigned long pfn;
 640
 641        if (!mce || !mce_usable_address(mce))
 642                return NOTIFY_DONE;
 643
 644        if (mce->severity != MCE_AO_SEVERITY &&
 645            mce->severity != MCE_DEFERRED_SEVERITY)
 646                return NOTIFY_DONE;
 647
 648        pfn = mce->addr >> PAGE_SHIFT;
 649        if (!memory_failure(pfn, 0)) {
 650                set_mce_nospec(pfn, whole_page(mce));
 651                mce->kflags |= MCE_HANDLED_UC;
 652        }
 653
 654        return NOTIFY_OK;
 655}
 656
 657static struct notifier_block mce_uc_nb = {
 658        .notifier_call  = uc_decode_notifier,
 659        .priority       = MCE_PRIO_UC,
 660};
 661
 662static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
 663                                void *data)
 664{
 665        struct mce *m = (struct mce *)data;
 666
 667        if (!m)
 668                return NOTIFY_DONE;
 669
 670        if (mca_cfg.print_all || !m->kflags)
 671                __print_mce(m);
 672
 673        return NOTIFY_DONE;
 674}
 675
 676static struct notifier_block mce_default_nb = {
 677        .notifier_call  = mce_default_notifier,
 678        /* lowest prio, we want it to run last. */
 679        .priority       = MCE_PRIO_LOWEST,
 680};
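     /*
      * Taken together, the notifiers run roughly in this order (highest
      * priority first): the early notifier emits the trace event and wakes
      * up consumers, the UC notifier tries to poison the affected page for
      * AO/deferred errors, externally registered decoders (EDAC, mcelog, ...)
      * run in between, and this default notifier finally prints whatever
      * nobody else marked as handled.
      */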
 681
 682/*
 683 * Read ADDR and MISC registers.
 684 */
 685static void mce_read_aux(struct mce *m, int i)
 686{
 687        if (m->status & MCI_STATUS_MISCV)
 688                m->misc = mce_rdmsrl(msr_ops.misc(i));
 689
 690        if (m->status & MCI_STATUS_ADDRV) {
 691                m->addr = mce_rdmsrl(msr_ops.addr(i));
 692
 693                /*
 694                 * Mask the reported address by the reported granularity.
 695                 */
 696                if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
 697                        u8 shift = MCI_MISC_ADDR_LSB(m->misc);
 698                        m->addr >>= shift;
 699                        m->addr <<= shift;
 700                }
 701
 702                /*
 703                 * Extract [55:<lsb>] where lsb is the least significant
 704                 * *valid* bit of the address bits.
 705                 */
 706                if (mce_flags.smca) {
 707                        u8 lsb = (m->addr >> 56) & 0x3f;
 708
 709                        m->addr &= GENMASK_ULL(55, lsb);
 710                }
 711        }
 712
 713        if (mce_flags.smca) {
 714                m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
 715
 716                if (m->status & MCI_STATUS_SYNDV)
 717                        m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
 718        }
 719}
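     /*
      * Example of the masking above: with MCI_MISC_ADDR_LSB(m->misc) == 6 the
      * address is only meaningful at cacheline granularity, so the low six
      * bits are shifted out and back in, i.e. cleared.  Likewise whole_page()
      * treats an LSB of PAGE_SHIFT or more as "the entire page is affected".
      */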
 720
 721DEFINE_PER_CPU(unsigned, mce_poll_count);
 722
 723/*
 724 * Poll for corrected events or events that happened before reset.
 725 * Those are just logged through /dev/mcelog.
 726 *
 727 * This is executed in standard interrupt context.
 728 *
 729 * Note: spec recommends to panic for fatal unsignalled
 730 * errors here. However this would be quite problematic --
 731 * we would need to reimplement the Monarch handling and
 732 * it would mess up the exclusion between exception handler
  733 * and poll handler -- so we skip this for now.
  734 * These cases should not happen anyway, or only when the CPU
  735 * is already totally confused. In this case it's likely it will
 736 * not fully execute the machine check handler either.
 737 */
 738bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 739{
 740        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
 741        bool error_seen = false;
 742        struct mce m;
 743        int i;
 744
 745        this_cpu_inc(mce_poll_count);
 746
 747        mce_gather_info(&m, NULL);
 748
 749        if (flags & MCP_TIMESTAMP)
 750                m.tsc = rdtsc();
 751
 752        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 753                if (!mce_banks[i].ctl || !test_bit(i, *b))
 754                        continue;
 755
 756                m.misc = 0;
 757                m.addr = 0;
 758                m.bank = i;
 759
 760                barrier();
 761                m.status = mce_rdmsrl(msr_ops.status(i));
 762
 763                /* If this entry is not valid, ignore it */
 764                if (!(m.status & MCI_STATUS_VAL))
 765                        continue;
 766
 767                /*
 768                 * If we are logging everything (at CPU online) or this
 769                 * is a corrected error, then we must log it.
 770                 */
 771                if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
 772                        goto log_it;
 773
 774                /*
 775                 * Newer Intel systems that support software error
 776                 * recovery need to make additional checks. Other
 777                 * CPUs should skip over uncorrected errors, but log
 778                 * everything else.
 779                 */
 780                if (!mca_cfg.ser) {
 781                        if (m.status & MCI_STATUS_UC)
 782                                continue;
 783                        goto log_it;
 784                }
 785
 786                /* Log "not enabled" (speculative) errors */
 787                if (!(m.status & MCI_STATUS_EN))
 788                        goto log_it;
 789
 790                /*
 791                 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
 792                 * UC == 1 && PCC == 0 && S == 0
 793                 */
 794                if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
 795                        goto log_it;
 796
 797                /*
 798                 * Skip anything else. Presumption is that our read of this
 799                 * bank is racing with a machine check. Leave the log alone
 800                 * for do_machine_check() to deal with it.
 801                 */
 802                continue;
 803
 804log_it:
 805                error_seen = true;
 806
 807                if (flags & MCP_DONTLOG)
 808                        goto clear_it;
 809
 810                mce_read_aux(&m, i);
 811                m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false);
 812                /*
 813                 * Don't get the IP here because it's unlikely to
 814                 * have anything to do with the actual error location.
 815                 */
 816
 817                if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
 818                        goto clear_it;
 819
 820                if (flags & MCP_QUEUE_LOG)
 821                        mce_gen_pool_add(&m);
 822                else
 823                        mce_log(&m);
 824
 825clear_it:
 826                /*
 827                 * Clear state for this bank.
 828                 */
 829                mce_wrmsrl(msr_ops.status(i), 0);
 830        }
 831
 832        /*
 833         * Don't clear MCG_STATUS here because it's only defined for
 834         * exceptions.
 835         */
 836
 837        sync_core();
 838
 839        return error_seen;
 840}
 841EXPORT_SYMBOL_GPL(machine_check_poll);
 842
 843/*
 844 * Do a quick check if any of the events requires a panic.
 845 * This decides if we keep the events around or clear them.
 846 */
 847static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 848                          struct pt_regs *regs)
 849{
 850        char *tmp = *msg;
 851        int i;
 852
 853        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 854                m->status = mce_rdmsrl(msr_ops.status(i));
 855                if (!(m->status & MCI_STATUS_VAL))
 856                        continue;
 857
 858                __set_bit(i, validp);
 859                if (quirk_no_way_out)
 860                        quirk_no_way_out(i, m, regs);
 861
 862                m->bank = i;
 863                if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
 864                        mce_read_aux(m, i);
 865                        *msg = tmp;
 866                        return 1;
 867                }
 868        }
 869        return 0;
 870}
 871
 872/*
 873 * Variable to establish order between CPUs while scanning.
  874 * Each CPU spins initially until mce_executing equals its number.
 875 */
 876static atomic_t mce_executing;
 877
 878/*
 879 * Defines order of CPUs on entry. First CPU becomes Monarch.
 880 */
 881static atomic_t mce_callin;
 882
 883/*
  884 * Track which CPUs entered the MCA broadcast synchronization and which did not, in
 885 * order to print holdouts.
 886 */
 887static cpumask_t mce_missing_cpus = CPU_MASK_ALL;
 888
 889/*
 890 * Check if a timeout waiting for other CPUs happened.
 891 */
 892static int mce_timed_out(u64 *t, const char *msg)
 893{
 894        /*
 895         * The others already did panic for some reason.
 896         * Bail out like in a timeout.
 897         * rmb() to tell the compiler that system_state
 898         * might have been modified by someone else.
 899         */
 900        rmb();
 901        if (atomic_read(&mce_panicked))
 902                wait_for_panic();
 903        if (!mca_cfg.monarch_timeout)
 904                goto out;
 905        if ((s64)*t < SPINUNIT) {
 906                if (mca_cfg.tolerant <= 1) {
 907                        if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
 908                                pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
 909                                         cpumask_pr_args(&mce_missing_cpus));
 910                        mce_panic(msg, NULL, NULL);
 911                }
 912                cpu_missing = 1;
 913                return 1;
 914        }
 915        *t -= SPINUNIT;
 916out:
 917        touch_nmi_watchdog();
 918        return 0;
 919}
 920
 921/*
 922 * The Monarch's reign.  The Monarch is the CPU who entered
 923 * the machine check handler first. It waits for the others to
  924 * raise the exception too and then grades them. If any
  925 * error is fatal, it panics. Only then does it let the others continue.
 926 *
 927 * The other CPUs entering the MCE handler will be controlled by the
 928 * Monarch. They are called Subjects.
 929 *
  930 * This way we prevent any potential data corruption in an unrecoverable case
  931 * and also make sure that all CPUs' errors are examined.
 932 *
 933 * Also this detects the case of a machine check event coming from outer
  934 * space (not detected by any CPUs). In this case some external agent wants
 935 * us to shut down, so panic too.
 936 *
 937 * The other CPUs might still decide to panic if the handler happens
  938 * in an unrecoverable place, but in this case the system is in a semi-stable
 939 * state and won't corrupt anything by itself. It's ok to let the others
 940 * continue for a bit first.
 941 *
 942 * All the spin loops have timeouts; when a timeout happens a CPU
 943 * typically elects itself to be Monarch.
 944 */
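     /*
      * Rough timeline of one broadcast machine check (illustrative sketch):
      *
      *   Monarch (callin order 1)           Subjects (order 2..N)
      *   ------------------------           ---------------------
      *   mce_start(): wait for callin       mce_start(): wait for callin
      *   scan own banks                     spin until mce_executing == order
      *   mce_end(): release next CPU,       scan own banks, one CPU at a time
      *     wait for all to finish           mce_end(): release next CPU,
      *   mce_reign(): grade, maybe panic      wait for the Monarch
      *   reset global state                 return
      */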
 945static void mce_reign(void)
 946{
 947        int cpu;
 948        struct mce *m = NULL;
 949        int global_worst = 0;
 950        char *msg = NULL;
 951
 952        /*
 953         * This CPU is the Monarch and the other CPUs have run
 954         * through their handlers.
 955         * Grade the severity of the errors of all the CPUs.
 956         */
 957        for_each_possible_cpu(cpu) {
 958                struct mce *mtmp = &per_cpu(mces_seen, cpu);
 959
 960                if (mtmp->severity > global_worst) {
 961                        global_worst = mtmp->severity;
 962                        m = &per_cpu(mces_seen, cpu);
 963                }
 964        }
 965
 966        /*
 967         * Cannot recover? Panic here then.
 968         * This dumps all the mces in the log buffer and stops the
 969         * other CPUs.
 970         */
 971        if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
 972                /* call mce_severity() to get "msg" for panic */
 973                mce_severity(m, NULL, mca_cfg.tolerant, &msg, true);
 974                mce_panic("Fatal machine check", m, msg);
 975        }
 976
 977        /*
 978         * For UC somewhere we let the CPU who detects it handle it.
  979         * We also must let the others continue, otherwise the handling
 980         * CPU could deadlock on a lock.
 981         */
 982
 983        /*
 984         * No machine check event found. Must be some external
 985         * source or one CPU is hung. Panic.
 986         */
 987        if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
 988                mce_panic("Fatal machine check from unknown source", NULL, NULL);
 989
 990        /*
 991         * Now clear all the mces_seen so that they don't reappear on
 992         * the next mce.
 993         */
 994        for_each_possible_cpu(cpu)
 995                memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
 996}
 997
 998static atomic_t global_nwo;
 999
1000/*
1001 * Start of Monarch synchronization. This waits until all CPUs have
1002 * entered the exception handler and then determines if any of them
 1003 * saw a fatal event that requires a panic. Then it lets them execute,
 1004 * one by one, in the entry order.
1005 * TBD double check parallel CPU hotunplug
1006 */
1007static int mce_start(int *no_way_out)
1008{
1009        int order;
1010        int cpus = num_online_cpus();
1011        u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1012
1013        if (!timeout)
1014                return -1;
1015
1016        atomic_add(*no_way_out, &global_nwo);
1017        /*
1018         * Rely on the implied barrier below, such that global_nwo
1019         * is updated before mce_callin.
1020         */
1021        order = atomic_inc_return(&mce_callin);
1022        cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
1023
1024        /*
1025         * Wait for everyone.
1026         */
1027        while (atomic_read(&mce_callin) != cpus) {
1028                if (mce_timed_out(&timeout,
1029                                  "Timeout: Not all CPUs entered broadcast exception handler")) {
1030                        atomic_set(&global_nwo, 0);
1031                        return -1;
1032                }
1033                ndelay(SPINUNIT);
1034        }
1035
1036        /*
1037         * mce_callin should be read before global_nwo
1038         */
1039        smp_rmb();
1040
1041        if (order == 1) {
1042                /*
1043                 * Monarch: Starts executing now, the others wait.
1044                 */
1045                atomic_set(&mce_executing, 1);
1046        } else {
1047                /*
1048                 * Subject: Now start the scanning loop one by one in
1049                 * the original callin order.
 1050                 * This way, when there are any shared banks, an error in them
 1051                 * is only seen by one CPU before being cleared, avoiding duplicates.
1052                 */
1053                while (atomic_read(&mce_executing) < order) {
1054                        if (mce_timed_out(&timeout,
1055                                          "Timeout: Subject CPUs unable to finish machine check processing")) {
1056                                atomic_set(&global_nwo, 0);
1057                                return -1;
1058                        }
1059                        ndelay(SPINUNIT);
1060                }
1061        }
1062
1063        /*
1064         * Cache the global no_way_out state.
1065         */
1066        *no_way_out = atomic_read(&global_nwo);
1067
1068        return order;
1069}
1070
1071/*
1072 * Synchronize between CPUs after main scanning loop.
1073 * This invokes the bulk of the Monarch processing.
1074 */
1075static int mce_end(int order)
1076{
1077        int ret = -1;
1078        u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1079
1080        if (!timeout)
1081                goto reset;
1082        if (order < 0)
1083                goto reset;
1084
1085        /*
1086         * Allow others to run.
1087         */
1088        atomic_inc(&mce_executing);
1089
1090        if (order == 1) {
1091                /* CHECKME: Can this race with a parallel hotplug? */
1092                int cpus = num_online_cpus();
1093
1094                /*
1095                 * Monarch: Wait for everyone to go through their scanning
1096                 * loops.
1097                 */
1098                while (atomic_read(&mce_executing) <= cpus) {
1099                        if (mce_timed_out(&timeout,
1100                                          "Timeout: Monarch CPU unable to finish machine check processing"))
1101                                goto reset;
1102                        ndelay(SPINUNIT);
1103                }
1104
1105                mce_reign();
1106                barrier();
1107                ret = 0;
1108        } else {
1109                /*
1110                 * Subject: Wait for Monarch to finish.
1111                 */
1112                while (atomic_read(&mce_executing) != 0) {
1113                        if (mce_timed_out(&timeout,
1114                                          "Timeout: Monarch CPU did not finish machine check processing"))
1115                                goto reset;
1116                        ndelay(SPINUNIT);
1117                }
1118
1119                /*
1120                 * Don't reset anything. That's done by the Monarch.
1121                 */
1122                return 0;
1123        }
1124
1125        /*
1126         * Reset all global state.
1127         */
1128reset:
1129        atomic_set(&global_nwo, 0);
1130        atomic_set(&mce_callin, 0);
1131        cpumask_setall(&mce_missing_cpus);
1132        barrier();
1133
1134        /*
1135         * Let others run again.
1136         */
1137        atomic_set(&mce_executing, 0);
1138        return ret;
1139}
1140
1141static void mce_clear_state(unsigned long *toclear)
1142{
1143        int i;
1144
1145        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1146                if (test_bit(i, toclear))
1147                        mce_wrmsrl(msr_ops.status(i), 0);
1148        }
1149}
1150
1151/*
1152 * Cases where we avoid rendezvous handler timeout:
1153 * 1) If this CPU is offline.
1154 *
1155 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1156 *  skip those CPUs which remain looping in the 1st kernel - see
1157 *  crash_nmi_callback().
1158 *
1159 * Note: there still is a small window between kexec-ing and the new,
1160 * kdump kernel establishing a new #MC handler where a broadcasted MCE
1161 * might not get handled properly.
1162 */
1163static noinstr bool mce_check_crashing_cpu(void)
1164{
1165        unsigned int cpu = smp_processor_id();
1166
1167        if (arch_cpu_is_offline(cpu) ||
1168            (crashing_cpu != -1 && crashing_cpu != cpu)) {
1169                u64 mcgstatus;
1170
1171                mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
1172
1173                if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
1174                        if (mcgstatus & MCG_STATUS_LMCES)
1175                                return false;
1176                }
1177
1178                if (mcgstatus & MCG_STATUS_RIPV) {
1179                        __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
1180                        return true;
1181                }
1182        }
1183        return false;
1184}
1185
1186static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
1187                            unsigned long *toclear, unsigned long *valid_banks,
1188                            int no_way_out, int *worst)
1189{
1190        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1191        struct mca_config *cfg = &mca_cfg;
1192        int severity, i;
1193
1194        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1195                __clear_bit(i, toclear);
1196                if (!test_bit(i, valid_banks))
1197                        continue;
1198
1199                if (!mce_banks[i].ctl)
1200                        continue;
1201
1202                m->misc = 0;
1203                m->addr = 0;
1204                m->bank = i;
1205
1206                m->status = mce_rdmsrl(msr_ops.status(i));
1207                if (!(m->status & MCI_STATUS_VAL))
1208                        continue;
1209
1210                /*
1211                 * Corrected or non-signaled errors are handled by
1212                 * machine_check_poll(). Leave them alone, unless this panics.
1213                 */
1214                if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1215                        !no_way_out)
1216                        continue;
1217
1218                /* Set taint even when machine check was not enabled. */
1219                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1220
1221                severity = mce_severity(m, regs, cfg->tolerant, NULL, true);
1222
1223                /*
 1224                 * When the machine check was for a corrected/deferred error, leave
 1225                 * it to that handler and don't touch it here, unless we're panicking.
1226                 */
1227                if ((severity == MCE_KEEP_SEVERITY ||
1228                     severity == MCE_UCNA_SEVERITY) && !no_way_out)
1229                        continue;
1230
1231                __set_bit(i, toclear);
1232
1233                /* Machine check event was not enabled. Clear, but ignore. */
1234                if (severity == MCE_NO_SEVERITY)
1235                        continue;
1236
1237                mce_read_aux(m, i);
1238
1239                /* assuming valid severity level != 0 */
1240                m->severity = severity;
1241
1242                mce_log(m);
1243
1244                if (severity > *worst) {
1245                        *final = *m;
1246                        *worst = severity;
1247                }
1248        }
1249
1250        /* mce_clear_state will clear *final, save locally for use later */
1251        *m = *final;
1252}
1253
1254static void kill_me_now(struct callback_head *ch)
1255{
1256        struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
1257
1258        p->mce_count = 0;
1259        force_sig(SIGBUS);
1260}
1261
1262static void kill_me_maybe(struct callback_head *cb)
1263{
1264        struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
1265        int flags = MF_ACTION_REQUIRED;
1266        int ret;
1267
1268        p->mce_count = 0;
1269        pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
1270
1271        if (!p->mce_ripv)
1272                flags |= MF_MUST_KILL;
1273
1274        ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
1275        if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
1276                set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
1277                sync_core();
1278                return;
1279        }
1280
1281        /*
1282         * -EHWPOISON from memory_failure() means that it already sent SIGBUS
1283         * to the current process with the proper error info, so no need to
1284         * send SIGBUS here again.
1285         */
1286        if (ret == -EHWPOISON)
1287                return;
1288
1289        if (p->mce_vaddr != (void __user *)-1l) {
1290                force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
1291        } else {
1292                pr_err("Memory error not recovered");
1293                kill_me_now(cb);
1294        }
1295}
1296
1297static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
1298{
1299        int count = ++current->mce_count;
1300
1301        /* First call, save all the details */
1302        if (count == 1) {
1303                current->mce_addr = m->addr;
1304                current->mce_kflags = m->kflags;
1305                current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
1306                current->mce_whole_page = whole_page(m);
1307
1308                if (kill_current_task)
1309                        current->mce_kill_me.func = kill_me_now;
1310                else
1311                        current->mce_kill_me.func = kill_me_maybe;
1312        }
1313
1314        /* Ten is likely overkill. Don't expect more than two faults before task_work() */
1315        if (count > 10)
1316                mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
1317
1318        /* Second or later call, make sure page address matches the one from first call */
1319        if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
1320                mce_panic("Consecutive machine checks to different user pages", m, msg);
1321
1322        /* Do not call task_work_add() more than once */
1323        if (count > 1)
1324                return;
1325
1326        task_work_add(current, &current->mce_kill_me, TWA_RESUME);
1327}
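     /*
      * The callback queued above runs via task_work when the task returns to
      * user space, i.e. outside of #MC/NMI context, where it is safe to call
      * memory_failure() and to deliver signals.  Kernel copy-in faults
      * (MCE_IN_KERNEL_COPYIN) are funneled through the same path from
      * do_machine_check() below.
      */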
1328
1329/*
1330 * The actual machine check handler. This only handles real
1331 * exceptions when something got corrupted coming in through int 18.
1332 *
1333 * This is executed in NMI context not subject to normal locking rules. This
1334 * implies that most kernel services cannot be safely used. Don't even
1335 * think about putting a printk in there!
1336 *
1337 * On Intel systems this is entered on all CPUs in parallel through
1338 * MCE broadcast. However some CPUs might be broken beyond repair,
 1339 * so always be careful when synchronizing with others.
1340 *
1341 * Tracing and kprobes are disabled: if we interrupted a kernel context
1342 * with IF=1, we need to minimize stack usage.  There are also recursion
1343 * issues: if the machine check was due to a failure of the memory
1344 * backing the user stack, tracing that reads the user stack will cause
1345 * potentially infinite recursion.
1346 */
1347noinstr void do_machine_check(struct pt_regs *regs)
1348{
1349        DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1350        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1351        struct mca_config *cfg = &mca_cfg;
1352        struct mce m, *final;
1353        char *msg = NULL;
1354        int worst = 0;
1355
1356        /*
1357         * Establish sequential order between the CPUs entering the machine
1358         * check handler.
1359         */
1360        int order = -1;
1361
1362        /*
1363         * If no_way_out gets set, there is no safe way to recover from this
1364         * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
1365         */
1366        int no_way_out = 0;
1367
1368        /*
1369         * If kill_current_task is not set, there might be a way to recover from this
1370         * error.
1371         */
1372        int kill_current_task = 0;
1373
1374        /*
 1375         * MCEs are always local on AMD. On Intel and Zhaoxin, whether the MCE
 1376         * is local is determined by MCG_STATUS_LMCES.
1377         */
1378        int lmce = 1;
1379
1380        this_cpu_inc(mce_exception_count);
1381
1382        mce_gather_info(&m, regs);
1383        m.tsc = rdtsc();
1384
1385        final = this_cpu_ptr(&mces_seen);
1386        *final = m;
1387
1388        memset(valid_banks, 0, sizeof(valid_banks));
1389        no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1390
1391        barrier();
1392
1393        /*
 1394         * When there is no restart IP we might need to kill the task or panic.
1395         * Assume the worst for now, but if we find the
1396         * severity is MCE_AR_SEVERITY we have other options.
1397         */
1398        if (!(m.mcgstatus & MCG_STATUS_RIPV))
1399                kill_current_task = (cfg->tolerant == 3) ? 0 : 1;
1400        /*
1401         * Check if this MCE is signaled to only this logical processor,
1402         * on Intel, Zhaoxin only.
1403         */
1404        if (m.cpuvendor == X86_VENDOR_INTEL ||
1405            m.cpuvendor == X86_VENDOR_ZHAOXIN)
1406                lmce = m.mcgstatus & MCG_STATUS_LMCES;
1407
1408        /*
1409         * Local machine check may already know that we have to panic.
 1410         * Broadcast machine check begins rendezvous in mce_start().
1411         * Go through all banks in exclusion of the other CPUs. This way we
1412         * don't report duplicated events on shared banks because the first one
1413         * to see it will clear it.
1414         */
1415        if (lmce) {
1416                if (no_way_out && cfg->tolerant < 3)
1417                        mce_panic("Fatal local machine check", &m, msg);
1418        } else {
1419                order = mce_start(&no_way_out);
1420        }
1421
1422        __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst);
1423
1424        if (!no_way_out)
1425                mce_clear_state(toclear);
1426
1427        /*
1428         * Do most of the synchronization with other CPUs.
1429         * When there's any problem use only local no_way_out state.
1430         */
1431        if (!lmce) {
1432                if (mce_end(order) < 0) {
1433                        if (!no_way_out)
1434                                no_way_out = worst >= MCE_PANIC_SEVERITY;
1435
1436                        if (no_way_out && cfg->tolerant < 3)
1437                                mce_panic("Fatal machine check on current CPU", &m, msg);
1438                }
1439        } else {
1440                /*
1441                 * If there was a fatal machine check we should have
1442                 * already called mce_panic earlier in this function.
1443                 * Since we re-read the banks, we might have found
1444                 * something new. Check again to see if we found a
1445                 * fatal error. We call "mce_severity()" again to
1446                 * make sure we have the right "msg".
1447                 */
1448                if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
1449                        mce_severity(&m, regs, cfg->tolerant, &msg, true);
1450                        mce_panic("Local fatal machine check!", &m, msg);
1451                }
1452        }
1453
1454        if (worst != MCE_AR_SEVERITY && !kill_current_task)
1455                goto out;
1456
1457        /* Fault was in user mode and we need to take some action */
1458        if ((m.cs & 3) == 3) {
1459                /* If this triggers there is no way to recover. Die hard. */
1460                BUG_ON(!on_thread_stack() || !user_mode(regs));
1461
1462                queue_task_work(&m, msg, kill_current_task);
1463
1464        } else {
1465                /*
1466                 * Handle an MCE which has happened in kernel space but from
1467                 * which the kernel can recover: ex_has_fault_handler() has
1468                 * already verified that the rIP at which the error happened is
1469                 * a rIP from which the kernel can recover (by jumping to
1470                 * recovery code specified in _ASM_EXTABLE_FAULT()) and the
1471                 * corresponding exception handler which would do that is the
1472                 * proper one.
1473                 */
1474                if (m.kflags & MCE_IN_KERNEL_RECOV) {
1475                        if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
1476                                mce_panic("Failed kernel mode recovery", &m, msg);
1477                }
1478
1479                if (m.kflags & MCE_IN_KERNEL_COPYIN)
1480                        queue_task_work(&m, msg, kill_current_task);
1481        }
1482out:
1483        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1484}
1485EXPORT_SYMBOL_GPL(do_machine_check);
1486
1487#ifndef CONFIG_MEMORY_FAILURE
1488int memory_failure(unsigned long pfn, int flags)
1489{
1490        /* mce_severity() should not hand us an ACTION_REQUIRED error */
1491        BUG_ON(flags & MF_ACTION_REQUIRED);
1492        pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1493               "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1494               pfn);
1495
1496        return 0;
1497}
1498#endif
1499
1500/*
1501 * Periodic polling timer for "silent" machine check errors.  If the
1502 * poller finds an MCE, poll 2x faster.  When the poller finds no more
1503 * errors, poll 2x slower (up to check_interval seconds).
1504 */
1505static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1506
1507static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1508static DEFINE_PER_CPU(struct timer_list, mce_timer);
1509
1510static unsigned long mce_adjust_timer_default(unsigned long interval)
1511{
1512        return interval;
1513}
1514
1515static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1516
1517static void __start_timer(struct timer_list *t, unsigned long interval)
1518{
1519        unsigned long when = jiffies + interval;
1520        unsigned long flags;
1521
1522        local_irq_save(flags);
1523
1524        if (!timer_pending(t) || time_before(when, t->expires))
1525                mod_timer(t, round_jiffies(when));
1526
1527        local_irq_restore(flags);
1528}
1529
1530static void mce_timer_fn(struct timer_list *t)
1531{
1532        struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
1533        unsigned long iv;
1534
1535        WARN_ON(cpu_t != t);
1536
1537        iv = __this_cpu_read(mce_next_interval);
1538
1539        if (mce_available(this_cpu_ptr(&cpu_info))) {
1540                machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1541
1542                if (mce_intel_cmci_poll()) {
1543                        iv = mce_adjust_timer(iv);
1544                        goto done;
1545                }
1546        }
1547
1548        /*
1549         * Alert userspace if needed. If we logged an MCE, reduce the polling
1550         * interval, otherwise increase the polling interval.
1551         */
1552        if (mce_notify_irq())
1553                iv = max(iv / 2, (unsigned long) HZ/100);
1554        else
1555                iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1556
1557done:
1558        __this_cpu_write(mce_next_interval, iv);
1559        __start_timer(t, iv);
1560}
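     /*
      * Worked example of the adaptive interval: with check_interval at its
      * default of 5 minutes, a CPU that keeps finding errors halves the
      * interval on each run, bottoming out at HZ/100 (10ms at HZ=1000), and
      * once the errors stop the interval doubles on every quiet run until it
      * is back at roughly check_interval * HZ jiffies.
      */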
1561
1562/*
1563 * Ensure that the timer is firing in @interval from now.
1564 */
1565void mce_timer_kick(unsigned long interval)
1566{
1567        struct timer_list *t = this_cpu_ptr(&mce_timer);
1568        unsigned long iv = __this_cpu_read(mce_next_interval);
1569
1570        __start_timer(t, interval);
1571
1572        if (interval < iv)
1573                __this_cpu_write(mce_next_interval, interval);
1574}
1575
1576/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1577static void mce_timer_delete_all(void)
1578{
1579        int cpu;
1580
1581        for_each_online_cpu(cpu)
1582                del_timer_sync(&per_cpu(mce_timer, cpu));
1583}
1584
1585/*
1586 * Notify the user(s) about new machine check events.
1587 * Can be called from interrupt context, but not from machine check/NMI
1588 * context.
1589 */
1590int mce_notify_irq(void)
1591{
1592        /* Not more than two messages every minute */
1593        static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1594
1595        if (test_and_clear_bit(0, &mce_need_notify)) {
1596                mce_work_trigger();
1597
1598                if (__ratelimit(&ratelimit))
1599                        pr_info(HW_ERR "Machine check events logged\n");
1600
1601                return 1;
1602        }
1603        return 0;
1604}
1605EXPORT_SYMBOL_GPL(mce_notify_irq);
1606
1607static void __mcheck_cpu_mce_banks_init(void)
1608{
1609        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1610        u8 n_banks = this_cpu_read(mce_num_banks);
1611        int i;
1612
1613        for (i = 0; i < n_banks; i++) {
1614                struct mce_bank *b = &mce_banks[i];
1615
1616                /*
1617                 * Init them all; __mcheck_cpu_apply_quirks() is going to apply
1618                 * the required vendor quirks before
1619                 * __mcheck_cpu_init_clear_banks() does the final bank setup.
1620                 */
1621                b->ctl = -1ULL;
1622                b->init = true;
1623        }
1624}
1625
1626/*
1627 * Initialize Machine Checks for a CPU.
1628 */
1629static void __mcheck_cpu_cap_init(void)
1630{
1631        u64 cap;
1632        u8 b;
1633
1634        rdmsrl(MSR_IA32_MCG_CAP, cap);
1635
1636        b = cap & MCG_BANKCNT_MASK;
1637
1638        if (b > MAX_NR_BANKS) {
1639                pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
1640                        smp_processor_id(), MAX_NR_BANKS, b);
1641                b = MAX_NR_BANKS;
1642        }
1643
1644        this_cpu_write(mce_num_banks, b);
1645
1646        __mcheck_cpu_mce_banks_init();
1647
1648        /* Use accurate RIP reporting if available. */
1649        if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1650                mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1651
1652        if (cap & MCG_SER_P)
1653                mca_cfg.ser = 1;
1654}
1655
1656static void __mcheck_cpu_init_generic(void)
1657{
1658        enum mcp_flags m_fl = 0;
1659        mce_banks_t all_banks;
1660        u64 cap;
1661
1662        if (!mca_cfg.bootlog)
1663                m_fl = MCP_DONTLOG;
1664
1665        /*
1666         * Log the machine checks left over from the previous reset. Log them
1667         * only, do not start processing them. That will happen in mcheck_late_init()
1668         * when all consumers have been registered on the notifier chain.
1669         */
1670        bitmap_fill(all_banks, MAX_NR_BANKS);
1671        machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
1672
1673        cr4_set_bits(X86_CR4_MCE);
1674
1675        rdmsrl(MSR_IA32_MCG_CAP, cap);
1676        if (cap & MCG_CTL_P)
1677                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1678}
1679
1680static void __mcheck_cpu_init_clear_banks(void)
1681{
1682        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1683        int i;
1684
1685        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1686                struct mce_bank *b = &mce_banks[i];
1687
1688                if (!b->init)
1689                        continue;
1690                wrmsrl(msr_ops.ctl(i), b->ctl);
1691                wrmsrl(msr_ops.status(i), 0);
1692        }
1693}
1694
1695/*
1696 * Do a final check to see if there are any unused/RAZ banks.
1697 *
1698 * This must be done after the banks have been initialized and any quirks have
1699 * been applied.
1700 *
1701 * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs.
1702 * Otherwise, a user who disables a bank will not be able to re-enable it
1703 * without a system reboot.
1704 */
1705static void __mcheck_cpu_check_banks(void)
1706{
1707        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1708        u64 msrval;
1709        int i;
1710
1711        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1712                struct mce_bank *b = &mce_banks[i];
1713
1714                if (!b->init)
1715                        continue;
1716
1717                rdmsrl(msr_ops.ctl(i), msrval);
1718                b->init = !!msrval;
1719        }
1720}
1721
1722/*
1723 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1724 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1725 * Vol 3B Table 15-20). But this confuses both the code that determines
1726 * whether the machine check occurred in kernel or user mode and the
1727 * severity assessment code. Pretend that EIPV was set, and take the
1728 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1729 */
1730static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1731{
1732        if (bank != 0)
1733                return;
1734        if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1735                return;
1736        if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1737                          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1738                          MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1739                          MCACOD)) !=
1740                         (MCI_STATUS_UC|MCI_STATUS_EN|
1741                          MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1742                          MCI_STATUS_AR|MCACOD_INSTR))
1743                return;
1744
1745        m->mcgstatus |= MCG_STATUS_EIPV;
1746        m->ip = regs->ip;
1747        m->cs = regs->cs;
1748}
1749
1750/* Add per CPU specific workarounds here */
1751static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1752{
1753        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1754        struct mca_config *cfg = &mca_cfg;
1755
1756        if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1757                pr_info("unknown CPU type - not enabling MCE support\n");
1758                return -EOPNOTSUPP;
1759        }
1760
1761        /* This should be disabled by the BIOS, but isn't always */
1762        if (c->x86_vendor == X86_VENDOR_AMD) {
1763                if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
1764                        /*
1765                         * disable GART TBL walk error reporting, which
1766                         * trips off incorrectly with the IOMMU & 3ware
1767                         * & Cerberus:
1768                         */
1769                        clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1770                }
1771                if (c->x86 < 0x11 && cfg->bootlog < 0) {
1772                        /*
1773                         * Lots of broken BIOSes don't clear these banks by
1774                         * default and leave stale junk in there. Don't log:
1775                         */
1776                        cfg->bootlog = 0;
1777                }
1778                /*
1779                 * Various K7s with a broken bank 0 are around. Always
1780                 * disable it by default.
1781                 */
1782                if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
1783                        mce_banks[0].ctl = 0;
1784
1785                /*
1786                 * overflow_recov is supported for F15h Models 00h-0fh
1787                 * even though we don't have a CPUID bit for it.
1788                 */
1789                if (c->x86 == 0x15 && c->x86_model <= 0xf)
1790                        mce_flags.overflow_recov = 1;
1791
1792        }
1793
1794        if (c->x86_vendor == X86_VENDOR_INTEL) {
1795                /*
1796                 * The SDM documents that on family 6, bank 0 should not be
1797                 * written because it aliases to another special BIOS
1798                 * controlled register. It is no longer aliased on models
1799                 * 0x1a and later.
1800                 * Don't ignore bank 0 completely because there could be a
1801                 * valid event later; merely don't write CTL0.
1802                 */
1803
1804                if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
1805                        mce_banks[0].init = false;
1806
1807                /*
1808                 * All newer Intel systems support MCE broadcasting. Enable
1809                 * synchronization with a one second timeout.
1810                 */
1811                if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1812                        cfg->monarch_timeout < 0)
1813                        cfg->monarch_timeout = USEC_PER_SEC;
1814
1815                /*
1816                 * There are also broken BIOSes on some Pentium M and
1817                 * earlier systems:
1818                 */
1819                if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1820                        cfg->bootlog = 0;
1821
1822                if (c->x86 == 6 && c->x86_model == 45)
1823                        quirk_no_way_out = quirk_sandybridge_ifu;
1824        }
1825
1826        if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
1827                /*
1828                 * All newer Zhaoxin CPUs support MCE broadcasting. Enable
1829                 * synchronization with a one second timeout.
1830                 */
1831                if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1832                        if (cfg->monarch_timeout < 0)
1833                                cfg->monarch_timeout = USEC_PER_SEC;
1834                }
1835        }
1836
1837        if (cfg->monarch_timeout < 0)
1838                cfg->monarch_timeout = 0;
1839        if (cfg->bootlog != 0)
1840                cfg->panic_timeout = 30;
1841
1842        return 0;
1843}
1844
1845static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1846{
1847        if (c->x86 != 5)
1848                return 0;
1849
1850        switch (c->x86_vendor) {
1851        case X86_VENDOR_INTEL:
1852                intel_p5_mcheck_init(c);
1853                return 1;
1854        case X86_VENDOR_CENTAUR:
1855                winchip_mcheck_init(c);
1856                return 1;
1857        default:
1858                return 0;
1859        }
1860
1861        return 0;
1862}
1863
1864/*
1865 * Init basic CPU features needed for early decoding of MCEs.
1866 */
1867static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
1868{
1869        if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
1870                mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
1871                mce_flags.succor         = !!cpu_has(c, X86_FEATURE_SUCCOR);
1872                mce_flags.smca           = !!cpu_has(c, X86_FEATURE_SMCA);
1873                mce_flags.amd_threshold  = 1;
1874
1875                if (mce_flags.smca) {
1876                        msr_ops.ctl     = smca_ctl_reg;
1877                        msr_ops.status  = smca_status_reg;
1878                        msr_ops.addr    = smca_addr_reg;
1879                        msr_ops.misc    = smca_misc_reg;
1880                }
1881        }
1882}
1883
1884static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
1885{
1886        struct mca_config *cfg = &mca_cfg;
1887
1888        /*
1889         * All newer Centaur CPUs support MCE broadcasting. Enable
1890         * synchronization with a one second timeout.
1891         */
1892        if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
1893             c->x86 > 6) {
1894                if (cfg->monarch_timeout < 0)
1895                        cfg->monarch_timeout = USEC_PER_SEC;
1896        }
1897}
1898
1899static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
1900{
1901        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1902
1903        /*
1904         * These CPUs have MCA bank 8 which reports only one error type called
1905         * SVAD (System View Address Decoder). The reporting of that error is
1906         * controlled by IA32_MC8.CTL.0.
1907         *
1908         * If enabled, prefetching on these CPUs will cause SVAD MCE when
1909         * virtual machines start and result in a system panic. Always disable
1910         * bank 8 SVAD error by default.
1911         */
1912        if ((c->x86 == 7 && c->x86_model == 0x1b) ||
1913            (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1914                if (this_cpu_read(mce_num_banks) > 8)
1915                        mce_banks[8].ctl = 0;
1916        }
1917
1918        intel_init_cmci();
1919        intel_init_lmce();
1920        mce_adjust_timer = cmci_intel_adjust_timer;
1921}
1922
1923static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
1924{
1925        intel_clear_lmce();
1926}
1927
1928static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1929{
1930        switch (c->x86_vendor) {
1931        case X86_VENDOR_INTEL:
1932                mce_intel_feature_init(c);
1933                mce_adjust_timer = cmci_intel_adjust_timer;
1934                break;
1935
1936        case X86_VENDOR_AMD:
1937                mce_amd_feature_init(c);
1938                break;
1940
1941        case X86_VENDOR_HYGON:
1942                mce_hygon_feature_init(c);
1943                break;
1944
1945        case X86_VENDOR_CENTAUR:
1946                mce_centaur_feature_init(c);
1947                break;
1948
1949        case X86_VENDOR_ZHAOXIN:
1950                mce_zhaoxin_feature_init(c);
1951                break;
1952
1953        default:
1954                break;
1955        }
1956}
1957
1958static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1959{
1960        switch (c->x86_vendor) {
1961        case X86_VENDOR_INTEL:
1962                mce_intel_feature_clear(c);
1963                break;
1964
1965        case X86_VENDOR_ZHAOXIN:
1966                mce_zhaoxin_feature_clear(c);
1967                break;
1968
1969        default:
1970                break;
1971        }
1972}
1973
1974static void mce_start_timer(struct timer_list *t)
1975{
1976        unsigned long iv = check_interval * HZ;
1977
1978        if (mca_cfg.ignore_ce || !iv)
1979                return;
1980
1981        this_cpu_write(mce_next_interval, iv);
1982        __start_timer(t, iv);
1983}
1984
1985static void __mcheck_cpu_setup_timer(void)
1986{
1987        struct timer_list *t = this_cpu_ptr(&mce_timer);
1988
1989        timer_setup(t, mce_timer_fn, TIMER_PINNED);
1990}
1991
1992static void __mcheck_cpu_init_timer(void)
1993{
1994        struct timer_list *t = this_cpu_ptr(&mce_timer);
1995
1996        timer_setup(t, mce_timer_fn, TIMER_PINNED);
1997        mce_start_timer(t);
1998}
1999
2000bool filter_mce(struct mce *m)
2001{
2002        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
2003                return amd_filter_mce(m);
2004        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2005                return intel_filter_mce(m);
2006
2007        return false;
2008}
2009
2010/* Handle unconfigured int18 (should never happen) */
2011static noinstr void unexpected_machine_check(struct pt_regs *regs)
2012{
2013        instrumentation_begin();
2014        pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
2015               smp_processor_id());
2016        instrumentation_end();
2017}
2018
2019/* Call the installed machine check handler for this CPU setup. */
2020void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
2021
2022static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
2023{
2024        irqentry_state_t irq_state;
2025
2026        WARN_ON_ONCE(user_mode(regs));
2027
2028        /*
2029         * Only required when from kernel mode. See
2030         * mce_check_crashing_cpu() for details.
2031         */
2032        if (machine_check_vector == do_machine_check &&
2033            mce_check_crashing_cpu())
2034                return;
2035
2036        irq_state = irqentry_nmi_enter(regs);
2037        /*
2038         * The call targets are marked noinstr, but objtool can't figure
2039         * that out because it's an indirect call. Annotate it.
2040         */
2041        instrumentation_begin();
2042
2043        machine_check_vector(regs);
2044
2045        instrumentation_end();
2046        irqentry_nmi_exit(regs, irq_state);
2047}
2048
2049static __always_inline void exc_machine_check_user(struct pt_regs *regs)
2050{
2051        irqentry_enter_from_user_mode(regs);
2052        instrumentation_begin();
2053
2054        machine_check_vector(regs);
2055
2056        instrumentation_end();
2057        irqentry_exit_to_user_mode(regs);
2058}
2059
2060#ifdef CONFIG_X86_64
2061/* MCE hit kernel mode */
2062DEFINE_IDTENTRY_MCE(exc_machine_check)
2063{
2064        unsigned long dr7;
2065
2066        dr7 = local_db_save();
2067        exc_machine_check_kernel(regs);
2068        local_db_restore(dr7);
2069}
2070
2071/* The user mode variant. */
2072DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
2073{
2074        unsigned long dr7;
2075
2076        dr7 = local_db_save();
2077        exc_machine_check_user(regs);
2078        local_db_restore(dr7);
2079}
2080#else
2081/* 32bit unified entry point */
2082DEFINE_IDTENTRY_RAW(exc_machine_check)
2083{
2084        unsigned long dr7;
2085
2086        dr7 = local_db_save();
2087        if (user_mode(regs))
2088                exc_machine_check_user(regs);
2089        else
2090                exc_machine_check_kernel(regs);
2091        local_db_restore(dr7);
2092}
2093#endif
2094
2095/*
2096 * Called for each booted CPU to set up machine checks.
2097 * Must be called with preempt off:
2098 */
2099void mcheck_cpu_init(struct cpuinfo_x86 *c)
2100{
2101        if (mca_cfg.disabled)
2102                return;
2103
2104        if (__mcheck_cpu_ancient_init(c))
2105                return;
2106
2107        if (!mce_available(c))
2108                return;
2109
2110        __mcheck_cpu_cap_init();
2111
2112        if (__mcheck_cpu_apply_quirks(c) < 0) {
2113                mca_cfg.disabled = 1;
2114                return;
2115        }
2116
2117        if (mce_gen_pool_init()) {
2118                mca_cfg.disabled = 1;
2119                pr_emerg("Couldn't allocate MCE records pool!\n");
2120                return;
2121        }
2122
2123        machine_check_vector = do_machine_check;
2124
2125        __mcheck_cpu_init_early(c);
2126        __mcheck_cpu_init_generic();
2127        __mcheck_cpu_init_vendor(c);
2128        __mcheck_cpu_init_clear_banks();
2129        __mcheck_cpu_check_banks();
2130        __mcheck_cpu_setup_timer();
2131}
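/*
 * Note the ordering above: __mcheck_cpu_apply_quirks() must run before
 * __mcheck_cpu_init_clear_banks() writes the (possibly quirked) CTL values,
 * and __mcheck_cpu_check_banks() runs after that so banks whose CTL reads
 * back as zero can be flagged as unused/RAZ.
 */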
2132
2133/*
2134 * Called for each booted CPU to clear some machine check opt-ins
2135 */
2136void mcheck_cpu_clear(struct cpuinfo_x86 *c)
2137{
2138        if (mca_cfg.disabled)
2139                return;
2140
2141        if (!mce_available(c))
2142                return;
2143
2144        /*
2145         * A generic x86 clear routine could be called here if one is ever
2146         * needed, e.g. __mcheck_cpu_clear_generic(c);
2147         */
2148        __mcheck_cpu_clear_vendor(c);
2150}
2151
2152static void __mce_disable_bank(void *arg)
2153{
2154        int bank = *((int *)arg);
2155        __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
2156        cmci_disable_bank(bank);
2157}
2158
2159void mce_disable_bank(int bank)
2160{
2161        if (bank >= this_cpu_read(mce_num_banks)) {
2162                pr_warn(FW_BUG
2163                        "Ignoring request to disable invalid MCA bank %d.\n",
2164                        bank);
2165                return;
2166        }
2167        set_bit(bank, mce_banks_ce_disabled);
2168        on_each_cpu(__mce_disable_bank, &bank, 1);
2169}
2170
2171/*
2172 * mce=off Disables machine check
2173 * mce=no_cmci Disables CMCI
2174 * mce=no_lmce Disables LMCE
2175 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2176 * mce=print_all Print all machine check logs to console
2177 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2178 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
2179 *      monarchtimeout is how long to wait for other CPUs on machine
2180 *      check, or 0 to not wait
2181 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
2182 *      and older.
2183 * mce=nobootlog Don't log MCEs from before booting.
2184 * mce=bios_cmci_threshold Don't program the CMCI threshold
2185 * mce=recovery Force enable copy_mc_fragile()
2186 */
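/*
 * Illustrative command lines: "mce=off" disables MCE completely,
 * "mce=bootlog" forces logging of boot-time errors, and "mce=2,500000"
 * uses the isdigit() branch below to set tolerant=2 with a monarch
 * timeout of 500000 usecs. Options cannot be combined in a single mce=
 * string; pass the parameter multiple times instead.
 */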
2187static int __init mcheck_enable(char *str)
2188{
2189        struct mca_config *cfg = &mca_cfg;
2190
2191        if (*str == 0) {
2192                enable_p5_mce();
2193                return 1;
2194        }
2195        if (*str == '=')
2196                str++;
2197        if (!strcmp(str, "off"))
2198                cfg->disabled = 1;
2199        else if (!strcmp(str, "no_cmci"))
2200                cfg->cmci_disabled = true;
2201        else if (!strcmp(str, "no_lmce"))
2202                cfg->lmce_disabled = 1;
2203        else if (!strcmp(str, "dont_log_ce"))
2204                cfg->dont_log_ce = true;
2205        else if (!strcmp(str, "print_all"))
2206                cfg->print_all = true;
2207        else if (!strcmp(str, "ignore_ce"))
2208                cfg->ignore_ce = true;
2209        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2210                cfg->bootlog = (str[0] == 'b');
2211        else if (!strcmp(str, "bios_cmci_threshold"))
2212                cfg->bios_cmci_threshold = 1;
2213        else if (!strcmp(str, "recovery"))
2214                cfg->recovery = 1;
2215        else if (isdigit(str[0])) {
2216                if (get_option(&str, &cfg->tolerant) == 2)
2217                        get_option(&str, &(cfg->monarch_timeout));
2218        } else {
2219                pr_info("mce argument %s ignored. Please use /sys\n", str);
2220                return 0;
2221        }
2222        return 1;
2223}
2224__setup("mce", mcheck_enable);
2225
2226int __init mcheck_init(void)
2227{
2228        mce_register_decode_chain(&early_nb);
2229        mce_register_decode_chain(&mce_uc_nb);
2230        mce_register_decode_chain(&mce_default_nb);
2231        mcheck_vendor_init_severity();
2232
2233        INIT_WORK(&mce_work, mce_gen_pool_process);
2234        init_irq_work(&mce_irq_work, mce_irq_work_cb);
2235
2236        return 0;
2237}
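/*
 * A minimal sketch (not part of this file's build) of how an in-kernel
 * consumer could hook into the decode chain set up above; the notifier
 * data pointer is the struct mce being decoded:
 *
 *	static int example_mce_notify(struct notifier_block *nb,
 *				      unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		pr_info("MCE: bank %d status 0x%llx\n", m->bank, m->status);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_mce_nb = {
 *		.notifier_call = example_mce_notify,
 *	};
 *
 *	mce_register_decode_chain(&example_mce_nb);
 *
 * "example_mce_notify" and "example_mce_nb" are made-up names used only
 * for illustration.
 */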
2238
2239/*
2240 * mce_syscore: PM support
2241 */
2242
2243/*
2244 * Disable machine checks on suspend and shutdown. We can't really handle
2245 * them later.
2246 */
2247static void mce_disable_error_reporting(void)
2248{
2249        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2250        int i;
2251
2252        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2253                struct mce_bank *b = &mce_banks[i];
2254
2255                if (b->init)
2256                        wrmsrl(msr_ops.ctl(i), 0);
2257        }
2259}
2260
2261static void vendor_disable_error_reporting(void)
2262{
2263        /*
2264         * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these
2265         * MSRs are socket-wide. Disabling them for just a single offlined CPU
2266         * is bad, since it will inhibit reporting for all shared resources on
2267         * the socket like the last level cache (LLC), the integrated memory
2268         * controller (iMC), etc.
2269         */
2270        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
2271            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
2272            boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2273            boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
2274                return;
2275
2276        mce_disable_error_reporting();
2277}
2278
2279static int mce_syscore_suspend(void)
2280{
2281        vendor_disable_error_reporting();
2282        return 0;
2283}
2284
2285static void mce_syscore_shutdown(void)
2286{
2287        vendor_disable_error_reporting();
2288}
2289
2290/*
2291 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2292 * Only one CPU is active at this time, the others get re-added later using
2293 * CPU hotplug:
2294 */
2295static void mce_syscore_resume(void)
2296{
2297        __mcheck_cpu_init_generic();
2298        __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2299        __mcheck_cpu_init_clear_banks();
2300}
2301
2302static struct syscore_ops mce_syscore_ops = {
2303        .suspend        = mce_syscore_suspend,
2304        .shutdown       = mce_syscore_shutdown,
2305        .resume         = mce_syscore_resume,
2306};
2307
2308/*
2309 * mce_device: Sysfs support
2310 */
2311
2312static void mce_cpu_restart(void *data)
2313{
2314        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2315                return;
2316        __mcheck_cpu_init_generic();
2317        __mcheck_cpu_init_clear_banks();
2318        __mcheck_cpu_init_timer();
2319}
2320
2321/* Reinit MCEs after user configuration changes */
2322static void mce_restart(void)
2323{
2324        mce_timer_delete_all();
2325        on_each_cpu(mce_cpu_restart, NULL, 1);
2326}
2327
2328/* Toggle features for corrected errors */
2329static void mce_disable_cmci(void *data)
2330{
2331        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2332                return;
2333        cmci_clear();
2334}
2335
2336static void mce_enable_ce(void *all)
2337{
2338        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2339                return;
2340        cmci_reenable();
2341        cmci_recheck();
2342        if (all)
2343                __mcheck_cpu_init_timer();
2344}
2345
2346static struct bus_type mce_subsys = {
2347        .name           = "machinecheck",
2348        .dev_name       = "machinecheck",
2349};
2350
2351DEFINE_PER_CPU(struct device *, mce_device);
2352
2353static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
2354{
2355        return container_of(attr, struct mce_bank_dev, attr);
2356}
2357
2358static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2359                         char *buf)
2360{
2361        u8 bank = attr_to_bank(attr)->bank;
2362        struct mce_bank *b;
2363
2364        if (bank >= per_cpu(mce_num_banks, s->id))
2365                return -EINVAL;
2366
2367        b = &per_cpu(mce_banks_array, s->id)[bank];
2368
2369        if (!b->init)
2370                return -ENODEV;
2371
2372        return sprintf(buf, "%llx\n", b->ctl);
2373}
2374
2375static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2376                        const char *buf, size_t size)
2377{
2378        u8 bank = attr_to_bank(attr)->bank;
2379        struct mce_bank *b;
2380        u64 new;
2381
2382        if (kstrtou64(buf, 0, &new) < 0)
2383                return -EINVAL;
2384
2385        if (bank >= per_cpu(mce_num_banks, s->id))
2386                return -EINVAL;
2387
2388        b = &per_cpu(mce_banks_array, s->id)[bank];
2389
2390        if (!b->init)
2391                return -ENODEV;
2392
2393        b->ctl = new;
2394        mce_restart();
2395
2396        return size;
2397}
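/*
 * Illustrative sysfs usage (the "machinecheck" subsys is registered in
 * mcheck_init_device() below, so the per CPU devices show up under
 * /sys/devices/system/machinecheck/):
 *
 *	# cat /sys/devices/system/machinecheck/machinecheck0/bank4
 *	ffffffffffffffff
 *	# echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank4
 *
 * Writing a new control value calls mce_restart() so every CPU reprograms
 * the bank with it.
 */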
2398
2399static ssize_t set_ignore_ce(struct device *s,
2400                             struct device_attribute *attr,
2401                             const char *buf, size_t size)
2402{
2403        u64 new;
2404
2405        if (kstrtou64(buf, 0, &new) < 0)
2406                return -EINVAL;
2407
2408        mutex_lock(&mce_sysfs_mutex);
2409        if (mca_cfg.ignore_ce ^ !!new) {
2410                if (new) {
2411                        /* disable ce features */
2412                        mce_timer_delete_all();
2413                        on_each_cpu(mce_disable_cmci, NULL, 1);
2414                        mca_cfg.ignore_ce = true;
2415                } else {
2416                        /* enable ce features */
2417                        mca_cfg.ignore_ce = false;
2418                        on_each_cpu(mce_enable_ce, (void *)1, 1);
2419                }
2420        }
2421        mutex_unlock(&mce_sysfs_mutex);
2422
2423        return size;
2424}
2425
2426static ssize_t set_cmci_disabled(struct device *s,
2427                                 struct device_attribute *attr,
2428                                 const char *buf, size_t size)
2429{
2430        u64 new;
2431
2432        if (kstrtou64(buf, 0, &new) < 0)
2433                return -EINVAL;
2434
2435        mutex_lock(&mce_sysfs_mutex);
2436        if (mca_cfg.cmci_disabled ^ !!new) {
2437                if (new) {
2438                        /* disable cmci */
2439                        on_each_cpu(mce_disable_cmci, NULL, 1);
2440                        mca_cfg.cmci_disabled = true;
2441                } else {
2442                        /* enable cmci */
2443                        mca_cfg.cmci_disabled = false;
2444                        on_each_cpu(mce_enable_ce, NULL, 1);
2445                }
2446        }
2447        mutex_unlock(&mce_sysfs_mutex);
2448
2449        return size;
2450}
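/*
 * Both flags sit next to the bank files, e.g. (illustrative):
 *
 *	echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 *
 * ignore_ce stops the polling timers and CMCI on all CPUs, while
 * cmci_disabled only toggles CMCI and leaves polling alone.
 */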
2451
2452static ssize_t store_int_with_restart(struct device *s,
2453                                      struct device_attribute *attr,
2454                                      const char *buf, size_t size)
2455{
2456        unsigned long old_check_interval = check_interval;
2457        ssize_t ret = device_store_ulong(s, attr, buf, size);
2458
2459        if (check_interval == old_check_interval)
2460                return ret;
2461
2462        mutex_lock(&mce_sysfs_mutex);
2463        mce_restart();
2464        mutex_unlock(&mce_sysfs_mutex);
2465
2466        return ret;
2467}
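/*
 * check_interval is global but exposed through each per CPU device, so e.g.
 * "echo 30 > /sys/devices/system/machinecheck/machinecheck0/check_interval"
 * (seconds, illustrative) changes it for everyone and restarts the polling
 * timers via mce_restart() above.
 */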
2468
2469static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2470static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2471static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2472static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);
2473
2474static struct dev_ext_attribute dev_attr_check_interval = {
2475        __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2476        &check_interval
2477};
2478
2479static struct dev_ext_attribute dev_attr_ignore_ce = {
2480        __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2481        &mca_cfg.ignore_ce
2482};
2483
2484static struct dev_ext_attribute dev_attr_cmci_disabled = {
2485        __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2486        &mca_cfg.cmci_disabled
2487};
2488
2489static struct device_attribute *mce_device_attrs[] = {
2490        &dev_attr_tolerant.attr,
2491        &dev_attr_check_interval.attr,
2492#ifdef CONFIG_X86_MCELOG_LEGACY
2493        &dev_attr_trigger,
2494#endif
2495        &dev_attr_monarch_timeout.attr,
2496        &dev_attr_dont_log_ce.attr,
2497        &dev_attr_print_all.attr,
2498        &dev_attr_ignore_ce.attr,
2499        &dev_attr_cmci_disabled.attr,
2500        NULL
2501};
2502
2503static cpumask_var_t mce_device_initialized;
2504
2505static void mce_device_release(struct device *dev)
2506{
2507        kfree(dev);
2508}
2509
2510/* Per CPU device init. All of the CPUs still share the same bank device: */
2511static int mce_device_create(unsigned int cpu)
2512{
2513        struct device *dev;
2514        int err;
2515        int i, j;
2516
2517        if (!mce_available(&boot_cpu_data))
2518                return -EIO;
2519
2520        dev = per_cpu(mce_device, cpu);
2521        if (dev)
2522                return 0;
2523
2524        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2525        if (!dev)
2526                return -ENOMEM;
2527        dev->id  = cpu;
2528        dev->bus = &mce_subsys;
2529        dev->release = &mce_device_release;
2530
2531        err = device_register(dev);
2532        if (err) {
2533                put_device(dev);
2534                return err;
2535        }
2536
2537        for (i = 0; mce_device_attrs[i]; i++) {
2538                err = device_create_file(dev, mce_device_attrs[i]);
2539                if (err)
2540                        goto error;
2541        }
2542        for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
2543                err = device_create_file(dev, &mce_bank_devs[j].attr);
2544                if (err)
2545                        goto error2;
2546        }
2547        cpumask_set_cpu(cpu, mce_device_initialized);
2548        per_cpu(mce_device, cpu) = dev;
2549
2550        return 0;
2551error2:
2552        while (--j >= 0)
2553                device_remove_file(dev, &mce_bank_devs[j].attr);
2554error:
2555        while (--i >= 0)
2556                device_remove_file(dev, mce_device_attrs[i]);
2557
2558        device_unregister(dev);
2559
2560        return err;
2561}
2562
2563static void mce_device_remove(unsigned int cpu)
2564{
2565        struct device *dev = per_cpu(mce_device, cpu);
2566        int i;
2567
2568        if (!cpumask_test_cpu(cpu, mce_device_initialized))
2569                return;
2570
2571        for (i = 0; mce_device_attrs[i]; i++)
2572                device_remove_file(dev, mce_device_attrs[i]);
2573
2574        for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
2575                device_remove_file(dev, &mce_bank_devs[i].attr);
2576
2577        device_unregister(dev);
2578        cpumask_clear_cpu(cpu, mce_device_initialized);
2579        per_cpu(mce_device, cpu) = NULL;
2580}
2581
2582/* Make sure there are no machine checks on offlined CPUs. */
2583static void mce_disable_cpu(void)
2584{
2585        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2586                return;
2587
2588        if (!cpuhp_tasks_frozen)
2589                cmci_clear();
2590
2591        vendor_disable_error_reporting();
2592}
2593
2594static void mce_reenable_cpu(void)
2595{
2596        struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2597        int i;
2598
2599        if (!mce_available(raw_cpu_ptr(&cpu_info)))
2600                return;
2601
2602        if (!cpuhp_tasks_frozen)
2603                cmci_reenable();
2604        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2605                struct mce_bank *b = &mce_banks[i];
2606
2607                if (b->init)
2608                        wrmsrl(msr_ops.ctl(i), b->ctl);
2609        }
2610}
2611
2612static int mce_cpu_dead(unsigned int cpu)
2613{
2614        mce_intel_hcpu_update(cpu);
2615
2616        /* intentionally ignoring frozen here */
2617        if (!cpuhp_tasks_frozen)
2618                cmci_rediscover();
2619        return 0;
2620}
2621
2622static int mce_cpu_online(unsigned int cpu)
2623{
2624        struct timer_list *t = this_cpu_ptr(&mce_timer);
2625        int ret;
2626
2627        mce_device_create(cpu);
2628
2629        ret = mce_threshold_create_device(cpu);
2630        if (ret) {
2631                mce_device_remove(cpu);
2632                return ret;
2633        }
2634        mce_reenable_cpu();
2635        mce_start_timer(t);
2636        return 0;
2637}
2638
2639static int mce_cpu_pre_down(unsigned int cpu)
2640{
2641        struct timer_list *t = this_cpu_ptr(&mce_timer);
2642
2643        mce_disable_cpu();
2644        del_timer_sync(t);
2645        mce_threshold_remove_device(cpu);
2646        mce_device_remove(cpu);
2647        return 0;
2648}
2649
2650static __init void mce_init_banks(void)
2651{
2652        int i;
2653
2654        for (i = 0; i < MAX_NR_BANKS; i++) {
2655                struct mce_bank_dev *b = &mce_bank_devs[i];
2656                struct device_attribute *a = &b->attr;
2657
2658                b->bank = i;
2659
2660                sysfs_attr_init(&a->attr);
2661                a->attr.name    = b->attrname;
2662                snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2663
2664                a->attr.mode    = 0644;
2665                a->show         = show_bank;
2666                a->store        = set_bank;
2667        }
2668}
2669
2670/*
2671 * When running on XEN, this initcall is ordered against the XEN mcelog
2672 * initcall:
2673 *
2674 *   device_initcall(xen_late_init_mcelog);
2675 *   device_initcall_sync(mcheck_init_device);
2676 */
2677static __init int mcheck_init_device(void)
2678{
2679        int err;
2680
2681        /*
2682         * Check if we have a spare virtual bit. This will only become
2683         * a problem if/when we move beyond 5-level page tables.
2684         */
2685        MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2686
2687        if (!mce_available(&boot_cpu_data)) {
2688                err = -EIO;
2689                goto err_out;
2690        }
2691
2692        if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2693                err = -ENOMEM;
2694                goto err_out;
2695        }
2696
2697        mce_init_banks();
2698
2699        err = subsys_system_register(&mce_subsys, NULL);
2700        if (err)
2701                goto err_out_mem;
2702
2703        err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2704                                mce_cpu_dead);
2705        if (err)
2706                goto err_out_mem;
2707
2708        /*
2709         * Invokes mce_cpu_online() on all CPUs which are online when
2710         * the state is installed.
2711         */
2712        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2713                                mce_cpu_online, mce_cpu_pre_down);
2714        if (err < 0)
2715                goto err_out_online;
2716
2717        register_syscore_ops(&mce_syscore_ops);
2718
2719        return 0;
2720
2721err_out_online:
2722        cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2723
2724err_out_mem:
2725        free_cpumask_var(mce_device_initialized);
2726
2727err_out:
2728        pr_err("Unable to init MCE device (rc: %d)\n", err);
2729
2730        return err;
2731}
2732device_initcall_sync(mcheck_init_device);
2733
2734/*
2735 * Old style boot options parsing. Only for compatibility.
2736 */
2737static int __init mcheck_disable(char *str)
2738{
2739        mca_cfg.disabled = 1;
2740        return 1;
2741}
2742__setup("nomce", mcheck_disable);
2743
2744#ifdef CONFIG_DEBUG_FS
2745struct dentry *mce_get_debugfs_dir(void)
2746{
2747        static struct dentry *dmce;
2748
2749        if (!dmce)
2750                dmce = debugfs_create_dir("mce", NULL);
2751
2752        return dmce;
2753}
2754
2755static void mce_reset(void)
2756{
2757        cpu_missing = 0;
2758        atomic_set(&mce_fake_panicked, 0);
2759        atomic_set(&mce_executing, 0);
2760        atomic_set(&mce_callin, 0);
2761        atomic_set(&global_nwo, 0);
2762        cpumask_setall(&mce_missing_cpus);
2763}
2764
2765static int fake_panic_get(void *data, u64 *val)
2766{
2767        *val = fake_panic;
2768        return 0;
2769}
2770
2771static int fake_panic_set(void *data, u64 val)
2772{
2773        mce_reset();
2774        fake_panic = val;
2775        return 0;
2776}
2777
2778DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
2779                         "%llu\n");
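/*
 * Illustrative: with debugfs mounted this is /sys/kernel/debug/mce/fake_panic.
 * Writing a non-zero value is intended to make mce_panic() only log instead
 * of actually panicking, and each write also resets the rendezvous state via
 * mce_reset() above.
 */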
2780
2781static void __init mcheck_debugfs_init(void)
2782{
2783        struct dentry *dmce;
2784
2785        dmce = mce_get_debugfs_dir();
2786        debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL,
2787                                   &fake_panic_fops);
2788}
2789#else
2790static void __init mcheck_debugfs_init(void) { }
2791#endif
2792
2793static int __init mcheck_late_init(void)
2794{
2795        if (mca_cfg.recovery)
2796                enable_copy_mc_fragile();
2797
2798        mcheck_debugfs_init();
2799
2800        /*
2801         * Flush out everything that has been logged during early boot, now that
2802         * everything has been initialized (workqueues, decoders, ...).
2803         */
2804        mce_schedule_work();
2805
2806        return 0;
2807}
2808late_initcall(mcheck_late_init);
2809