linux/arch/x86/kernel/quirks.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void quirk_intel_irqbalance(struct pci_dev *dev)
{
        u8 config;
        u16 word;

        /* BIOS may enable hardware IRQ balancing for
         * E7520/E7320/E7525(revision ID 0x9 and below)
         * based platforms.
         * Disable SW irqbalance/affinity on those platforms.
         */
        if (dev->revision > 0x9)
                return;

        /* enable access to config space */
        pci_read_config_byte(dev, 0xf4, &config);
        pci_write_config_byte(dev, 0xf4, config|0x2);

        /*
         * read xTPR register.  We may not have a pci_dev for device 8
         * because it might be hidden until the above write.
         */
        pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

        if (!(word & (1 << 13))) {
                dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
                        "disabling irq balancing and affinity\n");
                noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
                no_irq_affinity = 1;
#endif
        }

        /* put back the original value for config space */
        if (!(config & 0x2))
                pci_write_config_byte(dev, 0xf4, config);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
                        quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
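/*
 * Physical address of an HPET that one of the quirks below force-enabled.
 * The HPET setup code (see <asm/hpet.h>) picks this up when the firmware
 * did not report an HPET.
 */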
unsigned long force_hpet_address;

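/*
 * Which chipset-specific resume hook force_hpet_resume() should run after
 * a suspend/resume cycle; set by the matching force-enable quirk below.
 */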
static enum {
        NONE_FORCE_HPET_RESUME,
        OLD_ICH_FORCE_HPET_RESUME,
        ICH_FORCE_HPET_RESUME,
        VT8237_FORCE_HPET_RESUME,
        NVIDIA_FORCE_HPET_RESUME,
        ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

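/*
 * Mapping of the ICH Root Complex Base Address (RCBA) space, set up by
 * ich_force_enable_hpet() and reused by ich_force_hpet_resume().
 */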
static void __iomem *rcba_base;

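/*
 * Re-assert the HPET enable bit in the HPET configuration (HPTC) register
 * at RCBA + 0x3404 at resume, for ICH chipsets that were force-enabled at
 * boot.
 */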
static void ich_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address)
                return;

        BUG_ON(rcba_base == NULL);

        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                /* HPET disabled in HPTC. Trying to enable */
                writel(val | 0x80, rcba_base + 0x3404);
        }

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80))
                BUG();
        else
                printk(KERN_DEBUG "Force enabled HPET at resume\n");

        return;
}

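/*
 * Locate the RCBA from config offset 0xF0, map it, and force the HPET
 * enable bit (bit 7) in the HPTC register at RCBA + 0x3404.  Bits 1:0 of
 * HPTC select one of four 4 KiB-spaced base addresses starting at
 * 0xFED00000.
 */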
static void ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(rcba);
        int err = 0;

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xF0, &rcba);
        rcba &= 0xFFFFC000;
        if (rcba == 0) {
                dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
                        "cannot force enable HPET\n");
                return;
        }

        /* use bits 31:14, 16 kB aligned */
        rcba_base = ioremap_nocache(rcba, 0x4000);
        if (rcba_base == NULL) {
                dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
                        "cannot force enable HPET\n");
                return;
        }

        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);

        if (val & 0x80) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                iounmap(rcba_base);
                return;
        }

        /* HPET disabled in HPTC. Trying to enable */
        writel(val | 0x80, rcba_base + 0x3404);

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                err = 1;
        } else {
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
        }

        if (err) {
                force_hpet_address = 0;
                iounmap(rcba_base);
                dev_printk(KERN_DEBUG, &dev->dev,
                        "Failed to force enable HPET\n");
        } else {
                force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
        }
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,   /* ICH10 */
                         ich_force_enable_hpet);

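/* PCI device cached by the force-enable quirks for use by the resume hooks. */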
static struct pci_dev *cached_dev;

static void hpet_print_force_info(void)
{
        printk(KERN_INFO "HPET not enabled in BIOS. "
               "You might try hpet=force boot option\n");
}

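/*
 * Resume counterpart of old_ich_force_enable_hpet(): rewrite GEN_CNTL
 * (config offset 0xD0) so that bit 17 (HPET enable) is set with base
 * address selection 0, i.e. 0xFED00000.
 */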
static void old_ich_force_hpet_resume(void)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (!force_hpet_address || !cached_dev)
                return;

        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);

        pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val == 0x4)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xD0, &gen_cntl);
        /*
         * Bit 17 is the HPET enable bit.
         * Bits 16:15 control the HPET base address.
         */
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                        force_hpet_address);
                return;
        }

        /*
         * HPET is disabled. Try enabling it at 0xFED00000 and check
         * whether it sticks.
         */
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);
        pci_write_config_dword(dev, 0xD0, gen_cntl);

        pci_read_config_dword(dev, 0xD0, &gen_cntl);

        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset feature. Only force-enable the HPET when the user
 * explicitly asked for it (hpet=force).
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
        if (hpet_force_user)
                old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
                         old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
                         old_ich_force_enable_hpet);


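/*
 * Resume hook for the VIA bridges below: rewrite config register 0x68 with
 * the HPET base (0xFED00000) and the enable bit (bit 7).
 */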
static void vt8237_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address || !cached_dev)
                return;

        val = 0xfed00000 | 0x80;
        pci_write_config_dword(cached_dev, 0x68, val);

        pci_read_config_dword(cached_dev, 0x68, &val);
        if (val & 0x80)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }

        pci_read_config_dword(dev, 0x68, &val);
        /*
         * Bit 7 is the HPET enable bit.
         * Bits 31:10 hold the HPET base address (contrary to what the
         * datasheet claims).
         */
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                        force_hpet_address);
                return;
        }

        /*
         * HPET is disabled. Try enabling it at 0xFED00000 and check
         * whether it sticks.
         */
        val = 0xfed00000 | 0x80;
        pci_write_config_dword(dev, 0x68, val);

        pci_read_config_dword(dev, 0x68, &val);
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
                         vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
                         vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
                         vt8237_force_enable_hpet);

static void ati_force_hpet_resume(void)
{
        pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

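/*
 * Read the SB4x0 southbridge revision.  The writes to config offsets 0xac
 * and 0x70 appear to expose the real revision ID in the standard revision
 * register at offset 0x08 (undocumented chipset behaviour).
 */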
static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
        int err = 0;
        u32 d = 0;
        u8  b = 0;

        err = pci_read_config_byte(dev, 0xac, &b);
        b &= ~(1<<5);
        err |= pci_write_config_byte(dev, 0xac, b);
        err |= pci_read_config_dword(dev, 0x70, &d);
        d |= 1<<8;
        err |= pci_write_config_dword(dev, 0x70, d);
        err |= pci_read_config_dword(dev, 0x8, &d);
        d &= 0xff;
        dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

        WARN_ON_ONCE(err);

        return d;
}

static void ati_force_enable_hpet(struct pci_dev *dev)
{
        u32 d, val;
        u8  b;

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }

        d = ati_ixp4x0_rev(dev);
        if (d < 0x82)
                return;

        /* base address */
        pci_write_config_dword(dev, 0x14, 0xfed00000);
        pci_read_config_dword(dev, 0x14, &val);

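        /*
         * 0xcd6/0xcd7 form the southbridge's power-management index/data
         * I/O port pair; PM register 0x72, bit 0, is poked below to enable
         * the HPET interrupt, and the write is verified to stick.
         */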
        /* enable interrupt */
        outb(0x72, 0xcd6); b = inb(0xcd7);
        b |= 0x1;
        outb(0x72, 0xcd6); outb(b, 0xcd7);
        outb(0x72, 0xcd6); b = inb(0xcd7);
        if (!(b & 0x1))
                return;
        pci_read_config_dword(dev, 0x64, &d);
        d |= (1<<10);
        pci_write_config_dword(dev, 0x64, d);
        pci_read_config_dword(dev, 0x64, &d);
        if (!(d & (1<<10)))
                return;

        force_hpet_address = val;
        force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
                   force_hpet_address);
        cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
                         ati_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
        pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

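/*
 * Config register 0x44 holds the HPET base address with bit 0 acting as
 * the enable bit, hence the 0xfed00001 write and the ~1 mask below.
 */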
static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }

        pci_write_config_dword(dev, 0x44, 0xfed00001);
        pci_read_config_dword(dev, 0x44, &val);
        force_hpet_address = val & 0xfffffffe;
        force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
                force_hpet_address);
        cached_dev = dev;
        return;
}

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
                        nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
                        nvidia_force_enable_hpet);

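/*
 * Re-apply whichever force-enable quirk was used at boot; called from the
 * HPET resume path.
 */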
void force_hpet_resume(void)
{
        switch (force_hpet_resume_type) {
        case ICH_FORCE_HPET_RESUME:
                ich_force_hpet_resume();
                return;
        case OLD_ICH_FORCE_HPET_RESUME:
                old_ich_force_hpet_resume();
                return;
        case VT8237_FORCE_HPET_RESUME:
                vt8237_force_hpet_resume();
                return;
        case NVIDIA_FORCE_HPET_RESUME:
                nvidia_force_hpet_resume();
                return;
        case ATI_FORCE_HPET_RESUME:
                ati_force_hpet_resume();
                return;
        default:
                break;
        }
}

/*
 * According to the datasheet e6xx systems have the HPET hardwired to
 * 0xfed00000
 */
static void e6xx_force_enable_hpet(struct pci_dev *dev)
{
        if (hpet_address || force_hpet_address)
                return;

        force_hpet_address = 0xFED00000;
        force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                "0x%lx\n", force_hpet_address);
        return;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
                         e6xx_force_enable_hpet);

/*
 * HPET MSI on some boards (ATI SB700/SB800) has a side effect on
 * floppy DMA. Disable HPET MSI on such platforms.
 * See erratum #27 (Misinterpreted MSI Requests May Result in
 * Corrupted LPC DMA Data) in AMD Publication #46837,
 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
 */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
        hpet_msi_disable = true;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
                         force_disable_hpet_msi);

#endif

#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
static void quirk_amd_nb_node(struct pci_dev *dev)
{
        struct pci_dev *nb_ht;
        unsigned int devfn;
        u32 node;
        u32 val;

        devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
        nb_ht = pci_get_slot(dev->bus, devfn);
        if (!nb_ht)
                return;

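        /*
         * Function 0 of this node's northbridge holds the node ID in
         * bits 2:0 of config register 0x60.
         */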
        pci_read_config_dword(nb_ht, 0x60, &val);
        node = pcibus_to_node(dev->bus) | (val & 7);
        /*
         * Some hardware may return an invalid node ID,
         * so check it first:
         */
        if (node_online(node))
                set_dev_node(&dev->dev, node);
        pci_dev_put(nb_ht);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
                        quirk_amd_nb_node);

#endif

#ifdef CONFIG_PCI
/*
 * The processor does not ensure that the DRAM scrub read/write sequence
 * is atomic wrt accesses to the CC6 save state area. Therefore, if a
 * concurrent scrub read/write access hits the same address, the entry may
 * appear as if it were not written. This quirk applies to Fam16h models
 * 00h-0Fh.
 *
 * See "Revision Guide" for AMD F16h models 00h-0fh,
 * document 51810 rev. 3.04, Nov 2013.
 */
static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
{
        u32 val;

        /*
         * Suggested workaround:
         * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
         */
        pci_read_config_dword(dev, 0x58, &val);
        if (val & 0x1F) {
                val &= ~(0x1F);
                pci_write_config_dword(dev, 0x58, val);
        }

        pci_read_config_dword(dev, 0x5C, &val);
        if (val & BIT(0)) {
                val &= ~BIT(0);
                pci_write_config_dword(dev, 0x5c, val);
        }
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
                        amd_disable_seq_and_redirect_scrub);

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
#include <linux/jump_label.h>
#include <asm/string_64.h>
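
/*
 * mcsafe_key (declared in <asm/string_64.h>) gates the machine-check-safe
 * memcpy variant; the quirks below enable it on Xeon parts whose capability
 * registers indicate that memory machine check recovery is supported.
 */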

/* Ivy Bridge, Haswell, Broadwell */
static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
{
        u32 capid0;

        pci_read_config_dword(pdev, 0x84, &capid0);

        if (capid0 & 0x10)
                static_branch_inc(&mcsafe_key);
}

/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
        u32 capid0, capid5;

        pci_read_config_dword(pdev, 0x84, &capid0);
        pci_read_config_dword(pdev, 0x98, &capid5);

        /*
         * CAPID0{7:6} indicate whether this is an advanced RAS SKU
         * CAPID5{8:5} indicate that various NVDIMM usage modes are
         * enabled, so memory machine check recovery is also enabled.
         */
        if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
                static_branch_inc(&mcsafe_key);

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
#endif

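/*
 * True when running on Apple hardware (set from DMI below), so that other
 * code can apply Apple-specific quirks.
 */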
bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);

void __init early_platform_quirks(void)
{
        x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
                            dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
}
