linux/arch/x86/kernel/acpi/boot.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
   4 *
   5 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   6 *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
   7 */
   8
   9#include <linux/init.h>
  10#include <linux/acpi.h>
  11#include <linux/acpi_pmtmr.h>
  12#include <linux/efi.h>
  13#include <linux/cpumask.h>
  14#include <linux/export.h>
  15#include <linux/dmi.h>
  16#include <linux/irq.h>
  17#include <linux/slab.h>
  18#include <linux/memblock.h>
  19#include <linux/ioport.h>
  20#include <linux/pci.h>
  21#include <linux/efi-bgrt.h>
  22#include <linux/serial_core.h>
  23
  24#include <asm/e820/api.h>
  25#include <asm/irqdomain.h>
  26#include <asm/pci_x86.h>
  27#include <asm/pgtable.h>
  28#include <asm/io_apic.h>
  29#include <asm/apic.h>
  30#include <asm/io.h>
  31#include <asm/mpspec.h>
  32#include <asm/smp.h>
  33#include <asm/i8259.h>
  34#include <asm/setup.h>
  35
  36#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
  37static int __initdata acpi_force = 0;
  38int acpi_disabled;
  39EXPORT_SYMBOL(acpi_disabled);
  40
  41#ifdef  CONFIG_X86_64
  42# include <asm/proto.h>
  43#endif                          /* CONFIG_X86_64 */
  44
  45#define PREFIX                  "ACPI: "
  46
  47int acpi_noirq;                         /* skip ACPI IRQ initialization */
  48static int acpi_nobgrt;                 /* skip ACPI BGRT */
  49int acpi_pci_disabled;          /* skip ACPI PCI scan and IRQ initialization */
  50EXPORT_SYMBOL(acpi_pci_disabled);
  51
  52int acpi_lapic;
  53int acpi_ioapic;
  54int acpi_strict;
  55int acpi_disable_cmcff;
  56
  57/* ACPI SCI override configuration */
  58u8 acpi_sci_flags __initdata;
  59u32 acpi_sci_override_gsi __initdata = INVALID_ACPI_IRQ;
  60int acpi_skip_timer_override __initdata;
  61int acpi_use_timer_override __initdata;
  62int acpi_fix_pin2_polarity __initdata;
  63
  64#ifdef CONFIG_X86_LOCAL_APIC
  65static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
  66#endif
  67
  68#ifdef CONFIG_X86_IO_APIC
  69/*
  70 * Locks related to IOAPIC hotplug
  71 * Hotplug side:
  72 *      ->device_hotplug_lock
  73 *              ->acpi_ioapic_lock
  74 *                      ->ioapic_lock
  75 * Interrupt mapping side:
  76 *      ->acpi_ioapic_lock
  77 *              ->ioapic_mutex
  78 *                      ->ioapic_lock
  79 */
  80static DEFINE_MUTEX(acpi_ioapic_lock);
  81#endif
  82
  83/* --------------------------------------------------------------------------
  84                              Boot-time Configuration
  85   -------------------------------------------------------------------------- */
  86
  87/*
  88 * The default interrupt routing model is PIC (8259).  This gets
  89 * overridden if IOAPICs are enumerated (below).
  90 */
  91enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  92
  93
  94/*
  95 * ISA irqs by default are the first 16 gsis but can be
  96 * any gsi as specified by an interrupt source override.
  97 */
  98static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
  99        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
 100};
 101
 102/*
 103 * This is just a simple wrapper around early_memremap(),
 104 * with sanity checks for phys == 0 and size == 0.
 105 */
 106void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
 107{
 108
 109        if (!phys || !size)
 110                return NULL;
 111
 112        return early_memremap(phys, size);
 113}
 114
 115void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
 116{
 117        if (!map || !size)
 118                return;
 119
 120        early_memunmap(map, size);
 121}
 122
 123#ifdef CONFIG_X86_LOCAL_APIC
 124static int __init acpi_parse_madt(struct acpi_table_header *table)
 125{
 126        struct acpi_table_madt *madt = NULL;
 127
 128        if (!boot_cpu_has(X86_FEATURE_APIC))
 129                return -EINVAL;
 130
 131        madt = (struct acpi_table_madt *)table;
 132        if (!madt) {
 133                printk(KERN_WARNING PREFIX "Unable to map MADT\n");
 134                return -ENODEV;
 135        }
 136
 137        if (madt->address) {
 138                acpi_lapic_addr = (u64) madt->address;
 139
 140                printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
 141                       madt->address);
 142        }
 143
 144        default_acpi_madt_oem_check(madt->header.oem_id,
 145                                    madt->header.oem_table_id);
 146
 147        return 0;
 148}
 149
 150/**
 151 * acpi_register_lapic - register a local apic and generate a logical cpu number
 152 * @id: local apic id to register
 153 * @acpiid: ACPI id to register
 154 * @enabled: whether this cpu is enabled
 155 *
 156 * Returns the logical cpu number which maps to the local apic
 157 */
 158static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
 159{
 160        unsigned int ver = 0;
 161        int cpu;
 162
 163        if (id >= MAX_LOCAL_APIC) {
 164                printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
 165                return -EINVAL;
 166        }
 167
 168        if (!enabled) {
 169                ++disabled_cpus;
 170                return -EINVAL;
 171        }
 172
 173        if (boot_cpu_physical_apicid != -1U)
 174                ver = boot_cpu_apic_version;
 175
 176        cpu = generic_processor_info(id, ver);
 177        if (cpu >= 0)
 178                early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
 179
 180        return cpu;
 181}
 182
 183static int __init
 184acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
 185{
 186        struct acpi_madt_local_x2apic *processor = NULL;
 187#ifdef CONFIG_X86_X2APIC
 188        u32 apic_id;
 189        u8 enabled;
 190#endif
 191
 192        processor = (struct acpi_madt_local_x2apic *)header;
 193
 194        if (BAD_MADT_ENTRY(processor, end))
 195                return -EINVAL;
 196
 197        acpi_table_print_madt_entry(&header->common);
 198
 199#ifdef CONFIG_X86_X2APIC
 200        apic_id = processor->local_apic_id;
 201        enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
 202
 203        /* Ignore invalid ID */
 204        if (apic_id == 0xffffffff)
 205                return 0;
 206
 207        /*
 208         * We need to register disabled CPUs as well to permit
 209         * counting disabled CPUs. This allows us to size
 210         * cpus_possible_map more accurately, so that we do not
 211         * have to preallocate memory for all NR_CPUS
 212         * when we use CPU hotplug.
 213         */
 214        if (!apic->apic_id_valid(apic_id)) {
 215                if (enabled)
 216                        pr_warn(PREFIX "x2apic entry ignored\n");
 217                return 0;
 218        }
 219
 220        acpi_register_lapic(apic_id, processor->uid, enabled);
 221#else
 222        printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
 223#endif
 224
 225        return 0;
 226}
 227
 228static int __init
 229acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
 230{
 231        struct acpi_madt_local_apic *processor = NULL;
 232
 233        processor = (struct acpi_madt_local_apic *)header;
 234
 235        if (BAD_MADT_ENTRY(processor, end))
 236                return -EINVAL;
 237
 238        acpi_table_print_madt_entry(&header->common);
 239
 240        /* Ignore invalid ID */
 241        if (processor->id == 0xff)
 242                return 0;
 243
 244        /*
 245         * We need to register disabled CPUs as well to permit
 246         * counting disabled CPUs. This allows us to size
 247         * cpus_possible_map more accurately, so that we do not
 248         * have to preallocate memory for all NR_CPUS
 249         * when we use CPU hotplug.
 250         */
 251        acpi_register_lapic(processor->id,      /* APIC ID */
 252                            processor->processor_id, /* ACPI ID */
 253                            processor->lapic_flags & ACPI_MADT_ENABLED);
 254
 255        return 0;
 256}
 257
 258static int __init
 259acpi_parse_sapic(union acpi_subtable_headers *header, const unsigned long end)
 260{
 261        struct acpi_madt_local_sapic *processor = NULL;
 262
 263        processor = (struct acpi_madt_local_sapic *)header;
 264
 265        if (BAD_MADT_ENTRY(processor, end))
 266                return -EINVAL;
 267
 268        acpi_table_print_madt_entry(&header->common);
 269
 270        acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
 271                            processor->processor_id, /* ACPI ID */
 272                            processor->lapic_flags & ACPI_MADT_ENABLED);
 273
 274        return 0;
 275}
 276
 277static int __init
 278acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header,
 279                          const unsigned long end)
 280{
 281        struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
 282
 283        lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
 284
 285        if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
 286                return -EINVAL;
 287
 288        acpi_table_print_madt_entry(&header->common);
 289
 290        acpi_lapic_addr = lapic_addr_ovr->address;
 291
 292        return 0;
 293}
 294
 295static int __init
 296acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
 297                      const unsigned long end)
 298{
 299        struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;
 300
 301        x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header;
 302
 303        if (BAD_MADT_ENTRY(x2apic_nmi, end))
 304                return -EINVAL;
 305
 306        acpi_table_print_madt_entry(&header->common);
 307
 308        if (x2apic_nmi->lint != 1)
 309                printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
 310
 311        return 0;
 312}
 313
 314static int __init
 315acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end)
 316{
 317        struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
 318
 319        lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
 320
 321        if (BAD_MADT_ENTRY(lapic_nmi, end))
 322                return -EINVAL;
 323
 324        acpi_table_print_madt_entry(&header->common);
 325
 326        if (lapic_nmi->lint != 1)
 327                printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
 328
 329        return 0;
 330}
 331
 332#endif                          /*CONFIG_X86_LOCAL_APIC */
 333
 334#ifdef CONFIG_X86_IO_APIC
 335#define MP_ISA_BUS              0
 336
 337static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
 338                                                u8 trigger, u32 gsi);
 339
 340static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 341                                          u32 gsi)
 342{
 343        /*
 344         * Check bus_irq boundary.
 345         */
 346        if (bus_irq >= NR_IRQS_LEGACY) {
 347                pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
 348                return;
 349        }
 350
 351        /*
 352         * TBD: This check is for faulty timer entries, where the override
 353         *      erroneously sets the trigger to level, resulting in a HUGE
 354         *      increase of timer interrupts!
 355         */
 356        if ((bus_irq == 0) && (trigger == 3))
 357                trigger = 1;
 358
 359        if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0)
 360                return;
 361        /*
 362         * Reset the default identity mapping if gsi is also a legacy IRQ,
 363         * otherwise there will be more than one entry with the same GSI
 364         * and acpi_isa_irq_to_gsi() may give a wrong result.
 365         */
 366        if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi)
 367                isa_irq_to_gsi[gsi] = INVALID_ACPI_IRQ;
 368        isa_irq_to_gsi[bus_irq] = gsi;
 369}
 370
 371static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
 372                        int polarity)
 373{
 374#ifdef CONFIG_X86_MPPARSE
 375        struct mpc_intsrc mp_irq;
 376        struct pci_dev *pdev;
 377        unsigned char number;
 378        unsigned int devfn;
 379        int ioapic;
 380        u8 pin;
 381
 382        if (!acpi_ioapic)
 383                return 0;
 384        if (!dev || !dev_is_pci(dev))
 385                return 0;
 386
 387        pdev = to_pci_dev(dev);
 388        number = pdev->bus->number;
 389        devfn = pdev->devfn;
 390        pin = pdev->pin;
 391        /* Create the entry exactly as it would appear in the MP table */
 392        mp_irq.type = MP_INTSRC;
 393        mp_irq.irqtype = mp_INT;
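            /*
             * Encode the entry using the MP specification's INTSRC layout:
             * irqflag bits 1:0 carry the polarity (01 = active high,
             * 11 = active low) and bits 3:2 the trigger mode (01 = edge,
             * 11 = level); for a PCI source bus, srcbusirq packs the device
             * number into bits 6:2 and the INT pin (INTA#..INTD# as 0..3)
             * into bits 1:0.
             */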
 394        mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
 395                                (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
 396        mp_irq.srcbus = number;
 397        mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
 398        ioapic = mp_find_ioapic(gsi);
 399        mp_irq.dstapic = mpc_ioapic_id(ioapic);
 400        mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
 401
 402        mp_save_irq(&mp_irq);
 403#endif
 404        return 0;
 405}
 406
 407static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
 408                                                u8 trigger, u32 gsi)
 409{
 410        struct mpc_intsrc mp_irq;
 411        int ioapic, pin;
 412
 413        /* Convert 'gsi' to 'ioapic.pin' (INTIN#) */
 414        ioapic = mp_find_ioapic(gsi);
 415        if (ioapic < 0) {
 416                pr_warn("Failed to find ioapic for gsi : %u\n", gsi);
 417                return ioapic;
 418        }
 419
 420        pin = mp_find_ioapic_pin(ioapic, gsi);
 421
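            /*
             * Build an MP-table interrupt source entry for this ISA IRQ;
             * 'trigger' and 'polarity' are already in MP/MADT INTI flag
             * encoding (1 = edge / active high, 3 = level / active low).
             */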
 422        mp_irq.type = MP_INTSRC;
 423        mp_irq.irqtype = mp_INT;
 424        mp_irq.irqflag = (trigger << 2) | polarity;
 425        mp_irq.srcbus = MP_ISA_BUS;
 426        mp_irq.srcbusirq = bus_irq;
 427        mp_irq.dstapic = mpc_ioapic_id(ioapic);
 428        mp_irq.dstirq = pin;
 429
 430        mp_save_irq(&mp_irq);
 431
 432        return 0;
 433}
 434
 435static int __init
 436acpi_parse_ioapic(union acpi_subtable_headers * header, const unsigned long end)
 437{
 438        struct acpi_madt_io_apic *ioapic = NULL;
 439        struct ioapic_domain_cfg cfg = {
 440                .type = IOAPIC_DOMAIN_DYNAMIC,
 441                .ops = &mp_ioapic_irqdomain_ops,
 442        };
 443
 444        ioapic = (struct acpi_madt_io_apic *)header;
 445
 446        if (BAD_MADT_ENTRY(ioapic, end))
 447                return -EINVAL;
 448
 449        acpi_table_print_madt_entry(&header->common);
 450
 451        /* Statically assign IRQ numbers for IOAPICs hosting legacy IRQs */
 452        if (ioapic->global_irq_base < nr_legacy_irqs())
 453                cfg.type = IOAPIC_DOMAIN_LEGACY;
 454
 455        mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base,
 456                           &cfg);
 457
 458        return 0;
 459}
 460
 461/*
 462 * Parse Interrupt Source Override for the ACPI SCI
 463 */
 464static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi)
 465{
 466        if (trigger == 0)       /* compatible SCI trigger is level */
 467                trigger = 3;
 468
 469        if (polarity == 0)      /* compatible SCI polarity is low */
 470                polarity = 3;
 471
 472        /* Command-line override via acpi_sci= */
 473        if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
 474                trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
 475
 476        if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
 477                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 478
 479        if (bus_irq < NR_IRQS_LEGACY)
 480                mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
 481        else
 482                mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi);
 483
 484        acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 485
 486        /*
 487         * Stash the override GSI to indicate we've been here
 488         * and for a later update of acpi_gbl_FADT
 489         */
 490        acpi_sci_override_gsi = gsi;
 491        return;
 492}
 493
 494static int __init
 495acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
 496                       const unsigned long end)
 497{
 498        struct acpi_madt_interrupt_override *intsrc = NULL;
 499
 500        intsrc = (struct acpi_madt_interrupt_override *)header;
 501
 502        if (BAD_MADT_ENTRY(intsrc, end))
 503                return -EINVAL;
 504
 505        acpi_table_print_madt_entry(&header->common);
 506
 507        if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
 508                acpi_sci_ioapic_setup(intsrc->source_irq,
 509                                      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
 510                                      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
 511                                      intsrc->global_irq);
 512                return 0;
 513        }
 514
 515        if (intsrc->source_irq == 0) {
 516                if (acpi_skip_timer_override) {
 517                        printk(PREFIX "BIOS IRQ0 override ignored.\n");
 518                        return 0;
 519                }
 520
 521                if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
 522                        && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
 523                        intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
 524                        printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
 525                }
 526        }
 527
 528        mp_override_legacy_irq(intsrc->source_irq,
 529                                intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
 530                                (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
 531                                intsrc->global_irq);
 532
 533        return 0;
 534}
 535
 536static int __init
 537acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end)
 538{
 539        struct acpi_madt_nmi_source *nmi_src = NULL;
 540
 541        nmi_src = (struct acpi_madt_nmi_source *)header;
 542
 543        if (BAD_MADT_ENTRY(nmi_src, end))
 544                return -EINVAL;
 545
 546        acpi_table_print_madt_entry(&header->common);
 547
 548        /* TBD: Support nmisrc entries? */
 549
 550        return 0;
 551}
 552
 553#endif                          /* CONFIG_X86_IO_APIC */
 554
 555/*
 556 * acpi_pic_sci_set_trigger()
 557 *
 558 * use ELCR to set PIC-mode trigger type for SCI
 559 *
 560 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's,
 561 * it may require Edge Trigger -- use "acpi_sci=edge"
 562 *
 563 * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
 564 * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
 565 * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
 566 * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
 567 */
 568
 569void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 570{
 571        unsigned int mask = 1 << irq;
 572        unsigned int old, new;
 573
 574        /* Read the old ELCR mask */
 575        old = inb(0x4d0) | (inb(0x4d1) << 8);
 576
 577        /*
 578         * If we use ACPI to set PCI IRQs, then we should clear ELCR
 579         * since we will set it correctly as we enable the PCI irq
 580         * routing.
 581         */
 582        new = acpi_noirq ? old : 0;
 583
 584        /*
 585         * Update SCI information in the ELCR; it isn't in the PCI
 586         * routing tables.
 587         */
 588        switch (trigger) {
 589        case 1:         /* Edge - clear */
 590                new &= ~mask;
 591                break;
 592        case 3:         /* Level - set */
 593                new |= mask;
 594                break;
 595        }
 596
 597        if (old == new)
 598                return;
 599
 600        printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
 601        outb(new, 0x4d0);
 602        outb(new >> 8, 0x4d1);
 603}
 604
 605int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 606{
 607        int rc, irq, trigger, polarity;
 608
 609        if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
 610                *irqp = gsi;
 611                return 0;
 612        }
 613
 614        rc = acpi_get_override_irq(gsi, &trigger, &polarity);
 615        if (rc)
 616                return rc;
 617
 618        trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
 619        polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 620        irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
 621        if (irq < 0)
 622                return irq;
 623
 624        *irqp = irq;
 625        return 0;
 626}
 627EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 628
 629int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
 630{
 631        if (isa_irq < nr_legacy_irqs() &&
 632            isa_irq_to_gsi[isa_irq] != INVALID_ACPI_IRQ) {
 633                *gsi = isa_irq_to_gsi[isa_irq];
 634                return 0;
 635        }
 636
 637        return -1;
 638}
 639
 640static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
 641                                 int trigger, int polarity)
 642{
 643#ifdef CONFIG_PCI
 644        /*
 645         * Make sure all (legacy) PCI IRQs are set as level-triggered.
 646         */
 647        if (trigger == ACPI_LEVEL_SENSITIVE)
 648                elcr_set_level_irq(gsi);
 649#endif
 650
 651        return gsi;
 652}
 653
 654#ifdef CONFIG_X86_LOCAL_APIC
 655static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
 656                                    int trigger, int polarity)
 657{
 658        int irq = gsi;
 659#ifdef CONFIG_X86_IO_APIC
 660        int node;
 661        struct irq_alloc_info info;
 662
 663        node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
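            /*
             * Convert the ACPI trigger/polarity constants into the 0/1
             * encoding used by ioapic_set_alloc_attr() (1 = level
             * triggered / active low).
             */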
 664        trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
 665        polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
 666        ioapic_set_alloc_attr(&info, node, trigger, polarity);
 667
 668        mutex_lock(&acpi_ioapic_lock);
 669        irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
 670        /* Don't set up the ACPI SCI because it's already set up */
 671        if (irq >= 0 && enable_update_mptable && gsi != acpi_gbl_FADT.sci_interrupt)
 672                mp_config_acpi_gsi(dev, gsi, trigger, polarity);
 673        mutex_unlock(&acpi_ioapic_lock);
 674#endif
 675
 676        return irq;
 677}
 678
 679static void acpi_unregister_gsi_ioapic(u32 gsi)
 680{
 681#ifdef CONFIG_X86_IO_APIC
 682        int irq;
 683
 684        mutex_lock(&acpi_ioapic_lock);
 685        irq = mp_map_gsi_to_irq(gsi, 0, NULL);
 686        if (irq > 0)
 687                mp_unmap_irq(irq);
 688        mutex_unlock(&acpi_ioapic_lock);
 689#endif
 690}
 691#endif
 692
 693int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
 694                           int trigger, int polarity) = acpi_register_gsi_pic;
 695void (*__acpi_unregister_gsi)(u32 gsi) = NULL;
 696
 697#ifdef CONFIG_ACPI_SLEEP
 698int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel;
 699#else
 700int (*acpi_suspend_lowlevel)(void);
 701#endif
 702
 703/*
 704 * success: return IRQ number (>=0)
 705 * failure: return < 0
 706 */
 707int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 708{
 709        return __acpi_register_gsi(dev, gsi, trigger, polarity);
 710}
 711EXPORT_SYMBOL_GPL(acpi_register_gsi);
 712
 713void acpi_unregister_gsi(u32 gsi)
 714{
 715        if (__acpi_unregister_gsi)
 716                __acpi_unregister_gsi(gsi);
 717}
 718EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
 719
 720#ifdef CONFIG_X86_LOCAL_APIC
 721static void __init acpi_set_irq_model_ioapic(void)
 722{
 723        acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
 724        __acpi_register_gsi = acpi_register_gsi_ioapic;
 725        __acpi_unregister_gsi = acpi_unregister_gsi_ioapic;
 726        acpi_ioapic = 1;
 727}
 728#endif
 729
 730/*
 731 *  ACPI based hotplug support for CPU
 732 */
 733#ifdef CONFIG_ACPI_HOTPLUG_CPU
 734#include <acpi/processor.h>
 735
 736static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 737{
 738#ifdef CONFIG_ACPI_NUMA
 739        int nid;
 740
 741        nid = acpi_get_node(handle);
 742        if (nid != NUMA_NO_NODE) {
 743                set_apicid_to_node(physid, nid);
 744                numa_set_node(cpu, nid);
 745        }
 746#endif
 747        return 0;
 748}
 749
 750int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
 751                 int *pcpu)
 752{
 753        int cpu;
 754
 755        cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED);
 756        if (cpu < 0) {
 757                pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
 758                return cpu;
 759        }
 760
 761        acpi_processor_set_pdc(handle);
 762        acpi_map_cpu2node(handle, cpu, physid);
 763
 764        *pcpu = cpu;
 765        return 0;
 766}
 767EXPORT_SYMBOL(acpi_map_cpu);
 768
 769int acpi_unmap_cpu(int cpu)
 770{
 771#ifdef CONFIG_ACPI_NUMA
 772        set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
 773#endif
 774
 775        per_cpu(x86_cpu_to_apicid, cpu) = -1;
 776        set_cpu_present(cpu, false);
 777        num_processors--;
 778
 779        return 0;
 780}
 781EXPORT_SYMBOL(acpi_unmap_cpu);
 782#endif                          /* CONFIG_ACPI_HOTPLUG_CPU */
 783
 784int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 785{
 786        int ret = -ENOSYS;
 787#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 788        int ioapic_id;
 789        u64 addr;
 790        struct ioapic_domain_cfg cfg = {
 791                .type = IOAPIC_DOMAIN_DYNAMIC,
 792                .ops = &mp_ioapic_irqdomain_ops,
 793        };
 794
 795        ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr);
 796        if (ioapic_id < 0) {
 797                unsigned long long uid;
 798                acpi_status status;
 799
 800                status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
 801                                               NULL, &uid);
 802                if (ACPI_FAILURE(status)) {
 803                        acpi_handle_warn(handle, "failed to get IOAPIC ID.\n");
 804                        return -EINVAL;
 805                }
 806                ioapic_id = (int)uid;
 807        }
 808
 809        mutex_lock(&acpi_ioapic_lock);
 810        ret  = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg);
 811        mutex_unlock(&acpi_ioapic_lock);
 812#endif
 813
 814        return ret;
 815}
 816EXPORT_SYMBOL(acpi_register_ioapic);
 817
 818int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 819{
 820        int ret = -ENOSYS;
 821
 822#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 823        mutex_lock(&acpi_ioapic_lock);
 824        ret  = mp_unregister_ioapic(gsi_base);
 825        mutex_unlock(&acpi_ioapic_lock);
 826#endif
 827
 828        return ret;
 829}
 830EXPORT_SYMBOL(acpi_unregister_ioapic);
 831
 832/**
 833 * acpi_ioapic_registered - Check whether the IOAPIC associated with @gsi_base
 834 *                          has been registered
 835 * @handle:     ACPI handle of the IOAPIC device
 836 * @gsi_base:   GSI base associated with the IOAPIC
 837 *
 838 * Assume caller holds some type of lock to serialize acpi_ioapic_registered()
 839 * with acpi_register_ioapic()/acpi_unregister_ioapic().
 840 */
 841int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
 842{
 843        int ret = 0;
 844
 845#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 846        mutex_lock(&acpi_ioapic_lock);
 847        ret  = mp_ioapic_registered(gsi_base);
 848        mutex_unlock(&acpi_ioapic_lock);
 849#endif
 850
 851        return ret;
 852}
 853
 854static int __init acpi_parse_sbf(struct acpi_table_header *table)
 855{
 856        struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
 857
 858        sbf_port = sb->cmos_index;      /* Save CMOS port */
 859
 860        return 0;
 861}
 862
 863#ifdef CONFIG_HPET_TIMER
 864#include <asm/hpet.h>
 865
 866static struct resource *hpet_res __initdata;
 867
 868static int __init acpi_parse_hpet(struct acpi_table_header *table)
 869{
 870        struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
 871
 872        if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
 873                printk(KERN_WARNING PREFIX "HPET timers must be located in "
 874                       "memory.\n");
 875                return -1;
 876        }
 877
 878        hpet_address = hpet_tbl->address.address;
 879        hpet_blockid = hpet_tbl->sequence;
 880
 881        /*
 882         * Some broken BIOSes advertise HPET at 0x0. We really do not
 883         * want to allocate a resource there.
 884         */
 885        if (!hpet_address) {
 886                printk(KERN_WARNING PREFIX
 887                       "HPET id: %#x base: %#lx is invalid\n",
 888                       hpet_tbl->id, hpet_address);
 889                return 0;
 890        }
 891#ifdef CONFIG_X86_64
 892        /*
 893         * Some even more broken BIOSes advertise HPET at
 894         * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
 895         * some noise:
 896         */
 897        if (hpet_address == 0xfed0000000000000UL) {
 898                if (!hpet_force_user) {
 899                        printk(KERN_WARNING PREFIX "HPET id: %#x "
 900                               "base: 0xfed0000000000000 is bogus\n "
 901                               "try hpet=force on the kernel command line to "
 902                               "fix it up to 0xfed00000.\n", hpet_tbl->id);
 903                        hpet_address = 0;
 904                        return 0;
 905                }
 906                printk(KERN_WARNING PREFIX
 907                       "HPET id: %#x base: 0xfed0000000000000 fixed up "
 908                       "to 0xfed00000.\n", hpet_tbl->id);
 909                hpet_address >>= 32;
 910        }
 911#endif
 912        printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 913               hpet_tbl->id, hpet_address);
 914
 915        /*
 916         * Allocate and initialize the HPET firmware resource for adding into
 917         * the resource tree during the lateinit timeframe.
 918         */
 919#define HPET_RESOURCE_NAME_SIZE 9
 920        hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
 921                                  SMP_CACHE_BYTES);
 922        if (!hpet_res)
 923                panic("%s: Failed to allocate %zu bytes\n", __func__,
 924                      sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
 925
 926        hpet_res->name = (void *)&hpet_res[1];
 927        hpet_res->flags = IORESOURCE_MEM;
 928        snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
 929                 hpet_tbl->sequence);
 930
 931        hpet_res->start = hpet_address;
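            /* The HPET memory-mapped register space is 1 KiB long */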
 932        hpet_res->end = hpet_address + (1 * 1024) - 1;
 933
 934        return 0;
 935}
 936
 937/*
 938 * hpet_insert_resource inserts the HPET resources used into the resource
 939 * tree.
 940 */
 941static __init int hpet_insert_resource(void)
 942{
 943        if (!hpet_res)
 944                return 1;
 945
 946        return insert_resource(&iomem_resource, hpet_res);
 947}
 948
 949late_initcall(hpet_insert_resource);
 950
 951#else
 952#define acpi_parse_hpet NULL
 953#endif
 954
 955static int __init acpi_parse_fadt(struct acpi_table_header *table)
 956{
 957        if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
 958                pr_debug("ACPI: no legacy devices present\n");
 959                x86_platform.legacy.devices.pnpbios = 0;
 960        }
 961
 962        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
 963            !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
 964            x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
 965                pr_debug("ACPI: i8042 controller is absent\n");
 966                x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
 967        }
 968
 969        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
 970                pr_debug("ACPI: not registering RTC platform device\n");
 971                x86_platform.legacy.rtc = 0;
 972        }
 973
 974        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
 975                pr_debug("ACPI: probing for VGA not safe\n");
 976                x86_platform.legacy.no_vga = 1;
 977        }
 978
 979#ifdef CONFIG_X86_PM_TIMER
 980        /* detect the location of the ACPI PM Timer */
 981        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
 982                /* FADT rev. 2 */
 983                if (acpi_gbl_FADT.xpm_timer_block.space_id !=
 984                    ACPI_ADR_SPACE_SYSTEM_IO)
 985                        return 0;
 986
 987                pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
 988                /*
 989                 * "X" fields are optional extensions to the original V1.0
 990                 * fields, so we must selectively expand V1.0 fields if the
 991                 * corresponding X field is zero.
 992                 */
 993                if (!pmtmr_ioport)
 994                        pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
 995        } else {
 996                /* FADT rev. 1 */
 997                pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
 998        }
 999        if (pmtmr_ioport)
1000                printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
1001                       pmtmr_ioport);
1002#endif
1003        return 0;
1004}
1005
1006#ifdef  CONFIG_X86_LOCAL_APIC
1007/*
1008 * Parse LAPIC entries in MADT
1009 * returns 0 on success, < 0 on error
1010 */
1011
1012static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
1013{
1014        int count;
1015
1016        if (!boot_cpu_has(X86_FEATURE_APIC))
1017                return -ENODEV;
1018
1019        /*
1020         * Note that the LAPIC address is obtained from the MADT (32-bit value)
1021         * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
1022         */
1023
1024        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
1025                                      acpi_parse_lapic_addr_ovr, 0);
1026        if (count < 0) {
1027                printk(KERN_ERR PREFIX
1028                       "Error parsing LAPIC address override entry\n");
1029                return count;
1030        }
1031
1032        register_lapic_address(acpi_lapic_addr);
1033
1034        return count;
1035}
1036
1037static int __init acpi_parse_madt_lapic_entries(void)
1038{
1039        int count;
1040        int x2count = 0;
1041        int ret;
1042        struct acpi_subtable_proc madt_proc[2];
1043
1044        if (!boot_cpu_has(X86_FEATURE_APIC))
1045                return -ENODEV;
1046
1047        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
1048                                      acpi_parse_sapic, MAX_LOCAL_APIC);
1049
1050        if (!count) {
1051                memset(madt_proc, 0, sizeof(madt_proc));
1052                madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
1053                madt_proc[0].handler = acpi_parse_lapic;
1054                madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
1055                madt_proc[1].handler = acpi_parse_x2apic;
1056                ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
1057                                sizeof(struct acpi_table_madt),
1058                                madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
1059                if (ret < 0) {
1060                        printk(KERN_ERR PREFIX
1061                                        "Error parsing LAPIC/X2APIC entries\n");
1062                        return ret;
1063                }
1064
1065                count = madt_proc[0].count;
1066                x2count = madt_proc[1].count;
1067        }
1068        if (!count && !x2count) {
1069                printk(KERN_ERR PREFIX "No LAPIC entries present\n");
1070                /* TBD: Cleanup to allow fallback to MPS */
1071                return -ENODEV;
1072        } else if (count < 0 || x2count < 0) {
1073                printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
1074                /* TBD: Cleanup to allow fallback to MPS */
1075                return count;
1076        }
1077
1078        x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
1079                                        acpi_parse_x2apic_nmi, 0);
1080        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
1081                                      acpi_parse_lapic_nmi, 0);
1082        if (count < 0 || x2count < 0) {
1083                printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
1084                /* TBD: Cleanup to allow fallback to MPS */
1085                return count;
1086        }
1087        return 0;
1088}
1089#endif                          /* CONFIG_X86_LOCAL_APIC */
1090
1091#ifdef  CONFIG_X86_IO_APIC
1092static void __init mp_config_acpi_legacy_irqs(void)
1093{
1094        int i;
1095        struct mpc_intsrc mp_irq;
1096
1097#ifdef CONFIG_EISA
1098        /*
1099         * Fabricate the legacy ISA bus (bus #31).
1100         */
1101        mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
1102#endif
1103        set_bit(MP_ISA_BUS, mp_bus_not_pci);
1104        pr_debug("Bus #%d is ISA (nIRQs: %d)\n", MP_ISA_BUS, nr_legacy_irqs());
1105
1106        /*
1107         * Use the default configuration for ISA IRQs 0-15, unless
1108         * overridden by (MADT) interrupt source override entries.
1109         */
1110        for (i = 0; i < nr_legacy_irqs(); i++) {
1111                int ioapic, pin;
1112                unsigned int dstapic;
1113                int idx;
1114                u32 gsi;
1115
1116                /* Locate the gsi that irq i maps to. */
1117                if (acpi_isa_irq_to_gsi(i, &gsi))
1118                        continue;
1119
1120                /*
1121                 * Locate the IOAPIC that manages the ISA IRQ.
1122                 */
1123                ioapic = mp_find_ioapic(gsi);
1124                if (ioapic < 0)
1125                        continue;
1126                pin = mp_find_ioapic_pin(ioapic, gsi);
1127                dstapic = mpc_ioapic_id(ioapic);
1128
1129                for (idx = 0; idx < mp_irq_entries; idx++) {
1130                        struct mpc_intsrc *irq = mp_irqs + idx;
1131
1132                        /* Do we already have a mapping for this ISA IRQ? */
1133                        if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
1134                                break;
1135
1136                        /* Do we already have a mapping for this IOAPIC pin? */
1137                        if (irq->dstapic == dstapic && irq->dstirq == pin)
1138                                break;
1139                }
1140
1141                if (idx != mp_irq_entries) {
1142                        printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
1143                        continue;       /* IRQ already used */
1144                }
1145
1146                mp_irq.type = MP_INTSRC;
1147                mp_irq.irqflag = 0;     /* Conforming */
1148                mp_irq.srcbus = MP_ISA_BUS;
1149                mp_irq.dstapic = dstapic;
1150                mp_irq.irqtype = mp_INT;
1151                mp_irq.srcbusirq = i; /* Identity mapped */
1152                mp_irq.dstirq = pin;
1153
1154                mp_save_irq(&mp_irq);
1155        }
1156}
1157
1158/*
1159 * Parse IOAPIC related entries in MADT
1160 * returns 0 on success, < 0 on error
1161 */
1162static int __init acpi_parse_madt_ioapic_entries(void)
1163{
1164        int count;
1165
1166        /*
1167         * The ACPI interpreter is required to complete interrupt setup,
1168         * so if it is off, don't enumerate the io-apics with ACPI.
1169         * If MPS is present, it will handle them;
1170         * otherwise the system will stay in PIC mode.
1171         */
1172        if (acpi_disabled || acpi_noirq)
1173                return -ENODEV;
1174
1175        if (!boot_cpu_has(X86_FEATURE_APIC))
1176                return -ENODEV;
1177
1178        /*
1179         * if "noapic" boot option, don't look for IO-APICs
1180         */
1181        if (skip_ioapic_setup) {
1182                printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
1183                       "due to 'noapic' option.\n");
1184                return -ENODEV;
1185        }
1186
1187        count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
1188                                      MAX_IO_APICS);
1189        if (!count) {
1190                printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
1191                return -ENODEV;
1192        } else if (count < 0) {
1193                printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
1194                return count;
1195        }
1196
1197        count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
1198                                      acpi_parse_int_src_ovr, nr_irqs);
1199        if (count < 0) {
1200                printk(KERN_ERR PREFIX
1201                       "Error parsing interrupt source overrides entry\n");
1202                /* TBD: Cleanup to allow fallback to MPS */
1203                return count;
1204        }
1205
1206        /*
1207         * If the BIOS did not supply an INT_SRC_OVR for the SCI,
1208         * pretend we got one so we can set the SCI flags.
1209         * But ignore setting up the SCI on hardware-reduced platforms.
1210         */
1211        if (acpi_sci_override_gsi == INVALID_ACPI_IRQ && !acpi_gbl_reduced_hardware)
1212                acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
1213                                      acpi_gbl_FADT.sci_interrupt);
1214
1215        /* Fill in identity legacy mappings where no override */
1216        mp_config_acpi_legacy_irqs();
1217
1218        count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
1219                                      acpi_parse_nmi_src, nr_irqs);
1220        if (count < 0) {
1221                printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
1222                /* TBD: Cleanup to allow fallback to MPS */
1223                return count;
1224        }
1225
1226        return 0;
1227}
1228#else
1229static inline int acpi_parse_madt_ioapic_entries(void)
1230{
1231        return -1;
1232}
1233#endif  /* !CONFIG_X86_IO_APIC */
1234
1235static void __init early_acpi_process_madt(void)
1236{
1237#ifdef CONFIG_X86_LOCAL_APIC
1238        int error;
1239
1240        if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
1241
1242                /*
1243                 * Parse MADT LAPIC entries
1244                 */
1245                error = early_acpi_parse_madt_lapic_addr_ovr();
1246                if (!error) {
1247                        acpi_lapic = 1;
1248                        smp_found_config = 1;
1249                }
1250                if (error == -EINVAL) {
1251                        /*
1252                         * Dell Precision Workstation 410, 610 come here.
1253                         */
1254                        printk(KERN_ERR PREFIX
1255                               "Invalid BIOS MADT, disabling ACPI\n");
1256                        disable_acpi();
1257                }
1258        }
1259#endif
1260}
1261
1262static void __init acpi_process_madt(void)
1263{
1264#ifdef CONFIG_X86_LOCAL_APIC
1265        int error;
1266
1267        if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
1268
1269                /*
1270                 * Parse MADT LAPIC entries
1271                 */
1272                error = acpi_parse_madt_lapic_entries();
1273                if (!error) {
1274                        acpi_lapic = 1;
1275
1276                        /*
1277                         * Parse MADT IO-APIC entries
1278                         */
1279                        mutex_lock(&acpi_ioapic_lock);
1280                        error = acpi_parse_madt_ioapic_entries();
1281                        mutex_unlock(&acpi_ioapic_lock);
1282                        if (!error) {
1283                                acpi_set_irq_model_ioapic();
1284
1285                                smp_found_config = 1;
1286                        }
1287                }
1288                if (error == -EINVAL) {
1289                        /*
1290                         * Dell Precision Workstation 410, 610 come here.
1291                         */
1292                        printk(KERN_ERR PREFIX
1293                               "Invalid BIOS MADT, disabling ACPI\n");
1294                        disable_acpi();
1295                }
1296        } else {
1297                /*
1298                 * ACPI found no MADT, and so ACPI wants UP PIC mode.
1299                 * In the event an MPS table was found, forget it.
1300                 * Boot with "acpi=off" to use MPS on such a system.
1301                 */
1302                if (smp_found_config) {
1303                        printk(KERN_WARNING PREFIX
1304                                "No APIC-table, disabling MPS\n");
1305                        smp_found_config = 0;
1306                }
1307        }
1308
1309        /*
1310         * ACPI supports both logical (e.g. Hyper-Threading) and physical
1311         * processors, whereas MPS only supports physical.
1312         */
1313        if (acpi_lapic && acpi_ioapic)
1314                printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
1315                       "information\n");
1316        else if (acpi_lapic)
1317                printk(KERN_INFO "Using ACPI for processor (LAPIC) "
1318                       "configuration information\n");
1319#endif
1320        return;
1321}
1322
1323static int __init disable_acpi_irq(const struct dmi_system_id *d)
1324{
1325        if (!acpi_force) {
1326                printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
1327                       d->ident);
1328                acpi_noirq_set();
1329        }
1330        return 0;
1331}
1332
1333static int __init disable_acpi_pci(const struct dmi_system_id *d)
1334{
1335        if (!acpi_force) {
1336                printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
1337                       d->ident);
1338                acpi_disable_pci();
1339        }
1340        return 0;
1341}
1342
1343static int __init dmi_disable_acpi(const struct dmi_system_id *d)
1344{
1345        if (!acpi_force) {
1346                printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
1347                disable_acpi();
1348        } else {
1349                printk(KERN_NOTICE
1350                       "Warning: DMI blacklist says broken, but acpi forced\n");
1351        }
1352        return 0;
1353}
1354
1355/*
1356 * Force ignoring BIOS IRQ0 override
1357 */
1358static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
1359{
1360        if (!acpi_skip_timer_override) {
1361                pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
1362                        d->ident);
1363                acpi_skip_timer_override = 1;
1364        }
1365        return 0;
1366}
1367
1368/*
1369 * ACPI offers an alternative platform interface model that removes
1370 * ACPI hardware requirements for platforms that do not implement
1371 * the PC Architecture.
1372 *
1373 * We initialize the Hardware-reduced ACPI model here:
1374 */
1375void __init acpi_generic_reduced_hw_init(void)
1376{
1377        /*
1378         * Override x86_init functions and bypass legacy PIC in
1379         * hardware reduced ACPI mode.
1380         */
1381        x86_init.timers.timer_init      = x86_init_noop;
1382        x86_init.irqs.pre_vector_init   = x86_init_noop;
1383        legacy_pic                      = &null_legacy_pic;
1384}
1385
1386static void __init acpi_reduced_hw_init(void)
1387{
1388        if (acpi_gbl_reduced_hardware)
1389                x86_init.acpi.reduced_hw_early_init();
1390}
1391
1392/*
1393 * If your system is blacklisted here, but you find that acpi=force
1394 * works for you, please contact linux-acpi@vger.kernel.org
1395 */
1396static const struct dmi_system_id acpi_dmi_table[] __initconst = {
1397        /*
1398         * Boxes that need ACPI disabled
1399         */
1400        {
1401         .callback = dmi_disable_acpi,
1402         .ident = "IBM Thinkpad",
1403         .matches = {
1404                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1405                     DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
1406                     },
1407         },
1408
1409        /*
1410         * Boxes that need ACPI PCI IRQ routing disabled
1411         */
1412        {
1413         .callback = disable_acpi_irq,
1414         .ident = "ASUS A7V",
1415         .matches = {
1416                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
1417                     DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
1418                     /* newer BIOS, Revision 1011, does work */
1419                     DMI_MATCH(DMI_BIOS_VERSION,
1420                               "ASUS A7V ACPI BIOS Revision 1007"),
1421                     },
1422         },
1423        {
1424                /*
1425                 * Latest BIOS for IBM 600E (1.16) has bad pcinum
1426                 * for LPC bridge, which is needed for the PCI
1427                 * interrupt links to work. DSDT fix is in bug 5966.
1428                 * 2645, 2646 model numbers are shared with 600/600E/600X
1429                 */
1430         .callback = disable_acpi_irq,
1431         .ident = "IBM Thinkpad 600 Series 2645",
1432         .matches = {
1433                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1434                     DMI_MATCH(DMI_BOARD_NAME, "2645"),
1435                     },
1436         },
1437        {
1438         .callback = disable_acpi_irq,
1439         .ident = "IBM Thinkpad 600 Series 2646",
1440         .matches = {
1441                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1442                     DMI_MATCH(DMI_BOARD_NAME, "2646"),
1443                     },
1444         },
1445        /*
1446         * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
1447         */
1448        {                       /* _BBN 0 bug */
1449         .callback = disable_acpi_pci,
1450         .ident = "ASUS PR-DLS",
1451         .matches = {
1452                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1453                     DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
1454                     DMI_MATCH(DMI_BIOS_VERSION,
1455                               "ASUS PR-DLS ACPI BIOS Revision 1010"),
1456                     DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
1457                     },
1458         },
1459        {
1460         .callback = disable_acpi_pci,
1461         .ident = "Acer TravelMate 36x Laptop",
1462         .matches = {
1463                     DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1464                     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
1465                     },
1466         },
1467        {}
1468};
1469
1470/* second table for DMI checks that should run after early-quirks */
1471static const struct dmi_system_id acpi_dmi_table_late[] __initconst = {
1472        /*
1473         * HP laptops which use a DSDT reporting as HP/SB400/10000,
1474         * which includes code that overrides all temperature
1475         * trip points to 16C if the INTIN2 input of the I/O APIC
1476         * is enabled.  This input is incorrectly designated the
1477         * ISA IRQ 0 via an interrupt source override even though
1478         * it is wired to the output of the master 8259A and INTIN0
1479         * is not connected at all.  Force ignoring the BIOS IRQ0
1480         * override in those cases.
1481         */
1482        {
1483         .callback = dmi_ignore_irq0_timer_override,
1484         .ident = "HP nx6115 laptop",
1485         .matches = {
1486                     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1487                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"),
1488                     },
1489         },
1490        {
1491         .callback = dmi_ignore_irq0_timer_override,
1492         .ident = "HP NX6125 laptop",
1493         .matches = {
1494                     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1495                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"),
1496                     },
1497         },
1498        {
1499         .callback = dmi_ignore_irq0_timer_override,
1500         .ident = "HP NX6325 laptop",
1501         .matches = {
1502                     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1503                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
1504                     },
1505         },
1506        {
1507         .callback = dmi_ignore_irq0_timer_override,
1508         .ident = "HP 6715b laptop",
1509         .matches = {
1510                     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1511                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
1512                     },
1513         },
1514        {
1515         .callback = dmi_ignore_irq0_timer_override,
1516         .ident = "FUJITSU SIEMENS",
1517         .matches = {
1518                     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1519                     DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
1520                     },
1521         },
1522        {}
1523};
1524
1525/*
1526 * acpi_boot_table_init() and acpi_boot_init()
1527 *  are called from setup_arch(), always.
1528 *      1. checksums all tables
1529 *      2. enumerates lapics
1530 *      3. enumerates io-apics
1531 *
1532 * acpi_table_init() is separate to allow reading SRAT without
1533 * other side effects.
1534 *
1535 * side effects of acpi_boot_init:
1536 *      acpi_lapic = 1 if LAPIC found
1537 *      acpi_ioapic = 1 if IOAPIC found
1538 *      if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1539 *      if acpi_blacklisted() acpi_disabled = 1;
1540 *      acpi_irq_model=...
1541 *      ...
1542 */
1543
1544void __init acpi_boot_table_init(void)
1545{
1546        dmi_check_system(acpi_dmi_table);
1547
1548        /*
1549         * If acpi_disabled, bail out
1550         */
1551        if (acpi_disabled)
1552                return;
1553
1554        /*
1555         * Initialize the ACPI boot-time table parser.
1556         */
1557        if (acpi_table_init()) {
1558                disable_acpi();
1559                return;
1560        }
1561
1562        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1563
1564        /*
1565         * blacklist may disable ACPI entirely
1566         */
1567        if (acpi_blacklisted()) {
1568                if (acpi_force) {
1569                        printk(KERN_WARNING PREFIX "acpi=force override\n");
1570                } else {
1571                        printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1572                        disable_acpi();
1573                        return;
1574                }
1575        }
1576}
1577
1578int __init early_acpi_boot_init(void)
1579{
1580        /*
1581         * If acpi_disabled, bail out
1582         */
1583        if (acpi_disabled)
1584                return 1;
1585
1586        /*
1587         * Process the Multiple APIC Description Table (MADT), if present
1588         */
1589        early_acpi_process_madt();
1590
1591        /*
1592         * Hardware-reduced ACPI mode initialization:
1593         */
1594        acpi_reduced_hw_init();
1595
1596        return 0;
1597}
1598
1599int __init acpi_boot_init(void)
1600{
1601        /* These checks run after early-quirks have been executed */
1602        dmi_check_system(acpi_dmi_table_late);
1603
1604        /*
1605         * If acpi_disabled, bail out
1606         */
1607        if (acpi_disabled)
1608                return 1;
1609
1610        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1611
1612        /*
1613         * set sci_int and PM timer address
1614         */
1615        acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
1616
1617        /*
1618         * Process the Multiple APIC Description Table (MADT), if present
1619         */
1620        acpi_process_madt();
1621
1622        acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
1623        if (IS_ENABLED(CONFIG_ACPI_BGRT) && !acpi_nobgrt)
1624                acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
1625
1626        if (!acpi_noirq)
1627                x86_init.pci.init = pci_acpi_init;
1628
1629        /* Do not enable ACPI SPCR console by default */
1630        acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
1631        return 0;
1632}
1633
1634static int __init parse_acpi(char *arg)
1635{
1636        if (!arg)
1637                return -EINVAL;
1638
1639        /* "acpi=off" disables both ACPI table parsing and interpreter */
1640        if (strcmp(arg, "off") == 0) {
1641                disable_acpi();
1642        }
1643        /* acpi=force to override the blacklist */
1644        else if (strcmp(arg, "force") == 0) {
1645                acpi_force = 1;
1646                acpi_disabled = 0;
1647        }
1648        /* acpi=strict disables out-of-spec workarounds */
1649        else if (strcmp(arg, "strict") == 0) {
1650                acpi_strict = 1;
1651        }
1652        /* acpi=rsdt: use RSDT instead of XSDT */
1653        else if (strcmp(arg, "rsdt") == 0) {
1654                acpi_gbl_do_not_use_xsdt = TRUE;
1655        }
1656        /* "acpi=noirq" disables ACPI interrupt routing */
1657        else if (strcmp(arg, "noirq") == 0) {
1658                acpi_noirq_set();
1659        }
1660        /* "acpi=copy_dsdt" copies the DSDT locally */
1661        else if (strcmp(arg, "copy_dsdt") == 0) {
1662                acpi_gbl_copy_dsdt_locally = 1;
1663        }
1664        /* "acpi=nocmcff" disables FF mode for corrected errors */
1665        else if (strcmp(arg, "nocmcff") == 0) {
1666                acpi_disable_cmcff = 1;
1667        } else {
1668                /* Core will printk when we return error. */
1669                return -EINVAL;
1670        }
1671        return 0;
1672}
1673early_param("acpi", parse_acpi);
1674
1675static int __init parse_acpi_bgrt(char *arg)
1676{
1677        acpi_nobgrt = true;
1678        return 0;
1679}
1680early_param("bgrt_disable", parse_acpi_bgrt);
1681
1682/* FIXME: Using pci= for an ACPI parameter is a travesty. */
1683static int __init parse_pci(char *arg)
1684{
1685        if (arg && strcmp(arg, "noacpi") == 0)
1686                acpi_disable_pci();
1687        return 0;
1688}
1689early_param("pci", parse_pci);
1690
1691int __init acpi_mps_check(void)
1692{
1693#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
1694/* mptable code is not built-in */
1695        if (acpi_disabled || acpi_noirq) {
1696                printk(KERN_WARNING "MPS support code is not built-in.\n"
1697                       "Using acpi=off or acpi=noirq or pci=noacpi "
1698                       "may cause problems\n");
1699                return 1;
1700        }
1701#endif
1702        return 0;
1703}
1704
1705#ifdef CONFIG_X86_IO_APIC
1706static int __init parse_acpi_skip_timer_override(char *arg)
1707{
1708        acpi_skip_timer_override = 1;
1709        return 0;
1710}
1711early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
1712
1713static int __init parse_acpi_use_timer_override(char *arg)
1714{
1715        acpi_use_timer_override = 1;
1716        return 0;
1717}
1718early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
1719#endif /* CONFIG_X86_IO_APIC */
1720
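    /*
     * "acpi_sci=" accepts edge/level and high/low.  Each setting replaces
     * only its own field in acpi_sci_flags (kept in MADT INTI flag format),
     * so a trigger mode and a polarity given as separate acpi_sci= options
     * can in principle be combined.
     */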
1721static int __init setup_acpi_sci(char *s)
1722{
1723        if (!s)
1724                return -EINVAL;
1725        if (!strcmp(s, "edge"))
1726                acpi_sci_flags =  ACPI_MADT_TRIGGER_EDGE |
1727                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1728        else if (!strcmp(s, "level"))
1729                acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
1730                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1731        else if (!strcmp(s, "high"))
1732                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
1733                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1734        else if (!strcmp(s, "low"))
1735                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
1736                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1737        else
1738                return -EINVAL;
1739        return 0;
1740}
1741early_param("acpi_sci", setup_acpi_sci);
1742
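    /*
     * ACPI Global Lock helpers (see the ACPI spec's Global Lock protocol).
     * The lock word in the FACS uses bit 0 as the "pending" flag and bit 1
     * as the "owned" flag.
     *
     * __acpi_acquire_global_lock() atomically sets the owned bit and, if the
     * lock was already owned, the pending bit as well.  It returns -1 (true)
     * when the lock was acquired and 0 when the caller must wait for the
     * current owner to release it.
     */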
1743int __acpi_acquire_global_lock(unsigned int *lock)
1744{
1745        unsigned int old, new, val;
1746        do {
1747                old = *lock;
1748                new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
1749                val = cmpxchg(lock, old, new);
1750        } while (unlikely (val != old));
1751        return ((new & 0x3) < 3) ? -1 : 0;
1752}
1753
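    /*
     * __acpi_release_global_lock() atomically clears the owned and pending
     * bits and returns the old pending bit, which tells the caller whether
     * the firmware side must be signalled (GBL_RLS) that the lock has been
     * released.
     */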
1754int __acpi_release_global_lock(unsigned int *lock)
1755{
1756        unsigned int old, new, val;
1757        do {
1758                old = *lock;
1759                new = old & ~0x3;
1760                val = cmpxchg(lock, old, new);
1761        } while (unlikely (val != old));
1762        return old & 0x1;
1763}
1764
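    /*
     * Record a firmware-reserved range (e.g. ACPI table storage) as
     * E820_TYPE_ACPI in the e820 map and print the updated table.
     */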
1765void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
1766{
1767        e820__range_add(addr, size, E820_TYPE_ACPI);
1768        e820__update_table_print();
1769}
1770
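    /*
     * Default handlers for the ACPI RSDP root pointer, which the x86 boot
     * protocol allows a boot loader to pass via boot_params.acpi_rsdp_addr.
     */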
1771void x86_default_set_root_pointer(u64 addr)
1772{
1773        boot_params.acpi_rsdp_addr = addr;
1774}
1775
1776u64 x86_default_get_root_pointer(void)
1777{
1778        return boot_params.acpi_rsdp_addr;
1779}
1780