linux/arch/x86/kernel/acpi/boot.c
   1/*
   2 *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
   3 *
   4 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   5 *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
   6 *
   7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   8 *
   9 *  This program is free software; you can redistribute it and/or modify
  10 *  it under the terms of the GNU General Public License as published by
  11 *  the Free Software Foundation; either version 2 of the License, or
  12 *  (at your option) any later version.
  13 *
  14 *  This program is distributed in the hope that it will be useful,
  15 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 *  GNU General Public License for more details.
  18 *
  19 *  You should have received a copy of the GNU General Public License
  20 *  along with this program; if not, write to the Free Software
  21 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  22 *
  23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  24 */
  25
  26#include <linux/init.h>
  27#include <linux/acpi.h>
  28#include <linux/acpi_pmtmr.h>
  29#include <linux/efi.h>
  30#include <linux/cpumask.h>
  31#include <linux/module.h>
  32#include <linux/dmi.h>
  33#include <linux/irq.h>
  34#include <linux/bootmem.h>
  35#include <linux/ioport.h>
  36
  37#include <asm/pgtable.h>
  38#include <asm/io_apic.h>
  39#include <asm/apic.h>
  40#include <asm/io.h>
  41#include <asm/mpspec.h>
  42
  43static int __initdata acpi_force = 0;
  44
  45#ifdef  CONFIG_ACPI
  46int acpi_disabled = 0;
  47#else
  48int acpi_disabled = 1;
  49#endif
  50EXPORT_SYMBOL(acpi_disabled);
  51
  52#ifdef  CONFIG_X86_64
  53
  54#include <asm/proto.h>
  55
  56static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
  57
  58
  59#else                           /* X86 */
  60
  61#ifdef  CONFIG_X86_LOCAL_APIC
  62#include <mach_apic.h>
  63#include <mach_mpparse.h>
  64#endif                          /* CONFIG_X86_LOCAL_APIC */
  65
  66#endif                          /* X86 */
  67
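     /*
      * Sanity-check an MADT subtable entry before it is used: reject a NULL
      * pointer, an entry that would run past the end of the table, and an
      * entry whose length field is smaller than the structure we are about
      * to read from it.
      */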
  68#define BAD_MADT_ENTRY(entry, end) (                                        \
  69                (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
  70                ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
  71
  72#define PREFIX                  "ACPI: "
  73
  74int acpi_noirq;                         /* skip ACPI IRQ initialization */
  75int acpi_pci_disabled __initdata;       /* skip ACPI PCI scan and IRQ initialization */
  76int acpi_ht __initdata = 1;     /* enable HT */
  77
  78int acpi_lapic;
  79int acpi_ioapic;
  80int acpi_strict;
  81EXPORT_SYMBOL(acpi_strict);
  82
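     /*
      * SCI (System Control Interrupt) override state: acpi_sci_flags holds
      * MADT-style INTI trigger/polarity flags collected from the acpi_sci=
      * command line, and acpi_sci_override_gsi records the GSI the SCI was
      * routed to once an override has been applied.
      */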
  83u8 acpi_sci_flags __initdata;
  84int acpi_sci_override_gsi __initdata;
  85int acpi_skip_timer_override __initdata;
  86int acpi_use_timer_override __initdata;
  87
  88#ifdef CONFIG_X86_LOCAL_APIC
  89static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
  90#endif
  91
  92#ifndef __HAVE_ARCH_CMPXCHG
   93#warning ACPI uses CMPXCHG, which requires i486 or later hardware
  94#endif
  95
  96/* --------------------------------------------------------------------------
  97                              Boot-time Configuration
  98   -------------------------------------------------------------------------- */
  99
 100/*
 101 * The default interrupt routing model is PIC (8259).  This gets
 102 * overridden if IOAPICs are enumerated (below).
 103 */
 104enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
 105
 106#ifdef  CONFIG_X86_64
 107
 108/* rely on all ACPI tables being in the direct mapping */
 109char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
 110{
 111        if (!phys_addr || !size)
 112                return NULL;
 113
 114        if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
 115                return __va(phys_addr);
 116
 117        return NULL;
 118}
 119
 120#else
 121
 122/*
  123 * Temporarily use the fixmap area starting at FIX_ACPI_END to map
  124 * the target physical address.  The problem is that set_fixmap()
  125 * provides a single page, and it is possible that one page is not
  126 * sufficient.
  127 * By using this area we can map several pages temporarily, i.e.
  128 * until the next __acpi_map_table() call.
  129 *
  130 * Important Safety Note:  fixmap page numbers are *subtracted* from
  131 * the fixmap base address.  That's why we start at FIX_ACPI_END and
  132 * decrement idx while incrementing the physical address.
  133 */
 134char *__acpi_map_table(unsigned long phys, unsigned long size)
 135{
 136        unsigned long base, offset, mapped_size;
 137        int idx;
 138
 139        if (phys + size < 8 * 1024 * 1024)
 140                return __va(phys);
 141
 142        offset = phys & (PAGE_SIZE - 1);
 143        mapped_size = PAGE_SIZE - offset;
 144        set_fixmap(FIX_ACPI_END, phys);
 145        base = fix_to_virt(FIX_ACPI_END);
 146
 147        /*
 148         * Most cases can be covered by the below.
 149         */
 150        idx = FIX_ACPI_END;
 151        while (mapped_size < size) {
 152                if (--idx < FIX_ACPI_BEGIN)
 153                        return NULL;    /* cannot handle this */
 154                phys += PAGE_SIZE;
 155                set_fixmap(idx, phys);
 156                mapped_size += PAGE_SIZE;
 157        }
 158
 159        return ((unsigned char *)base + offset);
 160}
 161#endif
 162
 163#ifdef CONFIG_PCI_MMCONFIG
 164/* The physical address of the MMCONFIG aperture.  Set from ACPI tables. */
 165struct acpi_mcfg_allocation *pci_mmcfg_config;
 166int pci_mmcfg_config_num;
 167
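     /*
      * Parse the MCFG table: its body is an array of acpi_mcfg_allocation
      * entries (one per PCI segment/bus range) that follows the fixed
      * header.  Count them, copy them into pci_mmcfg_config[], and reject
      * any MMCONFIG aperture located above 4GB.
      */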
 168int __init acpi_parse_mcfg(struct acpi_table_header *header)
 169{
 170        struct acpi_table_mcfg *mcfg;
 171        unsigned long i;
 172        int config_size;
 173
 174        if (!header)
 175                return -EINVAL;
 176
 177        mcfg = (struct acpi_table_mcfg *)header;
 178
 179        /* how many config structures do we have */
 180        pci_mmcfg_config_num = 0;
 181        i = header->length - sizeof(struct acpi_table_mcfg);
 182        while (i >= sizeof(struct acpi_mcfg_allocation)) {
 183                ++pci_mmcfg_config_num;
 184                i -= sizeof(struct acpi_mcfg_allocation);
  185        }
 186        if (pci_mmcfg_config_num == 0) {
 187                printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
 188                return -ENODEV;
 189        }
 190
 191        config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
 192        pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
 193        if (!pci_mmcfg_config) {
 194                printk(KERN_WARNING PREFIX
 195                       "No memory for MCFG config tables\n");
 196                return -ENOMEM;
 197        }
 198
 199        memcpy(pci_mmcfg_config, &mcfg[1], config_size);
 200        for (i = 0; i < pci_mmcfg_config_num; ++i) {
 201                if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
 202                        printk(KERN_ERR PREFIX
 203                               "MMCONFIG not in low 4GB of memory\n");
 204                        kfree(pci_mmcfg_config);
 205                        pci_mmcfg_config_num = 0;
 206                        return -ENODEV;
 207                }
 208        }
 209
 210        return 0;
 211}
 212#endif                          /* CONFIG_PCI_MMCONFIG */
 213
 214#ifdef CONFIG_X86_LOCAL_APIC
 215static int __init acpi_parse_madt(struct acpi_table_header *table)
 216{
 217        struct acpi_table_madt *madt = NULL;
 218
 219        if (!cpu_has_apic)
 220                return -EINVAL;
 221
 222        madt = (struct acpi_table_madt *)table;
 223        if (!madt) {
 224                printk(KERN_WARNING PREFIX "Unable to map MADT\n");
 225                return -ENODEV;
 226        }
 227
 228        if (madt->address) {
 229                acpi_lapic_addr = (u64) madt->address;
 230
 231                printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
 232                       madt->address);
 233        }
 234
 235        acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
 236
 237        return 0;
 238}
 239
 240static int __init
 241acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 242{
 243        struct acpi_madt_local_apic *processor = NULL;
 244
 245        processor = (struct acpi_madt_local_apic *)header;
 246
 247        if (BAD_MADT_ENTRY(processor, end))
 248                return -EINVAL;
 249
 250        acpi_table_print_madt_entry(header);
 251
  252        /*
  253         * We need to register disabled CPUs as well, so that
  254         * disabled CPUs can be counted. This lets us size
  255         * cpus_possible_map more accurately and avoid
  256         * preallocating memory for all NR_CPUS when CPU
  257         * hotplug is used.
  258         */
 259        mp_register_lapic(processor->id,        /* APIC ID */
 260                          processor->lapic_flags & ACPI_MADT_ENABLED);  /* Enabled? */
 261
 262        return 0;
 263}
 264
 265static int __init
 266acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
 267                          const unsigned long end)
 268{
 269        struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
 270
 271        lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
 272
 273        if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
 274                return -EINVAL;
 275
 276        acpi_lapic_addr = lapic_addr_ovr->address;
 277
 278        return 0;
 279}
 280
 281static int __init
 282acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
 283{
 284        struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
 285
 286        lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
 287
 288        if (BAD_MADT_ENTRY(lapic_nmi, end))
 289                return -EINVAL;
 290
 291        acpi_table_print_madt_entry(header);
 292
 293        if (lapic_nmi->lint != 1)
 294                printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
 295
 296        return 0;
 297}
 298
 299#endif                          /*CONFIG_X86_LOCAL_APIC */
 300
 301#ifdef CONFIG_X86_IO_APIC
 302
 303static int __init
 304acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 305{
 306        struct acpi_madt_io_apic *ioapic = NULL;
 307
 308        ioapic = (struct acpi_madt_io_apic *)header;
 309
 310        if (BAD_MADT_ENTRY(ioapic, end))
 311                return -EINVAL;
 312
 313        acpi_table_print_madt_entry(header);
 314
 315        mp_register_ioapic(ioapic->id,
 316                           ioapic->address, ioapic->global_irq_base);
 317
 318        return 0;
 319}
 320
 321/*
 322 * Parse Interrupt Source Override for the ACPI SCI
 323 */
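     /*
      * The trigger/polarity values below use the MADT INTI encoding:
      * 0 means "conforms to bus spec", 1 means edge/active-high, and
      * 3 means level/active-low.  The default for the SCI is level
      * triggered, active low.
      */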
 324static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 325{
 326        if (trigger == 0)       /* compatible SCI trigger is level */
 327                trigger = 3;
 328
 329        if (polarity == 0)      /* compatible SCI polarity is low */
 330                polarity = 3;
 331
 332        /* Command-line over-ride via acpi_sci= */
 333        if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
 334                trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
 335
 336        if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
 337                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 338
 339        /*
  340         * mp_config_acpi_legacy_irqs() has already set up IRQs < 16.
  341         * If the GSI is < 16, this will update its flags;
  342         * otherwise it will create a new mp_irqs[] entry.
 343         */
 344        mp_override_legacy_irq(gsi, polarity, trigger, gsi);
 345
 346        /*
 347         * stash over-ride to indicate we've been here
 348         * and for later update of acpi_gbl_FADT
 349         */
 350        acpi_sci_override_gsi = gsi;
 351        return;
 352}
 353
 354static int __init
 355acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
 356                       const unsigned long end)
 357{
 358        struct acpi_madt_interrupt_override *intsrc = NULL;
 359
 360        intsrc = (struct acpi_madt_interrupt_override *)header;
 361
 362        if (BAD_MADT_ENTRY(intsrc, end))
 363                return -EINVAL;
 364
 365        acpi_table_print_madt_entry(header);
 366
 367        if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
 368                acpi_sci_ioapic_setup(intsrc->global_irq,
 369                                      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
 370                                      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
 371                return 0;
 372        }
 373
 374        if (acpi_skip_timer_override &&
 375            intsrc->source_irq == 0 && intsrc->global_irq == 2) {
 376                printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
 377                return 0;
 378        }
 379
 380        mp_override_legacy_irq(intsrc->source_irq,
 381                                intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
 382                                (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
 383                                intsrc->global_irq);
 384
 385        return 0;
 386}
 387
 388static int __init
 389acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
 390{
 391        struct acpi_madt_nmi_source *nmi_src = NULL;
 392
 393        nmi_src = (struct acpi_madt_nmi_source *)header;
 394
 395        if (BAD_MADT_ENTRY(nmi_src, end))
 396                return -EINVAL;
 397
 398        acpi_table_print_madt_entry(header);
 399
  400        /* TBD: Support NMI source (nmi_src) entries? */
 401
 402        return 0;
 403}
 404
 405#endif                          /* CONFIG_X86_IO_APIC */
 406
 407/*
 408 * acpi_pic_sci_set_trigger()
 409 *
 410 * use ELCR to set PIC-mode trigger type for SCI
 411 *
 412 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
 413 * it may require Edge Trigger -- use "acpi_sci=edge"
 414 *
  415 * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
  416 * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
  417 * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
  418 * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
 419 */
 420
 421void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 422{
 423        unsigned int mask = 1 << irq;
 424        unsigned int old, new;
 425
 426        /* Real old ELCR mask */
 427        old = inb(0x4d0) | (inb(0x4d1) << 8);
 428
 429        /*
 430         * If we use ACPI to set PCI IRQs, then we should clear ELCR
 431         * since we will set it correctly as we enable the PCI irq
 432         * routing.
 433         */
 434        new = acpi_noirq ? old : 0;
 435
 436        /*
 437         * Update SCI information in the ELCR, it isn't in the PCI
 438         * routing tables..
 439         */
 440        switch (trigger) {
 441        case 1:         /* Edge - clear */
 442                new &= ~mask;
 443                break;
 444        case 3:         /* Level - set */
 445                new |= mask;
 446                break;
 447        }
 448
 449        if (old == new)
 450                return;
 451
 452        printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
 453        outb(new, 0x4d0);
 454        outb(new >> 8, 0x4d1);
 455}
 456
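     /*
      * On x86 kernel IRQ numbers map 1:1 onto ACPI GSIs, so no translation
      * table is needed.
      */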
 457int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 458{
 459        *irq = gsi;
 460        return 0;
 461}
 462
 463/*
 464 * success: return IRQ number (>=0)
 465 * failure: return < 0
 466 */
 467int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 468{
 469        unsigned int irq;
 470        unsigned int plat_gsi = gsi;
 471
 472#ifdef CONFIG_PCI
 473        /*
 474         * Make sure all (legacy) PCI IRQs are set as level-triggered.
 475         */
 476        if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
 477                extern void eisa_set_level_irq(unsigned int irq);
 478
 479                if (triggering == ACPI_LEVEL_SENSITIVE)
 480                        eisa_set_level_irq(gsi);
 481        }
 482#endif
 483
 484#ifdef CONFIG_X86_IO_APIC
 485        if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
 486                plat_gsi = mp_register_gsi(gsi, triggering, polarity);
 487        }
 488#endif
 489        acpi_gsi_to_irq(plat_gsi, &irq);
 490        return irq;
 491}
 492
 493EXPORT_SYMBOL(acpi_register_gsi);
 494
 495/*
 496 *  ACPI based hotplug support for CPU
 497 */
 498#ifdef CONFIG_ACPI_HOTPLUG_CPU
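     /*
      * Map a hot-added processor to a logical CPU number.  The processor's
      * _MAT method returns a buffer containing a single MADT Local APIC
      * entry; we take the APIC ID from it, register the new CPU, and use
      * the bit that newly appears in cpu_present_map to find the logical
      * CPU number that was assigned.
      */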
 499int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 500{
 501        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 502        union acpi_object *obj;
 503        struct acpi_madt_local_apic *lapic;
 504        cpumask_t tmp_map, new_map;
 505        u8 physid;
 506        int cpu;
 507
 508        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 509                return -EINVAL;
 510
 511        if (!buffer.length || !buffer.pointer)
 512                return -EINVAL;
 513
 514        obj = buffer.pointer;
 515        if (obj->type != ACPI_TYPE_BUFFER ||
 516            obj->buffer.length < sizeof(*lapic)) {
 517                kfree(buffer.pointer);
 518                return -EINVAL;
 519        }
 520
 521        lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
 522
 523        if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
 524            !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
 525                kfree(buffer.pointer);
 526                return -EINVAL;
 527        }
 528
 529        physid = lapic->id;
 530
 531        kfree(buffer.pointer);
 532        buffer.length = ACPI_ALLOCATE_BUFFER;
 533        buffer.pointer = NULL;
 534
 535        tmp_map = cpu_present_map;
 536        mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 537
 538        /*
 539         * If mp_register_lapic successfully generates a new logical cpu
 540         * number, then the following will get us exactly what was mapped
 541         */
 542        cpus_andnot(new_map, cpu_present_map, tmp_map);
 543        if (cpus_empty(new_map)) {
  544                printk(KERN_ERR "Unable to map lapic to logical cpu number\n");
 545                return -EINVAL;
 546        }
 547
 548        cpu = first_cpu(new_map);
 549
 550        *pcpu = cpu;
 551        return 0;
 552}
 553
 554EXPORT_SYMBOL(acpi_map_lsapic);
 555
 556int acpi_unmap_lsapic(int cpu)
 557{
 558        per_cpu(x86_cpu_to_apicid, cpu) = -1;
 559        cpu_clear(cpu, cpu_present_map);
 560        num_processors--;
 561
 562        return (0);
 563}
 564
 565EXPORT_SYMBOL(acpi_unmap_lsapic);
 566#endif                          /* CONFIG_ACPI_HOTPLUG_CPU */
 567
 568int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 569{
 570        /* TBD */
 571        return -EINVAL;
 572}
 573
 574EXPORT_SYMBOL(acpi_register_ioapic);
 575
 576int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 577{
 578        /* TBD */
 579        return -EINVAL;
 580}
 581
 582EXPORT_SYMBOL(acpi_unregister_ioapic);
 583
 584static unsigned long __init
 585acpi_scan_rsdp(unsigned long start, unsigned long length)
 586{
 587        unsigned long offset = 0;
 588        unsigned long sig_len = sizeof("RSD PTR ") - 1;
 589
 590        /*
 591         * Scan all 16-byte boundaries of the physical memory region for the
 592         * RSDP signature.
 593         */
 594        for (offset = 0; offset < length; offset += 16) {
 595                if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
 596                        continue;
 597                return (start + offset);
 598        }
 599
 600        return 0;
 601}
 602
 603static int __init acpi_parse_sbf(struct acpi_table_header *table)
 604{
 605        struct acpi_table_boot *sb;
 606
 607        sb = (struct acpi_table_boot *)table;
 608        if (!sb) {
 609                printk(KERN_WARNING PREFIX "Unable to map SBF\n");
 610                return -ENODEV;
 611        }
 612
 613        sbf_port = sb->cmos_index;      /* Save CMOS port */
 614
 615        return 0;
 616}
 617
 618#ifdef CONFIG_HPET_TIMER
 619#include <asm/hpet.h>
 620
 621static struct __initdata resource *hpet_res;
 622
 623static int __init acpi_parse_hpet(struct acpi_table_header *table)
 624{
 625        struct acpi_table_hpet *hpet_tbl;
 626
 627        hpet_tbl = (struct acpi_table_hpet *)table;
 628        if (!hpet_tbl) {
 629                printk(KERN_WARNING PREFIX "Unable to map HPET\n");
 630                return -ENODEV;
 631        }
 632
 633        if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
 634                printk(KERN_WARNING PREFIX "HPET timers must be located in "
 635                       "memory.\n");
 636                return -1;
 637        }
 638
 639        hpet_address = hpet_tbl->address.address;
 640
 641        /*
 642         * Some broken BIOSes advertise HPET at 0x0. We really do not
 643         * want to allocate a resource there.
 644         */
 645        if (!hpet_address) {
 646                printk(KERN_WARNING PREFIX
 647                       "HPET id: %#x base: %#lx is invalid\n",
 648                       hpet_tbl->id, hpet_address);
 649                return 0;
 650        }
 651#ifdef CONFIG_X86_64
 652        /*
 653         * Some even more broken BIOSes advertise HPET at
 654         * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
 655         * some noise:
 656         */
 657        if (hpet_address == 0xfed0000000000000UL) {
 658                if (!hpet_force_user) {
 659                        printk(KERN_WARNING PREFIX "HPET id: %#x "
  660                               "base: 0xfed0000000000000 is bogus\n"
 661                               "try hpet=force on the kernel command line to "
 662                               "fix it up to 0xfed00000.\n", hpet_tbl->id);
 663                        hpet_address = 0;
 664                        return 0;
 665                }
 666                printk(KERN_WARNING PREFIX
 667                       "HPET id: %#x base: 0xfed0000000000000 fixed up "
 668                       "to 0xfed00000.\n", hpet_tbl->id);
 669                hpet_address >>= 32;
 670        }
 671#endif
 672        printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 673               hpet_tbl->id, hpet_address);
 674
 675        /*
 676         * Allocate and initialize the HPET firmware resource for adding into
 677         * the resource tree during the lateinit timeframe.
 678         */
 679#define HPET_RESOURCE_NAME_SIZE 9
 680        hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
 681
 682        if (!hpet_res)
 683                return 0;
 684
 685        memset(hpet_res, 0, sizeof(*hpet_res));
 686        hpet_res->name = (void *)&hpet_res[1];
 687        hpet_res->flags = IORESOURCE_MEM;
 688        snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
 689                 hpet_tbl->sequence);
 690
 691        hpet_res->start = hpet_address;
 692        hpet_res->end = hpet_address + (1 * 1024) - 1;
 693
 694        return 0;
 695}
 696
 697/*
 698 * hpet_insert_resource inserts the HPET resources used into the resource
 699 * tree.
 700 */
 701static __init int hpet_insert_resource(void)
 702{
 703        if (!hpet_res)
 704                return 1;
 705
 706        return insert_resource(&iomem_resource, hpet_res);
 707}
 708
 709late_initcall(hpet_insert_resource);
 710
 711#else
 712#define acpi_parse_hpet NULL
 713#endif
 714
 715static int __init acpi_parse_fadt(struct acpi_table_header *table)
 716{
 717
 718#ifdef CONFIG_X86_PM_TIMER
 719        /* detect the location of the ACPI PM Timer */
 720        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
 721                /* FADT rev. 2 */
 722                if (acpi_gbl_FADT.xpm_timer_block.space_id !=
 723                    ACPI_ADR_SPACE_SYSTEM_IO)
 724                        return 0;
 725
 726                pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
 727                /*
 728                 * "X" fields are optional extensions to the original V1.0
 729                 * fields, so we must selectively expand V1.0 fields if the
 730                 * corresponding X field is zero.
 731                 */
 732                if (!pmtmr_ioport)
 733                        pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
 734        } else {
 735                /* FADT rev. 1 */
 736                pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
 737        }
 738        if (pmtmr_ioport)
 739                printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
 740                       pmtmr_ioport);
 741#endif
 742        return 0;
 743}
 744
 745unsigned long __init acpi_find_rsdp(void)
 746{
 747        unsigned long rsdp_phys = 0;
 748
 749        if (efi_enabled) {
 750                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
 751                        return efi.acpi20;
 752                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
 753                        return efi.acpi;
 754        }
 755        /*
 756         * Scan memory looking for the RSDP signature. First search EBDA (low
 757         * memory) paragraphs and then search upper memory (E0000-FFFFF).
 758         */
 759        rsdp_phys = acpi_scan_rsdp(0, 0x400);
 760        if (!rsdp_phys)
 761                rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
 762
 763        return rsdp_phys;
 764}
 765
 766#ifdef  CONFIG_X86_LOCAL_APIC
 767/*
 768 * Parse LAPIC entries in MADT
 769 * returns 0 on success, < 0 on error
 770 */
 771static int __init acpi_parse_madt_lapic_entries(void)
 772{
 773        int count;
 774
 775        if (!cpu_has_apic)
 776                return -ENODEV;
 777
 778        /*
 779         * Note that the LAPIC address is obtained from the MADT (32-bit value)
  780 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
 781         */
 782
 783        count =
 784            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
 785                                  acpi_parse_lapic_addr_ovr, 0);
 786        if (count < 0) {
 787                printk(KERN_ERR PREFIX
 788                       "Error parsing LAPIC address override entry\n");
 789                return count;
 790        }
 791
 792        mp_register_lapic_address(acpi_lapic_addr);
 793
 794        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
 795                                      MAX_APICS);
 796        if (!count) {
 797                printk(KERN_ERR PREFIX "No LAPIC entries present\n");
 798                /* TBD: Cleanup to allow fallback to MPS */
 799                return -ENODEV;
 800        } else if (count < 0) {
 801                printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
 802                /* TBD: Cleanup to allow fallback to MPS */
 803                return count;
 804        }
 805
 806        count =
 807            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
 808        if (count < 0) {
 809                printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 810                /* TBD: Cleanup to allow fallback to MPS */
 811                return count;
 812        }
 813        return 0;
 814}
 815#endif                          /* CONFIG_X86_LOCAL_APIC */
 816
 817#ifdef  CONFIG_X86_IO_APIC
 818/*
 819 * Parse IOAPIC related entries in MADT
 820 * returns 0 on success, < 0 on error
 821 */
 822static int __init acpi_parse_madt_ioapic_entries(void)
 823{
 824        int count;
 825
 826        /*
  827         * The ACPI interpreter is required to complete interrupt setup,
  828         * so if it is off, don't enumerate the I/O APICs with ACPI.
  829         * If MPS is present, it will handle them;
  830         * otherwise the system will stay in PIC mode.
 831         */
 832        if (acpi_disabled || acpi_noirq) {
 833                return -ENODEV;
 834        }
 835
 836        if (!cpu_has_apic)
 837                return -ENODEV;
 838
 839        /*
 840         * if "noapic" boot option, don't look for IO-APICs
 841         */
 842        if (skip_ioapic_setup) {
 843                printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
 844                       "due to 'noapic' option.\n");
 845                return -ENODEV;
 846        }
 847
 848        count =
 849            acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
 850                                  MAX_IO_APICS);
 851        if (!count) {
 852                printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
 853                return -ENODEV;
 854        } else if (count < 0) {
 855                printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
 856                return count;
 857        }
 858
 859        count =
 860            acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
 861                                  NR_IRQ_VECTORS);
 862        if (count < 0) {
 863                printk(KERN_ERR PREFIX
 864                       "Error parsing interrupt source overrides entry\n");
 865                /* TBD: Cleanup to allow fallback to MPS */
 866                return count;
 867        }
 868
 869        /*
  870         * If the BIOS did not supply an INT_SRC_OVR for the SCI,
 871         * pretend we got one so we can set the SCI flags.
 872         */
 873        if (!acpi_sci_override_gsi)
 874                acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
 875
  876        /* Fill in identity legacy mappings where there is no override */
 877        mp_config_acpi_legacy_irqs();
 878
 879        count =
 880            acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
 881                                  NR_IRQ_VECTORS);
 882        if (count < 0) {
 883                printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
 884                /* TBD: Cleanup to allow fallback to MPS */
 885                return count;
 886        }
 887
 888        return 0;
 889}
 890#else
 891static inline int acpi_parse_madt_ioapic_entries(void)
 892{
 893        return -1;
 894}
 895#endif  /* !CONFIG_X86_IO_APIC */
 896
 897static void __init acpi_process_madt(void)
 898{
 899#ifdef CONFIG_X86_LOCAL_APIC
 900        int error;
 901
 902        if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
 903
 904                /*
 905                 * Parse MADT LAPIC entries
 906                 */
 907                error = acpi_parse_madt_lapic_entries();
 908                if (!error) {
 909                        acpi_lapic = 1;
 910
 911#ifdef CONFIG_X86_GENERICARCH
 912                        generic_bigsmp_probe();
 913#endif
 914                        /*
 915                         * Parse MADT IO-APIC entries
 916                         */
 917                        error = acpi_parse_madt_ioapic_entries();
 918                        if (!error) {
 919                                acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
 920                                acpi_irq_balance_set(NULL);
 921                                acpi_ioapic = 1;
 922
 923                                smp_found_config = 1;
 924                                setup_apic_routing();
 925                        }
 926                }
 927                if (error == -EINVAL) {
 928                        /*
 929                         * Dell Precision Workstation 410, 610 come here.
 930                         */
 931                        printk(KERN_ERR PREFIX
 932                               "Invalid BIOS MADT, disabling ACPI\n");
 933                        disable_acpi();
 934                }
 935        }
 936#endif
 937        return;
 938}
 939
 940#ifdef __i386__
 941
 942static int __init disable_acpi_irq(const struct dmi_system_id *d)
 943{
 944        if (!acpi_force) {
 945                printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
 946                       d->ident);
 947                acpi_noirq_set();
 948        }
 949        return 0;
 950}
 951
 952static int __init disable_acpi_pci(const struct dmi_system_id *d)
 953{
 954        if (!acpi_force) {
 955                printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
 956                       d->ident);
 957                acpi_disable_pci();
 958        }
 959        return 0;
 960}
 961
 962static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 963{
 964        if (!acpi_force) {
 965                printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
 966                disable_acpi();
 967        } else {
 968                printk(KERN_NOTICE
 969                       "Warning: DMI blacklist says broken, but acpi forced\n");
 970        }
 971        return 0;
 972}
 973
 974/*
 975 * Limit ACPI to CPU enumeration for HT
 976 */
 977static int __init force_acpi_ht(const struct dmi_system_id *d)
 978{
 979        if (!acpi_force) {
 980                printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
 981                       d->ident);
 982                disable_acpi();
 983                acpi_ht = 1;
 984        } else {
 985                printk(KERN_NOTICE
 986                       "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
 987        }
 988        return 0;
 989}
 990
 991/*
 992 * If your system is blacklisted here, but you find that acpi=force
 993 * works for you, please contact acpi-devel@sourceforge.net
 994 */
 995static struct dmi_system_id __initdata acpi_dmi_table[] = {
 996        /*
 997         * Boxes that need ACPI disabled
 998         */
 999        {
1000         .callback = dmi_disable_acpi,
1001         .ident = "IBM Thinkpad",
1002         .matches = {
1003                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1004                     DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
1005                     },
1006         },
1007
1008        /*
1009         * Boxes that need acpi=ht
1010         */
1011        {
1012         .callback = force_acpi_ht,
1013         .ident = "FSC Primergy T850",
1014         .matches = {
1015                     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1016                     DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
1017                     },
1018         },
1019        {
1020         .callback = force_acpi_ht,
1021         .ident = "HP VISUALIZE NT Workstation",
1022         .matches = {
1023                     DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
1024                     DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
1025                     },
1026         },
1027        {
1028         .callback = force_acpi_ht,
1029         .ident = "Compaq Workstation W8000",
1030         .matches = {
1031                     DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
1032                     DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
1033                     },
1034         },
1035        {
1036         .callback = force_acpi_ht,
1037         .ident = "ASUS P4B266",
1038         .matches = {
1039                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1040                     DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
1041                     },
1042         },
1043        {
1044         .callback = force_acpi_ht,
1045         .ident = "ASUS P2B-DS",
1046         .matches = {
1047                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1048                     DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
1049                     },
1050         },
1051        {
1052         .callback = force_acpi_ht,
1053         .ident = "ASUS CUR-DLS",
1054         .matches = {
1055                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1056                     DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
1057                     },
1058         },
1059        {
1060         .callback = force_acpi_ht,
1061         .ident = "ABIT i440BX-W83977",
1062         .matches = {
1063                     DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
1064                     DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
1065                     },
1066         },
1067        {
1068         .callback = force_acpi_ht,
1069         .ident = "IBM Bladecenter",
1070         .matches = {
1071                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1072                     DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
1073                     },
1074         },
1075        {
1076         .callback = force_acpi_ht,
1077         .ident = "IBM eServer xSeries 360",
1078         .matches = {
1079                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1080                     DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
1081                     },
1082         },
1083        {
1084         .callback = force_acpi_ht,
1085         .ident = "IBM eserver xSeries 330",
1086         .matches = {
1087                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1088                     DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
1089                     },
1090         },
1091        {
1092         .callback = force_acpi_ht,
1093         .ident = "IBM eserver xSeries 440",
1094         .matches = {
1095                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1096                     DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
1097                     },
1098         },
1099
1100        /*
1101         * Boxes that need ACPI PCI IRQ routing disabled
1102         */
1103        {
1104         .callback = disable_acpi_irq,
1105         .ident = "ASUS A7V",
1106         .matches = {
1107                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
1108                     DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
1109                     /* newer BIOS, Revision 1011, does work */
1110                     DMI_MATCH(DMI_BIOS_VERSION,
1111                               "ASUS A7V ACPI BIOS Revision 1007"),
1112                     },
1113         },
1114        {
1115                /*
1116                 * Latest BIOS for IBM 600E (1.16) has bad pcinum
1117                 * for LPC bridge, which is needed for the PCI
1118                 * interrupt links to work. DSDT fix is in bug 5966.
1119                 * 2645, 2646 model numbers are shared with 600/600E/600X
1120                 */
1121         .callback = disable_acpi_irq,
1122         .ident = "IBM Thinkpad 600 Series 2645",
1123         .matches = {
1124                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1125                     DMI_MATCH(DMI_BOARD_NAME, "2645"),
1126                     },
1127         },
1128        {
1129         .callback = disable_acpi_irq,
1130         .ident = "IBM Thinkpad 600 Series 2646",
1131         .matches = {
1132                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1133                     DMI_MATCH(DMI_BOARD_NAME, "2646"),
1134                     },
1135         },
1136        /*
1137         * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
1138         */
1139        {                       /* _BBN 0 bug */
1140         .callback = disable_acpi_pci,
1141         .ident = "ASUS PR-DLS",
1142         .matches = {
1143                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1144                     DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
1145                     DMI_MATCH(DMI_BIOS_VERSION,
1146                               "ASUS PR-DLS ACPI BIOS Revision 1010"),
1147                     DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
1148                     },
1149         },
1150        {
1151         .callback = disable_acpi_pci,
1152         .ident = "Acer TravelMate 36x Laptop",
1153         .matches = {
1154                     DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1155                     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
1156                     },
1157         },
1158        {}
1159};
1160
1161#endif                          /* __i386__ */
1162
1163/*
1164 * acpi_boot_table_init() and acpi_boot_init()
1165 *  called from setup_arch(), always.
1166 *      1. checksums all tables
1167 *      2. enumerates lapics
1168 *      3. enumerates io-apics
1169 *
1170 * acpi_table_init() is separate to allow reading SRAT without
1171 * other side effects.
1172 *
1173 * side effects of acpi_boot_init:
1174 *      acpi_lapic = 1 if LAPIC found
1175 *      acpi_ioapic = 1 if IOAPIC found
1176 *      if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1177 *      if acpi_blacklisted() acpi_disabled = 1;
1178 *      acpi_irq_model=...
1179 *      ...
1180 *
1181 * return value: (currently ignored)
1182 *      0: success
1183 *      !0: failure
1184 */
1185
1186int __init acpi_boot_table_init(void)
1187{
1188        int error;
1189
1190#ifdef __i386__
1191        dmi_check_system(acpi_dmi_table);
1192#endif
1193
1194        /*
1195         * If acpi_disabled, bail out
1196         * One exception: acpi=ht continues far enough to enumerate LAPICs
1197         */
1198        if (acpi_disabled && !acpi_ht)
1199                return 1;
1200
1201        /*
1202         * Initialize the ACPI boot-time table parser.
1203         */
1204        error = acpi_table_init();
1205        if (error) {
1206                disable_acpi();
1207                return error;
1208        }
1209
1210        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1211
1212        /*
1213         * blacklist may disable ACPI entirely
1214         */
1215        error = acpi_blacklisted();
1216        if (error) {
1217                if (acpi_force) {
1218                        printk(KERN_WARNING PREFIX "acpi=force override\n");
1219                } else {
1220                        printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1221                        disable_acpi();
1222                        return error;
1223                }
1224        }
1225
1226        return 0;
1227}
1228
1229int __init acpi_boot_init(void)
1230{
1231        /*
1232         * If acpi_disabled, bail out
1233         * One exception: acpi=ht continues far enough to enumerate LAPICs
1234         */
1235        if (acpi_disabled && !acpi_ht)
1236                return 1;
1237
1238        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1239
1240        /*
1241         * set sci_int and PM timer address
1242         */
1243        acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
1244
1245        /*
1246         * Process the Multiple APIC Description Table (MADT), if present
1247         */
1248        acpi_process_madt();
1249
1250        acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
1251
1252        return 0;
1253}
1254
1255static int __init parse_acpi(char *arg)
1256{
1257        if (!arg)
1258                return -EINVAL;
1259
1260        /* "acpi=off" disables both ACPI table parsing and interpreter */
1261        if (strcmp(arg, "off") == 0) {
1262                disable_acpi();
1263        }
 1264        /* acpi=force to override the blacklist */
1265        else if (strcmp(arg, "force") == 0) {
1266                acpi_force = 1;
1267                acpi_ht = 1;
1268                acpi_disabled = 0;
1269        }
1270        /* acpi=strict disables out-of-spec workarounds */
1271        else if (strcmp(arg, "strict") == 0) {
1272                acpi_strict = 1;
1273        }
1274        /* Limit ACPI just to boot-time to enable HT */
1275        else if (strcmp(arg, "ht") == 0) {
1276                if (!acpi_force)
1277                        disable_acpi();
1278                acpi_ht = 1;
1279        }
1280        /* "acpi=noirq" disables ACPI interrupt routing */
1281        else if (strcmp(arg, "noirq") == 0) {
1282                acpi_noirq_set();
1283        } else {
1284                /* Core will printk when we return error. */
1285                return -EINVAL;
1286        }
1287        return 0;
1288}
1289early_param("acpi", parse_acpi);
1290
1291/* FIXME: Using pci= for an ACPI parameter is a travesty. */
1292static int __init parse_pci(char *arg)
1293{
1294        if (arg && strcmp(arg, "noacpi") == 0)
1295                acpi_disable_pci();
1296        return 0;
1297}
1298early_param("pci", parse_pci);
1299
1300#ifdef CONFIG_X86_IO_APIC
1301static int __init parse_acpi_skip_timer_override(char *arg)
1302{
1303        acpi_skip_timer_override = 1;
1304        return 0;
1305}
1306early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
1307
1308static int __init parse_acpi_use_timer_override(char *arg)
1309{
1310        acpi_use_timer_override = 1;
1311        return 0;
1312}
1313early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
1314#endif /* CONFIG_X86_IO_APIC */
1315
1316static int __init setup_acpi_sci(char *s)
1317{
1318        if (!s)
1319                return -EINVAL;
1320        if (!strcmp(s, "edge"))
1321                acpi_sci_flags =  ACPI_MADT_TRIGGER_EDGE |
1322                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1323        else if (!strcmp(s, "level"))
1324                acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
1325                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1326        else if (!strcmp(s, "high"))
1327                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
1328                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1329        else if (!strcmp(s, "low"))
1330                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
1331                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1332        else
1333                return -EINVAL;
1334        return 0;
1335}
1336early_param("acpi_sci", setup_acpi_sci);
1337
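     /*
      * The ACPI Global Lock is a dword in the FACS that is shared with the
      * firmware.  Bit 0 is the "pending" flag and bit 1 is the "owned" flag.
      * To acquire the lock we set the owned bit; if it was already owned we
      * also set the pending bit, which asks the current owner to signal us
      * when it releases the lock.  Returns -1 (true) if the lock was
      * acquired outright, 0 if the caller must wait for the release event.
      */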
1338int __acpi_acquire_global_lock(unsigned int *lock)
1339{
1340        unsigned int old, new, val;
1341        do {
1342                old = *lock;
1343                new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
1344                val = cmpxchg(lock, old, new);
1345        } while (unlikely (val != old));
1346        return (new < 3) ? -1 : 0;
1347}
1348
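     /*
      * Release the Global Lock by clearing both the owned and pending bits.
      * The old pending bit is returned so the caller knows whether it must
      * notify the firmware (via GBL_RLS) that the lock has been released.
      */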
1349int __acpi_release_global_lock(unsigned int *lock)
1350{
1351        unsigned int old, new, val;
1352        do {
1353                old = *lock;
1354                new = old & ~0x3;
1355                val = cmpxchg(lock, old, new);
1356        } while (unlikely (val != old));
1357        return old & 0x1;
1358}
1359