linux/arch/x86/kernel/acpi/boot.c
   1/*
   2 *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
   3 *
   4 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   5 *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
   6 *
   7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   8 *
   9 *  This program is free software; you can redistribute it and/or modify
  10 *  it under the terms of the GNU General Public License as published by
  11 *  the Free Software Foundation; either version 2 of the License, or
  12 *  (at your option) any later version.
  13 *
  14 *  This program is distributed in the hope that it will be useful,
  15 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 *  GNU General Public License for more details.
  18 *
  19 *  You should have received a copy of the GNU General Public License
  20 *  along with this program; if not, write to the Free Software
  21 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  22 *
  23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  24 */
  25
  26#include <linux/init.h>
  27#include <linux/acpi.h>
  28#include <linux/acpi_pmtmr.h>
  29#include <linux/efi.h>
  30#include <linux/cpumask.h>
  31#include <linux/module.h>
  32#include <linux/dmi.h>
  33#include <linux/irq.h>
  34#include <linux/bootmem.h>
  35#include <linux/ioport.h>
  36#include <linux/pci.h>
  37
  38#include <asm/pgtable.h>
  39#include <asm/io_apic.h>
  40#include <asm/apic.h>
  41#include <asm/io.h>
  42#include <asm/mpspec.h>
  43#include <asm/smp.h>
  44
  45static int __initdata acpi_force = 0;
  46u32 acpi_rsdt_forced;
  47int acpi_disabled;
  48EXPORT_SYMBOL(acpi_disabled);
  49
  50#ifdef  CONFIG_X86_64
  51# include <asm/proto.h>
   52#endif                          /* CONFIG_X86_64 */
  53
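/*
 * A MADT subtable is rejected if the pointer is NULL, if the entry would
 * extend past the end of the table, or if its declared length is shorter
 * than the structure that is about to be read from it.
 */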
  54#define BAD_MADT_ENTRY(entry, end) (                                        \
  55                (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
  56                ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
  57
  58#define PREFIX                  "ACPI: "
  59
  60int acpi_noirq;                         /* skip ACPI IRQ initialization */
  61int acpi_pci_disabled;          /* skip ACPI PCI scan and IRQ initialization */
  62EXPORT_SYMBOL(acpi_pci_disabled);
  63int acpi_ht __initdata = 1;     /* enable HT */
  64
  65int acpi_lapic;
  66int acpi_ioapic;
  67int acpi_strict;
  68
  69u8 acpi_sci_flags __initdata;
  70int acpi_sci_override_gsi __initdata;
  71int acpi_skip_timer_override __initdata;
  72int acpi_use_timer_override __initdata;
  73
  74#ifdef CONFIG_X86_LOCAL_APIC
  75static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
  76#endif
  77
  78#ifndef __HAVE_ARCH_CMPXCHG
  79#warning ACPI uses CMPXCHG, i486 and later hardware
  80#endif
  81
  82/* --------------------------------------------------------------------------
  83                              Boot-time Configuration
  84   -------------------------------------------------------------------------- */
  85
  86/*
  87 * The default interrupt routing model is PIC (8259).  This gets
  88 * overridden if IOAPICs are enumerated (below).
  89 */
  90enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  91
  92
   93/*
   94 * Map an ACPI table for early, boot-time access.
   95 *
   96 * A single set_fixmap() page is not necessarily large enough for a
   97 * table, so the mapping is done through early_ioremap(), which can
   98 * cover multiple pages.
   99 *
  100 * The mapping is temporary: it is only valid until the matching
  101 * __acpi_unmap_table() call, since the early_ioremap() slots are a
  102 * scarce, reused resource.  Callers must copy out anything they need
  103 * to keep before unmapping.
  104 */
 105char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 106{
 107
 108        if (!phys || !size)
 109                return NULL;
 110
 111        return early_ioremap(phys, size);
 112}
 113void __init __acpi_unmap_table(char *map, unsigned long size)
 114{
 115        if (!map || !size)
 116                return;
 117
 118        early_iounmap(map, size);
 119}
 120
 121#ifdef CONFIG_X86_LOCAL_APIC
 122static int __init acpi_parse_madt(struct acpi_table_header *table)
 123{
 124        struct acpi_table_madt *madt = NULL;
 125
 126        if (!cpu_has_apic)
 127                return -EINVAL;
 128
 129        madt = (struct acpi_table_madt *)table;
 130        if (!madt) {
 131                printk(KERN_WARNING PREFIX "Unable to map MADT\n");
 132                return -ENODEV;
 133        }
 134
 135        if (madt->address) {
 136                acpi_lapic_addr = (u64) madt->address;
 137
 138                printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
 139                       madt->address);
 140        }
 141
 142        default_acpi_madt_oem_check(madt->header.oem_id,
 143                                    madt->header.oem_table_id);
 144
 145        return 0;
 146}
 147
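/*
 * Record one local APIC from the MADT.  Entries marked disabled are not
 * registered; they only bump disabled_cpus so the possible-CPU count can
 * be sized correctly later.
 */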
 148static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 149{
 150        unsigned int ver = 0;
 151
 152        if (!enabled) {
 153                ++disabled_cpus;
 154                return;
 155        }
 156
 157        if (boot_cpu_physical_apicid != -1U)
 158                ver = apic_version[boot_cpu_physical_apicid];
 159
 160        generic_processor_info(id, ver);
 161}
 162
 163static int __init
 164acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
 165{
 166        struct acpi_madt_local_x2apic *processor = NULL;
 167
 168        processor = (struct acpi_madt_local_x2apic *)header;
 169
 170        if (BAD_MADT_ENTRY(processor, end))
 171                return -EINVAL;
 172
 173        acpi_table_print_madt_entry(header);
 174
 175#ifdef CONFIG_X86_X2APIC
  176        /*
  177         * We need to register disabled CPUs as well, so that
  178         * they can be counted.  This lets us size
  179         * cpus_possible_map more accurately and avoids
  180         * preallocating memory for all NR_CPUS when CPU
  181         * hotplug is used.
  182         */
 183        acpi_register_lapic(processor->local_apic_id,   /* APIC ID */
 184                            processor->lapic_flags & ACPI_MADT_ENABLED);
 185#else
 186        printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
 187#endif
 188
 189        return 0;
 190}
 191
 192static int __init
 193acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 194{
 195        struct acpi_madt_local_apic *processor = NULL;
 196
 197        processor = (struct acpi_madt_local_apic *)header;
 198
 199        if (BAD_MADT_ENTRY(processor, end))
 200                return -EINVAL;
 201
 202        acpi_table_print_madt_entry(header);
 203
  204        /*
  205         * We need to register disabled CPUs as well, so that
  206         * they can be counted.  This lets us size
  207         * cpus_possible_map more accurately and avoids
  208         * preallocating memory for all NR_CPUS when CPU
  209         * hotplug is used.
  210         */
 211        acpi_register_lapic(processor->id,      /* APIC ID */
 212                            processor->lapic_flags & ACPI_MADT_ENABLED);
 213
 214        return 0;
 215}
 216
 217static int __init
 218acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
 219{
 220        struct acpi_madt_local_sapic *processor = NULL;
 221
 222        processor = (struct acpi_madt_local_sapic *)header;
 223
 224        if (BAD_MADT_ENTRY(processor, end))
 225                return -EINVAL;
 226
 227        acpi_table_print_madt_entry(header);
 228
 229        acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
 230                            processor->lapic_flags & ACPI_MADT_ENABLED);
 231
 232        return 0;
 233}
 234
 235static int __init
 236acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
 237                          const unsigned long end)
 238{
 239        struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
 240
 241        lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
 242
 243        if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
 244                return -EINVAL;
 245
 246        acpi_lapic_addr = lapic_addr_ovr->address;
 247
 248        return 0;
 249}
 250
 251static int __init
 252acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
 253                      const unsigned long end)
 254{
 255        struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;
 256
 257        x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header;
 258
 259        if (BAD_MADT_ENTRY(x2apic_nmi, end))
 260                return -EINVAL;
 261
 262        acpi_table_print_madt_entry(header);
 263
 264        if (x2apic_nmi->lint != 1)
 265                printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
 266
 267        return 0;
 268}
 269
 270static int __init
 271acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
 272{
 273        struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
 274
 275        lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
 276
 277        if (BAD_MADT_ENTRY(lapic_nmi, end))
 278                return -EINVAL;
 279
 280        acpi_table_print_madt_entry(header);
 281
 282        if (lapic_nmi->lint != 1)
 283                printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
 284
 285        return 0;
 286}
 287
 288#endif                          /*CONFIG_X86_LOCAL_APIC */
 289
 290#ifdef CONFIG_X86_IO_APIC
 291
 292static int __init
 293acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 294{
 295        struct acpi_madt_io_apic *ioapic = NULL;
 296
 297        ioapic = (struct acpi_madt_io_apic *)header;
 298
 299        if (BAD_MADT_ENTRY(ioapic, end))
 300                return -EINVAL;
 301
 302        acpi_table_print_madt_entry(header);
 303
 304        mp_register_ioapic(ioapic->id,
 305                           ioapic->address, ioapic->global_irq_base);
 306
 307        return 0;
 308}
 309
 310/*
 311 * Parse Interrupt Source Override for the ACPI SCI
 312 */
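/*
 * MADT INTI flags: bits 0-1 are the polarity, bits 2-3 are the trigger
 * mode.  A value of 0 means "conforms to bus", 1 means active-high/edge
 * and 3 means active-low/level; the SCI bus default is level-triggered,
 * active-low, hence the "== 0 -> 3" fixups below.
 */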
 313static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 314{
 315        if (trigger == 0)       /* compatible SCI trigger is level */
 316                trigger = 3;
 317
 318        if (polarity == 0)      /* compatible SCI polarity is low */
 319                polarity = 3;
 320
 321        /* Command-line over-ride via acpi_sci= */
 322        if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
 323                trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
 324
 325        if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
 326                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 327
 328        /*
 329         * mp_config_acpi_legacy_irqs() already setup IRQs < 16
 330         * If GSI is < 16, this will update its flags,
 331         * else it will create a new mp_irqs[] entry.
 332         */
 333        mp_override_legacy_irq(gsi, polarity, trigger, gsi);
 334
 335        /*
 336         * stash over-ride to indicate we've been here
 337         * and for later update of acpi_gbl_FADT
 338         */
 339        acpi_sci_override_gsi = gsi;
 340        return;
 341}
 342
 343static int __init
 344acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
 345                       const unsigned long end)
 346{
 347        struct acpi_madt_interrupt_override *intsrc = NULL;
 348
 349        intsrc = (struct acpi_madt_interrupt_override *)header;
 350
 351        if (BAD_MADT_ENTRY(intsrc, end))
 352                return -EINVAL;
 353
 354        acpi_table_print_madt_entry(header);
 355
 356        if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
 357                acpi_sci_ioapic_setup(intsrc->global_irq,
 358                                      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
 359                                      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
 360                return 0;
 361        }
 362
 363        if (acpi_skip_timer_override &&
 364            intsrc->source_irq == 0 && intsrc->global_irq == 2) {
 365                printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
 366                return 0;
 367        }
 368
 369        mp_override_legacy_irq(intsrc->source_irq,
 370                                intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
 371                                (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
 372                                intsrc->global_irq);
 373
 374        return 0;
 375}
 376
 377static int __init
 378acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
 379{
 380        struct acpi_madt_nmi_source *nmi_src = NULL;
 381
 382        nmi_src = (struct acpi_madt_nmi_source *)header;
 383
 384        if (BAD_MADT_ENTRY(nmi_src, end))
 385                return -EINVAL;
 386
 387        acpi_table_print_madt_entry(header);
 388
  389        /* TBD: Support NMI source (nmi_src) entries? */
 390
 391        return 0;
 392}
 393
 394#endif                          /* CONFIG_X86_IO_APIC */
 395
 396/*
 397 * acpi_pic_sci_set_trigger()
 398 *
 399 * use ELCR to set PIC-mode trigger type for SCI
 400 *
 401 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
 402 * it may require Edge Trigger -- use "acpi_sci=edge"
 403 *
  404 * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
  405 * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
  406 * ELCR1 covers IRQs 0-7  (IRQ 0, 1, 2 must be 0)
  407 * ELCR2 covers IRQs 8-15 (IRQ 8, 13 must be 0)
 408 */
 409
 410void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 411{
 412        unsigned int mask = 1 << irq;
 413        unsigned int old, new;
 414
 415        /* Real old ELCR mask */
 416        old = inb(0x4d0) | (inb(0x4d1) << 8);
 417
 418        /*
 419         * If we use ACPI to set PCI IRQs, then we should clear ELCR
 420         * since we will set it correctly as we enable the PCI irq
 421         * routing.
 422         */
 423        new = acpi_noirq ? old : 0;
 424
 425        /*
 426         * Update SCI information in the ELCR, it isn't in the PCI
 427         * routing tables..
 428         */
 429        switch (trigger) {
 430        case 1:         /* Edge - clear */
 431                new &= ~mask;
 432                break;
 433        case 3:         /* Level - set */
 434                new |= mask;
 435                break;
 436        }
 437
 438        if (old == new)
 439                return;
 440
 441        printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
 442        outb(new, 0x4d0);
 443        outb(new >> 8, 0x4d1);
 444}
 445
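/*
 * On x86 the GSI number is used directly as the kernel IRQ number.
 */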
 446int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 447{
 448        *irq = gsi;
 449        return 0;
 450}
 451
 452/*
 453 * success: return IRQ number (>=0)
 454 * failure: return < 0
 455 */
 456int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 457{
 458        unsigned int irq;
 459        unsigned int plat_gsi = gsi;
 460
 461#ifdef CONFIG_PCI
 462        /*
 463         * Make sure all (legacy) PCI IRQs are set as level-triggered.
 464         */
 465        if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
 466                if (trigger == ACPI_LEVEL_SENSITIVE)
 467                        eisa_set_level_irq(gsi);
 468        }
 469#endif
 470
 471#ifdef CONFIG_X86_IO_APIC
 472        if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
 473                plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
 474        }
 475#endif
 476        acpi_gsi_to_irq(plat_gsi, &irq);
 477        return irq;
 478}
 479
 480/*
 481 *  ACPI based hotplug support for CPU
 482 */
 483#ifdef CONFIG_ACPI_HOTPLUG_CPU
 484
 485static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 486{
 487        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 488        union acpi_object *obj;
 489        struct acpi_madt_local_apic *lapic;
 490        cpumask_var_t tmp_map, new_map;
 491        u8 physid;
 492        int cpu;
 493        int retval = -ENOMEM;
 494
 495        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 496                return -EINVAL;
 497
 498        if (!buffer.length || !buffer.pointer)
 499                return -EINVAL;
 500
 501        obj = buffer.pointer;
 502        if (obj->type != ACPI_TYPE_BUFFER ||
 503            obj->buffer.length < sizeof(*lapic)) {
 504                kfree(buffer.pointer);
 505                return -EINVAL;
 506        }
 507
 508        lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
 509
 510        if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
 511            !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
 512                kfree(buffer.pointer);
 513                return -EINVAL;
 514        }
 515
 516        physid = lapic->id;
 517
 518        kfree(buffer.pointer);
 519        buffer.length = ACPI_ALLOCATE_BUFFER;
 520        buffer.pointer = NULL;
 521
 522        if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
 523                goto out;
 524
 525        if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
 526                goto free_tmp_map;
 527
 528        cpumask_copy(tmp_map, cpu_present_mask);
 529        acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 530
 531        /*
  532         * If acpi_register_lapic() successfully generates a new logical cpu
 533         * number, then the following will get us exactly what was mapped
 534         */
 535        cpumask_andnot(new_map, cpu_present_mask, tmp_map);
 536        if (cpumask_empty(new_map)) {
 537                printk ("Unable to map lapic to logical cpu number\n");
 538                retval = -EINVAL;
 539                goto free_new_map;
 540        }
 541
 542        cpu = cpumask_first(new_map);
 543
 544        *pcpu = cpu;
 545        retval = 0;
 546
 547free_new_map:
 548        free_cpumask_var(new_map);
 549free_tmp_map:
 550        free_cpumask_var(tmp_map);
 551out:
 552        return retval;
 553}
 554
 555/* wrapper to silence section mismatch warning */
 556int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
 557{
 558        return _acpi_map_lsapic(handle, pcpu);
 559}
 560EXPORT_SYMBOL(acpi_map_lsapic);
 561
 562int acpi_unmap_lsapic(int cpu)
 563{
 564        per_cpu(x86_cpu_to_apicid, cpu) = -1;
 565        set_cpu_present(cpu, false);
 566        num_processors--;
 567
 568        return (0);
 569}
 570
 571EXPORT_SYMBOL(acpi_unmap_lsapic);
 572#endif                          /* CONFIG_ACPI_HOTPLUG_CPU */
 573
 574int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 575{
 576        /* TBD */
 577        return -EINVAL;
 578}
 579
 580EXPORT_SYMBOL(acpi_register_ioapic);
 581
 582int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 583{
 584        /* TBD */
 585        return -EINVAL;
 586}
 587
 588EXPORT_SYMBOL(acpi_unregister_ioapic);
 589
 590static int __init acpi_parse_sbf(struct acpi_table_header *table)
 591{
 592        struct acpi_table_boot *sb;
 593
 594        sb = (struct acpi_table_boot *)table;
 595        if (!sb) {
 596                printk(KERN_WARNING PREFIX "Unable to map SBF\n");
 597                return -ENODEV;
 598        }
 599
 600        sbf_port = sb->cmos_index;      /* Save CMOS port */
 601
 602        return 0;
 603}
 604
 605#ifdef CONFIG_HPET_TIMER
 606#include <asm/hpet.h>
 607
  608static struct resource *hpet_res __initdata;
 609
 610static int __init acpi_parse_hpet(struct acpi_table_header *table)
 611{
 612        struct acpi_table_hpet *hpet_tbl;
 613
 614        hpet_tbl = (struct acpi_table_hpet *)table;
 615        if (!hpet_tbl) {
 616                printk(KERN_WARNING PREFIX "Unable to map HPET\n");
 617                return -ENODEV;
 618        }
 619
 620        if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
 621                printk(KERN_WARNING PREFIX "HPET timers must be located in "
 622                       "memory.\n");
 623                return -1;
 624        }
 625
 626        hpet_address = hpet_tbl->address.address;
 627
 628        /*
 629         * Some broken BIOSes advertise HPET at 0x0. We really do not
 630         * want to allocate a resource there.
 631         */
 632        if (!hpet_address) {
 633                printk(KERN_WARNING PREFIX
 634                       "HPET id: %#x base: %#lx is invalid\n",
 635                       hpet_tbl->id, hpet_address);
 636                return 0;
 637        }
 638#ifdef CONFIG_X86_64
 639        /*
 640         * Some even more broken BIOSes advertise HPET at
 641         * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
 642         * some noise:
 643         */
 644        if (hpet_address == 0xfed0000000000000UL) {
 645                if (!hpet_force_user) {
 646                        printk(KERN_WARNING PREFIX "HPET id: %#x "
 647                               "base: 0xfed0000000000000 is bogus\n "
 648                               "try hpet=force on the kernel command line to "
 649                               "fix it up to 0xfed00000.\n", hpet_tbl->id);
 650                        hpet_address = 0;
 651                        return 0;
 652                }
 653                printk(KERN_WARNING PREFIX
 654                       "HPET id: %#x base: 0xfed0000000000000 fixed up "
 655                       "to 0xfed00000.\n", hpet_tbl->id);
 656                hpet_address >>= 32;
 657        }
 658#endif
 659        printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 660               hpet_tbl->id, hpet_address);
 661
 662        /*
 663         * Allocate and initialize the HPET firmware resource for adding into
 664         * the resource tree during the lateinit timeframe.
 665         */
 666#define HPET_RESOURCE_NAME_SIZE 9
 667        hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
 668
 669        hpet_res->name = (void *)&hpet_res[1];
 670        hpet_res->flags = IORESOURCE_MEM;
 671        snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
 672                 hpet_tbl->sequence);
 673
 674        hpet_res->start = hpet_address;
 675        hpet_res->end = hpet_address + (1 * 1024) - 1;
 676
 677        return 0;
 678}
 679
 680/*
 681 * hpet_insert_resource inserts the HPET resources used into the resource
 682 * tree.
 683 */
 684static __init int hpet_insert_resource(void)
 685{
 686        if (!hpet_res)
 687                return 1;
 688
 689        return insert_resource(&iomem_resource, hpet_res);
 690}
 691
 692late_initcall(hpet_insert_resource);
 693
 694#else
 695#define acpi_parse_hpet NULL
 696#endif
 697
 698static int __init acpi_parse_fadt(struct acpi_table_header *table)
 699{
 700
 701#ifdef CONFIG_X86_PM_TIMER
 702        /* detect the location of the ACPI PM Timer */
 703        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
 704                /* FADT rev. 2 */
 705                if (acpi_gbl_FADT.xpm_timer_block.space_id !=
 706                    ACPI_ADR_SPACE_SYSTEM_IO)
 707                        return 0;
 708
 709                pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
 710                /*
 711                 * "X" fields are optional extensions to the original V1.0
 712                 * fields, so we must selectively expand V1.0 fields if the
 713                 * corresponding X field is zero.
 714                 */
 715                if (!pmtmr_ioport)
 716                        pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
 717        } else {
 718                /* FADT rev. 1 */
 719                pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
 720        }
 721        if (pmtmr_ioport)
 722                printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
 723                       pmtmr_ioport);
 724#endif
 725        return 0;
 726}
 727
 728#ifdef  CONFIG_X86_LOCAL_APIC
 729/*
 730 * Parse LAPIC entries in MADT
 731 * returns 0 on success, < 0 on error
 732 */
 733
 734static void __init acpi_register_lapic_address(unsigned long address)
 735{
 736        mp_lapic_addr = address;
 737
 738        set_fixmap_nocache(FIX_APIC_BASE, address);
 739        if (boot_cpu_physical_apicid == -1U) {
 740                boot_cpu_physical_apicid  = read_apic_id();
 741                apic_version[boot_cpu_physical_apicid] =
 742                         GET_APIC_VERSION(apic_read(APIC_LVR));
 743        }
 744}
 745
 746static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
 747{
 748        int count;
 749
 750        if (!cpu_has_apic)
 751                return -ENODEV;
 752
 753        /*
 754         * Note that the LAPIC address is obtained from the MADT (32-bit value)
  755         * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
 756         */
 757
 758        count =
 759            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
 760                                  acpi_parse_lapic_addr_ovr, 0);
 761        if (count < 0) {
 762                printk(KERN_ERR PREFIX
 763                       "Error parsing LAPIC address override entry\n");
 764                return count;
 765        }
 766
 767        acpi_register_lapic_address(acpi_lapic_addr);
 768
 769        return count;
 770}
 771
 772static int __init acpi_parse_madt_lapic_entries(void)
 773{
 774        int count;
 775        int x2count = 0;
 776
 777        if (!cpu_has_apic)
 778                return -ENODEV;
 779
 780        /*
 781         * Note that the LAPIC address is obtained from the MADT (32-bit value)
  782         * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
 783         */
 784
 785        count =
 786            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
 787                                  acpi_parse_lapic_addr_ovr, 0);
 788        if (count < 0) {
 789                printk(KERN_ERR PREFIX
 790                       "Error parsing LAPIC address override entry\n");
 791                return count;
 792        }
 793
 794        acpi_register_lapic_address(acpi_lapic_addr);
 795
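        /*
         * SAPIC entries are tried first; if none are present, fall back
         * to x2APIC and plain LAPIC entries.
         */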
 796        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
 797                                      acpi_parse_sapic, MAX_APICS);
 798
 799        if (!count) {
 800                x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
 801                                                acpi_parse_x2apic, MAX_APICS);
 802                count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
 803                                              acpi_parse_lapic, MAX_APICS);
 804        }
 805        if (!count && !x2count) {
 806                printk(KERN_ERR PREFIX "No LAPIC entries present\n");
 807                /* TBD: Cleanup to allow fallback to MPS */
 808                return -ENODEV;
 809        } else if (count < 0 || x2count < 0) {
 810                printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
 811                /* TBD: Cleanup to allow fallback to MPS */
 812                return count;
 813        }
 814
 815        x2count =
 816            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
 817                                  acpi_parse_x2apic_nmi, 0);
 818        count =
 819            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
 820        if (count < 0 || x2count < 0) {
 821                printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 822                /* TBD: Cleanup to allow fallback to MPS */
 823                return count;
 824        }
 825        return 0;
 826}
 827#endif                          /* CONFIG_X86_LOCAL_APIC */
 828
 829#ifdef  CONFIG_X86_IO_APIC
 830#define MP_ISA_BUS              0
 831
 832#ifdef CONFIG_X86_ES7000
 833extern int es7000_plat;
 834#endif
 835
 836int __init acpi_probe_gsi(void)
 837{
 838        int idx;
 839        int gsi;
 840        int max_gsi = 0;
 841
 842        if (acpi_disabled)
 843                return 0;
 844
 845        if (!acpi_ioapic)
 846                return 0;
 847
 848        max_gsi = 0;
 849        for (idx = 0; idx < nr_ioapics; idx++) {
 850                gsi = mp_gsi_routing[idx].gsi_end;
 851
 852                if (gsi > max_gsi)
 853                        max_gsi = gsi;
 854        }
 855
 856        return max_gsi + 1;
 857}
 858
 859static void assign_to_mp_irq(struct mpc_intsrc *m,
 860                                    struct mpc_intsrc *mp_irq)
 861{
 862        memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 863}
 864
 865static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
 866                                struct mpc_intsrc *m)
 867{
 868        return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 869}
 870
 871static void save_mp_irq(struct mpc_intsrc *m)
 872{
 873        int i;
 874
 875        for (i = 0; i < mp_irq_entries; i++) {
 876                if (!mp_irq_cmp(&mp_irqs[i], m))
 877                        return;
 878        }
 879
 880        assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
 881        if (++mp_irq_entries == MAX_IRQ_SOURCES)
 882                panic("Max # of irq sources exceeded!!\n");
 883}
 884
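/*
 * Record an MADT interrupt source override: translate the GSI to an
 * I/O APIC pin and save an mp_irqs[] entry that replaces the default
 * identity mapping for the given ISA IRQ.
 */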
 885void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 886{
 887        int ioapic;
 888        int pin;
 889        struct mpc_intsrc mp_irq;
 890
 891        /*
 892         * Convert 'gsi' to 'ioapic.pin'.
 893         */
 894        ioapic = mp_find_ioapic(gsi);
 895        if (ioapic < 0)
 896                return;
 897        pin = mp_find_ioapic_pin(ioapic, gsi);
 898
 899        /*
 900         * TBD: This check is for faulty timer entries, where the override
 901         *      erroneously sets the trigger to level, resulting in a HUGE
 902         *      increase of timer interrupts!
 903         */
 904        if ((bus_irq == 0) && (trigger == 3))
 905                trigger = 1;
 906
 907        mp_irq.type = MP_INTSRC;
 908        mp_irq.irqtype = mp_INT;
 909        mp_irq.irqflag = (trigger << 2) | polarity;
 910        mp_irq.srcbus = MP_ISA_BUS;
 911        mp_irq.srcbusirq = bus_irq;     /* IRQ */
 912        mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
 913        mp_irq.dstirq = pin;    /* INTIN# */
 914
 915        save_mp_irq(&mp_irq);
 916}
 917
 918void __init mp_config_acpi_legacy_irqs(void)
 919{
 920        int i;
 921        int ioapic;
 922        unsigned int dstapic;
 923        struct mpc_intsrc mp_irq;
 924
 925#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
 926        /*
 927         * Fabricate the legacy ISA bus (bus #31).
 928         */
 929        mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
 930#endif
 931        set_bit(MP_ISA_BUS, mp_bus_not_pci);
 932        pr_debug("Bus #%d is ISA\n", MP_ISA_BUS);
 933
 934#ifdef CONFIG_X86_ES7000
 935        /*
 936         * Older generations of ES7000 have no legacy identity mappings
 937         */
 938        if (es7000_plat == 1)
 939                return;
 940#endif
 941
 942        /*
 943         * Locate the IOAPIC that manages the ISA IRQs (0-15).
 944         */
 945        ioapic = mp_find_ioapic(0);
 946        if (ioapic < 0)
 947                return;
 948        dstapic = mp_ioapics[ioapic].apicid;
 949
 950        /*
 951         * Use the default configuration for the IRQs 0-15.  Unless
 952         * overridden by (MADT) interrupt source override entries.
 953         */
 954        for (i = 0; i < 16; i++) {
 955                int idx;
 956
 957                for (idx = 0; idx < mp_irq_entries; idx++) {
 958                        struct mpc_intsrc *irq = mp_irqs + idx;
 959
 960                        /* Do we already have a mapping for this ISA IRQ? */
 961                        if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
 962                                break;
 963
 964                        /* Do we already have a mapping for this IOAPIC pin */
 965                        if (irq->dstapic == dstapic && irq->dstirq == i)
 966                                break;
 967                }
 968
 969                if (idx != mp_irq_entries) {
 970                        printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
 971                        continue;       /* IRQ already used */
 972                }
 973
 974                mp_irq.type = MP_INTSRC;
 975                mp_irq.irqflag = 0;     /* Conforming */
 976                mp_irq.srcbus = MP_ISA_BUS;
 977                mp_irq.dstapic = dstapic;
 978                mp_irq.irqtype = mp_INT;
 979                mp_irq.srcbusirq = i; /* Identity mapped */
 980                mp_irq.dstirq = i;
 981
 982                save_mp_irq(&mp_irq);
 983        }
 984}
 985
 986static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
 987                        int polarity)
 988{
 989#ifdef CONFIG_X86_MPPARSE
 990        struct mpc_intsrc mp_irq;
 991        struct pci_dev *pdev;
 992        unsigned char number;
 993        unsigned int devfn;
 994        int ioapic;
 995        u8 pin;
 996
 997        if (!acpi_ioapic)
 998                return 0;
 999        if (!dev)
1000                return 0;
1001        if (dev->bus != &pci_bus_type)
1002                return 0;
1003
1004        pdev = to_pci_dev(dev);
1005        number = pdev->bus->number;
1006        devfn = pdev->devfn;
1007        pin = pdev->pin;
 1008        /* the generated entry should be identical to one in the MP table */
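        /*
         * MP-table irqflag encoding: bits 0-1 polarity (1 = active high,
         * 3 = active low), bits 2-3 trigger (1 = edge, 3 = level, hence
         * 4 and 0x0c below).  srcbusirq for a PCI bus encodes the device
         * number in bits 2-6 and the INT pin (INTA# = 0) in bits 0-1.
         */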
1009        mp_irq.type = MP_INTSRC;
1010        mp_irq.irqtype = mp_INT;
1011        mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
1012                                (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
1013        mp_irq.srcbus = number;
1014        mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
1015        ioapic = mp_find_ioapic(gsi);
1016        mp_irq.dstapic = mp_ioapics[ioapic].apicid;
1017        mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
1018
1019        save_mp_irq(&mp_irq);
1020#endif
1021        return 0;
1022}
1023
1024int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
1025{
1026        int ioapic;
1027        int ioapic_pin;
1028        struct io_apic_irq_attr irq_attr;
1029
1030        if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
1031                return gsi;
1032
1033        /* Don't set up the ACPI SCI because it's already set up */
1034        if (acpi_gbl_FADT.sci_interrupt == gsi)
1035                return gsi;
1036
1037        ioapic = mp_find_ioapic(gsi);
1038        if (ioapic < 0) {
1039                printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
1040                return gsi;
1041        }
1042
1043        ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
1044
1045#ifdef CONFIG_X86_32
1046        if (ioapic_renumber_irq)
1047                gsi = ioapic_renumber_irq(ioapic, gsi);
1048#endif
1049
1050        if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
1051                printk(KERN_ERR "Invalid reference to IOAPIC pin "
1052                       "%d-%d\n", mp_ioapics[ioapic].apicid,
1053                       ioapic_pin);
1054                return gsi;
1055        }
1056
1057        if (enable_update_mptable)
1058                mp_config_acpi_gsi(dev, gsi, trigger, polarity);
1059
1060        set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
1061                             trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
1062                             polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
1063        io_apic_set_pci_routing(dev, gsi, &irq_attr);
1064
1065        return gsi;
1066}
1067
1068/*
1069 * Parse IOAPIC related entries in MADT
1070 * returns 0 on success, < 0 on error
1071 */
1072static int __init acpi_parse_madt_ioapic_entries(void)
1073{
1074        int count;
1075
1076        /*
1077         * ACPI interpreter is required to complete interrupt setup,
1078         * so if it is off, don't enumerate the io-apics with ACPI.
1079         * If MPS is present, it will handle them,
1080         * otherwise the system will stay in PIC mode
1081         */
1082        if (acpi_disabled || acpi_noirq)
1083                return -ENODEV;
1084
1085        if (!cpu_has_apic)
1086                return -ENODEV;
1087
1088        /*
1089         * if "noapic" boot option, don't look for IO-APICs
1090         */
1091        if (skip_ioapic_setup) {
1092                printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
1093                       "due to 'noapic' option.\n");
1094                return -ENODEV;
1095        }
1096
1097        count =
1098            acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
1099                                  MAX_IO_APICS);
1100        if (!count) {
1101                printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
1102                return -ENODEV;
1103        } else if (count < 0) {
1104                printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
1105                return count;
1106        }
1107
1108        count =
1109            acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
1110                                  nr_irqs);
1111        if (count < 0) {
1112                printk(KERN_ERR PREFIX
1113                       "Error parsing interrupt source overrides entry\n");
1114                /* TBD: Cleanup to allow fallback to MPS */
1115                return count;
1116        }
1117
1118        /*
1119         * If BIOS did not supply an INT_SRC_OVR for the SCI
1120         * pretend we got one so we can set the SCI flags.
1121         */
1122        if (!acpi_sci_override_gsi)
1123                acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
1124
 1125        /* Fill in identity legacy mappings where there is no override */
1126        mp_config_acpi_legacy_irqs();
1127
1128        count =
1129            acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
1130                                  nr_irqs);
1131        if (count < 0) {
1132                printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
1133                /* TBD: Cleanup to allow fallback to MPS */
1134                return count;
1135        }
1136
1137        return 0;
1138}
1139#else
1140static inline int acpi_parse_madt_ioapic_entries(void)
1141{
1142        return -1;
1143}
1144#endif  /* !CONFIG_X86_IO_APIC */
1145
1146static void __init early_acpi_process_madt(void)
1147{
1148#ifdef CONFIG_X86_LOCAL_APIC
1149        int error;
1150
1151        if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
1152
1153                /*
1154                 * Parse MADT LAPIC entries
1155                 */
1156                error = early_acpi_parse_madt_lapic_addr_ovr();
1157                if (!error) {
1158                        acpi_lapic = 1;
1159                        smp_found_config = 1;
1160                }
1161                if (error == -EINVAL) {
1162                        /*
1163                         * Dell Precision Workstation 410, 610 come here.
1164                         */
1165                        printk(KERN_ERR PREFIX
1166                               "Invalid BIOS MADT, disabling ACPI\n");
1167                        disable_acpi();
1168                }
1169        }
1170#endif
1171}
1172
1173static void __init acpi_process_madt(void)
1174{
1175#ifdef CONFIG_X86_LOCAL_APIC
1176        int error;
1177
1178        if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
1179
1180                /*
1181                 * Parse MADT LAPIC entries
1182                 */
1183                error = acpi_parse_madt_lapic_entries();
1184                if (!error) {
1185                        acpi_lapic = 1;
1186
1187#ifdef CONFIG_X86_BIGSMP
1188                        generic_bigsmp_probe();
1189#endif
1190                        /*
1191                         * Parse MADT IO-APIC entries
1192                         */
1193                        error = acpi_parse_madt_ioapic_entries();
1194                        if (!error) {
1195                                acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
1196                                acpi_ioapic = 1;
1197
1198                                smp_found_config = 1;
1199                                if (apic->setup_apic_routing)
1200                                        apic->setup_apic_routing();
1201                        }
1202                }
1203                if (error == -EINVAL) {
1204                        /*
1205                         * Dell Precision Workstation 410, 610 come here.
1206                         */
1207                        printk(KERN_ERR PREFIX
1208                               "Invalid BIOS MADT, disabling ACPI\n");
1209                        disable_acpi();
1210                }
1211        } else {
1212                /*
1213                 * ACPI found no MADT, and so ACPI wants UP PIC mode.
1214                 * In the event an MPS table was found, forget it.
1215                 * Boot with "acpi=off" to use MPS on such a system.
1216                 */
1217                if (smp_found_config) {
1218                        printk(KERN_WARNING PREFIX
1219                                "No APIC-table, disabling MPS\n");
1220                        smp_found_config = 0;
1221                }
1222        }
1223
1224        /*
1225         * ACPI supports both logical (e.g. Hyper-Threading) and physical
1226         * processors, where MPS only supports physical.
1227         */
1228        if (acpi_lapic && acpi_ioapic)
1229                printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
1230                       "information\n");
1231        else if (acpi_lapic)
1232                printk(KERN_INFO "Using ACPI for processor (LAPIC) "
1233                       "configuration information\n");
1234#endif
1235        return;
1236}
1237
1238static int __init disable_acpi_irq(const struct dmi_system_id *d)
1239{
1240        if (!acpi_force) {
1241                printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
1242                       d->ident);
1243                acpi_noirq_set();
1244        }
1245        return 0;
1246}
1247
1248static int __init disable_acpi_pci(const struct dmi_system_id *d)
1249{
1250        if (!acpi_force) {
1251                printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
1252                       d->ident);
1253                acpi_disable_pci();
1254        }
1255        return 0;
1256}
1257
1258static int __init dmi_disable_acpi(const struct dmi_system_id *d)
1259{
1260        if (!acpi_force) {
1261                printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
1262                disable_acpi();
1263        } else {
1264                printk(KERN_NOTICE
1265                       "Warning: DMI blacklist says broken, but acpi forced\n");
1266        }
1267        return 0;
1268}
1269
1270/*
1271 * Limit ACPI to CPU enumeration for HT
1272 */
1273static int __init force_acpi_ht(const struct dmi_system_id *d)
1274{
1275        if (!acpi_force) {
1276                printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
1277                       d->ident);
1278                disable_acpi();
1279                acpi_ht = 1;
1280        } else {
1281                printk(KERN_NOTICE
1282                       "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
1283        }
1284        return 0;
1285}
1286
1287/*
1288 * Force ignoring BIOS IRQ0 pin2 override
1289 */
1290static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
1291{
1292        /*
1293         * The ati_ixp4x0_rev() early PCI quirk should have set
1294         * the acpi_skip_timer_override flag already:
1295         */
1296        if (!acpi_skip_timer_override) {
1297                WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
1298                pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
1299                        d->ident);
1300                acpi_skip_timer_override = 1;
1301        }
1302        return 0;
1303}
1304
1305/*
1306 * If your system is blacklisted here, but you find that acpi=force
1307 * works for you, please contact linux-acpi@vger.kernel.org
1308 */
1309static struct dmi_system_id __initdata acpi_dmi_table[] = {
1310        /*
1311         * Boxes that need ACPI disabled
1312         */
1313        {
1314         .callback = dmi_disable_acpi,
1315         .ident = "IBM Thinkpad",
1316         .matches = {
1317                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1318                     DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
1319                     },
1320         },
1321
1322        /*
1323         * Boxes that need acpi=ht
1324         */
1325        {
1326         .callback = force_acpi_ht,
1327         .ident = "FSC Primergy T850",
1328         .matches = {
1329                     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1330                     DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
1331                     },
1332         },
1333        {
1334         .callback = force_acpi_ht,
1335         .ident = "HP VISUALIZE NT Workstation",
1336         .matches = {
1337                     DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
1338                     DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
1339                     },
1340         },
1341        {
1342         .callback = force_acpi_ht,
1343         .ident = "Compaq Workstation W8000",
1344         .matches = {
1345                     DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
1346                     DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
1347                     },
1348         },
1349        {
1350         .callback = force_acpi_ht,
1351         .ident = "ASUS P2B-DS",
1352         .matches = {
1353                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1354                     DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
1355                     },
1356         },
1357        {
1358         .callback = force_acpi_ht,
1359         .ident = "ASUS CUR-DLS",
1360         .matches = {
1361                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1362                     DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
1363                     },
1364         },
1365        {
1366         .callback = force_acpi_ht,
1367         .ident = "ABIT i440BX-W83977",
1368         .matches = {
1369                     DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
1370                     DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
1371                     },
1372         },
1373        {
1374         .callback = force_acpi_ht,
1375         .ident = "IBM Bladecenter",
1376         .matches = {
1377                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1378                     DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
1379                     },
1380         },
1381        {
1382         .callback = force_acpi_ht,
1383         .ident = "IBM eServer xSeries 360",
1384         .matches = {
1385                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1386                     DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
1387                     },
1388         },
1389        {
1390         .callback = force_acpi_ht,
1391         .ident = "IBM eserver xSeries 330",
1392         .matches = {
1393                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1394                     DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
1395                     },
1396         },
1397        {
1398         .callback = force_acpi_ht,
1399         .ident = "IBM eserver xSeries 440",
1400         .matches = {
1401                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1402                     DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
1403                     },
1404         },
1405
1406        /*
1407         * Boxes that need ACPI PCI IRQ routing disabled
1408         */
1409        {
1410         .callback = disable_acpi_irq,
1411         .ident = "ASUS A7V",
1412         .matches = {
1413                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
1414                     DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
1415                     /* newer BIOS, Revision 1011, does work */
1416                     DMI_MATCH(DMI_BIOS_VERSION,
1417                               "ASUS A7V ACPI BIOS Revision 1007"),
1418                     },
1419         },
1420        {
1421                /*
1422                 * Latest BIOS for IBM 600E (1.16) has bad pcinum
1423                 * for LPC bridge, which is needed for the PCI
1424                 * interrupt links to work. DSDT fix is in bug 5966.
1425                 * 2645, 2646 model numbers are shared with 600/600E/600X
1426                 */
1427         .callback = disable_acpi_irq,
1428         .ident = "IBM Thinkpad 600 Series 2645",
1429         .matches = {
1430                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1431                     DMI_MATCH(DMI_BOARD_NAME, "2645"),
1432                     },
1433         },
1434        {
1435         .callback = disable_acpi_irq,
1436         .ident = "IBM Thinkpad 600 Series 2646",
1437         .matches = {
1438                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1439                     DMI_MATCH(DMI_BOARD_NAME, "2646"),
1440                     },
1441         },
1442        /*
1443         * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
1444         */
1445        {                       /* _BBN 0 bug */
1446         .callback = disable_acpi_pci,
1447         .ident = "ASUS PR-DLS",
1448         .matches = {
1449                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1450                     DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
1451                     DMI_MATCH(DMI_BIOS_VERSION,
1452                               "ASUS PR-DLS ACPI BIOS Revision 1010"),
1453                     DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
1454                     },
1455         },
1456        {
1457         .callback = disable_acpi_pci,
1458         .ident = "Acer TravelMate 36x Laptop",
1459         .matches = {
1460                     DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1461                     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
1462                     },
1463         },
1464        {}
1465};
1466
1467/* second table for DMI checks that should run after early-quirks */
1468static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
1469        /*
 1470         * HP laptops whose DSDT reports as HP/SB400/10000 and
 1471         * includes code that overrides all temperature trip points
 1472         * to 16C if the INTIN2 input of the I/O APIC is enabled.
 1473         * This input is incorrectly designated as ISA IRQ 0 via an
 1474         * interrupt source override, even though it is wired to the
 1475         * output of the master 8259A and INTIN0 is not connected at
 1476         * all.  Force ignoring the BIOS IRQ0 pin2 override in such
 1477         * cases.
1478         */
1479        {
1480         .callback = dmi_ignore_irq0_timer_override,
1481         .ident = "HP nx6115 laptop",
1482         .matches = {
1483                     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1484                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"),
1485                     },
1486         },
1487        {
1488         .callback = dmi_ignore_irq0_timer_override,
1489         .ident = "HP NX6125 laptop",
1490         .matches = {
1491                     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1492                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"),
1493                     },
1494         },
1495        {
1496         .callback = dmi_ignore_irq0_timer_override,
1497         .ident = "HP NX6325 laptop",
1498         .matches = {
1499                     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1500                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
1501                     },
1502         },
1503        {
1504         .callback = dmi_ignore_irq0_timer_override,
1505         .ident = "HP 6715b laptop",
1506         .matches = {
1507                     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1508                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
1509                     },
1510         },
1511        {}
1512};
1513
1514/*
1515 * acpi_boot_table_init() and acpi_boot_init()
1516 *  called from setup_arch(), always.
1517 *      1. checksums all tables
1518 *      2. enumerates lapics
1519 *      3. enumerates io-apics
1520 *
1521 * acpi_table_init() is separate to allow reading SRAT without
1522 * other side effects.
1523 *
1524 * side effects of acpi_boot_init:
1525 *      acpi_lapic = 1 if LAPIC found
1526 *      acpi_ioapic = 1 if IOAPIC found
1527 *      if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1528 *      if acpi_blacklisted() acpi_disabled = 1;
1529 *      acpi_irq_model=...
1530 *      ...
1531 *
1532 * return value: (currently ignored)
1533 *      0: success
1534 *      !0: failure
1535 */
1536
1537int __init acpi_boot_table_init(void)
1538{
1539        int error;
1540
1541        dmi_check_system(acpi_dmi_table);
1542
1543        /*
1544         * If acpi_disabled, bail out
1545         * One exception: acpi=ht continues far enough to enumerate LAPICs
1546         */
1547        if (acpi_disabled && !acpi_ht)
1548                return 1;
1549
1550        /*
1551         * Initialize the ACPI boot-time table parser.
1552         */
1553        error = acpi_table_init();
1554        if (error) {
1555                disable_acpi();
1556                return error;
1557        }
1558
1559        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1560
1561        /*
1562         * blacklist may disable ACPI entirely
1563         */
1564        error = acpi_blacklisted();
1565        if (error) {
1566                if (acpi_force) {
1567                        printk(KERN_WARNING PREFIX "acpi=force override\n");
1568                } else {
1569                        printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1570                        disable_acpi();
1571                        return error;
1572                }
1573        }
1574
1575        return 0;
1576}
1577
1578int __init early_acpi_boot_init(void)
1579{
1580        /*
1581         * If acpi_disabled, bail out
1582         * One exception: acpi=ht continues far enough to enumerate LAPICs
1583         */
1584        if (acpi_disabled && !acpi_ht)
1585                return 1;
1586
1587        /*
1588         * Process the Multiple APIC Description Table (MADT), if present
1589         */
1590        early_acpi_process_madt();
1591
1592        return 0;
1593}
1594
1595int __init acpi_boot_init(void)
1596{
1597        /* those are executed after early-quirks are executed */
1598        dmi_check_system(acpi_dmi_table_late);
1599
1600        /*
1601         * If acpi_disabled, bail out
1602         * One exception: acpi=ht continues far enough to enumerate LAPICs
1603         */
1604        if (acpi_disabled && !acpi_ht)
1605                return 1;
1606
1607        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1608
1609        /*
1610         * set sci_int and PM timer address
1611         */
1612        acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
1613
1614        /*
1615         * Process the Multiple APIC Description Table (MADT), if present
1616         */
1617        acpi_process_madt();
1618
1619        acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
1620
1621        return 0;
1622}
1623
1624static int __init parse_acpi(char *arg)
1625{
1626        if (!arg)
1627                return -EINVAL;
1628
1629        /* "acpi=off" disables both ACPI table parsing and interpreter */
1630        if (strcmp(arg, "off") == 0) {
1631                disable_acpi();
1632        }
1633        /* acpi=force to over-ride black-list */
1634        else if (strcmp(arg, "force") == 0) {
1635                acpi_force = 1;
1636                acpi_ht = 1;
1637                acpi_disabled = 0;
1638        }
1639        /* acpi=strict disables out-of-spec workarounds */
1640        else if (strcmp(arg, "strict") == 0) {
1641                acpi_strict = 1;
1642        }
1643        /* Limit ACPI just to boot-time to enable HT */
1644        else if (strcmp(arg, "ht") == 0) {
1645                if (!acpi_force)
1646                        disable_acpi();
1647                acpi_ht = 1;
1648        }
1649        /* acpi=rsdt use RSDT instead of XSDT */
1650        else if (strcmp(arg, "rsdt") == 0) {
1651                acpi_rsdt_forced = 1;
1652        }
1653        /* "acpi=noirq" disables ACPI interrupt routing */
1654        else if (strcmp(arg, "noirq") == 0) {
1655                acpi_noirq_set();
1656        } else {
1657                /* Core will printk when we return error. */
1658                return -EINVAL;
1659        }
1660        return 0;
1661}
1662early_param("acpi", parse_acpi);
1663
1664/* FIXME: Using pci= for an ACPI parameter is a travesty. */
1665static int __init parse_pci(char *arg)
1666{
1667        if (arg && strcmp(arg, "noacpi") == 0)
1668                acpi_disable_pci();
1669        return 0;
1670}
1671early_param("pci", parse_pci);
1672
1673int __init acpi_mps_check(void)
1674{
1675#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
 1676        /* mptable code is not built-in */
 1677        if (acpi_disabled || acpi_noirq) {
 1678                printk(KERN_WARNING "MPS support code is not built-in.\n"
 1679                       "Using acpi=off or acpi=noirq or pci=noacpi "
 1680                       "may cause problems\n");
1681                return 1;
1682        }
1683#endif
1684        return 0;
1685}
1686
1687#ifdef CONFIG_X86_IO_APIC
1688static int __init parse_acpi_skip_timer_override(char *arg)
1689{
1690        acpi_skip_timer_override = 1;
1691        return 0;
1692}
1693early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
1694
1695static int __init parse_acpi_use_timer_override(char *arg)
1696{
1697        acpi_use_timer_override = 1;
1698        return 0;
1699}
1700early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
1701#endif /* CONFIG_X86_IO_APIC */
1702
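/*
 * "acpi_sci=" takes one of edge, level, high or low.  Trigger and
 * polarity are kept in independent mask fields, so the option can be
 * given twice (e.g. "acpi_sci=edge acpi_sci=high") to force both.
 */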
1703static int __init setup_acpi_sci(char *s)
1704{
1705        if (!s)
1706                return -EINVAL;
1707        if (!strcmp(s, "edge"))
1708                acpi_sci_flags =  ACPI_MADT_TRIGGER_EDGE |
1709                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1710        else if (!strcmp(s, "level"))
1711                acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
1712                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1713        else if (!strcmp(s, "high"))
1714                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
1715                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1716        else if (!strcmp(s, "low"))
1717                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
1718                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1719        else
1720                return -EINVAL;
1721        return 0;
1722}
1723early_param("acpi_sci", setup_acpi_sci);
1724
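/*
 * The ACPI Global Lock is a 32-bit field in the FACS that is shared with
 * the firmware: bit 1 is "owned", bit 0 is "pending".  Acquire: atomically
 * try to set the owned bit; if the lock is already owned, set the pending
 * bit instead, so the current owner knows to signal us on release.
 *
 * Returns -1 (non-zero means "acquired" to the ACPICA caller) if we now
 * own the lock, 0 if it was busy and we only set the pending bit.
 */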
1725int __acpi_acquire_global_lock(unsigned int *lock)
1726{
1727        unsigned int old, new, val;
1728        do {
1729                old = *lock;
1730                new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
1731                val = cmpxchg(lock, old, new);
1732        } while (unlikely (val != old));
1733        return (new < 3) ? -1 : 0;
1734}
1735
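/*
 * Release the Global Lock: clear both the owned and pending bits.  The
 * old pending bit is returned so the caller knows whether someone is
 * waiting and needs to be signalled that the lock has been released.
 */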
1736int __acpi_release_global_lock(unsigned int *lock)
1737{
1738        unsigned int old, new, val;
1739        do {
1740                old = *lock;
1741                new = old & ~0x3;
1742                val = cmpxchg(lock, old, new);
1743        } while (unlikely (val != old));
1744        return old & 0x1;
1745}
1746