/* arch/x86/kernel/apic/apic.c */
   1/*
   2 *      Local APIC handling, local APIC timers
   3 *
   4 *      (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
   5 *
   6 *      Fixes
   7 *      Maciej W. Rozycki       :       Bits for genuine 82489DX APICs;
   8 *                                      thanks to Eric Gilmore
   9 *                                      and Rolf G. Tews
  10 *                                      for testing these extensively.
  11 *      Maciej W. Rozycki       :       Various updates and fixes.
  12 *      Mikael Pettersson       :       Power Management for UP-APIC.
  13 *      Pavel Machek and
  14 *      Mikael Pettersson       :       PM converted to driver model.
  15 */
  16
  17#include <linux/perf_event.h>
  18#include <linux/kernel_stat.h>
  19#include <linux/mc146818rtc.h>
  20#include <linux/acpi_pmtmr.h>
  21#include <linux/clockchips.h>
  22#include <linux/interrupt.h>
  23#include <linux/bootmem.h>
  24#include <linux/ftrace.h>
  25#include <linux/ioport.h>
  26#include <linux/module.h>
  27#include <linux/sysdev.h>
  28#include <linux/delay.h>
  29#include <linux/timex.h>
  30#include <linux/dmar.h>
  31#include <linux/init.h>
  32#include <linux/cpu.h>
  33#include <linux/dmi.h>
  34#include <linux/smp.h>
  35#include <linux/mm.h>
  36
  37#include <asm/perf_event.h>
  38#include <asm/x86_init.h>
  39#include <asm/pgalloc.h>
  40#include <asm/atomic.h>
  41#include <asm/mpspec.h>
  42#include <asm/i8253.h>
  43#include <asm/i8259.h>
  44#include <asm/proto.h>
  45#include <asm/apic.h>
  46#include <asm/desc.h>
  47#include <asm/hpet.h>
  48#include <asm/idle.h>
  49#include <asm/mtrr.h>
  50#include <asm/smp.h>
  51#include <asm/mce.h>
  52#include <asm/tsc.h>
  53#include <asm/hypervisor.h>
  54
/* Number of processors enumerated so far (CPUs registered during boot) */
unsigned int num_processors;

/* CPUs the firmware reported but that are not usable — NOTE(review): counted
 * during enumeration; confirm exact semantics against the enumeration code */
unsigned disabled_cpus __cpuinitdata;

/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;

/*
 * The highest APIC ID seen during enumeration.
 */
unsigned int max_physical_apicid;

/*
 * Bitmask of physically existing CPUs:
 */
physid_mask_t phys_cpu_present_map;

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
  79
#ifdef CONFIG_X86_32
/*
 * Knob to control our willingness to enable the local APIC.
 *
 * +1=force-enable
 */
static int force_enable_local_apic;
/*
 * APIC command line parameters
 *
 * "lapic" on the command line force-enables the local APIC even when
 * the BIOS left it disabled. early_param() handlers return 0 when the
 * parameter has been handled.
 */
static int __init parse_lapic(char *arg)
{
        force_enable_local_apic = 1;
        return 0;
}
early_param("lapic", parse_lapic);
/* Local APIC was disabled by the BIOS and enabled by the kernel */
static int enabled_via_apicbase;
  98
  99/*
 100 * Handle interrupt mode configuration register (IMCR).
 101 * This register controls whether the interrupt signals
 102 * that reach the BSP come from the master PIC or from the
 103 * local APIC. Before entering Symmetric I/O Mode, either
 104 * the BIOS or the operating system must switch out of
 105 * PIC Mode by changing the IMCR.
 106 */
 107static inline void imcr_pic_to_apic(void)
 108{
 109        /* select IMCR register */
 110        outb(0x70, 0x22);
 111        /* NMI and 8259 INTR go through APIC */
 112        outb(0x01, 0x23);
 113}
 114
 115static inline void imcr_apic_to_pic(void)
 116{
 117        /* select IMCR register */
 118        outb(0x70, 0x22);
 119        /* NMI and 8259 INTR go directly to BSP */
 120        outb(0x00, 0x23);
 121}
 122#endif
 123
 124#ifdef CONFIG_X86_64
 125static int apic_calibrate_pmtmr __initdata;
 126static __init int setup_apicpmtimer(char *s)
 127{
 128        apic_calibrate_pmtmr = 1;
 129        notsc_setup(NULL);
 130        return 0;
 131}
 132__setup("apicpmtimer", setup_apicpmtimer);
 133#endif
 134
 135int x2apic_mode;
 136#ifdef CONFIG_X86_X2APIC
 137/* x2apic enabled before OS handover */
 138static int x2apic_preenabled;
 139static __init int setup_nox2apic(char *str)
 140{
 141        if (x2apic_enabled()) {
 142                pr_warning("Bios already enabled x2apic, "
 143                           "can't enforce nox2apic");
 144                return 0;
 145        }
 146
 147        setup_clear_cpu_cap(X86_FEATURE_X2APIC);
 148        return 0;
 149}
 150early_param("nox2apic", setup_nox2apic);
 151#endif
 152
/* Physical address of the local APIC — NOTE(review): presumably filled in
 * from the MP/ACPI tables; confirm against the table-parsing code */
unsigned long mp_lapic_addr;
/* Non-zero when the local APIC is disabled (command line / quirk) */
int disable_apic;
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
static int disable_apic_timer __cpuinitdata;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/* Highest system vector not yet claimed — TODO confirm against vector alloc */
int first_system_vector = 0xfe;

/*
 * Debug level, exported for io_apic.c
 */
unsigned int apic_verbosity;

/* Non-zero when the system is in legacy PIC mode — NOTE(review): verify */
int pic_mode;

/* Have we found an MP table */
int smp_found_config;

/* Resource reservation for the local APIC register window */
static struct resource lapic_resource = {
        .name = "Local APIC",
        .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

/* APIC timer counts per calibration period, set by calibrate_APIC_clock() */
static unsigned int calibration_result;

/* Forward declarations for the clock_event_device callbacks below */
static int lapic_next_event(unsigned long delta,
                            struct clock_event_device *evt);
static void lapic_timer_setup(enum clock_event_mode mode,
                              struct clock_event_device *evt);
static void lapic_timer_broadcast(const struct cpumask *mask);
static void apic_pm_activate(void);

/*
 * The local apic timer can be used for any function which is CPU local.
 * Starts out with CLOCK_EVT_FEAT_DUMMY set; calibrate_APIC_clock()
 * clears it once the timer is verified to work.
 */
static struct clock_event_device lapic_clockevent = {
        .name           = "lapic",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
                        | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
        .shift          = 32,
        .set_mode       = lapic_timer_setup,
        .set_next_event = lapic_next_event,
        .broadcast      = lapic_timer_broadcast,
        .rating         = 100,
        .irq            = -1,
};
/* Per-CPU copy of lapic_clockevent, registered by setup_APIC_timer() */
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

/* Physical base of the mapped APIC; zero means "not mapped yet" */
static unsigned long apic_phys;
 204
 205/*
 206 * Get the LAPIC version
 207 */
 208static inline int lapic_get_version(void)
 209{
 210        return GET_APIC_VERSION(apic_read(APIC_LVR));
 211}
 212
/*
 * Check, if the APIC is integrated or a separate chip (82489DX)
 */
static inline int lapic_is_integrated(void)
{
#ifdef CONFIG_X86_64
        /* 64-bit capable CPUs always have an integrated APIC */
        return 1;
#else
        /* External 82489DX APICs are distinguished by their version */
        return APIC_INTEGRATED(lapic_get_version());
#endif
}
 224
 225/*
 226 * Check, whether this is a modern or a first generation APIC
 227 */
 228static int modern_apic(void)
 229{
 230        /* AMD systems use old APIC versions, so check the CPU */
 231        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 232            boot_cpu_data.x86 >= 0xf)
 233                return 1;
 234        return lapic_get_version() >= 0x14;
 235}
 236
/*
 * Right after this call the apic becomes NOOP driven,
 * so apic->write()/read() don't do anything anymore.
 */
void apic_disable(void)
{
        pr_info("APIC: switched to apic NOOP\n");
        apic = &apic_noop;
}
 246
 247void native_apic_wait_icr_idle(void)
 248{
 249        while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
 250                cpu_relax();
 251}
 252
 253u32 native_safe_apic_wait_icr_idle(void)
 254{
 255        u32 send_status;
 256        int timeout;
 257
 258        timeout = 0;
 259        do {
 260                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
 261                if (!send_status)
 262                        break;
 263                udelay(100);
 264        } while (timeout++ < 1000);
 265
 266        return send_status;
 267}
 268
/*
 * Issue an IPI command. The destination must go into ICR2 before the
 * low word is written: per the architected ICR layout, the write to
 * the low word is what initiates delivery.
 */
void native_apic_icr_write(u32 low, u32 id)
{
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
        apic_write(APIC_ICR, low);
}
 274
 275u64 native_apic_icr_read(void)
 276{
 277        u32 icr1, icr2;
 278
 279        icr2 = apic_read(APIC_ICR2);
 280        icr1 = apic_read(APIC_ICR);
 281
 282        return icr1 | ((u64)icr2 << 32);
 283}
 284
 285/**
 286 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
 287 */
 288void __cpuinit enable_NMI_through_LVT0(void)
 289{
 290        unsigned int v;
 291
 292        /* unmask and set to NMI */
 293        v = APIC_DM_NMI;
 294
 295        /* Level triggered for 82489DX (32bit mode) */
 296        if (!lapic_is_integrated())
 297                v |= APIC_LVT_LEVEL_TRIGGER;
 298
 299        apic_write(APIC_LVT0, v);
 300}
 301
 302#ifdef CONFIG_X86_32
 303/**
 304 * get_physical_broadcast - Get number of physical broadcast IDs
 305 */
 306int get_physical_broadcast(void)
 307{
 308        return modern_apic() ? 0xff : 0xf;
 309}
 310#endif
 311
 312/**
 313 * lapic_get_maxlvt - get the maximum number of local vector table entries
 314 */
 315int lapic_get_maxlvt(void)
 316{
 317        unsigned int v;
 318
 319        v = apic_read(APIC_LVR);
 320        /*
 321         * - we always have APIC integrated on 64bit mode
 322         * - 82489DXs do not report # of LVT entries
 323         */
 324        return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
 325}
 326
 327/*
 328 * Local APIC timer
 329 */
 330
 331/* Clock divisor */
 332#define APIC_DIVISOR 16
 333
 334/*
 335 * This function sets up the local APIC timer, with a timeout of
 336 * 'clocks' APIC bus clock. During calibration we actually call
 337 * this function twice on the boot CPU, once with a bogus timeout
 338 * value, second time for real. The other (noncalibrating) CPUs
 339 * call this function only once, with the real, calibrated value.
 340 *
 341 * We do reads before writes even if unnecessary, to get around the
 342 * P5 APIC double write bug.
 343 */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
        unsigned int lvtt_value, tmp_value;

        lvtt_value = LOCAL_TIMER_VECTOR;
        if (!oneshot)
                /* Periodic mode: counter reloads automatically */
                lvtt_value |= APIC_LVT_TIMER_PERIODIC;
        if (!lapic_is_integrated())
                /* 82489DX needs the timer clock base selected explicitly */
                lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);

        if (!irqen)
                /* Keep the interrupt masked (used during calibration) */
                lvtt_value |= APIC_LVT_MASKED;

        apic_write(APIC_LVTT, lvtt_value);

        /*
         * Divide PICLK by 16 (read-modify-write of the divide config
         * register; see the P5 double-write note above)
         */
        tmp_value = apic_read(APIC_TDCR);
        apic_write(APIC_TDCR,
                (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
                APIC_TDR_DIV_16);

        if (!oneshot)
                /* Arm the counter; oneshot mode is armed per-event later */
                apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}
 370
 371/*
 372 * Setup extended LVT, AMD specific
 373 *
 374 * Software should use the LVT offsets the BIOS provides.  The offsets
 375 * are determined by the subsystems using it like those for MCE
 376 * threshold or IBS.  On K8 only offset 0 (APIC500) and MCE interrupts
 377 * are supported. Beginning with family 10h at least 4 offsets are
 378 * available.
 379 *
 380 * Since the offsets must be consistent for all cores, we keep track
 381 * of the LVT offsets in software and reserve the offset for the same
 382 * vector also to be used on other cores. An offset is freed by
 383 * setting the entry to APIC_EILVT_MASKED.
 384 *
 385 * If the BIOS is right, there should be no conflicts. Otherwise a
 386 * "[Firmware Bug]: ..." error message is generated. However, if
 387 * software does not properly determines the offsets, it is not
 388 * necessarily a BIOS bug.
 389 */
 390
 391static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
 392
 393static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
 394{
 395        return (old & APIC_EILVT_MASKED)
 396                || (new == APIC_EILVT_MASKED)
 397                || ((new & ~APIC_EILVT_MASKED) == old);
 398}
 399
/*
 * Try to reserve extended-LVT slot @offset for value @new, lock-less
 * via cmpxchg so all CPUs agree on the assignment. Returns @new on
 * success, the conflicting already-reserved value when the slot holds
 * a different unmasked vector, and ~0 for an out-of-range offset.
 */
static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
{
        unsigned int rsvd;                      /* 0: uninitialized */

        if (offset >= APIC_EILVT_NR_MAX)
                return ~0;

        rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED;
        do {
                if (rsvd &&
                    !eilvt_entry_is_changeable(rsvd, new))
                        /* may not change if vectors are different */
                        return rsvd;
                /* Retry until our value sticks or a conflict appears */
                rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
        } while (rsvd != new);

        return new;
}
 418
 419/*
 420 * If mask=1, the LVT entry does not generate interrupts while mask=0
 421 * enables the vector. See also the BKDGs.
 422 */
 423
/*
 * setup_APIC_eilvt - program an AMD extended LVT entry on this CPU
 * @offset:   extended-LVT register index (selects APIC_EILVTn(offset))
 * @vector:   interrupt vector to deliver
 * @msg_type: delivery mode bits
 * @mask:     1 = entry masked (no interrupts), 0 = enabled
 *
 * Returns 0 on success, -EINVAL when the offset is reserved for a
 * different vector on another CPU, -EBUSY when this CPU's register
 * already holds an incompatible value (both are firmware bugs).
 */
int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
{
        unsigned long reg = APIC_EILVTn(offset);
        unsigned int new, old, reserved;

        new = (mask << 16) | (msg_type << 8) | vector;
        old = apic_read(reg);
        reserved = reserve_eilvt_offset(offset, new);

        if (reserved != new) {
                pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
                       "vector 0x%x, but the register is already in use for "
                       "vector 0x%x on another cpu\n",
                       smp_processor_id(), reg, offset, new, reserved);
                return -EINVAL;
        }

        if (!eilvt_entry_is_changeable(old, new)) {
                pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
                       "vector 0x%x, but the register is already in use for "
                       "vector 0x%x on this cpu\n",
                       smp_processor_id(), reg, offset, new, old);
                return -EBUSY;
        }

        apic_write(reg, new);

        return 0;
}
 454
/*
 * Program the next event, relative to now
 */
static int lapic_next_event(unsigned long delta,
                            struct clock_event_device *evt)
{
        /* Writing the initial-count register (re)arms the countdown */
        apic_write(APIC_TMICT, delta);
        return 0;       /* always succeeds */
}
 464
/*
 * Setup the lapic timer in periodic or oneshot mode
 */
static void lapic_timer_setup(enum clock_event_mode mode,
                              struct clock_event_device *evt)
{
        unsigned long flags;
        unsigned int v;

        /* Lapic used as dummy for broadcast ? */
        if (evt->features & CLOCK_EVT_FEAT_DUMMY)
                return;

        local_irq_save(flags);

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_ONESHOT:
                /* Reprogram with the calibrated count, interrupt enabled */
                __setup_APIC_LVTT(calibration_result,
                                  mode != CLOCK_EVT_MODE_PERIODIC, 1);
                break;
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                /* Mask the LVT entry and clear any pending count */
                v = apic_read(APIC_LVTT);
                v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
                apic_write(APIC_LVTT, v);
                apic_write(APIC_TMICT, 0);
                break;
        case CLOCK_EVT_MODE_RESUME:
                /* Nothing to do here */
                break;
        }

        local_irq_restore(flags);
}
 500
/*
 * Local APIC timer broadcast function: relay the tick to all CPUs in
 * @mask via an IPI (SMP only; a no-op on UP builds).
 */
static void lapic_timer_broadcast(const struct cpumask *mask)
{
#ifdef CONFIG_SMP
        apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
 510
/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void __cpuinit setup_APIC_timer(void)
{
        struct clock_event_device *levt = &__get_cpu_var(lapic_events);

        /* ARAT: the APIC timer keeps running in deep C-states */
        if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
                lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
                /* Make LAPIC timer preferable over percpu HPET */
                lapic_clockevent.rating = 150;
        }

        memcpy(levt, &lapic_clockevent, sizeof(*levt));
        levt->cpumask = cpumask_of(smp_processor_id());

        clockevents_register_device(levt);
}
 530
 531/*
 532 * In this functions we calibrate APIC bus clocks to the external timer.
 533 *
 534 * We want to do the calibration only once since we want to have local timer
 535 * irqs syncron. CPUs connected by the same APIC bus have the very same bus
 536 * frequency.
 537 *
 538 * This was previously done by reading the PIT/HPET and waiting for a wrap
 539 * around to find out, that a tick has elapsed. I have a box, where the PIT
 540 * readout is broken, so it never gets out of the wait loop again. This was
 541 * also reported by others.
 542 *
 543 * Monitoring the jiffies value is inaccurate and the clockevents
 544 * infrastructure allows us to do a simple substitution of the interrupt
 545 * handler.
 546 *
 547 * The calibration routine also uses the pm_timer when possible, as the PIT
 548 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
 549 * back to normal later in the boot process).
 550 */
 551
 552#define LAPIC_CAL_LOOPS         (HZ/10)
 553
/* Calibration state, boot-time only: loop counter, APIC timer counts,
 * TSC values, PM-timer values and jiffies at start (1) and end (2) */
static __initdata int lapic_cal_loops = -1;
static __initdata long lapic_cal_t1, lapic_cal_t2;
static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
 559
/*
 * Temporary interrupt handler: snapshots APIC timer count, TSC, PM
 * timer and jiffies on the first tick and again LAPIC_CAL_LOOPS ticks
 * later; the deltas are evaluated by calibrate_APIC_clock().
 */
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
        unsigned long long tsc = 0;
        long tapic = apic_read(APIC_TMCCT);
        unsigned long pm = acpi_pm_read_early();

        if (cpu_has_tsc)
                rdtscll(tsc);

        switch (lapic_cal_loops++) {
        case 0:
                /* First tick: record the start values */
                lapic_cal_t1 = tapic;
                lapic_cal_tsc1 = tsc;
                lapic_cal_pm1 = pm;
                lapic_cal_j1 = jiffies;
                break;

        case LAPIC_CAL_LOOPS:
                /* Last tick: record the end values */
                lapic_cal_t2 = tapic;
                lapic_cal_tsc2 = tsc;
                /* PM timer may have wrapped during the interval */
                if (pm < lapic_cal_pm1)
                        pm += ACPI_PM_OVRRUN;
                lapic_cal_pm2 = pm;
                lapic_cal_j2 = jiffies;
                break;
        }
}
 590
/*
 * Cross-check the APIC timer calibration against the ACPI PM timer
 * and rescale the deltas when they are inconsistent.
 * @deltapm:  observed PM-timer delta over the calibration interval
 * @delta:    in/out APIC timer delta, corrected when off by > 1%
 * @deltatsc: in/out TSC delta, corrected likewise
 *
 * Returns 0 when the PM timer was used, -1 when it is unavailable
 * (compiled out, or a delta of zero was read).
 */
static int __init
calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
{
        const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
        const long pm_thresh = pm_100ms / 100;  /* 1% tolerance */
        unsigned long mult;
        u64 res;

#ifndef CONFIG_X86_PM_TIMER
        return -1;
#endif

        apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);

        /* Check, if the PM timer is available */
        if (!deltapm)
                return -1;

        mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);

        /* Within 1% of the expected 100ms: accept the result as-is */
        if (deltapm > (pm_100ms - pm_thresh) &&
            deltapm < (pm_100ms + pm_thresh)) {
                apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
                return 0;
        }

        res = (((u64)deltapm) *  mult) >> 22;
        do_div(res, 1000000);
        pr_warning("APIC calibration not consistent "
                   "with PM-Timer: %ldms instead of 100ms\n",(long)res);

        /* Correct the lapic counter value */
        res = (((u64)(*delta)) * pm_100ms);
        do_div(res, deltapm);
        pr_info("APIC delta adjusted to PM-Timer: "
                "%lu (%ld)\n", (unsigned long)res, *delta);
        *delta = (long)res;

        /* Correct the tsc counter value */
        if (cpu_has_tsc) {
                res = (((u64)(*deltatsc)) * pm_100ms);
                do_div(res, deltapm);
                apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
                                          "PM-Timer: %lu (%ld)\n",
                                        (unsigned long)res, *deltatsc);
                *deltatsc = (long)res;
        }

        return 0;
}
 641
 642static int __init calibrate_APIC_clock(void)
 643{
 644        struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 645        void (*real_handler)(struct clock_event_device *dev);
 646        unsigned long deltaj;
 647        long delta, deltatsc;
 648        int pm_referenced = 0;
 649
 650        local_irq_disable();
 651
 652        /* Replace the global interrupt handler */
 653        real_handler = global_clock_event->event_handler;
 654        global_clock_event->event_handler = lapic_cal_handler;
 655
 656        /*
 657         * Setup the APIC counter to maximum. There is no way the lapic
 658         * can underflow in the 100ms detection time frame
 659         */
 660        __setup_APIC_LVTT(0xffffffff, 0, 0);
 661
 662        /* Let the interrupts run */
 663        local_irq_enable();
 664
 665        while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
 666                cpu_relax();
 667
 668        local_irq_disable();
 669
 670        /* Restore the real event handler */
 671        global_clock_event->event_handler = real_handler;
 672
 673        /* Build delta t1-t2 as apic timer counts down */
 674        delta = lapic_cal_t1 - lapic_cal_t2;
 675        apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
 676
 677        deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
 678
 679        /* we trust the PM based calibration if possible */
 680        pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
 681                                        &delta, &deltatsc);
 682
 683        /* Calculate the scaled math multiplication factor */
 684        lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
 685                                       lapic_clockevent.shift);
 686        lapic_clockevent.max_delta_ns =
 687                clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
 688        lapic_clockevent.min_delta_ns =
 689                clockevent_delta2ns(0xF, &lapic_clockevent);
 690
 691        calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
 692
 693        apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
 694        apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
 695        apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
 696                    calibration_result);
 697
 698        if (cpu_has_tsc) {
 699                apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
 700                            "%ld.%04ld MHz.\n",
 701                            (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
 702                            (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
 703        }
 704
 705        apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
 706                    "%u.%04u MHz.\n",
 707                    calibration_result / (1000000 / HZ),
 708                    calibration_result % (1000000 / HZ));
 709
 710        /*
 711         * Do a sanity check on the APIC calibration result
 712         */
 713        if (calibration_result < (1000000 / HZ)) {
 714                local_irq_enable();
 715                pr_warning("APIC frequency too slow, disabling apic timer\n");
 716                return -1;
 717        }
 718
 719        levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
 720
 721        /*
 722         * PM timer calibration failed or not turned on
 723         * so lets try APIC timer based calibration
 724         */
 725        if (!pm_referenced) {
 726                apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
 727
 728                /*
 729                 * Setup the apic timer manually
 730                 */
 731                levt->event_handler = lapic_cal_handler;
 732                lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
 733                lapic_cal_loops = -1;
 734
 735                /* Let the interrupts run */
 736                local_irq_enable();
 737
 738                while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
 739                        cpu_relax();
 740
 741                /* Stop the lapic timer */
 742                lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
 743
 744                /* Jiffies delta */
 745                deltaj = lapic_cal_j2 - lapic_cal_j1;
 746                apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
 747
 748                /* Check, if the jiffies result is consistent */
 749                if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
 750                        apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
 751                else
 752                        levt->features |= CLOCK_EVT_FEAT_DUMMY;
 753        } else
 754                local_irq_enable();
 755
 756        if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
 757                pr_warning("APIC timer disabled due to verification failure\n");
 758                        return -1;
 759        }
 760
 761        return 0;
 762}
 763
/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result.
 */
void __init setup_boot_APIC_clock(void)
{
        /*
         * The local apic timer can be disabled via the kernel
         * commandline or from the CPU detection code. Register the lapic
         * timer as a dummy clock event source on SMP systems, so the
         * broadcast mechanism is used. On UP systems simply ignore it.
         */
        if (disable_apic_timer) {
                pr_info("Disabling APIC timer\n");
                /* No broadcast on UP ! */
                if (num_possible_cpus() > 1) {
                        lapic_clockevent.mult = 1;
                        setup_APIC_timer();
                }
                return;
        }

        apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
                    "calibrating APIC timer ...\n");

        /* Calibration failed: register only as a broadcast dummy on SMP */
        if (calibrate_APIC_clock()) {
                /* No broadcast on UP ! */
                if (num_possible_cpus() > 1)
                        setup_APIC_timer();
                return;
        }

        /*
         * Calibration succeeded: the lapic is a fully working clock
         * event device, so drop the dummy flag from the template.
         */
        lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;

        /* Setup the lapic or request the broadcast */
        setup_APIC_timer();
}
 807
/*
 * Secondary CPUs reuse the boot CPU's calibration; just register the
 * per-cpu clock event device.
 */
void __cpuinit setup_secondary_APIC_clock(void)
{
        setup_APIC_timer();
}
 812
/*
 * The guts of the apic timer interrupt
 */
static void local_apic_timer_interrupt(void)
{
        int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

        /*
         * Normally we should not be here till LAPIC has been initialized but
         * in some cases like kdump, its possible that there is a pending LAPIC
         * timer interrupt from previous kernel's context and is delivered in
         * new kernel the moment interrupts are enabled.
         *
         * Interrupts are enabled early and LAPIC is setup much later, hence
         * its possible that when we get here evt->event_handler is NULL.
         * Check for event_handler being NULL and discard the interrupt as
         * spurious.
         */
        if (!evt->event_handler) {
                pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
                /* Switch it off */
                lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
                return;
        }

        /*
         * the NMI deadlock-detector uses this.
         */
        inc_irq_stat(apic_timer_irqs);

        evt->event_handler(evt);
}
 846
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /*
         * NOTE! We'd better ACK the irq immediately,
         * because timer handling can be slow.
         */
        ack_APIC_irq();
        /*
         * update_process_times() expects us to have done irq_enter(),
         * so wrap the handler call in irq_enter()/irq_exit().
         */
        exit_idle();
        irq_enter();
        local_apic_timer_interrupt();
        irq_exit();

        set_irq_regs(old_regs);
}
 876
/*
 * Changing the profiling tick multiplier is not supported here, so
 * reject all requests with -EINVAL.
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
 881
 882/*
 883 * Local APIC start and shutdown
 884 */
 885
/**
 * clear_local_APIC - shutdown the local APIC
 *
 * This is called, when a CPU is disabled and before rebooting, so the state of
 * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
 * leftovers during boot.
 */
void clear_local_APIC(void)
{
        int maxlvt;
        u32 v;

        /* APIC hasn't been mapped yet */
        if (!x2apic_mode && !apic_phys)
                return;

        maxlvt = lapic_get_maxlvt();
        /*
         * Masking an LVT entry can trigger a local APIC error
         * if the vector is zero. Mask LVTERR first to prevent this.
         */
        if (maxlvt >= 3) {
                v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
                apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        }
        /*
         * Careful: we have to set masks only first to deassert
         * any level-triggered sources.
         */
        v = apic_read(APIC_LVTT);
        apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
        v = apic_read(APIC_LVT0);
        apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
        v = apic_read(APIC_LVT1);
        apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
        if (maxlvt >= 4) {
                v = apic_read(APIC_LVTPC);
                apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
        }

        /* lets not touch this if we didn't frob it */
#ifdef CONFIG_X86_THERMAL_VECTOR
        if (maxlvt >= 5) {
                v = apic_read(APIC_LVTTHMR);
                apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
        }
#endif
#ifdef CONFIG_X86_MCE_INTEL
        if (maxlvt >= 6) {
                v = apic_read(APIC_LVTCMCI);
                /* only mask CMCI if it was ever unmasked */
                if (!(v & APIC_LVT_MASKED))
                        apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
        }
#endif

        /*
         * Clean APIC state for other OSs:
         */
        apic_write(APIC_LVTT, APIC_LVT_MASKED);
        apic_write(APIC_LVT0, APIC_LVT_MASKED);
        apic_write(APIC_LVT1, APIC_LVT_MASKED);
        if (maxlvt >= 3)
                apic_write(APIC_LVTERR, APIC_LVT_MASKED);
        if (maxlvt >= 4)
                apic_write(APIC_LVTPC, APIC_LVT_MASKED);

        /* Integrated APIC (!82489DX) ? */
        if (lapic_is_integrated()) {
                if (maxlvt > 3)
                        /* Clear ESR due to Pentium errata 3AP and 11AP */
                        apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }
}
 960
 961/**
 962 * disable_local_APIC - clear and disable the local APIC
 963 */
 964void disable_local_APIC(void)
 965{
 966        unsigned int value;
 967
 968        /* APIC hasn't been mapped yet */
 969        if (!x2apic_mode && !apic_phys)
 970                return;
 971
 972        clear_local_APIC();
 973
 974        /*
 975         * Disable APIC (implies clearing of registers
 976         * for 82489DX!).
 977         */
 978        value = apic_read(APIC_SPIV);
 979        value &= ~APIC_SPIV_APIC_ENABLED;
 980        apic_write(APIC_SPIV, value);
 981
 982#ifdef CONFIG_X86_32
 983        /*
 984         * When LAPIC was disabled by the BIOS and enabled by the kernel,
 985         * restore the disabled state.
 986         */
 987        if (enabled_via_apicbase) {
 988                unsigned int l, h;
 989
 990                rdmsr(MSR_IA32_APICBASE, l, h);
 991                l &= ~MSR_IA32_APICBASE_ENABLE;
 992                wrmsr(MSR_IA32_APICBASE, l, h);
 993        }
 994#endif
 995}
 996
 997/*
 998 * If Linux enabled the LAPIC against the BIOS default disable it down before
 999 * re-entering the BIOS on shutdown.  Otherwise the BIOS may get confused and
1000 * not power-off.  Additionally clear all LVT entries before disable_local_APIC
1001 * for the case where Linux didn't enable the LAPIC.
1002 */
1003void lapic_shutdown(void)
1004{
1005        unsigned long flags;
1006
1007        if (!cpu_has_apic && !apic_from_smp_config())
1008                return;
1009
1010        local_irq_save(flags);
1011
1012#ifdef CONFIG_X86_32
1013        if (!enabled_via_apicbase)
1014                clear_local_APIC();
1015        else
1016#endif
1017                disable_local_APIC();
1018
1019
1020        local_irq_restore(flags);
1021}
1022
1023/*
1024 * This is to verify that we're looking at a real local APIC.
1025 * Check these against your board if the CPUs aren't getting
1026 * started for no apparent reason.
1027 */
1028int __init verify_local_APIC(void)
1029{
1030        unsigned int reg0, reg1;
1031
1032        /*
1033         * The version register is read-only in a real APIC.
1034         */
1035        reg0 = apic_read(APIC_LVR);
1036        apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
1037        apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
1038        reg1 = apic_read(APIC_LVR);
1039        apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
1040
1041        /*
1042         * The two version reads above should print the same
1043         * numbers.  If the second one is different, then we
1044         * poke at a non-APIC.
1045         */
1046        if (reg1 != reg0)
1047                return 0;
1048
1049        /*
1050         * Check if the version looks reasonably.
1051         */
1052        reg1 = GET_APIC_VERSION(reg0);
1053        if (reg1 == 0x00 || reg1 == 0xff)
1054                return 0;
1055        reg1 = lapic_get_maxlvt();
1056        if (reg1 < 0x02 || reg1 == 0xff)
1057                return 0;
1058
1059        /*
1060         * The ID register is read/write in a real APIC.
1061         */
1062        reg0 = apic_read(APIC_ID);
1063        apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
1064        apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
1065        reg1 = apic_read(APIC_ID);
1066        apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
1067        apic_write(APIC_ID, reg0);
1068        if (reg1 != (reg0 ^ apic->apic_id_mask))
1069                return 0;
1070
1071        /*
1072         * The next two are just to see if we have sane values.
1073         * They're only really relevant if we're in Virtual Wire
1074         * compatibility mode, but most boxes are anymore.
1075         */
1076        reg0 = apic_read(APIC_LVT0);
1077        apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
1078        reg1 = apic_read(APIC_LVT1);
1079        apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
1080
1081        return 1;
1082}
1083
1084/**
1085 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
1086 */
1087void __init sync_Arb_IDs(void)
1088{
1089        /*
1090         * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
1091         * needed on AMD.
1092         */
1093        if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1094                return;
1095
1096        /*
1097         * Wait for idle.
1098         */
1099        apic_wait_icr_idle();
1100
1101        apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
1102        apic_write(APIC_ICR, APIC_DEST_ALLINC |
1103                        APIC_INT_LEVELTRIG | APIC_DM_INIT);
1104}
1105
1106/*
1107 * An initial setup of the virtual wire mode.
1108 */
1109void __init init_bsp_APIC(void)
1110{
1111        unsigned int value;
1112
1113        /*
1114         * Don't do the setup now if we have a SMP BIOS as the
1115         * through-I/O-APIC virtual wire mode might be active.
1116         */
1117        if (smp_found_config || !cpu_has_apic)
1118                return;
1119
1120        /*
1121         * Do not trust the local APIC being empty at bootup.
1122         */
1123        clear_local_APIC();
1124
1125        /*
1126         * Enable APIC.
1127         */
1128        value = apic_read(APIC_SPIV);
1129        value &= ~APIC_VECTOR_MASK;
1130        value |= APIC_SPIV_APIC_ENABLED;
1131
1132#ifdef CONFIG_X86_32
1133        /* This bit is reserved on P4/Xeon and should be cleared */
1134        if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1135            (boot_cpu_data.x86 == 15))
1136                value &= ~APIC_SPIV_FOCUS_DISABLED;
1137        else
1138#endif
1139                value |= APIC_SPIV_FOCUS_DISABLED;
1140        value |= SPURIOUS_APIC_VECTOR;
1141        apic_write(APIC_SPIV, value);
1142
1143        /*
1144         * Set up the virtual wire mode.
1145         */
1146        apic_write(APIC_LVT0, APIC_DM_EXTINT);
1147        value = APIC_DM_NMI;
1148        if (!lapic_is_integrated())             /* 82489DX */
1149                value |= APIC_LVT_LEVEL_TRIGGER;
1150        apic_write(APIC_LVT1, value);
1151}
1152
/*
 * lapic_setup_esr - enable the error vector (LVTERR) on an integrated
 * APIC, unless the apic driver has requested the ESR be left disabled.
 */
static void __cpuinit lapic_setup_esr(void)
{
	unsigned int oldvalue, value, maxlvt;

	/* the 82489DX external APIC has no ESR at all */
	if (!lapic_is_integrated()) {
		pr_info("No ESR for 82489DX.\n");
		return;
	}

	if (apic->disable_esr) {
		/*
		 * Something untraceable is creating bad interrupts on
		 * secondary quads ... for the moment, just leave the
		 * ESR disabled - we can't do anything useful with the
		 * errors anyway - mbligh
		 */
		pr_info("Leaving ESR disabled.\n");
		return;
	}

	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
		apic_write(APIC_ESR, 0);
	oldvalue = apic_read(APIC_ESR);

	/* enables sending errors */
	value = ERROR_APIC_VECTOR;
	apic_write(APIC_LVTERR, value);

	/*
	 * spec says clear errors after enabling vector.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
	value = apic_read(APIC_ESR);
	if (value != oldvalue)
		apic_printk(APIC_VERBOSE, "ESR value before enabling "
			"vector: 0x%08x  after: 0x%08x\n",
			oldvalue, value);
}
1193
1194/**
1195 * setup_local_APIC - setup the local APIC
1196 *
1197 * Used to setup local APIC while initializing BSP or bringin up APs.
1198 * Always called with preemption disabled.
1199 */
1200void __cpuinit setup_local_APIC(void)
1201{
1202        int cpu = smp_processor_id();
1203        unsigned int value, queued;
1204        int i, j, acked = 0;
1205        unsigned long long tsc = 0, ntsc;
1206        long long max_loops = cpu_khz;
1207
1208        if (cpu_has_tsc)
1209                rdtscll(tsc);
1210
1211        if (disable_apic) {
1212                arch_disable_smp_support();
1213                return;
1214        }
1215
1216#ifdef CONFIG_X86_32
1217        /* Pound the ESR really hard over the head with a big hammer - mbligh */
1218        if (lapic_is_integrated() && apic->disable_esr) {
1219                apic_write(APIC_ESR, 0);
1220                apic_write(APIC_ESR, 0);
1221                apic_write(APIC_ESR, 0);
1222                apic_write(APIC_ESR, 0);
1223        }
1224#endif
1225        perf_events_lapic_init();
1226
1227        /*
1228         * Double-check whether this APIC is really registered.
1229         * This is meaningless in clustered apic mode, so we skip it.
1230         */
1231        BUG_ON(!apic->apic_id_registered());
1232
1233        /*
1234         * Intel recommends to set DFR, LDR and TPR before enabling
1235         * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
1236         * document number 292116).  So here it goes...
1237         */
1238        apic->init_apic_ldr();
1239
1240        /*
1241         * Set Task Priority to 'accept all'. We never change this
1242         * later on.
1243         */
1244        value = apic_read(APIC_TASKPRI);
1245        value &= ~APIC_TPRI_MASK;
1246        apic_write(APIC_TASKPRI, value);
1247
1248        /*
1249         * After a crash, we no longer service the interrupts and a pending
1250         * interrupt from previous kernel might still have ISR bit set.
1251         *
1252         * Most probably by now CPU has serviced that pending interrupt and
1253         * it might not have done the ack_APIC_irq() because it thought,
1254         * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
1255         * does not clear the ISR bit and cpu thinks it has already serivced
1256         * the interrupt. Hence a vector might get locked. It was noticed
1257         * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
1258         */
1259        do {
1260                queued = 0;
1261                for (i = APIC_ISR_NR - 1; i >= 0; i--)
1262                        queued |= apic_read(APIC_IRR + i*0x10);
1263
1264                for (i = APIC_ISR_NR - 1; i >= 0; i--) {
1265                        value = apic_read(APIC_ISR + i*0x10);
1266                        for (j = 31; j >= 0; j--) {
1267                                if (value & (1<<j)) {
1268                                        ack_APIC_irq();
1269                                        acked++;
1270                                }
1271                        }
1272                }
1273                if (acked > 256) {
1274                        printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
1275                               acked);
1276                        break;
1277                }
1278                if (cpu_has_tsc) {
1279                        rdtscll(ntsc);
1280                        max_loops = (cpu_khz << 10) - (ntsc - tsc);
1281                } else
1282                        max_loops--;
1283        } while (queued && max_loops > 0);
1284        WARN_ON(max_loops <= 0);
1285
1286        /*
1287         * Now that we are all set up, enable the APIC
1288         */
1289        value = apic_read(APIC_SPIV);
1290        value &= ~APIC_VECTOR_MASK;
1291        /*
1292         * Enable APIC
1293         */
1294        value |= APIC_SPIV_APIC_ENABLED;
1295
1296#ifdef CONFIG_X86_32
1297        /*
1298         * Some unknown Intel IO/APIC (or APIC) errata is biting us with
1299         * certain networking cards. If high frequency interrupts are
1300         * happening on a particular IOAPIC pin, plus the IOAPIC routing
1301         * entry is masked/unmasked at a high rate as well then sooner or
1302         * later IOAPIC line gets 'stuck', no more interrupts are received
1303         * from the device. If focus CPU is disabled then the hang goes
1304         * away, oh well :-(
1305         *
1306         * [ This bug can be reproduced easily with a level-triggered
1307         *   PCI Ne2000 networking cards and PII/PIII processors, dual
1308         *   BX chipset. ]
1309         */
1310        /*
1311         * Actually disabling the focus CPU check just makes the hang less
1312         * frequent as it makes the interrupt distributon model be more
1313         * like LRU than MRU (the short-term load is more even across CPUs).
1314         * See also the comment in end_level_ioapic_irq().  --macro
1315         */
1316
1317        /*
1318         * - enable focus processor (bit==0)
1319         * - 64bit mode always use processor focus
1320         *   so no need to set it
1321         */
1322        value &= ~APIC_SPIV_FOCUS_DISABLED;
1323#endif
1324
1325        /*
1326         * Set spurious IRQ vector
1327         */
1328        value |= SPURIOUS_APIC_VECTOR;
1329        apic_write(APIC_SPIV, value);
1330
1331        /*
1332         * Set up LVT0, LVT1:
1333         *
1334         * set up through-local-APIC on the BP's LINT0. This is not
1335         * strictly necessary in pure symmetric-IO mode, but sometimes
1336         * we delegate interrupts to the 8259A.
1337         */
1338        /*
1339         * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1340         */
1341        value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1342        if (!cpu && (pic_mode || !value)) {
1343                value = APIC_DM_EXTINT;
1344                apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1345        } else {
1346                value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1347                apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
1348        }
1349        apic_write(APIC_LVT0, value);
1350
1351        /*
1352         * only the BP should see the LINT1 NMI signal, obviously.
1353         */
1354        if (!cpu)
1355                value = APIC_DM_NMI;
1356        else
1357                value = APIC_DM_NMI | APIC_LVT_MASKED;
1358        if (!lapic_is_integrated())             /* 82489DX */
1359                value |= APIC_LVT_LEVEL_TRIGGER;
1360        apic_write(APIC_LVT1, value);
1361
1362#ifdef CONFIG_X86_MCE_INTEL
1363        /* Recheck CMCI information after local APIC is up on CPU #0 */
1364        if (!cpu)
1365                cmci_recheck();
1366#endif
1367}
1368
/*
 * end_local_APIC_setup - finish per-CPU APIC bringup: enable the error
 * vector and register with the APIC power management code.
 */
void __cpuinit end_local_APIC_setup(void)
{
	lapic_setup_esr();

#ifdef CONFIG_X86_32
	{
		unsigned int value;
		/* Disable the local apic timer */
		value = apic_read(APIC_LVTT);
		value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, value);
	}
#endif

	apic_pm_activate();
}
1385
/*
 * bsp_end_local_APIC_setup - BSP-only tail of APIC bringup; also sets
 * up interrupt-remapping fault handling once the BSP's APIC is ready.
 */
void __init bsp_end_local_APIC_setup(void)
{
	end_local_APIC_setup();

	/*
	 * Now that local APIC setup is completed for BP, configure the fault
	 * handling for interrupt remapping.
	 */
	if (intr_remapping_enabled)
		enable_drhd_fault_handling();

}
1398
1399#ifdef CONFIG_X86_X2APIC
1400void check_x2apic(void)
1401{
1402        if (x2apic_enabled()) {
1403                pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
1404                x2apic_preenabled = x2apic_mode = 1;
1405        }
1406}
1407
1408void enable_x2apic(void)
1409{
1410        int msr, msr2;
1411
1412        if (!x2apic_mode)
1413                return;
1414
1415        rdmsr(MSR_IA32_APICBASE, msr, msr2);
1416        if (!(msr & X2APIC_ENABLE)) {
1417                printk_once(KERN_INFO "Enabling x2apic\n");
1418                wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
1419        }
1420}
1421#endif /* CONFIG_X86_X2APIC */
1422
/*
 * enable_IR - try to enable interrupt remapping
 *
 * Returns 1 if interrupt remapping was enabled, 0 otherwise (including
 * when CONFIG_INTR_REMAP is not compiled in, via the fall-through
 * return below the #endif).
 */
int __init enable_IR(void)
{
#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_supported()) {
		pr_debug("intr-remapping not supported\n");
		return 0;
	}

	if (!x2apic_preenabled && skip_ioapic_setup) {
		pr_info("Skipped enabling intr-remap because of skipping "
			"io-apic setup\n");
		return 0;
	}

	/* enable_intr_remapping() returns non-zero on failure */
	if (enable_intr_remapping(x2apic_supported()))
		return 0;

	pr_info("Enabled Interrupt-remapping\n");

	return 1;

#endif
	return 0;
}
1447
/*
 * enable_IR_x2apic - enable interrupt remapping and, when possible,
 * switch the system into x2apic mode.
 *
 * IO-APIC state is saved and all interrupt sources are masked around
 * the mode switch.  If interrupt remapping fails, x2apic is still
 * attempted when the hypervisor makes it usable and all APIC IDs fit
 * in 8 bits; otherwise the saved IO-APIC state is restored.  Panics if
 * the BIOS pre-enabled x2apic but the kernel could not finish the job
 * (there is no way back to xapic mode in that case).
 */
void __init enable_IR_x2apic(void)
{
	unsigned long flags;
	struct IO_APIC_route_entry **ioapic_entries = NULL;
	int ret, x2apic_enabled = 0;
	int dmar_table_init_ret;

	dmar_table_init_ret = dmar_table_init();
	/* without a DMAR table IR can't work; bail unless x2apic may help */
	if (dmar_table_init_ret && !x2apic_supported())
		return;

	ioapic_entries = alloc_ioapic_entries();
	if (!ioapic_entries) {
		pr_err("Allocate ioapic_entries failed\n");
		goto out;
	}

	ret = save_IO_APIC_setup(ioapic_entries);
	if (ret) {
		pr_info("Saving IO-APIC state failed: %d\n", ret);
		goto out;
	}

	/* quiesce all interrupt sources while switching modes */
	local_irq_save(flags);
	legacy_pic->mask_all();
	mask_IO_APIC_setup(ioapic_entries);

	if (dmar_table_init_ret)
		ret = 0;
	else
		ret = enable_IR();

	if (!ret) {
		/* IR is required if there is APIC ID > 255 even when running
		 * under KVM
		 */
		if (max_physical_apicid > 255 ||
		    !hypervisor_x2apic_available())
			goto nox2apic;
		/*
		 * without IR all CPUs can be addressed by IOAPIC/MSI
		 * only in physical mode
		 */
		x2apic_force_phys();
	}

	x2apic_enabled = 1;

	if (x2apic_supported() && !x2apic_mode) {
		x2apic_mode = 1;
		enable_x2apic();
		pr_info("Enabled x2apic\n");
	}

nox2apic:
	if (!ret) /* IR enabling failed */
		restore_IO_APIC_setup(ioapic_entries);
	legacy_pic->restore_mask();
	local_irq_restore(flags);

out:
	if (ioapic_entries)
		free_ioapic_entries(ioapic_entries);

	if (x2apic_enabled)
		return;

	if (x2apic_preenabled)
		panic("x2apic: enabled by BIOS but kernel init failed.");
	else if (cpu_has_x2apic)
		pr_info("Not enabling x2apic, Intr-remapping init failed.\n");
}
1520
1521#ifdef CONFIG_X86_64
1522/*
1523 * Detect and enable local APICs on non-SMP boards.
1524 * Original code written by Keir Fraser.
1525 * On AMD64 we trust the BIOS - if it says no APIC it is likely
1526 * not correctly set up (usually the APIC timer won't work etc.)
1527 */
1528static int __init detect_init_APIC(void)
1529{
1530        if (!cpu_has_apic) {
1531                pr_info("No local APIC present\n");
1532                return -1;
1533        }
1534
1535        mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1536        return 0;
1537}
1538#else
1539
/*
 * apic_verify - confirm the APIC is usable after (re-)enabling it and
 * record its base address in mp_lapic_addr.
 *
 * Returns 0 on success, -1 if CPUID still does not report an APIC.
 */
static int apic_verify(void)
{
	u32 features, h, l;

	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		pr_warning("Could not enable APIC!\n");
		return -1;
	}
	set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/* The BIOS may have set up the APIC at some other address */
	rdmsr(MSR_IA32_APICBASE, l, h);
	if (l & MSR_IA32_APICBASE_ENABLE)
		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;

	pr_info("Found and enabled local APIC!\n");
	return 0;
}
1564
/*
 * apic_force_enable - re-enable a BIOS-disabled local APIC through the
 * APICBASE MSR, then verify it works.
 *
 * Returns 0 on success, -1 on failure (or when "nolapic" was given).
 */
int apic_force_enable(void)
{
	u32 h, l;

	if (disable_apic)
		return -1;

	/*
	 * Some BIOSes disable the local APIC in the APIC_BASE
	 * MSR. This can only be done in software for Intel P6 or later
	 * and AMD K7 (Model > 1) or later.
	 */
	rdmsr(MSR_IA32_APICBASE, l, h);
	if (!(l & MSR_IA32_APICBASE_ENABLE)) {
		pr_info("Local APIC disabled by BIOS -- reenabling.\n");
		l &= ~MSR_IA32_APICBASE_BASE;
		l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
		wrmsr(MSR_IA32_APICBASE, l, h);
		/* remember so shutdown can restore the BIOS state */
		enabled_via_apicbase = 1;
	}
	return apic_verify();
}
1587
1588/*
1589 * Detect and initialize APIC
1590 */
1591static int __init detect_init_APIC(void)
1592{
1593        /* Disabled by kernel option? */
1594        if (disable_apic)
1595                return -1;
1596
1597        switch (boot_cpu_data.x86_vendor) {
1598        case X86_VENDOR_AMD:
1599                if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1600                    (boot_cpu_data.x86 >= 15))
1601                        break;
1602                goto no_apic;
1603        case X86_VENDOR_INTEL:
1604                if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1605                    (boot_cpu_data.x86 == 5 && cpu_has_apic))
1606                        break;
1607                goto no_apic;
1608        default:
1609                goto no_apic;
1610        }
1611
1612        if (!cpu_has_apic) {
1613                /*
1614                 * Over-ride BIOS and try to enable the local APIC only if
1615                 * "lapic" specified.
1616                 */
1617                if (!force_enable_local_apic) {
1618                        pr_info("Local APIC disabled by BIOS -- "
1619                                "you can enable it with \"lapic\"\n");
1620                        return -1;
1621                }
1622                if (apic_force_enable())
1623                        return -1;
1624        } else {
1625                if (apic_verify())
1626                        return -1;
1627        }
1628
1629        apic_pm_activate();
1630
1631        return 0;
1632
1633no_apic:
1634        pr_info("No local APIC present or hardware disabled\n");
1635        return -1;
1636}
1637#endif
1638
1639/**
1640 * init_apic_mappings - initialize APIC mappings
1641 */
1642void __init init_apic_mappings(void)
1643{
1644        unsigned int new_apicid;
1645
1646        if (x2apic_mode) {
1647                boot_cpu_physical_apicid = read_apic_id();
1648                return;
1649        }
1650
1651        /* If no local APIC can be found return early */
1652        if (!smp_found_config && detect_init_APIC()) {
1653                /* lets NOP'ify apic operations */
1654                pr_info("APIC: disable apic facility\n");
1655                apic_disable();
1656        } else {
1657                apic_phys = mp_lapic_addr;
1658
1659                /*
1660                 * acpi lapic path already maps that address in
1661                 * acpi_register_lapic_address()
1662                 */
1663                if (!acpi_lapic && !smp_found_config)
1664                        register_lapic_address(apic_phys);
1665        }
1666
1667        /*
1668         * Fetch the APIC ID of the BSP in case we have a
1669         * default configuration (or the MP table is broken).
1670         */
1671        new_apicid = read_apic_id();
1672        if (boot_cpu_physical_apicid != new_apicid) {
1673                boot_cpu_physical_apicid = new_apicid;
1674                /*
1675                 * yeah -- we lie about apic_version
1676                 * in case if apic was disabled via boot option
1677                 * but it's not a problem for SMP compiled kernel
1678                 * since smp_sanity_check is prepared for such a case
1679                 * and disable smp mode
1680                 */
1681                apic_version[new_apicid] =
1682                         GET_APIC_VERSION(apic_read(APIC_LVR));
1683        }
1684}
1685
/*
 * register_lapic_address - record and (for xapic) fixmap-map the local
 * APIC's physical base address, and capture the BSP's APIC ID/version
 * if not yet known.
 */
void __init register_lapic_address(unsigned long address)
{
	mp_lapic_addr = address;

	/* x2apic uses MSR access; only xapic needs an MMIO mapping */
	if (!x2apic_mode) {
		set_fixmap_nocache(FIX_APIC_BASE, address);
		apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
			    APIC_BASE, mp_lapic_addr);
	}
	/* -1U means the BSP's APIC ID has not been read yet */
	if (boot_cpu_physical_apicid == -1U) {
		boot_cpu_physical_apicid  = read_apic_id();
		apic_version[boot_cpu_physical_apicid] =
			 GET_APIC_VERSION(apic_read(APIC_LVR));
	}
}
1701
1702/*
1703 * This initializes the IO-APIC and APIC hardware if this is
1704 * a UP kernel.
1705 */
1706int apic_version[MAX_LOCAL_APIC];
1707
1708int __init APIC_init_uniprocessor(void)
1709{
1710        if (disable_apic) {
1711                pr_info("Apic disabled\n");
1712                return -1;
1713        }
1714#ifdef CONFIG_X86_64
1715        if (!cpu_has_apic) {
1716                disable_apic = 1;
1717                pr_info("Apic disabled by BIOS\n");
1718                return -1;
1719        }
1720#else
1721        if (!smp_found_config && !cpu_has_apic)
1722                return -1;
1723
1724        /*
1725         * Complain if the BIOS pretends there is one.
1726         */
1727        if (!cpu_has_apic &&
1728            APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1729                pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
1730                        boot_cpu_physical_apicid);
1731                return -1;
1732        }
1733#endif
1734
1735        default_setup_apic_routing();
1736
1737        verify_local_APIC();
1738        connect_bsp_APIC();
1739
1740#ifdef CONFIG_X86_64
1741        apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
1742#else
1743        /*
1744         * Hack: In case of kdump, after a crash, kernel might be booting
1745         * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
1746         * might be zero if read from MP tables. Get it from LAPIC.
1747         */
1748# ifdef CONFIG_CRASH_DUMP
1749        boot_cpu_physical_apicid = read_apic_id();
1750# endif
1751#endif
1752        physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1753        setup_local_APIC();
1754
1755#ifdef CONFIG_X86_IO_APIC
1756        /*
1757         * Now enable IO-APICs, actually call clear_IO_APIC
1758         * We need clear_IO_APIC before enabling error vector
1759         */
1760        if (!skip_ioapic_setup && nr_ioapics)
1761                enable_IO_APIC();
1762#endif
1763
1764        bsp_end_local_APIC_setup();
1765
1766#ifdef CONFIG_X86_IO_APIC
1767        if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1768                setup_IO_APIC();
1769        else {
1770                nr_ioapics = 0;
1771        }
1772#endif
1773
1774        x86_init.timers.setup_percpu_clockev();
1775        return 0;
1776}
1777
1778/*
1779 * Local APIC interrupts
1780 */
1781
1782/*
1783 * This interrupt should _never_ happen with our APIC/SMP architecture
1784 */
1785void smp_spurious_interrupt(struct pt_regs *regs)
1786{
1787        u32 v;
1788
1789        exit_idle();
1790        irq_enter();
1791        /*
1792         * Check if this really is a spurious interrupt and ACK it
1793         * if it is a vectored one.  Just in case...
1794         * Spurious interrupts should not be ACKed.
1795         */
1796        v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1797        if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1798                ack_APIC_irq();
1799
1800        inc_irq_stat(irq_spurious_count);
1801
1802        /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1803        pr_info("spurious APIC interrupt on CPU#%d, "
1804                "should never happen.\n", smp_processor_id());
1805        irq_exit();
1806}
1807
1808/*
1809 * This interrupt should never happen with our APIC/SMP architecture
1810 */
1811void smp_error_interrupt(struct pt_regs *regs)
1812{
1813        u32 v, v1;
1814
1815        exit_idle();
1816        irq_enter();
1817        /* First tickle the hardware, only then report what went on. -- REW */
1818        v = apic_read(APIC_ESR);
1819        apic_write(APIC_ESR, 0);
1820        v1 = apic_read(APIC_ESR);
1821        ack_APIC_irq();
1822        atomic_inc(&irq_err_count);
1823
1824        /*
1825         * Here is what the APIC error bits mean:
1826         * 0: Send CS error
1827         * 1: Receive CS error
1828         * 2: Send accept error
1829         * 3: Receive accept error
1830         * 4: Reserved
1831         * 5: Send illegal vector
1832         * 6: Received illegal vector
1833         * 7: Illegal register address
1834         */
1835        pr_debug("APIC error on CPU%d: %02x(%02x)\n",
1836                smp_processor_id(), v , v1);
1837        irq_exit();
1838}
1839
1840/**
1841 * connect_bsp_APIC - attach the APIC to the interrupt system
1842 */
1843void __init connect_bsp_APIC(void)
1844{
1845#ifdef CONFIG_X86_32
1846        if (pic_mode) {
1847                /*
1848                 * Do not trust the local APIC being empty at bootup.
1849                 */
1850                clear_local_APIC();
1851                /*
1852                 * PIC mode, enable APIC mode in the IMCR, i.e.  connect BSP's
1853                 * local APIC to INT and NMI lines.
1854                 */
1855                apic_printk(APIC_VERBOSE, "leaving PIC mode, "
1856                                "enabling APIC mode.\n");
1857                imcr_pic_to_apic();
1858        }
1859#endif
1860        if (apic->enable_apic_mode)
1861                apic->enable_apic_mode();
1862}
1863
1864/**
1865 * disconnect_bsp_APIC - detach the APIC from the interrupt system
1866 * @virt_wire_setup:    indicates, whether virtual wire mode is selected
1867 *
1868 * Virtual wire mode is necessary to deliver legacy interrupts even when the
1869 * APIC is disabled.
1870 */
1871void disconnect_bsp_APIC(int virt_wire_setup)
1872{
1873        unsigned int value;
1874
1875#ifdef CONFIG_X86_32
1876        if (pic_mode) {
1877                /*
1878                 * Put the board back into PIC mode (has an effect only on
1879                 * certain older boards).  Note that APIC interrupts, including
1880                 * IPIs, won't work beyond this point!  The only exception are
1881                 * INIT IPIs.
1882                 */
1883                apic_printk(APIC_VERBOSE, "disabling APIC mode, "
1884                                "entering PIC mode.\n");
1885                imcr_apic_to_pic();
1886                return;
1887        }
1888#endif
1889
1890        /* Go back to Virtual Wire compatibility mode */
1891
1892        /* For the spurious interrupt use vector F, and enable it */
1893        value = apic_read(APIC_SPIV);
1894        value &= ~APIC_VECTOR_MASK;
1895        value |= APIC_SPIV_APIC_ENABLED;
1896        value |= 0xf;
1897        apic_write(APIC_SPIV, value);
1898
1899        if (!virt_wire_setup) {
1900                /*
1901                 * For LVT0 make it edge triggered, active high,
1902                 * external and enabled
1903                 */
1904                value = apic_read(APIC_LVT0);
1905                value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1906                        APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1907                        APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1908                value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1909                value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1910                apic_write(APIC_LVT0, value);
1911        } else {
1912                /* Disable LVT0 */
1913                apic_write(APIC_LVT0, APIC_LVT_MASKED);
1914        }
1915
1916        /*
1917         * For LVT1 make it edge triggered, active high,
1918         * nmi and enabled
1919         */
1920        value = apic_read(APIC_LVT1);
1921        value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1922                        APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1923                        APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1924        value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1925        value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
1926        apic_write(APIC_LVT1, value);
1927}
1928
1929void __cpuinit generic_processor_info(int apicid, int version)
1930{
1931        int cpu;
1932
1933        /*
1934         * Validate version
1935         */
1936        if (version == 0x0) {
1937                pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
1938                           "fixing up to 0x10. (tell your hw vendor)\n",
1939                                version);
1940                version = 0x10;
1941        }
1942        apic_version[apicid] = version;
1943
1944        if (num_processors >= nr_cpu_ids) {
1945                int max = nr_cpu_ids;
1946                int thiscpu = max + disabled_cpus;
1947
1948                pr_warning(
1949                        "ACPI: NR_CPUS/possible_cpus limit of %i reached."
1950                        "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
1951
1952                disabled_cpus++;
1953                return;
1954        }
1955
1956        num_processors++;
1957        cpu = cpumask_next_zero(-1, cpu_present_mask);
1958
1959        if (version != apic_version[boot_cpu_physical_apicid])
1960                WARN_ONCE(1,
1961                        "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
1962                        apic_version[boot_cpu_physical_apicid], cpu, version);
1963
1964        physid_set(apicid, phys_cpu_present_map);
1965        if (apicid == boot_cpu_physical_apicid) {
1966                /*
1967                 * x86_bios_cpu_apicid is required to have processors listed
1968                 * in same order as logical cpu numbers. Hence the first
1969                 * entry is BSP, and so on.
1970                 */
1971                cpu = 0;
1972        }
1973        if (apicid > max_physical_apicid)
1974                max_physical_apicid = apicid;
1975
1976#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
1977        early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1978        early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1979#endif
1980
1981        set_cpu_possible(cpu, true);
1982        set_cpu_present(cpu, true);
1983}
1984
/* Read the executing CPU's physical APIC ID straight from the hardware. */
int hard_smp_processor_id(void)
{
	return read_apic_id();
}
1989
1990void default_init_apic_ldr(void)
1991{
1992        unsigned long val;
1993
1994        apic_write(APIC_DFR, APIC_DFR_VALUE);
1995        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
1996        val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
1997        apic_write(APIC_LDR, val);
1998}
1999
#ifdef CONFIG_X86_32
/*
 * Map a CPU to its NUMA node.  SMP kernels consult the apicid_2_node[]
 * table; UP kernels only ever have node 0.
 */
int default_apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	/*
	 * NOTE(review): this indexes by the *current* CPU's hardware APIC
	 * ID rather than by @logical_apicid - looks deliberate for the
	 * default (flat) APIC driver, but verify against callers.
	 */
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}
#endif
2010
/*
 * Power management
 */
#ifdef CONFIG_PM

/* Snapshot of the local APIC's writable registers across suspend/resume. */
static struct {
	/*
	 * 'active' is true if the local APIC was enabled by us and
	 * not the BIOS; this signifies that we are also responsible
	 * for disabling it before entering apm/acpi suspend
	 */
	int active;
	/* r/w apic fields, saved by lapic_suspend(), rewritten by lapic_resume() */
	unsigned int apic_id;		/* APIC_ID */
	unsigned int apic_taskpri;	/* APIC_TASKPRI */
	unsigned int apic_ldr;		/* APIC_LDR */
	unsigned int apic_dfr;		/* APIC_DFR */
	unsigned int apic_spiv;		/* APIC_SPIV */
	unsigned int apic_lvtt;		/* APIC_LVTT (timer LVT) */
	unsigned int apic_lvtpc;	/* APIC_LVTPC, only valid if maxlvt >= 4 */
	unsigned int apic_lvt0;		/* APIC_LVT0 */
	unsigned int apic_lvt1;		/* APIC_LVT1 */
	unsigned int apic_lvterr;	/* APIC_LVTERR */
	unsigned int apic_tmict;	/* APIC_TMICT (timer initial count) */
	unsigned int apic_tdcr;		/* APIC_TDCR (timer divide config) */
	unsigned int apic_thmr;		/* APIC_LVTTHMR, only valid if maxlvt >= 5 */
} apic_pm_state;
2038
/*
 * lapic_suspend - save the local APIC register state for suspend.
 *
 * Does nothing unless we (not the BIOS) enabled the APIC, i.e. unless
 * apic_pm_state.active is set.  All writable registers are stashed in
 * apic_pm_state, then the APIC (and interrupt remapping, if enabled)
 * is shut down with interrupts off.
 */
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	/* performance-counter LVT entry only exists when maxlvt >= 4 */
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_THERMAL_VECTOR
	/* thermal LVT entry only exists when maxlvt >= 5 */
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif

	local_irq_save(flags);
	disable_local_APIC();

	if (intr_remapping_enabled)
		disable_intr_remapping();

	local_irq_restore(flags);
	return 0;
}
2076
2077static int lapic_resume(struct sys_device *dev)
2078{
2079        unsigned int l, h;
2080        unsigned long flags;
2081        int maxlvt;
2082        int ret = 0;
2083        struct IO_APIC_route_entry **ioapic_entries = NULL;
2084
2085        if (!apic_pm_state.active)
2086                return 0;
2087
2088        local_irq_save(flags);
2089        if (intr_remapping_enabled) {
2090                ioapic_entries = alloc_ioapic_entries();
2091                if (!ioapic_entries) {
2092                        WARN(1, "Alloc ioapic_entries in lapic resume failed.");
2093                        ret = -ENOMEM;
2094                        goto restore;
2095                }
2096
2097                ret = save_IO_APIC_setup(ioapic_entries);
2098                if (ret) {
2099                        WARN(1, "Saving IO-APIC state failed: %d\n", ret);
2100                        free_ioapic_entries(ioapic_entries);
2101                        goto restore;
2102                }
2103
2104                mask_IO_APIC_setup(ioapic_entries);
2105                legacy_pic->mask_all();
2106        }
2107
2108        if (x2apic_mode)
2109                enable_x2apic();
2110        else {
2111                /*
2112                 * Make sure the APICBASE points to the right address
2113                 *
2114                 * FIXME! This will be wrong if we ever support suspend on
2115                 * SMP! We'll need to do this as part of the CPU restore!
2116                 */
2117                rdmsr(MSR_IA32_APICBASE, l, h);
2118                l &= ~MSR_IA32_APICBASE_BASE;
2119                l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
2120                wrmsr(MSR_IA32_APICBASE, l, h);
2121        }
2122
2123        maxlvt = lapic_get_maxlvt();
2124        apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
2125        apic_write(APIC_ID, apic_pm_state.apic_id);
2126        apic_write(APIC_DFR, apic_pm_state.apic_dfr);
2127        apic_write(APIC_LDR, apic_pm_state.apic_ldr);
2128        apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
2129        apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
2130        apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
2131        apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
2132#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
2133        if (maxlvt >= 5)
2134                apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
2135#endif
2136        if (maxlvt >= 4)
2137                apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
2138        apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
2139        apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
2140        apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
2141        apic_write(APIC_ESR, 0);
2142        apic_read(APIC_ESR);
2143        apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
2144        apic_write(APIC_ESR, 0);
2145        apic_read(APIC_ESR);
2146
2147        if (intr_remapping_enabled) {
2148                reenable_intr_remapping(x2apic_mode);
2149                legacy_pic->restore_mask();
2150                restore_IO_APIC_setup(ioapic_entries);
2151                free_ioapic_entries(ioapic_entries);
2152        }
2153restore:
2154        local_irq_restore(flags);
2155
2156        return ret;
2157}
2158
2159/*
2160 * This device has no shutdown method - fully functioning local APICs
2161 * are needed on every CPU up until machine_halt/restart/poweroff.
2162 */
2163
2164static struct sysdev_class lapic_sysclass = {
2165        .name           = "lapic",
2166        .resume         = lapic_resume,
2167        .suspend        = lapic_suspend,
2168};
2169
2170static struct sys_device device_lapic = {
2171        .id     = 0,
2172        .cls    = &lapic_sysclass,
2173};
2174
/* Mark the APIC as enabled by us, arming lapic_suspend()/lapic_resume(). */
static void __cpuinit apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}
2179
2180static int __init init_lapic_sysfs(void)
2181{
2182        int error;
2183
2184        if (!cpu_has_apic)
2185                return 0;
2186        /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
2187
2188        error = sysdev_class_register(&lapic_sysclass);
2189        if (!error)
2190                error = sysdev_register(&device_lapic);
2191        return error;
2192}
2193
2194/* local apic needs to resume before other devices access its registers. */
2195core_initcall(init_lapic_sysfs);
2196
#else	/* CONFIG_PM */

/* Without CONFIG_PM there is no suspend state to track. */
static void apic_pm_activate(void) { }

#endif	/* CONFIG_PM */
2202
2203#ifdef CONFIG_X86_64
2204
2205static int __cpuinit apic_cluster_num(void)
2206{
2207        int i, clusters, zeros;
2208        unsigned id;
2209        u16 *bios_cpu_apicid;
2210        DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
2211
2212        bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
2213        bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
2214
2215        for (i = 0; i < nr_cpu_ids; i++) {
2216                /* are we being called early in kernel startup? */
2217                if (bios_cpu_apicid) {
2218                        id = bios_cpu_apicid[i];
2219                } else if (i < nr_cpu_ids) {
2220                        if (cpu_present(i))
2221                                id = per_cpu(x86_bios_cpu_apicid, i);
2222                        else
2223                                continue;
2224                } else
2225                        break;
2226
2227                if (id != BAD_APICID)
2228                        __set_bit(APIC_CLUSTERID(id), clustermap);
2229        }
2230
2231        /* Problem:  Partially populated chassis may not have CPUs in some of
2232         * the APIC clusters they have been allocated.  Only present CPUs have
2233         * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
2234         * Since clusters are allocated sequentially, count zeros only if
2235         * they are bounded by ones.
2236         */
2237        clusters = 0;
2238        zeros = 0;
2239        for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
2240                if (test_bit(i, clustermap)) {
2241                        clusters += 1 + zeros;
2242                        zeros = 0;
2243                } else
2244                        ++zeros;
2245        }
2246
2247        return clusters;
2248}
2249
/* DMI scan state: 'multi' is set once a multi-chassis box was matched. */
static int __cpuinitdata multi_checked;
static int __cpuinitdata multi;

/* DMI callback: flag the system as multi-chassis, logging only once. */
static int __cpuinit set_multi(const struct dmi_system_id *d)
{
	if (multi)
		return 0;
	pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
	multi = 1;
	return 0;
}
2261
/* DMI match table of known multi-chassis systems (IBM Summit2). */
static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
	{
		.callback = set_multi,
		.ident = "IBM System Summit2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
		},
	},
	{}	/* terminator */
};
2273
2274static void __cpuinit dmi_check_multi(void)
2275{
2276        if (multi_checked)
2277                return;
2278
2279        dmi_check_system(multi_dmi_table);
2280        multi_checked = 1;
2281}
2282
2283/*
2284 * apic_is_clustered_box() -- Check if we can expect good TSC
2285 *
2286 * Thus far, the major user of this is IBM's Summit2 series:
2287 * Clustered boxes may have unsynced TSC problems if they are
2288 * multi-chassis.
2289 * Use DMI to check them
2290 */
2291__cpuinit int apic_is_clustered_box(void)
2292{
2293        dmi_check_multi();
2294        if (multi)
2295                return 1;
2296
2297        if (!is_vsmp_box())
2298                return 0;
2299
2300        /*
2301         * ScaleMP vSMPowered boxes have one cluster per board and TSCs are
2302         * not guaranteed to be synced between boards
2303         */
2304        if (apic_cluster_num() > 1)
2305                return 1;
2306
2307        return 0;
2308}
2309#endif
2310
/*
 * APIC command line parameters
 */

/* "disableapic": turn off the local APIC and clear the APIC CPU cap bit. */
static int __init setup_disableapic(char *arg)
{
	disable_apic = 1;
	setup_clear_cpu_cap(X86_FEATURE_APIC);
	return 0;
}
early_param("disableapic", setup_disableapic);
2321
/* "nolapic": same as disableapic, kept for compatibility. */
static int __init setup_nolapic(char *arg)
{
	return setup_disableapic(arg);
}
early_param("nolapic", setup_nolapic);
2328
/* "lapic_timer_c2_ok": trust the local APIC timer to keep ticking in C2. */
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
2335
/* "noapictimer": do not use the local APIC timer. */
static int __init parse_disable_apic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("noapictimer", parse_disable_apic_timer);
2342
/* "nolapic_timer": sets the same flag as "noapictimer". */
static int __init parse_nolapic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("nolapic_timer", parse_nolapic_timer);
2349
2350static int __init apic_set_verbosity(char *arg)
2351{
2352        if (!arg)  {
2353#ifdef CONFIG_X86_64
2354                skip_ioapic_setup = 0;
2355                return 0;
2356#endif
2357                return -EINVAL;
2358        }
2359
2360        if (strcmp("debug", arg) == 0)
2361                apic_verbosity = APIC_DEBUG;
2362        else if (strcmp("verbose", arg) == 0)
2363                apic_verbosity = APIC_VERBOSE;
2364        else {
2365                pr_warning("APIC Verbosity level %s not recognised"
2366                        " use apic=verbose or apic=debug\n", arg);
2367                return -EINVAL;
2368        }
2369
2370        return 0;
2371}
2372early_param("apic", apic_set_verbosity);
2373
2374static int __init lapic_insert_resource(void)
2375{
2376        if (!apic_phys)
2377                return -1;
2378
2379        /* Put local APIC into the resource map. */
2380        lapic_resource.start = apic_phys;
2381        lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
2382        insert_resource(&iomem_resource, &lapic_resource);
2383
2384        return 0;
2385}
2386
2387/*
2388 * need call insert after e820_reserve_resources()
2389 * that is using request_resource
2390 */
2391late_initcall(lapic_insert_resource);
2392