linux/arch/x86/kvm/lapic.c
   1
   2/*
   3 * Local APIC virtualization
   4 *
   5 * Copyright (C) 2006 Qumranet, Inc.
   6 * Copyright (C) 2007 Novell
   7 * Copyright (C) 2007 Intel
   8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
   9 *
  10 * Authors:
  11 *   Dor Laor <dor.laor@qumranet.com>
  12 *   Gregory Haskins <ghaskins@novell.com>
  13 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
  14 *
  15 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
  16 *
  17 * This work is licensed under the terms of the GNU GPL, version 2.  See
  18 * the COPYING file in the top-level directory.
  19 */
  20
  21#include <linux/kvm_host.h>
  22#include <linux/kvm.h>
  23#include <linux/mm.h>
  24#include <linux/highmem.h>
  25#include <linux/smp.h>
  26#include <linux/hrtimer.h>
  27#include <linux/io.h>
  28#include <linux/module.h>
  29#include <linux/math64.h>
  30#include <linux/slab.h>
  31#include <asm/processor.h>
  32#include <asm/msr.h>
  33#include <asm/page.h>
  34#include <asm/current.h>
  35#include <asm/apicdef.h>
  36#include <linux/atomic.h>
  37#include "kvm_cache_regs.h"
  38#include "irq.h"
  39#include "trace.h"
  40#include "x86.h"
  41
  42#ifndef CONFIG_X86_64
  43#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
  44#else
  45#define mod_64(x, y) ((x) % (y))
  46#endif
  47
  48#define PRId64 "d"
  49#define PRIx64 "llx"
  50#define PRIu64 "u"
  51#define PRIo64 "o"
  52
  53#define APIC_BUS_CYCLE_NS 1
  54
  55/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
  56#define apic_debug(fmt, arg...)
  57
  58#define APIC_LVT_NUM                    6
   59/* 0x14 is the version reported by Xeon and Pentium 4; see SDM 8.4.8 */
  60#define APIC_VERSION                    (0x14UL | ((APIC_LVT_NUM - 1) << 16))
  61#define LAPIC_MMIO_LENGTH               (1 << 12)
   62/* the following defines are not in apicdef.h */
  63#define APIC_SHORT_MASK                 0xc0000
  64#define APIC_DEST_NOSHORT               0x0
  65#define APIC_DEST_MASK                  0x800
  66#define MAX_APIC_VECTOR                 256
  67
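     /*
      * IRR/ISR/TMR hold the 256 vector bits as eight 32-bit words spaced
      * 16 bytes apart; VEC_POS() selects the bit within a word and REG_POS()
      * the byte offset of the word that holds the given vector.
      */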
  68#define VEC_POS(v) ((v) & (32 - 1))
  69#define REG_POS(v) (((v) >> 5) << 4)
  70
  71static unsigned int min_timer_period_us = 500;
  72module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
  73
  74static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
  75{
  76        return *((u32 *) (apic->regs + reg_off));
  77}
  78
  79static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
  80{
  81        *((u32 *) (apic->regs + reg_off)) = val;
  82}
  83
  84static inline int apic_test_and_set_vector(int vec, void *bitmap)
  85{
  86        return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
  87}
  88
  89static inline int apic_test_and_clear_vector(int vec, void *bitmap)
  90{
  91        return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
  92}
  93
  94static inline void apic_set_vector(int vec, void *bitmap)
  95{
  96        set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
  97}
  98
  99static inline void apic_clear_vector(int vec, void *bitmap)
 100{
 101        clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 102}
 103
 104static inline int apic_hw_enabled(struct kvm_lapic *apic)
 105{
 106        return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
 107}
 108
 109static inline int  apic_sw_enabled(struct kvm_lapic *apic)
 110{
 111        return apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
 112}
 113
 114static inline int apic_enabled(struct kvm_lapic *apic)
 115{
 116        return apic_sw_enabled(apic) && apic_hw_enabled(apic);
 117}
 118
 119#define LVT_MASK        \
 120        (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
 121
 122#define LINT_MASK       \
 123        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
 124         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 125
 126static inline int kvm_apic_id(struct kvm_lapic *apic)
 127{
 128        return (apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
 129}
 130
 131static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
 132{
 133        return !(apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
 134}
 135
 136static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
 137{
 138        return apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
 139}
 140
 141static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
 142{
 143        return ((apic_get_reg(apic, APIC_LVTT) &
 144                apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
 145}
 146
 147static inline int apic_lvtt_period(struct kvm_lapic *apic)
 148{
 149        return ((apic_get_reg(apic, APIC_LVTT) &
 150                apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
 151}
 152
 153static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
 154{
 155        return ((apic_get_reg(apic, APIC_LVTT) &
 156                apic->lapic_timer.timer_mode_mask) ==
 157                        APIC_LVT_TIMER_TSCDEADLINE);
 158}
 159
 160static inline int apic_lvt_nmi_mode(u32 lvt_val)
 161{
 162        return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
 163}
 164
 165void kvm_apic_set_version(struct kvm_vcpu *vcpu)
 166{
 167        struct kvm_lapic *apic = vcpu->arch.apic;
 168        struct kvm_cpuid_entry2 *feat;
 169        u32 v = APIC_VERSION;
 170
 171        if (!irqchip_in_kernel(vcpu->kvm))
 172                return;
 173
 174        feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
 175        if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
 176                v |= APIC_LVR_DIRECTED_EOI;
 177        apic_set_reg(apic, APIC_LVR, v);
 178}
 179
 180static inline int apic_x2apic_mode(struct kvm_lapic *apic)
 181{
 182        return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
 183}
 184
 185static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
  186        LVT_MASK,       /* partial LVTT mask; the timer mode mask is added at runtime */
 187        LVT_MASK | APIC_MODE_MASK,      /* LVTTHMR */
 188        LVT_MASK | APIC_MODE_MASK,      /* LVTPC */
 189        LINT_MASK, LINT_MASK,   /* LVT0-1 */
 190        LVT_MASK                /* LVTERR */
 191};
 192
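     /*
      * Scan a 256-bit vector bitmap (eight 32-bit words at 16-byte strides,
      * matching the IRR/ISR/TMR layout) from the top down and return the
      * highest vector that is set, or -1 if none is.
      */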
 193static int find_highest_vector(void *bitmap)
 194{
 195        u32 *word = bitmap;
 196        int word_offset = MAX_APIC_VECTOR >> 5;
 197
 198        while ((word_offset != 0) && (word[(--word_offset) << 2] == 0))
 199                continue;
 200
 201        if (likely(!word_offset && !word[0]))
 202                return -1;
 203        else
 204                return fls(word[word_offset << 2]) - 1 + (word_offset << 5);
 205}
 206
 207static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 208{
 209        apic->irr_pending = true;
 210        return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
 211}
 212
 213static inline int apic_search_irr(struct kvm_lapic *apic)
 214{
 215        return find_highest_vector(apic->regs + APIC_IRR);
 216}
 217
 218static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 219{
 220        int result;
 221
 222        if (!apic->irr_pending)
 223                return -1;
 224
 225        result = apic_search_irr(apic);
 226        ASSERT(result == -1 || result >= 16);
 227
 228        return result;
 229}
 230
 231static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 232{
 233        apic->irr_pending = false;
 234        apic_clear_vector(vec, apic->regs + APIC_IRR);
 235        if (apic_search_irr(apic) != -1)
 236                apic->irr_pending = true;
 237}
 238
 239int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 240{
 241        struct kvm_lapic *apic = vcpu->arch.apic;
 242        int highest_irr;
 243
  244        /* This may race with setting of irr in __apic_accept_irq() and
  245         * the value returned may be wrong, but kvm_vcpu_kick() in
  246         * __apic_accept_irq() will cause an immediate vmexit and the value
  247         * will be recalculated on the next vmentry.
  248         */
 249        if (!apic)
 250                return 0;
 251        highest_irr = apic_find_highest_irr(apic);
 252
 253        return highest_irr;
 254}
 255
 256static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 257                             int vector, int level, int trig_mode);
 258
 259int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 260{
 261        struct kvm_lapic *apic = vcpu->arch.apic;
 262
 263        return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
 264                        irq->level, irq->trig_mode);
 265}
 266
 267static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 268{
 269        int result;
 270
 271        result = find_highest_vector(apic->regs + APIC_ISR);
 272        ASSERT(result == -1 || result >= 16);
 273
 274        return result;
 275}
 276
 277static void apic_update_ppr(struct kvm_lapic *apic)
 278{
 279        u32 tpr, isrv, ppr, old_ppr;
 280        int isr;
 281
 282        old_ppr = apic_get_reg(apic, APIC_PROCPRI);
 283        tpr = apic_get_reg(apic, APIC_TASKPRI);
 284        isr = apic_find_highest_isr(apic);
 285        isrv = (isr != -1) ? isr : 0;
 286
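             /*
              * PPR is the TPR when its priority class is at least that of the
              * highest in-service vector, otherwise that vector's priority
              * class with a zero sub-class.
              */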
 287        if ((tpr & 0xf0) >= (isrv & 0xf0))
 288                ppr = tpr & 0xff;
 289        else
 290                ppr = isrv & 0xf0;
 291
 292        apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
 293                   apic, ppr, isr, isrv);
 294
 295        if (old_ppr != ppr) {
 296                apic_set_reg(apic, APIC_PROCPRI, ppr);
 297                if (ppr < old_ppr)
 298                        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 299        }
 300}
 301
 302static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
 303{
 304        apic_set_reg(apic, APIC_TASKPRI, tpr);
 305        apic_update_ppr(apic);
 306}
 307
 308int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
 309{
 310        return dest == 0xff || kvm_apic_id(apic) == dest;
 311}
 312
 313int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
 314{
 315        int result = 0;
 316        u32 logical_id;
 317
 318        if (apic_x2apic_mode(apic)) {
 319                logical_id = apic_get_reg(apic, APIC_LDR);
 320                return logical_id & mda;
 321        }
 322
 323        logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR));
 324
 325        switch (apic_get_reg(apic, APIC_DFR)) {
 326        case APIC_DFR_FLAT:
 327                if (logical_id & mda)
 328                        result = 1;
 329                break;
 330        case APIC_DFR_CLUSTER:
 331                if (((logical_id >> 4) == (mda >> 0x4))
 332                    && (logical_id & mda & 0xf))
 333                        result = 1;
 334                break;
 335        default:
 336                apic_debug("Bad DFR vcpu %d: %08x\n",
 337                           apic->vcpu->vcpu_id, apic_get_reg(apic, APIC_DFR));
 338                break;
 339        }
 340
 341        return result;
 342}
 343
 344int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 345                           int short_hand, int dest, int dest_mode)
 346{
 347        int result = 0;
 348        struct kvm_lapic *target = vcpu->arch.apic;
 349
 350        apic_debug("target %p, source %p, dest 0x%x, "
 351                   "dest_mode 0x%x, short_hand 0x%x\n",
 352                   target, source, dest, dest_mode, short_hand);
 353
 354        ASSERT(target);
 355        switch (short_hand) {
 356        case APIC_DEST_NOSHORT:
 357                if (dest_mode == 0)
 358                        /* Physical mode. */
 359                        result = kvm_apic_match_physical_addr(target, dest);
 360                else
 361                        /* Logical mode. */
 362                        result = kvm_apic_match_logical_addr(target, dest);
 363                break;
 364        case APIC_DEST_SELF:
 365                result = (target == source);
 366                break;
 367        case APIC_DEST_ALLINC:
 368                result = 1;
 369                break;
 370        case APIC_DEST_ALLBUT:
 371                result = (target != source);
 372                break;
 373        default:
 374                apic_debug("kvm: apic: Bad dest shorthand value %x\n",
 375                           short_hand);
 376                break;
 377        }
 378
 379        return result;
 380}
 381
 382/*
 383 * Add a pending IRQ into lapic.
 384 * Return 1 if successfully added and 0 if discarded.
 385 */
 386static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 387                             int vector, int level, int trig_mode)
 388{
 389        int result = 0;
 390        struct kvm_vcpu *vcpu = apic->vcpu;
 391
 392        switch (delivery_mode) {
 393        case APIC_DM_LOWEST:
 394                vcpu->arch.apic_arb_prio++;
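                     /* fall through: lowest-priority shares the fixed delivery path */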
 395        case APIC_DM_FIXED:
 396                /* FIXME add logic for vcpu on reset */
 397                if (unlikely(!apic_enabled(apic)))
 398                        break;
 399
 400                if (trig_mode) {
 401                        apic_debug("level trig mode for vector %d", vector);
 402                        apic_set_vector(vector, apic->regs + APIC_TMR);
 403                } else
 404                        apic_clear_vector(vector, apic->regs + APIC_TMR);
 405
 406                result = !apic_test_and_set_irr(vector, apic);
 407                trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
 408                                          trig_mode, vector, !result);
 409                if (!result) {
 410                        if (trig_mode)
 411                                apic_debug("level trig mode repeatedly for "
 412                                                "vector %d", vector);
 413                        break;
 414                }
 415
 416                kvm_make_request(KVM_REQ_EVENT, vcpu);
 417                kvm_vcpu_kick(vcpu);
 418                break;
 419
 420        case APIC_DM_REMRD:
 421                apic_debug("Ignoring delivery mode 3\n");
 422                break;
 423
 424        case APIC_DM_SMI:
 425                apic_debug("Ignoring guest SMI\n");
 426                break;
 427
 428        case APIC_DM_NMI:
 429                result = 1;
 430                kvm_inject_nmi(vcpu);
 431                kvm_vcpu_kick(vcpu);
 432                break;
 433
 434        case APIC_DM_INIT:
 435                if (level) {
 436                        result = 1;
 437                        vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 438                        kvm_make_request(KVM_REQ_EVENT, vcpu);
 439                        kvm_vcpu_kick(vcpu);
 440                } else {
 441                        apic_debug("Ignoring de-assert INIT to vcpu %d\n",
 442                                   vcpu->vcpu_id);
 443                }
 444                break;
 445
 446        case APIC_DM_STARTUP:
 447                apic_debug("SIPI to vcpu %d vector 0x%02x\n",
 448                           vcpu->vcpu_id, vector);
 449                if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
 450                        result = 1;
 451                        vcpu->arch.sipi_vector = vector;
 452                        vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
 453                        kvm_make_request(KVM_REQ_EVENT, vcpu);
 454                        kvm_vcpu_kick(vcpu);
 455                }
 456                break;
 457
 458        case APIC_DM_EXTINT:
 459                /*
 460                 * Should only be called by kvm_apic_local_deliver() with LVT0,
  461                 * before the NMI watchdog is enabled. Already handled by
 462                 * kvm_apic_accept_pic_intr().
 463                 */
 464                break;
 465
 466        default:
 467                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
 468                       delivery_mode);
 469                break;
 470        }
 471        return result;
 472}
 473
 474int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 475{
 476        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
 477}
 478
 479static void apic_set_eoi(struct kvm_lapic *apic)
 480{
 481        int vector = apic_find_highest_isr(apic);
 482        int trigger_mode;
 483        /*
  484         * Not every EOI write has a corresponding bit set in the ISR;
  485         * one example is when the kernel checks the timer in setup_IO_APIC
 486         */
 487        if (vector == -1)
 488                return;
 489
 490        apic_clear_vector(vector, apic->regs + APIC_ISR);
 491        apic_update_ppr(apic);
 492
 493        if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
 494                trigger_mode = IOAPIC_LEVEL_TRIG;
 495        else
 496                trigger_mode = IOAPIC_EDGE_TRIG;
 497        if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
 498                kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
 499        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 500}
 501
 502static void apic_send_ipi(struct kvm_lapic *apic)
 503{
 504        u32 icr_low = apic_get_reg(apic, APIC_ICR);
 505        u32 icr_high = apic_get_reg(apic, APIC_ICR2);
 506        struct kvm_lapic_irq irq;
 507
 508        irq.vector = icr_low & APIC_VECTOR_MASK;
 509        irq.delivery_mode = icr_low & APIC_MODE_MASK;
 510        irq.dest_mode = icr_low & APIC_DEST_MASK;
 511        irq.level = icr_low & APIC_INT_ASSERT;
 512        irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
 513        irq.shorthand = icr_low & APIC_SHORT_MASK;
 514        if (apic_x2apic_mode(apic))
 515                irq.dest_id = icr_high;
 516        else
 517                irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
 518
 519        trace_kvm_apic_ipi(icr_low, irq.dest_id);
 520
 521        apic_debug("icr_high 0x%x, icr_low 0x%x, "
 522                   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
 523                   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
 524                   icr_high, icr_low, irq.shorthand, irq.dest_id,
 525                   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
 526                   irq.vector);
 527
 528        kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
 529}
 530
 531static u32 apic_get_tmcct(struct kvm_lapic *apic)
 532{
 533        ktime_t remaining;
 534        s64 ns;
 535        u32 tmcct;
 536
 537        ASSERT(apic != NULL);
 538
 539        /* if initial count is 0, current count should also be 0 */
 540        if (apic_get_reg(apic, APIC_TMICT) == 0)
 541                return 0;
 542
 543        remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
 544        if (ktime_to_ns(remaining) < 0)
 545                remaining = ktime_set(0, 0);
 546
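             /* current count = time left in the period, expressed in APIC bus cycles scaled by the divide configuration */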
 547        ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
 548        tmcct = div64_u64(ns,
 549                         (APIC_BUS_CYCLE_NS * apic->divide_count));
 550
 551        return tmcct;
 552}
 553
 554static void __report_tpr_access(struct kvm_lapic *apic, bool write)
 555{
 556        struct kvm_vcpu *vcpu = apic->vcpu;
 557        struct kvm_run *run = vcpu->run;
 558
 559        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
 560        run->tpr_access.rip = kvm_rip_read(vcpu);
 561        run->tpr_access.is_write = write;
 562}
 563
 564static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
 565{
 566        if (apic->vcpu->arch.tpr_access_reporting)
 567                __report_tpr_access(apic, write);
 568}
 569
 570static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 571{
 572        u32 val = 0;
 573
 574        if (offset >= LAPIC_MMIO_LENGTH)
 575                return 0;
 576
 577        switch (offset) {
 578        case APIC_ID:
 579                if (apic_x2apic_mode(apic))
 580                        val = kvm_apic_id(apic);
 581                else
 582                        val = kvm_apic_id(apic) << 24;
 583                break;
 584        case APIC_ARBPRI:
 585                apic_debug("Access APIC ARBPRI register which is for P6\n");
 586                break;
 587
 588        case APIC_TMCCT:        /* Timer CCR */
 589                if (apic_lvtt_tscdeadline(apic))
 590                        return 0;
 591
 592                val = apic_get_tmcct(apic);
 593                break;
 594
 595        case APIC_TASKPRI:
 596                report_tpr_access(apic, false);
 597                /* fall thru */
 598        default:
 599                apic_update_ppr(apic);
 600                val = apic_get_reg(apic, offset);
 601                break;
 602        }
 603
 604        return val;
 605}
 606
 607static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
 608{
 609        return container_of(dev, struct kvm_lapic, dev);
 610}
 611
 612static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 613                void *data)
 614{
 615        unsigned char alignment = offset & 0xf;
 616        u32 result;
  617        /* this bitmask has a bit cleared for each reserved register */
 618        static const u64 rmask = 0x43ff01ffffffe70cULL;
 619
 620        if ((alignment + len) > 4) {
 621                apic_debug("KVM_APIC_READ: alignment error %x %d\n",
 622                           offset, len);
 623                return 1;
 624        }
 625
 626        if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
 627                apic_debug("KVM_APIC_READ: read reserved register %x\n",
 628                           offset);
 629                return 1;
 630        }
 631
 632        result = __apic_read(apic, offset & ~0xf);
 633
 634        trace_kvm_apic_read(offset, result);
 635
 636        switch (len) {
 637        case 1:
 638        case 2:
 639        case 4:
 640                memcpy(data, (char *)&result + alignment, len);
 641                break;
 642        default:
 643                printk(KERN_ERR "Local APIC read with len = %x, "
  644                       "should be 1, 2, or 4 instead\n", len);
 645                break;
 646        }
 647        return 0;
 648}
 649
 650static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
 651{
 652        return apic_hw_enabled(apic) &&
 653            addr >= apic->base_address &&
 654            addr < apic->base_address + LAPIC_MMIO_LENGTH;
 655}
 656
 657static int apic_mmio_read(struct kvm_io_device *this,
 658                           gpa_t address, int len, void *data)
 659{
 660        struct kvm_lapic *apic = to_lapic(this);
 661        u32 offset = address - apic->base_address;
 662
 663        if (!apic_mmio_in_range(apic, address))
 664                return -EOPNOTSUPP;
 665
 666        apic_reg_read(apic, offset, len, data);
 667
 668        return 0;
 669}
 670
 671static void update_divide_count(struct kvm_lapic *apic)
 672{
 673        u32 tmp1, tmp2, tdcr;
 674
 675        tdcr = apic_get_reg(apic, APIC_TDCR);
 676        tmp1 = tdcr & 0xf;
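             /* divide value is encoded in TDCR bits 0, 1 and 3; divisor = 2^((value + 1) & 7), so 0b111 means divide by 1 */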
 677        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
 678        apic->divide_count = 0x1 << (tmp2 & 0x7);
 679
 680        apic_debug("timer divide count is 0x%x\n",
 681                                   apic->divide_count);
 682}
 683
 684static void start_apic_timer(struct kvm_lapic *apic)
 685{
 686        ktime_t now;
 687        atomic_set(&apic->lapic_timer.pending, 0);
 688
 689        if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
  690                /* lapic timer in oneshot or periodic mode */
 691                now = apic->lapic_timer.timer.base->get_time();
 692                apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT)
 693                            * APIC_BUS_CYCLE_NS * apic->divide_count;
 694
 695                if (!apic->lapic_timer.period)
 696                        return;
 697                /*
  698                 * Do not allow the guest to program periodic timers with a small
  699                 * interval, since the hrtimers are not throttled by the host
 700                 * scheduler.
 701                 */
 702                if (apic_lvtt_period(apic)) {
 703                        s64 min_period = min_timer_period_us * 1000LL;
 704
 705                        if (apic->lapic_timer.period < min_period) {
 706                                pr_info_ratelimited(
 707                                    "kvm: vcpu %i: requested %lld ns "
 708                                    "lapic timer period limited to %lld ns\n",
 709                                    apic->vcpu->vcpu_id,
 710                                    apic->lapic_timer.period, min_period);
 711                                apic->lapic_timer.period = min_period;
 712                        }
 713                }
 714
 715                hrtimer_start(&apic->lapic_timer.timer,
 716                              ktime_add_ns(now, apic->lapic_timer.period),
 717                              HRTIMER_MODE_ABS);
 718
 719                apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
 720                           PRIx64 ", "
 721                           "timer initial count 0x%x, period %lldns, "
 722                           "expire @ 0x%016" PRIx64 ".\n", __func__,
 723                           APIC_BUS_CYCLE_NS, ktime_to_ns(now),
 724                           apic_get_reg(apic, APIC_TMICT),
 725                           apic->lapic_timer.period,
 726                           ktime_to_ns(ktime_add_ns(now,
 727                                        apic->lapic_timer.period)));
 728        } else if (apic_lvtt_tscdeadline(apic)) {
 729                /* lapic timer in tsc deadline mode */
 730                u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
 731                u64 ns = 0;
 732                struct kvm_vcpu *vcpu = apic->vcpu;
 733                unsigned long this_tsc_khz = vcpu_tsc_khz(vcpu);
 734                unsigned long flags;
 735
 736                if (unlikely(!tscdeadline || !this_tsc_khz))
 737                        return;
 738
 739                local_irq_save(flags);
 740
 741                now = apic->lapic_timer.timer.base->get_time();
 742                guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
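                     /* convert the remaining TSC ticks to ns: ticks * 1000000 / tsc_khz */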
 743                if (likely(tscdeadline > guest_tsc)) {
 744                        ns = (tscdeadline - guest_tsc) * 1000000ULL;
 745                        do_div(ns, this_tsc_khz);
 746                }
 747                hrtimer_start(&apic->lapic_timer.timer,
 748                        ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
 749
 750                local_irq_restore(flags);
 751        }
 752}
 753
 754static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
 755{
 756        int nmi_wd_enabled = apic_lvt_nmi_mode(apic_get_reg(apic, APIC_LVT0));
 757
 758        if (apic_lvt_nmi_mode(lvt0_val)) {
 759                if (!nmi_wd_enabled) {
 760                        apic_debug("Receive NMI setting on APIC_LVT0 "
 761                                   "for cpu %d\n", apic->vcpu->vcpu_id);
 762                        apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
 763                }
 764        } else if (nmi_wd_enabled)
 765                apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
 766}
 767
 768static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 769{
 770        int ret = 0;
 771
 772        trace_kvm_apic_write(reg, val);
 773
 774        switch (reg) {
 775        case APIC_ID:           /* Local APIC ID */
 776                if (!apic_x2apic_mode(apic))
 777                        apic_set_reg(apic, APIC_ID, val);
 778                else
 779                        ret = 1;
 780                break;
 781
 782        case APIC_TASKPRI:
 783                report_tpr_access(apic, true);
 784                apic_set_tpr(apic, val & 0xff);
 785                break;
 786
 787        case APIC_EOI:
 788                apic_set_eoi(apic);
 789                break;
 790
 791        case APIC_LDR:
 792                if (!apic_x2apic_mode(apic))
 793                        apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
 794                else
 795                        ret = 1;
 796                break;
 797
 798        case APIC_DFR:
 799                if (!apic_x2apic_mode(apic))
 800                        apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
 801                else
 802                        ret = 1;
 803                break;
 804
 805        case APIC_SPIV: {
 806                u32 mask = 0x3ff;
 807                if (apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
 808                        mask |= APIC_SPIV_DIRECTED_EOI;
 809                apic_set_reg(apic, APIC_SPIV, val & mask);
 810                if (!(val & APIC_SPIV_APIC_ENABLED)) {
 811                        int i;
 812                        u32 lvt_val;
 813
 814                        for (i = 0; i < APIC_LVT_NUM; i++) {
 815                                lvt_val = apic_get_reg(apic,
 816                                                       APIC_LVTT + 0x10 * i);
 817                                apic_set_reg(apic, APIC_LVTT + 0x10 * i,
 818                                             lvt_val | APIC_LVT_MASKED);
 819                        }
 820                        atomic_set(&apic->lapic_timer.pending, 0);
 821
 822                }
 823                break;
 824        }
 825        case APIC_ICR:
 826                /* No delay here, so we always clear the pending bit */
 827                apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
 828                apic_send_ipi(apic);
 829                break;
 830
 831        case APIC_ICR2:
 832                if (!apic_x2apic_mode(apic))
 833                        val &= 0xff000000;
 834                apic_set_reg(apic, APIC_ICR2, val);
 835                break;
 836
 837        case APIC_LVT0:
 838                apic_manage_nmi_watchdog(apic, val);
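                     /* fall through: LVT0 also takes the generic LVT write path below */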
 839        case APIC_LVTTHMR:
 840        case APIC_LVTPC:
 841        case APIC_LVT1:
 842        case APIC_LVTERR:
 843                /* TODO: Check vector */
 844                if (!apic_sw_enabled(apic))
 845                        val |= APIC_LVT_MASKED;
 846
 847                val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
 848                apic_set_reg(apic, reg, val);
 849
 850                break;
 851
 852        case APIC_LVTT:
 853                if ((apic_get_reg(apic, APIC_LVTT) &
 854                    apic->lapic_timer.timer_mode_mask) !=
 855                   (val & apic->lapic_timer.timer_mode_mask))
 856                        hrtimer_cancel(&apic->lapic_timer.timer);
 857
 858                if (!apic_sw_enabled(apic))
 859                        val |= APIC_LVT_MASKED;
 860                val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
 861                apic_set_reg(apic, APIC_LVTT, val);
 862                break;
 863
 864        case APIC_TMICT:
 865                if (apic_lvtt_tscdeadline(apic))
 866                        break;
 867
 868                hrtimer_cancel(&apic->lapic_timer.timer);
 869                apic_set_reg(apic, APIC_TMICT, val);
 870                start_apic_timer(apic);
 871                break;
 872
 873        case APIC_TDCR:
 874                if (val & 4)
 875                        apic_debug("KVM_WRITE:TDCR %x\n", val);
 876                apic_set_reg(apic, APIC_TDCR, val);
 877                update_divide_count(apic);
 878                break;
 879
 880        case APIC_ESR:
 881                if (apic_x2apic_mode(apic) && val != 0) {
 882                        apic_debug("KVM_WRITE:ESR not zero %x\n", val);
 883                        ret = 1;
 884                }
 885                break;
 886
 887        case APIC_SELF_IPI:
 888                if (apic_x2apic_mode(apic)) {
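                             /* 0x40000 is APIC_DEST_SELF: deliver the vector in val as a fixed self-IPI */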
 889                        apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
 890                } else
 891                        ret = 1;
 892                break;
 893        default:
 894                ret = 1;
 895                break;
 896        }
 897        if (ret)
 898                apic_debug("Local APIC Write to read-only register %x\n", reg);
 899        return ret;
 900}
 901
 902static int apic_mmio_write(struct kvm_io_device *this,
 903                            gpa_t address, int len, const void *data)
 904{
 905        struct kvm_lapic *apic = to_lapic(this);
 906        unsigned int offset = address - apic->base_address;
 907        u32 val;
 908
 909        if (!apic_mmio_in_range(apic, address))
 910                return -EOPNOTSUPP;
 911
 912        /*
  913         * APIC registers must be aligned on a 128-bit boundary.
  914         * 32/64/128-bit registers must be accessed through 32-bit accesses.
  915         * Refer to SDM 8.4.1.
 916         */
 917        if (len != 4 || (offset & 0xf)) {
 918                /* Don't shout loud, $infamous_os would cause only noise. */
 919                apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
 920                return 0;
 921        }
 922
 923        val = *(u32*)data;
 924
  925        /* EOI writes are too common to be worth printing */
 926        if (offset != APIC_EOI)
 927                apic_debug("%s: offset 0x%x with length 0x%x, and value is "
 928                           "0x%x\n", __func__, offset, len, val);
 929
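             /* registers are spaced 16 bytes apart; mask the offset down to the register base */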
 930        apic_reg_write(apic, offset & 0xff0, val);
 931
 932        return 0;
 933}
 934
 935void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
 936{
 937        struct kvm_lapic *apic = vcpu->arch.apic;
 938
 939        if (apic)
 940                apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
 941}
 942EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
 943
 944void kvm_free_lapic(struct kvm_vcpu *vcpu)
 945{
 946        if (!vcpu->arch.apic)
 947                return;
 948
 949        hrtimer_cancel(&vcpu->arch.apic->lapic_timer.timer);
 950
 951        if (vcpu->arch.apic->regs)
 952                free_page((unsigned long)vcpu->arch.apic->regs);
 953
 954        kfree(vcpu->arch.apic);
 955}
 956
 957/*
 958 *----------------------------------------------------------------------
 959 * LAPIC interface
 960 *----------------------------------------------------------------------
 961 */
 962
 963u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
 964{
 965        struct kvm_lapic *apic = vcpu->arch.apic;
 966        if (!apic)
 967                return 0;
 968
 969        if (apic_lvtt_oneshot(apic) || apic_lvtt_period(apic))
 970                return 0;
 971
 972        return apic->lapic_timer.tscdeadline;
 973}
 974
 975void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
 976{
 977        struct kvm_lapic *apic = vcpu->arch.apic;
 978        if (!apic)
 979                return;
 980
 981        if (apic_lvtt_oneshot(apic) || apic_lvtt_period(apic))
 982                return;
 983
 984        hrtimer_cancel(&apic->lapic_timer.timer);
 985        apic->lapic_timer.tscdeadline = data;
 986        start_apic_timer(apic);
 987}
 988
 989void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 990{
 991        struct kvm_lapic *apic = vcpu->arch.apic;
 992
 993        if (!apic)
 994                return;
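             /* CR8 bits 3:0 become TPR bits 7:4; bit 2 of the current TPR is carried over */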
 995        apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
 996                     | (apic_get_reg(apic, APIC_TASKPRI) & 4));
 997}
 998
 999u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
1000{
1001        struct kvm_lapic *apic = vcpu->arch.apic;
1002        u64 tpr;
1003
1004        if (!apic)
1005                return 0;
1006        tpr = (u64) apic_get_reg(apic, APIC_TASKPRI);
1007
1008        return (tpr & 0xf0) >> 4;
1009}
1010
1011void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1012{
1013        struct kvm_lapic *apic = vcpu->arch.apic;
1014
1015        if (!apic) {
1016                value |= MSR_IA32_APICBASE_BSP;
1017                vcpu->arch.apic_base = value;
1018                return;
1019        }
1020
1021        if (!kvm_vcpu_is_bsp(apic->vcpu))
1022                value &= ~MSR_IA32_APICBASE_BSP;
1023
1024        vcpu->arch.apic_base = value;
1025        if (apic_x2apic_mode(apic)) {
1026                u32 id = kvm_apic_id(apic);
1027                u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf));
1028                apic_set_reg(apic, APIC_LDR, ldr);
1029        }
1030        apic->base_address = apic->vcpu->arch.apic_base &
1031                             MSR_IA32_APICBASE_BASE;
1032
 1033        /* with FSB-delivered interrupts, APIC functionality can be restarted */
1034        apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
1035                   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
1036
1037}
1038
1039void kvm_lapic_reset(struct kvm_vcpu *vcpu)
1040{
1041        struct kvm_lapic *apic;
1042        int i;
1043
1044        apic_debug("%s\n", __func__);
1045
1046        ASSERT(vcpu);
1047        apic = vcpu->arch.apic;
1048        ASSERT(apic != NULL);
1049
1050        /* Stop the timer in case it's a reset to an active apic */
1051        hrtimer_cancel(&apic->lapic_timer.timer);
1052
1053        apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
1054        kvm_apic_set_version(apic->vcpu);
1055
1056        for (i = 0; i < APIC_LVT_NUM; i++)
1057                apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1058        apic_set_reg(apic, APIC_LVT0,
1059                     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1060
1061        apic_set_reg(apic, APIC_DFR, 0xffffffffU);
1062        apic_set_reg(apic, APIC_SPIV, 0xff);
1063        apic_set_reg(apic, APIC_TASKPRI, 0);
1064        apic_set_reg(apic, APIC_LDR, 0);
1065        apic_set_reg(apic, APIC_ESR, 0);
1066        apic_set_reg(apic, APIC_ICR, 0);
1067        apic_set_reg(apic, APIC_ICR2, 0);
1068        apic_set_reg(apic, APIC_TDCR, 0);
1069        apic_set_reg(apic, APIC_TMICT, 0);
1070        for (i = 0; i < 8; i++) {
1071                apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
1072                apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
1073                apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
1074        }
1075        apic->irr_pending = false;
1076        update_divide_count(apic);
1077        atomic_set(&apic->lapic_timer.pending, 0);
1078        if (kvm_vcpu_is_bsp(vcpu))
1079                vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
1080        apic_update_ppr(apic);
1081
1082        vcpu->arch.apic_arb_prio = 0;
1083
1084        apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
1085                   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
1086                   vcpu, kvm_apic_id(apic),
1087                   vcpu->arch.apic_base, apic->base_address);
1088}
1089
1090bool kvm_apic_present(struct kvm_vcpu *vcpu)
1091{
1092        return vcpu->arch.apic && apic_hw_enabled(vcpu->arch.apic);
1093}
1094
1095int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
1096{
1097        return kvm_apic_present(vcpu) && apic_sw_enabled(vcpu->arch.apic);
1098}
1099
1100/*
1101 *----------------------------------------------------------------------
1102 * timer interface
1103 *----------------------------------------------------------------------
1104 */
1105
1106static bool lapic_is_periodic(struct kvm_timer *ktimer)
1107{
1108        struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic,
1109                                              lapic_timer);
1110        return apic_lvtt_period(apic);
1111}
1112
1113int apic_has_pending_timer(struct kvm_vcpu *vcpu)
1114{
1115        struct kvm_lapic *lapic = vcpu->arch.apic;
1116
1117        if (lapic && apic_enabled(lapic) && apic_lvt_enabled(lapic, APIC_LVTT))
1118                return atomic_read(&lapic->lapic_timer.pending);
1119
1120        return 0;
1121}
1122
1123static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
1124{
1125        u32 reg = apic_get_reg(apic, lvt_type);
1126        int vector, mode, trig_mode;
1127
1128        if (apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
1129                vector = reg & APIC_VECTOR_MASK;
1130                mode = reg & APIC_MODE_MASK;
1131                trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
1132                return __apic_accept_irq(apic, mode, vector, 1, trig_mode);
1133        }
1134        return 0;
1135}
1136
1137void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
1138{
1139        struct kvm_lapic *apic = vcpu->arch.apic;
1140
1141        if (apic)
1142                kvm_apic_local_deliver(apic, APIC_LVT0);
1143}
1144
1145static struct kvm_timer_ops lapic_timer_ops = {
1146        .is_periodic = lapic_is_periodic,
1147};
1148
1149static const struct kvm_io_device_ops apic_mmio_ops = {
1150        .read     = apic_mmio_read,
1151        .write    = apic_mmio_write,
1152};
1153
1154int kvm_create_lapic(struct kvm_vcpu *vcpu)
1155{
1156        struct kvm_lapic *apic;
1157
1158        ASSERT(vcpu != NULL);
1159        apic_debug("apic_init %d\n", vcpu->vcpu_id);
1160
1161        apic = kzalloc(sizeof(*apic), GFP_KERNEL);
1162        if (!apic)
1163                goto nomem;
1164
1165        vcpu->arch.apic = apic;
1166
1167        apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
1168        if (!apic->regs) {
1169                printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
1170                       vcpu->vcpu_id);
1171                goto nomem_free_apic;
1172        }
1173        apic->vcpu = vcpu;
1174
1175        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
1176                     HRTIMER_MODE_ABS);
1177        apic->lapic_timer.timer.function = kvm_timer_fn;
1178        apic->lapic_timer.t_ops = &lapic_timer_ops;
1179        apic->lapic_timer.kvm = vcpu->kvm;
1180        apic->lapic_timer.vcpu = vcpu;
1181
1182        apic->base_address = APIC_DEFAULT_PHYS_BASE;
1183        vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
1184
1185        kvm_lapic_reset(vcpu);
1186        kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
1187
1188        return 0;
1189nomem_free_apic:
1190        kfree(apic);
1191nomem:
1192        return -ENOMEM;
1193}
1194
1195int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
1196{
1197        struct kvm_lapic *apic = vcpu->arch.apic;
1198        int highest_irr;
1199
1200        if (!apic || !apic_enabled(apic))
1201                return -1;
1202
1203        apic_update_ppr(apic);
1204        highest_irr = apic_find_highest_irr(apic);
1205        if ((highest_irr == -1) ||
1206            ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)))
1207                return -1;
1208        return highest_irr;
1209}
1210
1211int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
1212{
1213        u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
1214        int r = 0;
1215
1216        if (!apic_hw_enabled(vcpu->arch.apic))
1217                r = 1;
1218        if ((lvt0 & APIC_LVT_MASKED) == 0 &&
1219            GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
1220                r = 1;
1221        return r;
1222}
1223
1224void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
1225{
1226        struct kvm_lapic *apic = vcpu->arch.apic;
1227
1228        if (apic && atomic_read(&apic->lapic_timer.pending) > 0) {
1229                if (kvm_apic_local_deliver(apic, APIC_LVTT))
1230                        atomic_dec(&apic->lapic_timer.pending);
1231        }
1232}
1233
1234int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
1235{
1236        int vector = kvm_apic_has_interrupt(vcpu);
1237        struct kvm_lapic *apic = vcpu->arch.apic;
1238
1239        if (vector == -1)
1240                return -1;
1241
1242        apic_set_vector(vector, apic->regs + APIC_ISR);
1243        apic_update_ppr(apic);
1244        apic_clear_irr(vector, apic);
1245        return vector;
1246}
1247
1248void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
1249{
1250        struct kvm_lapic *apic = vcpu->arch.apic;
1251
1252        apic->base_address = vcpu->arch.apic_base &
1253                             MSR_IA32_APICBASE_BASE;
1254        kvm_apic_set_version(vcpu);
1255
1256        apic_update_ppr(apic);
1257        hrtimer_cancel(&apic->lapic_timer.timer);
1258        update_divide_count(apic);
1259        start_apic_timer(apic);
1260        apic->irr_pending = true;
1261        kvm_make_request(KVM_REQ_EVENT, vcpu);
1262}
1263
1264void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
1265{
1266        struct kvm_lapic *apic = vcpu->arch.apic;
1267        struct hrtimer *timer;
1268
1269        if (!apic)
1270                return;
1271
1272        timer = &apic->lapic_timer.timer;
1273        if (hrtimer_cancel(timer))
1274                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
1275}
1276
1277void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
1278{
1279        u32 data;
1280        void *vapic;
1281
1282        if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
1283                return;
1284
1285        vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
1286        data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
1287        kunmap_atomic(vapic, KM_USER0);
1288
1289        apic_set_tpr(vcpu->arch.apic, data & 0xff);
1290}
1291
1292void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
1293{
1294        u32 data, tpr;
1295        int max_irr, max_isr;
1296        struct kvm_lapic *apic;
1297        void *vapic;
1298
1299        if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
1300                return;
1301
1302        apic = vcpu->arch.apic;
1303        tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
1304        max_irr = apic_find_highest_irr(apic);
1305        if (max_irr < 0)
1306                max_irr = 0;
1307        max_isr = apic_find_highest_isr(apic);
1308        if (max_isr < 0)
1309                max_isr = 0;
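             /* byte 0: TPR, byte 1: class of highest in-service vector, byte 3: highest pending vector */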
1310        data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
1311
1312        vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
1313        *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
1314        kunmap_atomic(vapic, KM_USER0);
1315}
1316
1317void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
1318{
1319        if (!irqchip_in_kernel(vcpu->kvm))
1320                return;
1321
1322        vcpu->arch.apic->vapic_addr = vapic_addr;
1323}
1324
1325int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1326{
1327        struct kvm_lapic *apic = vcpu->arch.apic;
1328        u32 reg = (msr - APIC_BASE_MSR) << 4;
1329
1330        if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
1331                return 1;
1332
 1333        /* MSR 0x830 is the x2APIC ICR: write the high half (ICR2) before the low half triggers the command */
1334        if (msr == 0x830)
1335                apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
1336        return apic_reg_write(apic, reg, (u32)data);
1337}
1338
1339int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
1340{
1341        struct kvm_lapic *apic = vcpu->arch.apic;
1342        u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
1343
1344        if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
1345                return 1;
1346
1347        if (apic_reg_read(apic, reg, 4, &low))
1348                return 1;
1349        if (msr == 0x830)
1350                apic_reg_read(apic, APIC_ICR2, 4, &high);
1351
1352        *data = (((u64)high) << 32) | low;
1353
1354        return 0;
1355}
1356
1357int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
1358{
1359        struct kvm_lapic *apic = vcpu->arch.apic;
1360
1361        if (!irqchip_in_kernel(vcpu->kvm))
1362                return 1;
1363
 1364        /* for an ICR write, store the high half (ICR2) before the low half triggers the command */
1365        if (reg == APIC_ICR)
1366                apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
1367        return apic_reg_write(apic, reg, (u32)data);
1368}
1369
1370int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
1371{
1372        struct kvm_lapic *apic = vcpu->arch.apic;
1373        u32 low, high = 0;
1374
1375        if (!irqchip_in_kernel(vcpu->kvm))
1376                return 1;
1377
1378        if (apic_reg_read(apic, reg, 4, &low))
1379                return 1;
1380        if (reg == APIC_ICR)
1381                apic_reg_read(apic, APIC_ICR2, 4, &high);
1382
1383        *data = (((u64)high) << 32) | low;
1384
1385        return 0;
1386}
1387