linux/arch/x86/include/asm/kvm_host.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>

#define KVM_MAX_VCPUS 255
#define KVM_SOFT_MAX_VCPUS 160
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD            BIT_64(63)
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)
/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES       3
#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)       (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
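
/*
 * For reference, with PAGE_SHIFT == 12 these expand to 4 KiB (level 1),
 * 2 MiB (level 2) and 1 GiB (level 3) pages, i.e.
 * KVM_PAGES_PER_HPAGE(2) == 512 and KVM_PAGES_PER_HPAGE(3) == 512 * 512.
 */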

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
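
/*
 * Worked example: for a 2 MiB mapping (level 2), KVM_HPAGE_GFN_SHIFT(2) == 9,
 * so gfn_to_index(1536, 1024, 2) == (1536 >> 9) - (1024 >> 9) == 1, i.e. the
 * second 512-page slot of a memslot starting at gfn 1024.
 */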

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
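/*
 * For scale: KVM_PERMILLE_MMU_PAGES caps the default shadow page budget at
 * 20/1000 (2%) of the guest's memory pages (see
 * kvm_mmu_calculate_mmu_pages() below), and KVM_MMU_HASH_SHIFT 10 sizes the
 * shadow page hash at 1024 buckets.
 */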
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS
};

enum kvm_reg_ex {
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
        VCPU_EXREG_CR3,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
};

enum {
        VCPU_SREG_ES,
        VCPU_SREG_CS,
        VCPU_SREG_SS,
        VCPU_SREG_DS,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS  4

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_RTM         (1 << 16)
#define DR6_FIXED_1     0xfffe0ff0
#define DR6_INIT        0xffff0ff0
#define DR6_VOLATILE    0x0001e00f

#define DR7_BP_EN_MASK  0x000000ff
#define DR7_GE          (1 << 9)
#define DR7_GD          (1 << 13)
#define DR7_FIXED_1     0x00000400
#define DR7_VOLATILE    0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC    0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with the PV-EOI flag in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING 1

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};
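
/*
 * The caches are topped up before mmu_lock is taken (see
 * mmu_topup_memory_caches() in mmu.c), so the page fault path itself never
 * needs to allocate.
 */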

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - page table level for this shadow (1-4)
 *   bit    4 - cr4_pae: gptes are 8 bytes rather than 4
 *   bits 5:6 - page table quadrant for 2-level guests
 *   bits 7:12 - unused padding, kept for nice hex output
 *   bit   13 - direct mapping of virtual to physical mapping at gfn,
 *              used for real mode and two-dimensional paging
 *   bits 14:16 - common access permissions for all ptes in this shadow page
 *   bits 17:21 - invalid, nxe, cr0_wp, smep_andnot_wp, smap_andnot_wp
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned level:4;
                unsigned cr4_pae:1;
                unsigned quadrant:2;
                unsigned pad_for_nice_hex_output:6;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
                unsigned nxe:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
                unsigned smap_andnot_wp:1;
        };
};
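
/*
 * Packing the role into a single 32-bit word lets the MMU compare and hash
 * complete roles with one role.word access; gfn plus role.word key the
 * shadow page hash below, so the same gfn shadowed at a different level or
 * quadrant yields a distinct shadow page.
 */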

struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        u64 *spt;
        /* holds the gfn of each spte inside spt */
        gfn_t *gfns;
        bool unsync;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        unsigned long parent_ptes;      /* Reverse mapping for parent_pte */

        /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen.  */
        unsigned long mmu_valid_gen;

        DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
        /*
         * Used outside of mmu_lock to avoid reading spte values while an
         * update is in progress; see the comments in __get_spte_lockless().
         */
        int clear_spte_count;
#endif

        /* Number of writes since the last time a traversal visited this page.  */
        int write_flooding_count;
};

struct kvm_pio_request {
        unsigned long count;
        int in;
        int port;
        int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 32-bit PAE, and
 * 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                            struct x86_exception *exception);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
        union kvm_mmu_page_role base_role;
        bool direct_map;

        /*
         * Bitmap; bit set = permission fault
         * Byte index: page fault error code [4:1]
         * Bit index: pte permissions in ACC_* format
         */
        u8 permissions[16];
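
        /*
         * Example (see permission_fault() in mmu.h): a user-mode write to a
         * read-only page faults with PFERR_WRITE_MASK | PFERR_USER_MASK ==
         * 0x6, so byte permissions[0x6 >> 1] is consulted and the bit
         * matching the pte's ACC_* bits decides whether to report a
         * permission fault.
         */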

        u64 *pae_root;
        u64 *lm_root;
        u64 rsvd_bits_mask[2][4];
        u64 bad_mt_xwr;

        /*
         * Bitmap: bit set = last pte in walk
         * index[0:1]: level (zero-based)
         * index[2]: pte.ps
         */
        u8 last_pte_bitmap;
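
        /*
         * Example: a 4 KiB pte at level 1 tests bit (1 - 1) == 0, while a
         * 2 MiB pde (level 2 with pte.ps set) tests bit (2 - 1) | (1 << 2)
         * == 5.
         */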

        bool nx;

        u64 pdptrs[4]; /* pae */
};

enum pmc_type {
        KVM_PMC_GP = 0,
        KVM_PMC_FIXED,
};

struct kvm_pmc {
        enum pmc_type type;
        u8 idx;
        u64 counter;
        u64 eventsel;
        struct perf_event *perf_event;
        struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
        unsigned nr_arch_gp_counters;
        unsigned nr_arch_fixed_counters;
        unsigned available_event_types;
        u64 fixed_ctr_ctrl;
        u64 global_ctrl;
        u64 global_status;
        u64 global_ovf_ctrl;
        u64 counter_bitmask[2];
        u64 global_ctrl_mask;
        u64 reserved_bits;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        u64 reprogram_pmi;
};

enum {
        KVM_DEBUGREG_BP_ENABLED = 1,
        KVM_DEBUGREG_WONT_EXIT = 2,
        KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
         */
        unsigned long regs[NR_VCPU_REGS];
        u32 regs_avail;
        u32 regs_dirty;

        unsigned long cr0;
        unsigned long cr0_guest_owned_bits;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
        u32 hflags;
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
        unsigned long apic_attention;
        int32_t apic_arb_prio;
        int mp_state;
        u64 ia32_misc_enable_msr;
        bool tpr_access_reporting;
        u64 ia32_xss;

        /*
         * Paging state of the vcpu
         *
         * If the vcpu runs in guest mode with two-level (nested) paging, this
         * still holds the paging mode of the L1 guest. This context is always
         * used to handle faults.
         */
        struct kvm_mmu mmu;

        /*
         * Paging state of an L2 guest (used for nested npt)
         *
         * This context saves all the information necessary to walk the page
         * tables of an L2 guest. It is only initialized for page table
         * walking and not for faulting, since we never handle L2 page faults
         * on the host.
         */
        struct kvm_mmu nested_mmu;

        /*
         * Pointer to the mmu context currently used for
         * gva_to_gpa translations.
         */
        struct kvm_mmu *walk_mmu;

        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        struct fpu guest_fpu;
        bool eager_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;

        struct kvm_pio_request pio;
        void *pio_data;

        u8 event_exit_inst_len;

        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
                bool reinject;
                u8 nr;
                u32 error_code;
        } exception;

        struct kvm_queued_interrupt {
                bool pending;
                bool soft;
                u8 nr;
        } interrupt;

        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

        int maxphyaddr;

        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;
        bool emulate_regs_need_sync_to_vcpu;
        bool emulate_regs_need_sync_from_vcpu;
        int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
        struct gfn_to_hva_cache pv_time;
        bool pv_time_enabled;
        /* set guest stopped flag in pvclock flags field */
        bool pvclock_set_guest_stopped_request;

        struct {
                u64 msr_val;
                u64 last_steal;
                u64 accum_steal;
                struct gfn_to_hva_cache stime;
                struct kvm_steal_time steal;
        } st;

        u64 last_guest_tsc;
        u64 last_host_tsc;
        u64 tsc_offset_adjustment;
        u64 this_tsc_nsec;
        u64 this_tsc_write;
        u64 this_tsc_generation;
        bool tsc_catchup;
        bool tsc_always_catchup;
        s8 virtual_tsc_shift;
        u32 virtual_tsc_mult;
        u32 virtual_tsc_khz;
        s64 ia32_tsc_adjust_msr;

        atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
        unsigned nmi_pending; /* NMI queued after currently running handler */
        bool nmi_injected;    /* Trying to inject an NMI this entry */

        struct mtrr_state_type mtrr_state;
        u64 pat;

        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];
        unsigned long guest_debug_dr7;

        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
        u64 *mce_banks;

        /* Cache MMIO info */
        u64 mmio_gva;
        unsigned access;
        gfn_t mmio_gfn;
        u64 mmio_gen;

        struct kvm_pmu pmu;

        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;

        /* fields used by HYPER-V emulation */
        u64 hv_vapic;

        cpumask_var_t wbinvd_dirty_mask;

        unsigned long last_retry_eip;
        unsigned long last_retry_addr;

        struct {
                bool halted;
                gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
                struct gfn_to_hva_cache data;
                u64 msr_val;
                u32 id;
                bool send_user_only;
        } apf;

        /* OSVW MSRs (AMD only) */
        struct {
                u64 length;
                u64 status;
        } osvw;

        struct {
                u64 msr_val;
                struct gfn_to_hva_cache data;
        } pv_eoi;

        /*
         * Indicates whether the faulting access was a write to the guest's
         * own page tables; set while fixing a page fault and used to detect
         * unhandleable instructions.
         */
        bool write_fault_to_shadow_pgtable;

        /* exit qualification saved when an EPT violation occurs */
        unsigned long exit_qualification;

        /* pv related host specific info */
        struct {
                bool pv_unhalted;
        } pv;
};

struct kvm_lpage_info {
        int write_count;
};

struct kvm_arch_memory_slot {
        unsigned long *rmap[KVM_NR_PAGE_SIZES];
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

/*
 * As the mode we use the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
        struct rcu_head rcu;
        u8 mode;
        struct kvm_lapic *phys_map[256];
        /* first index is the cluster id, second is the cpu id within a cluster */
        struct kvm_lapic *logical_map[16][16];
};
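
/*
 * Example: in x2APIC mode the LDR has the architectural layout
 * (cluster_id << 16) | (1 << (apic_id & 0xf)), matching mode == 16 above:
 * the low 16 bits select cpus within logical_map[cluster_id].
 */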

struct kvm_arch {
        unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        unsigned long mmu_valid_gen;
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct list_head active_mmu_pages;
        struct list_head zapped_obsolete_pages;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        int vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;

        unsigned int tss_addr;
        bool apic_access_page_done;

        gpa_t wall_clock;

        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
        raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_write;
        u32 last_tsc_khz;
        u64 cur_tsc_nsec;
        u64 cur_tsc_write;
        u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;

        spinlock_t pvclock_gtod_sync_lock;
        bool use_master_clock;
        u64 master_kernel_ns;
        cycle_t master_cycle_now;
        struct delayed_work kvmclock_update_work;
        struct delayed_work kvmclock_sync_work;

        struct kvm_xen_hvm_config xen_hvm_config;

        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;

        /* fields used by HYPER-V emulation */
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;

        #ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
        #endif

        bool boot_vcpu_runs_old_kvmclock;
};

struct kvm_vm_stat {
        u32 mmu_shadow_zapped;
        u32 mmu_pte_write;
        u32 mmu_pte_updated;
        u32 mmu_pde_zapped;
        u32 mmu_flooded;
        u32 mmu_recycled;
        u32 mmu_cache_miss;
        u32 mmu_unsync;
        u32 remote_tlb_flush;
        u32 lpages;
};

struct kvm_vcpu_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 nmi_window_exits;
        u32 halt_exits;
        u32 halt_successful_poll;
        u32 halt_wakeup;
        u32 request_irq_exits;
        u32 irq_exits;
        u32 host_state_reload;
        u32 efer_reload;
        u32 fpu_reload;
        u32 insn_emulation;
        u32 insn_emulation_fail;
        u32 hypercalls;
        u32 irq_injections;
        u32 nmi_injections;
};

struct x86_instruction_info;

struct msr_data {
        bool host_initiated;
        u32 index;
        u64 data;
};

struct kvm_lapic_irq {
        u32 vector;
        u32 delivery_mode;
        u32 dest_mode;
        u32 level;
        u32 trig_mode;
        u32 shorthand;
        u32 dest_id;
};

struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        int (*hardware_enable)(void);
        void (*hardware_disable)(void);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*decache_cr3)(struct kvm_vcpu *vcpu);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        u64 (*get_dr6)(struct kvm_vcpu *vcpu);
        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
        void (*fpu_activate)(struct kvm_vcpu *vcpu);
        void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

        void (*tlb_flush)(struct kvm_vcpu *vcpu);

        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject);
        void (*cancel_injection)(struct kvm_vcpu *vcpu);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        int (*vm_has_apicv)(struct kvm *kvm);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm *kvm, int isr);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
        void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
        void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        bool (*invpcid_supported)(void);
        void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);

        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

        void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

        bool (*has_wbinvd_exit)(void);

        void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
        u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

        u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
        u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);

        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

        int (*check_intercept)(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage);
        void (*handle_external_intr)(struct kvm_vcpu *vcpu);
        bool (*mpx_supported)(void);
        bool (*xsaves_supported)(void);

        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

        void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);

        /*
         * Arch-specific dirty logging hooks. These hooks are only supposed to
         * be valid if the specific arch has a hardware-accelerated dirty
         * logging mechanism. Currently only for PML on VMX.
         *
         *  - slot_enable_log_dirty:
         *      called when enabling log dirty mode for the slot.
         *  - slot_disable_log_dirty:
         *      called when disabling log dirty mode for the slot.
         *      also called when slot is created with log dirty disabled.
         *  - flush_log_dirty:
         *      called before reporting dirty_bitmap to userspace.
         *  - enable_log_dirty_pt_masked:
         *      called when reenabling log dirty for the GFNs in the mask after
         *      corresponding bits are cleared in slot->dirty_bitmap.
         */
        void (*slot_enable_log_dirty)(struct kvm *kvm,
                                      struct kvm_memory_slot *slot);
        void (*slot_disable_log_dirty)(struct kvm *kvm,
                                       struct kvm_memory_slot *slot);
        void (*flush_log_dirty)(struct kvm *kvm);
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
};

struct kvm_arch_async_pf {
        u32 token;
        gfn_t gfn;
        unsigned long cr3;
        bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

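/*
 * Convenience wrappers around the adjust_tsc_offset hook: the host flag
 * tells the backend whether the adjustment is expressed in host or guest
 * TSC cycles, which matters once the two tick at different rates (TSC
 * scaling).
 */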
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
                                           s64 adjustment)
{
        kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
        kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   struct kvm_memory_slot *slot,
                                   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                          const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* minimum supported tsc_khz for guests */
extern u32  kvm_min_guest_tsc_khz;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
        EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE          (1 << 0)
#define EMULTYPE_TRAP_UD            (1 << 1)
#define EMULTYPE_SKIP               (1 << 2)
#define EMULTYPE_RETRY              (1 << 3)
#define EMULTYPE_NO_REEXECUTE       (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
                            int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                        int emulation_type)
{
        return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
                    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
                                       int irq_source_id, int level)
{
        /* Logical OR for level-triggered interrupt */
        if (level)
                __set_bit(irq_source_id, irq_state);
        else
                __clear_bit(irq_source_id, irq_state);

        return !!(*irq_state);
}

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                           struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
                              struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
                       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                                  struct x86_exception *exception)
{
        return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}
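
/*
 * 0x600 is the processor signature architecturally left in EDX after
 * RESET/INIT on P6-family parts: family 6, model 0, stepping 0.
 */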

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
        return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
        return get_canonical(la) != la;
#else
        return false;
#endif
}
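
/*
 * get_canonical() sign-extends bit 47 into bits 63:48, e.g.
 * 0x0000800000000000 becomes 0xffff800000000000, so that address is
 * reported as non-canonical on 64-bit builds.
 */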

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE                                                  \
        (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
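
/*
 * That works out to 0x68 + 32 + 8192 + 1 == 8329 bytes; the extra byte is
 * the all-ones terminator the CPU expects after the I/O permission bitmap.
 */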

enum {
        TASK_SWITCH_CALL = 0,
        TASK_SWITCH_IRET = 1,
        TASK_SWITCH_JMP = 2,
        TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK             (1 << 0)
#define HF_HIF_MASK             (1 << 1)
#define HF_VINTR_MASK           (1 << 2)
#define HF_NMI_MASK             (1 << 3)
#define HF_IRET_MASK            (1 << 4)
#define HF_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)      \
        "666: " insn "\n\t" \
        "668: \n\t"                           \
        ".pushsection .fixup, \"ax\" \n" \
        "667: \n\t" \
        cleanup_insn "\n\t"                   \
        "cmpb $0, kvm_rebooting \n\t"         \
        "jne 668b \n\t"                       \
        __ASM_SIZE(push) " $666b \n\t"        \
        "call kvm_spurious_fault \n\t"        \
        ".popsection \n\t" \
        _ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)              \
        ____kvm_handle_fault_on_reboot(insn, "")
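
/*
 * How the fixup works: if the instruction at label 666 faults, the fixup
 * entry branches to 667, runs cleanup_insn, and resumes at 668 (i.e. skips
 * the instruction) when kvm_rebooting is set; otherwise it pushes the
 * address of the faulting instruction and calls kvm_spurious_fault().
 * vmx.c and svm.c wrap their VMX/SVM instructions with this, e.g.
 * #define __ex(x) __kvm_handle_fault_on_reboot(x).
 */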

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                           unsigned long address);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);

#endif /* _ASM_X86_KVM_HOST_H */