linux/arch/x86/include/asm/kvm_host.h

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
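/* In total: 509 + 3 = 512 slots, so KVM_MEM_SLOTS_NUM is a power of two. */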

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER           KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS       KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT            KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC                KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE            KVM_ARCH_REQ(4)
#define KVM_REQ_EVENT                   KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT                KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE            KVM_ARCH_REQ(8)
#define KVM_REQ_NMI                     KVM_ARCH_REQ(9)
#define KVM_REQ_PMU                     KVM_ARCH_REQ(10)
#define KVM_REQ_PMI                     KVM_ARCH_REQ(11)
#define KVM_REQ_SMI                     KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE      KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
        KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
        KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE     KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
        KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH                KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT         KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET                KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT                 KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER               KVM_ARCH_REQ(22)

#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD            BIT_64(63)
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
                          | X86_CR4_PKE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES       3
#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)       (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
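
/*
 * Worked example (illustrative): with PAGE_SHIFT == 12, level 2 gives
 * KVM_HPAGE_GFN_SHIFT(2) == 9, so KVM_HPAGE_SIZE(2) == 2 MiB and
 * KVM_PAGES_PER_HPAGE(2) == 512; level 3 gives a 1 GiB page of 262144
 * small pages.  gfn_to_index() then yields the index of the level-sized
 * region holding @gfn relative to @base_gfn, e.g. for a 512-aligned
 * base_gfn, gfn_to_index(base_gfn + 1536, base_gfn, 2) == 3.
 */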

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS
};

enum kvm_reg_ex {
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
        VCPU_EXREG_CR3,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
};

enum {
        VCPU_SREG_ES,
        VCPU_SREG_CS,
        VCPU_SREG_SS,
        VCPU_SREG_DS,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS  4

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_RTM         (1 << 16)
#define DR6_FIXED_1     0xfffe0ff0
#define DR6_INIT        0xffff0ff0
#define DR6_VOLATILE    0x0001e00f

#define DR7_BP_EN_MASK  0x000000ff
#define DR7_GE          (1 << 9)
#define DR7_GD          (1 << 13)
#define DR7_FIXED_1     0x00000400
#define DR7_VOLATILE    0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |        \
                                 PFERR_USER_MASK |              \
                                 PFERR_WRITE_MASK |             \
                                 PFERR_PRESENT_MASK)

/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting
 * with the SVE bit in EPT PTEs.
 */
#define SPTE_SPECIAL_MASK (1ULL << 62)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC    0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with the PV-EOI value in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING 1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};
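
/*
 * How such a cache is consumed (a sketch mirroring mmu_memory_cache_alloc()
 * in arch/x86/kvm/mmu.c; shown here for illustration only): objects are
 * popped in LIFO order, so the page fault path never sleeps for memory:
 *
 *      BUG_ON(!mc->nobjs);
 *      p = mc->objects[--mc->nobjs];
 */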

/*
 * The pages used as guest page tables on the soft mmu are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page cannot exceed 15 bits.
 *
 * Currently we use 14 bits: @level, @cr4_pae, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned level:4;
                unsigned cr4_pae:1;
                unsigned quadrant:2;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
                unsigned nxe:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
                unsigned smap_andnot_wp:1;
                unsigned ad_disabled:1;
                unsigned :7;

                /*
                 * This is left at the top of the word so that
                 * kvm_memslots_for_spte_role can extract it with a
                 * simple shift.  While there is room, give it a whole
                 * byte so it is also faster to load it from memory.
                 */
                unsigned smm:8;
        };
};
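
/*
 * Layout note (illustrative): with the field widths above, smm occupies
 * bits 24-31 of @word, so kvm_memslots_for_spte_role() later in this
 * header can recover it as a single byte, effectively (role.word >> 24).
 */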

struct kvm_rmap_head {
        unsigned long val;
};

struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        u64 *spt;
        /* hold the gfn of each spte inside spt */
        gfn_t *gfns;
        bool unsync;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */

        /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen.  */
        unsigned long mmu_valid_gen;

        DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
        /*
         * Used out of the mmu-lock to avoid reading spte values while an
         * update is in progress; see the comments in __get_spte_lockless().
         */
        int clear_spte_count;
#endif

        /* Number of writes since the last time traversal visited this page.  */
        atomic_t write_flooding_count;
};

struct kvm_pio_request {
        unsigned long count;
        int in;
        int port;
        int size;
};

struct rsvd_bits_validate {
        u64 rsvd_bits_mask[2][4];
        u64 bad_mt_xwr;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                            struct x86_exception *exception);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
        union kvm_mmu_page_role base_role;
        u8 root_level;
        u8 shadow_root_level;
        u8 ept_ad;
        bool direct_map;

        /*
         * Bitmap; bit set = permission fault
         * Byte index: page fault error code [4:1]
         * Bit index: pte permissions in ACC_* format
         */
        u8 permissions[16];
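
        /*
         * A simplified sketch of the lookup done by permission_fault() in
         * mmu.h (illustrative; the real code also folds in SMAP state):
         *
         *      fault = (mmu->permissions[pfec >> 1] >> pte_access) & 1;
         */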

        /*
         * The pkru_mask indicates if protection key checks are needed.  It
         * consists of 16 domains indexed by page fault error code bits [4:1],
         * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
         * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
         */
        u32 pkru_mask;

        u64 *pae_root;
        u64 *lm_root;

        /*
         * Checks zero bits on shadow page table entries; these bits
         * include not only hardware-reserved bits but also bits that
         * sptes never use.
         */
        struct rsvd_bits_validate shadow_zero_check;

        struct rsvd_bits_validate guest_rsvd_check;

        /* Can have large pages at levels 2..last_nonleaf_level-1. */
        u8 last_nonleaf_level;

        bool nx;

        u64 pdptrs[4]; /* pae */
};

enum pmc_type {
        KVM_PMC_GP = 0,
        KVM_PMC_FIXED,
};

struct kvm_pmc {
        enum pmc_type type;
        u8 idx;
        u64 counter;
        u64 eventsel;
        struct perf_event *perf_event;
        struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
        unsigned nr_arch_gp_counters;
        unsigned nr_arch_fixed_counters;
        unsigned available_event_types;
        u64 fixed_ctr_ctrl;
        u64 global_ctrl;
        u64 global_status;
        u64 global_ovf_ctrl;
        u64 counter_bitmask[2];
        u64 global_ctrl_mask;
        u64 reserved_bits;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
        KVM_DEBUGREG_BP_ENABLED = 1,
        KVM_DEBUGREG_WONT_EXIT = 2,
        KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
        u64 base;
        u64 mask;
        struct list_head node;
};

struct kvm_mtrr {
        struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
        mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
        u64 deftype;

        struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
        struct hrtimer timer;
        int index;
        u64 config;
        u64 count;
        u64 exp_time;
        struct hv_message msg;
        bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
        u64 version;
        u64 control;
        u64 msg_page;
        u64 evt_page;
        atomic64_t sint[HV_SYNIC_SINT_COUNT];
        atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
        DECLARE_BITMAP(auto_eoi_bitmap, 256);
        DECLARE_BITMAP(vec_bitmap, 256);
        bool active;
        bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
        u32 vp_index;
        u64 hv_vapic;
        s64 runtime_offset;
        struct kvm_vcpu_hv_synic synic;
        struct kvm_hyperv_exit exit;
        struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
        DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
};

struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
         */
        unsigned long regs[NR_VCPU_REGS];
        u32 regs_avail;
        u32 regs_dirty;

        unsigned long cr0;
        unsigned long cr0_guest_owned_bits;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
        u32 pkru;
        u32 hflags;
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
        bool apicv_active;
        DECLARE_BITMAP(ioapic_handled_vectors, 256);
        unsigned long apic_attention;
        int32_t apic_arb_prio;
        int mp_state;
        u64 ia32_misc_enable_msr;
        u64 smbase;
        bool tpr_access_reporting;
        u64 ia32_xss;

        /*
         * Paging state of the vcpu
         *
         * If the vcpu runs in guest mode with two-level paging, this still
         * saves the paging mode of the L1 guest.  This context is always
         * used to handle faults.
         */
        struct kvm_mmu mmu;

        /*
         * Paging state of an L2 guest (used for nested npt)
         *
         * This context saves all the information necessary to walk the page
         * tables of an L2 guest.  It is only initialized for page table
         * walking and not for faulting, since we never handle L2 page
         * faults on the host.
         */
        struct kvm_mmu nested_mmu;

        /*
         * Pointer to the mmu context currently used for
         * gva_to_gpa translations.
         */
        struct kvm_mmu *walk_mmu;

        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        struct fpu guest_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;

        struct kvm_pio_request pio;
        void *pio_data;

        u8 event_exit_inst_len;

        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
                bool reinject;
                u8 nr;
                u32 error_code;
                u8 nested_apf;
        } exception;

        struct kvm_queued_interrupt {
                bool pending;
                bool soft;
                u8 nr;
        } interrupt;

        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

        int maxphyaddr;

        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;
        bool emulate_regs_need_sync_to_vcpu;
        bool emulate_regs_need_sync_from_vcpu;
        int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
        struct gfn_to_hva_cache pv_time;
        bool pv_time_enabled;
        /* set guest stopped flag in pvclock flags field */
        bool pvclock_set_guest_stopped_request;

        struct {
                u64 msr_val;
                u64 last_steal;
                struct gfn_to_hva_cache stime;
                struct kvm_steal_time steal;
        } st;

        u64 tsc_offset;
        u64 last_guest_tsc;
        u64 last_host_tsc;
        u64 tsc_offset_adjustment;
        u64 this_tsc_nsec;
        u64 this_tsc_write;
        u64 this_tsc_generation;
        bool tsc_catchup;
        bool tsc_always_catchup;
        s8 virtual_tsc_shift;
        u32 virtual_tsc_mult;
        u32 virtual_tsc_khz;
        s64 ia32_tsc_adjust_msr;
        u64 tsc_scaling_ratio;

        atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
        unsigned nmi_pending; /* NMI queued after currently running handler */
        bool nmi_injected;    /* Trying to inject an NMI this entry */
        bool smi_pending;    /* SMI queued after currently running handler */

        struct kvm_mtrr mtrr_state;
        u64 pat;

        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];
        unsigned long guest_debug_dr7;
        u64 msr_platform_info;
        u64 msr_misc_features_enables;

        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
        u64 mcg_ext_ctl;
        u64 *mce_banks;

        /* Cache MMIO info */
        u64 mmio_gva;
        unsigned access;
        gfn_t mmio_gfn;
        u64 mmio_gen;

        struct kvm_pmu pmu;

        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;

        struct kvm_vcpu_hv hyperv;

        cpumask_var_t wbinvd_dirty_mask;

        unsigned long last_retry_eip;
        unsigned long last_retry_addr;

        struct {
                bool halted;
                gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
                struct gfn_to_hva_cache data;
                u64 msr_val;
                u32 id;
                bool send_user_only;
                u32 host_apf_reason;
                unsigned long nested_apf_token;
                bool delivery_as_pf_vmexit;
        } apf;

        /* OSVW MSRs (AMD only) */
        struct {
                u64 length;
                u64 status;
        } osvw;

        struct {
                u64 msr_val;
                struct gfn_to_hva_cache data;
        } pv_eoi;

        /*
         * Indicates whether the access faulted on its own page tables in
         * the guest; set when fixing a page fault and used to detect
         * unhandleable instructions.
         */
        bool write_fault_to_shadow_pgtable;

        /* set at EPT violation at this point */
        unsigned long exit_qualification;

        /* pv related host specific info */
        struct {
                bool pv_unhalted;
        } pv;

        int pending_ioapic_eoi;
        int pending_external_vector;

        /* GPA available (AMD only) */
        bool gpa_available;
};

struct kvm_lpage_info {
        int disallow_lpage;
};

struct kvm_arch_memory_slot {
        struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * As the mode we use the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two,
 * which makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16
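
/*
 * For example (illustrative): flat xAPIC encodes one CPU per bit of an
 * 8-bit LDR (hence xapic_flat_map[8] below), cluster mode keeps 4 bits
 * for the ID within one of 16 clusters (xapic_cluster_map[16][4]), and
 * x2APIC uses 16 one-hot bits per cluster.
 */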

struct kvm_apic_map {
        struct rcu_head rcu;
        u8 mode;
        u32 max_apic_id;
        union {
                struct kvm_lapic *xapic_flat_map[8];
                struct kvm_lapic *xapic_cluster_map[16][4];
        };
        struct kvm_lapic *phys_map[];
};

/* Hyper-V emulation context */
struct kvm_hv {
        struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;

        /* Hyper-V based guest crash (NT kernel bugcheck) parameters */
        u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
        u64 hv_crash_ctl;

        HV_REFERENCE_TSC_PAGE tsc_ref;
};

enum kvm_irqchip_mode {
        KVM_IRQCHIP_NONE,
        KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
        KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_arch {
        unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        unsigned long mmu_valid_gen;
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct list_head active_mmu_pages;
        struct list_head zapped_obsolete_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
        atomic_t assigned_device_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;

        unsigned int tss_addr;
        bool apic_access_page_done;

        gpa_t wall_clock;

        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
        raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_write;
        u32 last_tsc_khz;
        u64 cur_tsc_nsec;
        u64 cur_tsc_write;
        u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;

        spinlock_t pvclock_gtod_sync_lock;
        bool use_master_clock;
        u64 master_kernel_ns;
        u64 master_cycle_now;
        struct delayed_work kvmclock_update_work;
        struct delayed_work kvmclock_sync_work;

        struct kvm_xen_hvm_config xen_hvm_config;

        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;

        struct kvm_hv hyperv;

        #ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
        #endif

        bool backwards_tsc_observed;
        bool boot_vcpu_runs_old_kvmclock;
        u32 bsp_vcpu_id;

        u64 disabled_quirks;

        enum kvm_irqchip_mode irqchip_mode;
        u8 nr_reserved_ioapic_pins;

        bool disabled_lapic_found;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        u32 ldr_mode;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        bool x2apic_format;
        bool x2apic_broadcast_quirk_disabled;
};

struct kvm_vm_stat {
        ulong mmu_shadow_zapped;
        ulong mmu_pte_write;
        ulong mmu_pte_updated;
        ulong mmu_pde_zapped;
        ulong mmu_flooded;
        ulong mmu_recycled;
        ulong mmu_cache_miss;
        ulong mmu_unsync;
        ulong remote_tlb_flush;
        ulong lpages;
        ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
        u64 pf_fixed;
        u64 pf_guest;
        u64 tlb_flush;
        u64 invlpg;

        u64 exits;
        u64 io_exits;
        u64 mmio_exits;
        u64 signal_exits;
        u64 irq_window_exits;
        u64 nmi_window_exits;
        u64 halt_exits;
        u64 halt_successful_poll;
        u64 halt_attempted_poll;
        u64 halt_poll_invalid;
        u64 halt_wakeup;
        u64 request_irq_exits;
        u64 irq_exits;
        u64 host_state_reload;
        u64 efer_reload;
        u64 fpu_reload;
        u64 insn_emulation;
        u64 insn_emulation_fail;
        u64 hypercalls;
        u64 irq_injections;
        u64 nmi_injections;
        u64 req_event;
};

struct x86_instruction_info;

struct msr_data {
        bool host_initiated;
        u32 index;
        u64 data;
};

struct kvm_lapic_irq {
        u32 vector;
        u16 delivery_mode;
        u16 dest_mode;
        bool level;
        u16 trig_mode;
        u32 shorthand;
        u32 dest_id;
        bool msi_redir_hint;
};

struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        int (*hardware_enable)(void);
        void (*hardware_disable)(void);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
        bool (*cpu_has_high_real_mode_segbase)(void);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);

        int (*vm_init)(struct kvm *kvm);
        void (*vm_destroy)(struct kvm *kvm);

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*decache_cr3)(struct kvm_vcpu *vcpu);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        u64 (*get_dr6)(struct kvm_vcpu *vcpu);
        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
        u32 (*get_pkru)(struct kvm_vcpu *vcpu);

        void (*tlb_flush)(struct kvm_vcpu *vcpu);

        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu);
        void (*cancel_injection)(struct kvm_vcpu *vcpu);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        bool (*get_enable_apicv)(void);
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
        void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
        int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        bool (*invpcid_supported)(void);

        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

        void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

        bool (*has_wbinvd_exit)(void);

        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

        int (*check_intercept)(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage);
        void (*handle_external_intr)(struct kvm_vcpu *vcpu);
        bool (*mpx_supported)(void);
        bool (*xsaves_supported)(void);

        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

        void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

        /*
         * Arch-specific dirty logging hooks. These hooks are only supposed to
         * be valid if the specific arch has a hardware-accelerated dirty
         * logging mechanism. Currently only for PML on VMX.
         *
         *  - slot_enable_log_dirty:
         *      called when enabling log dirty mode for the slot.
         *  - slot_disable_log_dirty:
         *      called when disabling log dirty mode for the slot.
         *      also called when slot is created with log dirty disabled.
         *  - flush_log_dirty:
         *      called before reporting dirty_bitmap to userspace.
         *  - enable_log_dirty_pt_masked:
         *      called when reenabling log dirty for the GFNs in the mask after
         *      corresponding bits are cleared in slot->dirty_bitmap.
         */
        void (*slot_enable_log_dirty)(struct kvm *kvm,
                                      struct kvm_memory_slot *slot);
        void (*slot_disable_log_dirty)(struct kvm *kvm,
                                       struct kvm_memory_slot *slot);
        void (*flush_log_dirty)(struct kvm *kvm);
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
        int (*write_log_dirty)(struct kvm_vcpu *vcpu);

        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;

        /*
         * Architecture-specific hooks for vCPU blocking due to the
         * HLT instruction.
         * Returns for .pre_block():
         *    - 0 means continue to block the vCPU.
         *    - 1 means we cannot block the vCPU, because some event
         *        happened during this period, such as the 'ON' bit in
         *        the posted-interrupts descriptor being set.
         */
        int (*pre_block)(struct kvm_vcpu *vcpu);
        void (*post_block)(struct kvm_vcpu *vcpu);

        void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
        void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);

        int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
        void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

        void (*setup_mce)(struct kvm_vcpu *vcpu);
};
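
/*
 * Vendor modules fill in one of these tables and register it through the
 * generic kvm_init() at module load time; a sketch (field values as used
 * by vmx.c are shown purely for illustration):
 *
 *      static struct kvm_x86_ops vmx_x86_ops = {
 *              .hardware_enable = hardware_enable,
 *              ...
 *      };
 *
 *      kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *               __alignof__(struct vcpu_vmx), THIS_MODULE);
 */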

struct kvm_arch_async_pf {
        u32 token;
        gfn_t gfn;
        unsigned long cr3;
        bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
                u64 acc_track_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   struct kvm_memory_slot *slot,
                                   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                          const void *val, int bytes);

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8   kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64  kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64  kvm_default_tsc_scaling_ratio;
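
/*
 * The ratio is a binary fixed-point value, so scaling a TSC reading (as
 * kvm_scale_tsc() does) is, in effect:
 *
 *      scaled_tsc = (tsc * ratio) >> kvm_tsc_scaling_ratio_frac_bits;
 *
 * which makes kvm_default_tsc_scaling_ratio the 1:1 ratio.  (A sketch;
 * the real code uses a 128-bit multiply to avoid overflow.)
 */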

extern u64 kvm_mce_cap_supported;

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
        EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE          (1 << 0)
#define EMULTYPE_TRAP_UD            (1 << 1)
#define EMULTYPE_SKIP               (1 << 2)
#define EMULTYPE_RETRY              (1 << 3)
#define EMULTYPE_NO_REEXECUTE       (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
                            int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                        int emulation_type)
{
        return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
                    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
                                       int irq_source_id, int level)
{
        /* Logical OR for level-triggered interrupts */
        if (level)
                __set_bit(irq_source_id, irq_state);
        else
                __clear_bit(irq_source_id, irq_state);

        return !!(*irq_state);
}
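
/*
 * For example (illustrative): if sources 0 and 2 both assert a shared
 * level-triggered line, *irq_state ends up with bits 0 and 2 set, and
 * the line only reads as deasserted once both sources have cleared it.
 */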

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                           struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
                              struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
                       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                                  struct x86_exception *exception)
{
        return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
        return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
        return get_canonical(la) != la;
#else
        return false;
#endif
}
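
/*
 * Worked example (48-bit virtual addresses): get_canonical() sign-extends
 * bit 47, so get_canonical(0x0000800000000000) == 0xffff800000000000,
 * which differs from the input; the address is therefore non-canonical.
 */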

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE                                                  \
        (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
        TASK_SWITCH_CALL = 0,
        TASK_SWITCH_IRET = 1,
        TASK_SWITCH_JMP = 2,
        TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK             (1 << 0)
#define HF_HIF_MASK             (1 << 1)
#define HF_VINTR_MASK           (1 << 2)
#define HF_NMI_MASK             (1 << 3)
#define HF_IRET_MASK            (1 << 4)
#define HF_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK             (1 << 6)
#define HF_SMM_INSIDE_NMI_MASK  (1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)      \
        "666: " insn "\n\t" \
        "668: \n\t"                           \
        ".pushsection .fixup, \"ax\" \n" \
        "667: \n\t" \
        cleanup_insn "\n\t"                   \
        "cmpb $0, kvm_rebooting \n\t"         \
        "jne 668b \n\t"                       \
        __ASM_SIZE(push) " $666b \n\t"        \
        "call kvm_spurious_fault \n\t"        \
        ".popsection \n\t" \
        _ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)              \
        ____kvm_handle_fault_on_reboot(insn, "")
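
/*
 * Typical use (a sketch; vendor code wraps this in its own helper): a
 * virtualization instruction is emitted so that a fault caused by a
 * concurrent reboot is swallowed instead of crashing the host, e.g.
 *
 *      asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
 */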

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
                             struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                     struct kvm_lapic_irq *irq);

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->vcpu_blocking)
                kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->vcpu_unblocking)
                kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
        return __default_cpu_present_to_apicid(mps_cpu);
#else
        WARN_ON_ONCE(1);
        return BAD_APICID;
#endif
}

#endif /* _ASM_X86_KVM_HOST_H */