linux/arch/x86/include/asm/kvm_host.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER           KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS       KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT            KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC                KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE            KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_CR3                KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT                   KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT                KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE            KVM_ARCH_REQ(8)
#define KVM_REQ_NMI                     KVM_ARCH_REQ(9)
#define KVM_REQ_PMU                     KVM_ARCH_REQ(10)
#define KVM_REQ_PMI                     KVM_ARCH_REQ(11)
#define KVM_REQ_SMI                     KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE      KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
        KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
        KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE     KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
        KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH                KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT         KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET                KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT                 KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER               KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP        KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES        KVM_ARCH_REQ(24)

#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
                          | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
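
/*
 * Illustrative sketch, not part of the original header: the reserved-bit
 * masks above are the complement of the architecturally defined bits, so a
 * guest-requested control-register value can be vetted with a single AND.
 * (The real kvm_set_cr4() additionally restricts bits based on guest CPUID;
 * this helper and its name are hypothetical.)
 */
static inline bool kvm_example_cr4_has_reserved_bits(unsigned long cr4)
{
        /* non-zero means the guest tried to set a reserved CR4 bit */
        return (cr4 & CR4_RESERVED_BITS) != 0;
}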

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
enum {
        PT_PAGE_TABLE_LEVEL   = 1,
        PT_DIRECTORY_LEVEL    = 2,
        PT_PDPE_LEVEL         = 3,
        /* set max level to the biggest one */
        PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
};
#define KVM_NR_PAGE_SIZES       (PT_MAX_HUGEPAGE_LEVEL - \
                                 PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)       (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
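
/*
 * Illustrative sketch, not part of the original header: how the hugepage
 * helpers above compose.  With x86's PAGE_SHIFT of 12, PT_DIRECTORY_LEVEL
 * yields 2MiB pages (KVM_HPAGE_SHIFT == 21) and PT_PDPE_LEVEL 1GiB pages
 * (KVM_HPAGE_SHIFT == 30); gfn_to_index() then gives the slot into a
 * memslot's per-level metadata array.  This helper's name is hypothetical.
 */
static inline gfn_t kvm_example_lpage_index(gfn_t gfn, gfn_t slot_base_gfn)
{
        /* consecutive 2MiB-page indexes are 512 base gfns apart */
        return gfn_to_index(gfn, slot_base_gfn, PT_DIRECTORY_LEVEL);
}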

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
        VCPU_REGS_RAX = __VCPU_REGS_RAX,
        VCPU_REGS_RCX = __VCPU_REGS_RCX,
        VCPU_REGS_RDX = __VCPU_REGS_RDX,
        VCPU_REGS_RBX = __VCPU_REGS_RBX,
        VCPU_REGS_RSP = __VCPU_REGS_RSP,
        VCPU_REGS_RBP = __VCPU_REGS_RBP,
        VCPU_REGS_RSI = __VCPU_REGS_RSI,
        VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8  = __VCPU_REGS_R8,
        VCPU_REGS_R9  = __VCPU_REGS_R9,
        VCPU_REGS_R10 = __VCPU_REGS_R10,
        VCPU_REGS_R11 = __VCPU_REGS_R11,
        VCPU_REGS_R12 = __VCPU_REGS_R12,
        VCPU_REGS_R13 = __VCPU_REGS_R13,
        VCPU_REGS_R14 = __VCPU_REGS_R14,
        VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS
};

enum kvm_reg_ex {
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
        VCPU_EXREG_CR3,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
};

enum {
        VCPU_SREG_ES,
        VCPU_SREG_CS,
        VCPU_SREG_SS,
        VCPU_SREG_DS,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS  4

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_BT          (1 << 15)
#define DR6_RTM         (1 << 16)
#define DR6_FIXED_1     0xfffe0ff0
#define DR6_INIT        0xffff0ff0
#define DR6_VOLATILE    0x0001e00f

#define DR7_BP_EN_MASK  0x000000ff
#define DR7_GE          (1 << 9)
#define DR7_GD          (1 << 13)
#define DR7_FIXED_1     0x00000400
#define DR7_VOLATILE    0xffff2bff
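
/*
 * Illustrative sketch, not part of the original header, mirroring how KVM
 * combines the DR6 masks above when the guest writes DR6: only the
 * architecturally volatile bits are taken from the guest value and the
 * fixed-to-1 bits are forced back on.  The helper name is hypothetical.
 */
static inline unsigned long kvm_example_sanitize_dr6(unsigned long val)
{
        return (val & DR6_VOLATILE) | DR6_FIXED_1;
}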

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |        \
                                 PFERR_WRITE_MASK |             \
                                 PFERR_PRESENT_MASK)
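
/*
 * Illustrative sketch, not part of the original header:
 * PFERR_NESTED_GUEST_PAGE above is a composite signature, so detecting a
 * fault taken while walking the guest's own nested page tables is a
 * mask-and-compare against the full combination.  The helper name is
 * hypothetical.
 */
static inline bool kvm_example_is_nested_guest_page_fault(u64 error_code)
{
        return (error_code & PFERR_NESTED_GUEST_PAGE) ==
                PFERR_NESTED_GUEST_PAGE;
}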

/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting
 * with the SVE bit in EPT PTEs.
 */
#define SPTE_SPECIAL_MASK (1ULL << 62)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC    0
/*
 * The following bit is set with PV-EOI and unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with the PV-EOI flag in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING 1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};
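
/*
 * Illustrative sketch, not part of the original header, of the cache's
 * consume side: because the cache is topped up before mmu_lock is taken,
 * popping an object on the fault path cannot fail.  The real helpers live
 * in mmu.c; this helper's name is hypothetical.
 */
static inline void *kvm_example_mmu_memory_cache_alloc(
                struct kvm_mmu_memory_cache *mc)
{
        /* returns NULL only if the refill was skipped or exhausted */
        return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}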

/*
 * The shadow pages used as guest page tables by the soft MMU are tracked
 * via kvm_memory_slot.arch.gfn_track, which is 16 bits wide, so the role
 * bits used by an indirect shadow page cannot exceed 15 bits.
 *
 * Currently we use 14 bits: @level, @gpte_is_8_bytes, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
        u32 word;
        struct {
                unsigned level:4;
                unsigned gpte_is_8_bytes:1;
                unsigned quadrant:2;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
                unsigned nxe:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
                unsigned smap_andnot_wp:1;
                unsigned ad_disabled:1;
                unsigned guest_mode:1;
                unsigned :6;

                /*
                 * This is left at the top of the word so that
                 * kvm_memslots_for_spte_role can extract it with a
                 * simple shift.  While there is room, give it a whole
                 * byte so it is also faster to load it from memory.
                 */
                unsigned smm:8;
        };
};
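
/*
 * Illustrative sketch, not part of the original header: because every role
 * bit lives in one u32, shadow-page lookup can compare roles with a single
 * word compare, and (per the comment above) the memslot set falls out of
 * the top byte alone.  The helper name is hypothetical.
 */
static inline bool kvm_example_page_role_equal(union kvm_mmu_page_role a,
                                               union kvm_mmu_page_role b)
{
        return a.word == b.word;
}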

/*
 * This structure complements kvm_mmu_page_role by caching everything needed
 * for MMU re-configuration.  If nothing in either structure changed, MMU
 * re-configuration can be skipped.  The @valid bit is set on first usage so
 * that an all-zero structure is not treated as valid data.
 */
union kvm_mmu_extended_role {
        u32 word;
        struct {
                unsigned int valid:1;
                unsigned int execonly:1;
                unsigned int cr0_pg:1;
                unsigned int cr4_pae:1;
                unsigned int cr4_pse:1;
                unsigned int cr4_pke:1;
                unsigned int cr4_smap:1;
                unsigned int cr4_smep:1;
                unsigned int cr4_la57:1;
                unsigned int maxphyaddr:6;
        };
};

union kvm_mmu_role {
        u64 as_u64;
        struct {
                union kvm_mmu_page_role base;
                union kvm_mmu_extended_role ext;
        };
};
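
/*
 * Illustrative sketch, not part of the original header: the point of
 * packing the base and extended roles into one u64 is that "has anything
 * relevant changed?" becomes a single compare; if nothing changed, MMU
 * re-configuration can be skipped entirely.  The helper name is
 * hypothetical.
 */
static inline bool kvm_example_mmu_role_changed(union kvm_mmu_role prev,
                                                union kvm_mmu_role next)
{
        return prev.as_u64 != next.as_u64;
}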

struct kvm_rmap_head {
        unsigned long val;
};

struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;
        bool unsync;
        bool mmio_cached;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        union kvm_mmu_page_role role;
        gfn_t gfn;

        u64 *spt;
        /* hold the gfn of each spte inside spt */
        gfn_t *gfns;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
        unsigned long mmu_valid_gen;
        DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
        /*
         * Used out of the mmu-lock to avoid reading spte values while an
         * update is in progress; see the comments in __get_spte_lockless().
         */
        int clear_spte_count;
#endif

        /* Number of writes since the last time traversal visited this page.  */
        atomic_t write_flooding_count;
};

struct kvm_pio_request {
        unsigned long linear_rip;
        unsigned long count;
        int in;
        int port;
        int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
        u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
        u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
        gpa_t cr3;
        hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
        ((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                            struct x86_exception *exception);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
        gpa_t root_cr3;
        union kvm_mmu_role mmu_role;
        u8 root_level;
        u8 shadow_root_level;
        u8 ept_ad;
        bool direct_map;
        struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

        /*
         * Bitmap; bit set = permission fault
         * Byte index: page fault error code [4:1]
         * Bit index: pte permissions in ACC_* format
         */
        u8 permissions[16];

        /*
         * The pkru_mask indicates if protection key checks are needed.  It
         * consists of 16 domains indexed by page fault error code bits [4:1],
         * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
         * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
         */
        u32 pkru_mask;

        u64 *pae_root;
        u64 *lm_root;

        /*
         * Check for zero bits in shadow page table entries; these bits
         * include not only hardware-reserved bits but also bits that the
         * spte never uses.
         */
        struct rsvd_bits_validate shadow_zero_check;

        struct rsvd_bits_validate guest_rsvd_check;

        /* Can have large pages at levels 2..last_nonleaf_level-1. */
        u8 last_nonleaf_level;

        bool nx;

        u64 pdptrs[4]; /* pae */
};
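
/*
 * Illustrative sketch, not part of the original header, of how the
 * permissions bitmap above is meant to be consulted (simplified: the real
 * permission_fault() in mmu.h also folds SMAP state into the index and
 * applies pkru_mask).  The byte index is PFEC bits [4:1]; the bit index is
 * the pte's permissions in ACC_* format.  The helper name is hypothetical.
 */
static inline bool kvm_example_permission_fault(struct kvm_mmu *mmu,
                                                unsigned int pfec,
                                                unsigned int pte_access)
{
        return (mmu->permissions[(pfec >> 1) & 15] >> pte_access) & 1;
}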

struct kvm_tlb_range {
        u64 start_gfn;
        u64 pages;
};

enum pmc_type {
        KVM_PMC_GP = 0,
        KVM_PMC_FIXED,
};

struct kvm_pmc {
        enum pmc_type type;
        u8 idx;
        u64 counter;
        u64 eventsel;
        struct perf_event *perf_event;
        struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
        unsigned nr_arch_gp_counters;
        unsigned nr_arch_fixed_counters;
        unsigned available_event_types;
        u64 fixed_ctr_ctrl;
        u64 global_ctrl;
        u64 global_status;
        u64 global_ovf_ctrl;
        u64 counter_bitmask[2];
        u64 global_ctrl_mask;
        u64 global_ovf_ctrl_mask;
        u64 reserved_bits;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
        KVM_DEBUGREG_BP_ENABLED = 1,
        KVM_DEBUGREG_WONT_EXIT = 2,
        KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
        u64 base;
        u64 mask;
        struct list_head node;
};

struct kvm_mtrr {
        struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
        mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
        u64 deftype;

        struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
        struct hrtimer timer;
        int index;
        union hv_stimer_config config;
        u64 count;
        u64 exp_time;
        struct hv_message msg;
        bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
        u64 version;
        u64 control;
        u64 msg_page;
        u64 evt_page;
        atomic64_t sint[HV_SYNIC_SINT_COUNT];
        atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
        DECLARE_BITMAP(auto_eoi_bitmap, 256);
        DECLARE_BITMAP(vec_bitmap, 256);
        bool active;
        bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
        u32 vp_index;
        u64 hv_vapic;
        s64 runtime_offset;
        struct kvm_vcpu_hv_synic synic;
        struct kvm_hyperv_exit exit;
        struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
        DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        cpumask_t tlb_flush;
};

struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
         */
        unsigned long regs[NR_VCPU_REGS];
        u32 regs_avail;
        u32 regs_dirty;

        unsigned long cr0;
        unsigned long cr0_guest_owned_bits;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
        u32 pkru;
        u32 hflags;
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
        bool apicv_active;
        bool load_eoi_exitmap_pending;
        DECLARE_BITMAP(ioapic_handled_vectors, 256);
        unsigned long apic_attention;
        int32_t apic_arb_prio;
        int mp_state;
        u64 ia32_misc_enable_msr;
        u64 smbase;
        u64 smi_count;
        bool tpr_access_reporting;
        u64 ia32_xss;
        u64 microcode_version;
        u64 arch_capabilities;

        /*
         * Paging state of the vcpu
         *
         * If the vcpu runs in guest mode with two-level paging, this still
         * saves the paging mode of the L1 guest.  This context is always
         * used to handle faults.
         */
        struct kvm_mmu *mmu;

        /* Non-nested MMU for L1 */
        struct kvm_mmu root_mmu;

        /* L1 MMU when running nested */
        struct kvm_mmu guest_mmu;

        /*
         * Paging state of an L2 guest (used for nested npt)
         *
         * This context saves all the information needed to walk the page
         * tables of an L2 guest.  It is only initialized for page-table
         * walking, not for faulting, since we never handle L2 page faults
         * on the host.
         */
        struct kvm_mmu nested_mmu;

        /*
         * Pointer to the mmu context currently used for
         * gva_to_gpa translations.
         */
        struct kvm_mmu *walk_mmu;

        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        /*
         * QEMU userspace and the guest each have their own FPU state.
         * In vcpu_run, we switch between the user and guest FPU contexts.
         * While running a VCPU, the VCPU thread will have the guest FPU
         * context.
         *
         * Note that while the PKRU state lives inside the fpu registers,
         * it is switched out separately at VMENTER and VMEXIT time. The
         * "guest_fpu" state here contains the guest FPU context, with the
         * host PKRU bits.
         */
        struct fpu *user_fpu;
        struct fpu *guest_fpu;

        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;

        struct kvm_pio_request pio;
        void *pio_data;

        u8 event_exit_inst_len;

        struct kvm_queued_exception {
                bool pending;
                bool injected;
                bool has_error_code;
                u8 nr;
                u32 error_code;
                unsigned long payload;
                bool has_payload;
                u8 nested_apf;
        } exception;

        struct kvm_queued_interrupt {
                bool injected;
                bool soft;
                u8 nr;
        } interrupt;

        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

        int maxphyaddr;

        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;
        bool emulate_regs_need_sync_to_vcpu;
        bool emulate_regs_need_sync_from_vcpu;
        int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
        struct gfn_to_hva_cache pv_time;
        bool pv_time_enabled;
        /* set guest stopped flag in pvclock flags field */
        bool pvclock_set_guest_stopped_request;

        struct {
                u64 msr_val;
                u64 last_steal;
                struct gfn_to_hva_cache stime;
                struct kvm_steal_time steal;
        } st;

        u64 tsc_offset;
        u64 last_guest_tsc;
        u64 last_host_tsc;
        u64 tsc_offset_adjustment;
        u64 this_tsc_nsec;
        u64 this_tsc_write;
        u64 this_tsc_generation;
        bool tsc_catchup;
        bool tsc_always_catchup;
        s8 virtual_tsc_shift;
        u32 virtual_tsc_mult;
        u32 virtual_tsc_khz;
        s64 ia32_tsc_adjust_msr;
        u64 msr_ia32_power_ctl;
        u64 tsc_scaling_ratio;

        atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
        unsigned nmi_pending; /* NMI queued after currently running handler */
        bool nmi_injected;    /* Trying to inject an NMI this entry */
        bool smi_pending;     /* SMI queued after currently running handler */

        struct kvm_mtrr mtrr_state;
        u64 pat;

        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];
        unsigned long guest_debug_dr7;
        u64 msr_platform_info;
        u64 msr_misc_features_enables;

        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
        u64 mcg_ext_ctl;
        u64 *mce_banks;

        /* Cache MMIO info */
        u64 mmio_gva;
        unsigned access;
        gfn_t mmio_gfn;
        u64 mmio_gen;

        struct kvm_pmu pmu;

        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;

        struct kvm_vcpu_hv hyperv;

        cpumask_var_t wbinvd_dirty_mask;

        unsigned long last_retry_eip;
        unsigned long last_retry_addr;

        struct {
                bool halted;
                gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
                struct gfn_to_hva_cache data;
                u64 msr_val;
                u32 id;
                bool send_user_only;
                u32 host_apf_reason;
                unsigned long nested_apf_token;
                bool delivery_as_pf_vmexit;
        } apf;

        /* OSVW MSRs (AMD only) */
        struct {
                u64 length;
                u64 status;
        } osvw;

        struct {
                u64 msr_val;
                struct gfn_to_hva_cache data;
        } pv_eoi;

        u64 msr_kvm_poll_control;

        /*
         * Indicates whether the access faulted on its own page table in
         * the guest; set while fixing a page fault and used to detect
         * unhandleable instructions.
         */
        bool write_fault_to_shadow_pgtable;

        /* set at EPT violation at this point */
        unsigned long exit_qualification;

        /* pv related host specific info */
        struct {
                bool pv_unhalted;
        } pv;

        int pending_ioapic_eoi;
        int pending_external_vector;

        /* GPA available */
        bool gpa_available;
        gpa_t gpa_val;

        /* true if the vCPU was preempted while in kernel mode (CPL = 0) */
        bool preempted_in_kernel;

        /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
        bool l1tf_flush_l1d;

        /* AMD MSRC001_0015 Hardware Configuration */
        u64 msr_hwcr;
};

struct kvm_lpage_info {
        int disallow_lpage;
};

struct kvm_arch_memory_slot {
        struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * As the mode we use the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two,
 * which makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
        struct rcu_head rcu;
        u8 mode;
        u32 max_apic_id;
        union {
                struct kvm_lapic *xapic_flat_map[8];
                struct kvm_lapic *xapic_cluster_map[16][4];
        };
        struct kvm_lapic *phys_map[];
};
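
/*
 * Illustrative sketch, not part of the original header: each mode constant
 * above is a distinct power of two, so a map that has seen APICs configured
 * for more than one mode ends up with multiple bits set in @mode and cannot
 * be used for fast delivery.  The helper name is hypothetical.
 */
static inline bool kvm_example_apic_map_is_mixed(struct kvm_apic_map *map)
{
        /* power-of-two test: more than one bit set means mixed modes */
        return map->mode & (map->mode - 1);
}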

/* Hyper-V emulation context */
struct kvm_hv {
        struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;

        /* Hyper-V based guest crash (NT kernel bugcheck) parameters */
        u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
        u64 hv_crash_ctl;

        HV_REFERENCE_TSC_PAGE tsc_ref;

        struct idr conn_to_evt;

        u64 hv_reenlightenment_control;
        u64 hv_tsc_emulation_control;
        u64 hv_tsc_emulation_status;

        /* How many vCPUs have VP index != vCPU index */
        atomic_t num_mismatched_vp_indexes;
};

enum kvm_irqchip_mode {
        KVM_IRQCHIP_NONE,
        KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
        KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_arch {
        unsigned long n_used_mmu_pages;
        unsigned long n_requested_mmu_pages;
        unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        unsigned long mmu_valid_gen;
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct list_head active_mmu_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
        atomic_t assigned_device_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;

        bool apic_access_page_done;

        gpa_t wall_clock;

        bool mwait_in_guest;
        bool hlt_in_guest;
        bool pause_in_guest;
        bool cstate_in_guest;

        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
        raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_write;
        u32 last_tsc_khz;
        u64 cur_tsc_nsec;
        u64 cur_tsc_write;
        u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;

        spinlock_t pvclock_gtod_sync_lock;
        bool use_master_clock;
        u64 master_kernel_ns;
        u64 master_cycle_now;
        struct delayed_work kvmclock_update_work;
        struct delayed_work kvmclock_sync_work;

        struct kvm_xen_hvm_config xen_hvm_config;

        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;

        struct kvm_hv hyperv;

#ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
#endif

        bool backwards_tsc_observed;
        bool boot_vcpu_runs_old_kvmclock;
        u32 bsp_vcpu_id;

        u64 disabled_quirks;

        enum kvm_irqchip_mode irqchip_mode;
        u8 nr_reserved_ioapic_pins;

        bool disabled_lapic_found;

        bool x2apic_format;
        bool x2apic_broadcast_quirk_disabled;

        bool guest_can_read_msr_platform_info;
        bool exception_payload_enabled;

        struct kvm_pmu_event_filter *pmu_event_filter;
};

struct kvm_vm_stat {
        ulong mmu_shadow_zapped;
        ulong mmu_pte_write;
        ulong mmu_pte_updated;
        ulong mmu_pde_zapped;
        ulong mmu_flooded;
        ulong mmu_recycled;
        ulong mmu_cache_miss;
        ulong mmu_unsync;
        ulong remote_tlb_flush;
        ulong lpages;
        ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
        u64 pf_fixed;
        u64 pf_guest;
        u64 tlb_flush;
        u64 invlpg;

        u64 exits;
        u64 io_exits;
        u64 mmio_exits;
        u64 signal_exits;
        u64 irq_window_exits;
        u64 nmi_window_exits;
        u64 l1d_flush;
        u64 halt_exits;
        u64 halt_successful_poll;
        u64 halt_attempted_poll;
        u64 halt_poll_invalid;
        u64 halt_wakeup;
        u64 request_irq_exits;
        u64 irq_exits;
        u64 host_state_reload;
        u64 fpu_reload;
        u64 insn_emulation;
        u64 insn_emulation_fail;
        u64 hypercalls;
        u64 irq_injections;
        u64 nmi_injections;
        u64 req_event;
};

struct x86_instruction_info;

struct msr_data {
        bool host_initiated;
        u32 index;
        u64 data;
};

struct kvm_lapic_irq {
        u32 vector;
        u16 delivery_mode;
        u16 dest_mode;
        bool level;
        u16 trig_mode;
        u32 shorthand;
        u32 dest_id;
        bool msi_redir_hint;
};

struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        int (*hardware_enable)(void);
        void (*hardware_disable)(void);
        int (*check_processor_compatibility)(void);/* __init */
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
        bool (*has_emulated_msr)(int index);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);

        struct kvm *(*vm_alloc)(void);
        void (*vm_free)(struct kvm *);
        int (*vm_init)(struct kvm *kvm);
        void (*vm_destroy)(struct kvm *kvm);

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*decache_cr3)(struct kvm_vcpu *vcpu);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        u64 (*get_dr6)(struct kvm_vcpu *vcpu);
        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
        int  (*tlb_remote_flush)(struct kvm *kvm);
        int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
                        struct kvm_tlb_range *range);

        /*
         * Flush any TLB entries associated with the given GVA.
         * Does not need to flush GPA->HPA mappings.
         * Can potentially get non-canonical addresses through INVLPGs, which
         * the implementation may choose to ignore if appropriate.
         */
        void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu);
        void (*cancel_injection)(struct kvm_vcpu *vcpu);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        bool (*get_enable_apicv)(struct kvm_vcpu *vcpu);
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
        bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
        void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
        int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
        int (*get_tdp_level)(struct kvm_vcpu *vcpu);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        bool (*invpcid_supported)(void);

        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

        void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

        bool (*has_wbinvd_exit)(void);

        u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
        /* Returns actual tsc_offset set in active VMCS */
        u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

        int (*check_intercept)(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage);
        void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
        bool (*mpx_supported)(void);
        bool (*xsaves_supported)(void);
        bool (*umip_emulated)(void);
        bool (*pt_supported)(void);

        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
        void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

        void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);

        /*
         * Arch-specific dirty logging hooks. These hooks are only supposed
         * to be valid if the specific arch has a hardware-accelerated dirty
         * logging mechanism. Currently only for PML on VMX.
         *
         *  - slot_enable_log_dirty:
         *      called when enabling log dirty mode for the slot.
         *  - slot_disable_log_dirty:
         *      called when disabling log dirty mode for the slot.
         *      also called when slot is created with log dirty disabled.
         *  - flush_log_dirty:
         *      called before reporting dirty_bitmap to userspace.
         *  - enable_log_dirty_pt_masked:
         *      called when reenabling log dirty for the GFNs in the mask after
         *      corresponding bits are cleared in slot->dirty_bitmap.
         */
        void (*slot_enable_log_dirty)(struct kvm *kvm,
                                      struct kvm_memory_slot *slot);
        void (*slot_disable_log_dirty)(struct kvm *kvm,
                                       struct kvm_memory_slot *slot);
        void (*flush_log_dirty)(struct kvm *kvm);
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
        int (*write_log_dirty)(struct kvm_vcpu *vcpu);

        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;

        /*
         * Architecture-specific hooks for vCPU blocking due to the
         * HLT instruction.
         * Return values for .pre_block():
         *    - 0 means continue to block the vCPU.
         *    - 1 means the vCPU cannot be blocked because some event
         *      happened during this period, e.g. the 'ON' bit in the
         *      posted-interrupt descriptor was set.
         */
        int (*pre_block)(struct kvm_vcpu *vcpu);
        void (*post_block)(struct kvm_vcpu *vcpu);

        void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
        void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
        bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

        int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
                            bool *expired);
        void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

        void (*setup_mce)(struct kvm_vcpu *vcpu);

        int (*get_nested_state)(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                unsigned user_data_size);
        int (*set_nested_state)(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state);
        void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

        int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
        int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
        int (*enable_smi_window)(struct kvm_vcpu *vcpu);

        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
        int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
        int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);

        int (*get_msr_feature)(struct kvm_msr_entry *entry);

        int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
                                   uint16_t *vmcs_version);
        uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);

        bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
        u32 token;
        gfn_t gfn;
        unsigned long cr3;
        bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kvm_x86_ops->vm_alloc();
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        return kvm_x86_ops->vm_free(kvm);
}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
        if (kvm_x86_ops->tlb_remote_flush &&
            !kvm_x86_ops->tlb_remote_flush(kvm))
                return 0;
        else
                return -ENOTSUPP;
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
                u64 acc_track_mask, u64 me_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   struct kvm_memory_slot *slot,
                                   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                          const void *val, int bytes);

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8   kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64  kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64  kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
        EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE          (1 << 0)
#define EMULTYPE_TRAP_UD            (1 << 1)
#define EMULTYPE_SKIP               (1 << 2)
#define EMULTYPE_ALLOW_RETRY        (1 << 3)
#define EMULTYPE_NO_UD_ON_FAIL      (1 << 4)
#define EMULTYPE_VMWARE             (1 << 5)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
                                        void *insn, int insn_len);
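
/*
 * Illustrative sketch, not part of the original header: the EMULTYPE flags
 * above combine with OR.  The flag combination below is hypothetical, purely
 * to show the calling convention of kvm_emulate_instruction(); real callers
 * choose flags to match their exit path.  The helper name is hypothetical.
 */
static inline int kvm_example_emulate_on_fault(struct kvm_vcpu *vcpu)
{
        return kvm_emulate_instruction(vcpu,
                        EMULTYPE_ALLOW_RETRY | EMULTYPE_NO_UD_ON_FAIL);
}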

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
                    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
                                       int irq_source_id, int level)
{
        /* Logical OR for level trig interrupt */
        if (level)
                __set_bit(irq_source_id, irq_state);
        else
                __clear_bit(irq_source_id, irq_state);

        return !!(*irq_state);
}

#define KVM_MMU_ROOT_CURRENT            BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)        BIT(1+i)
#define KVM_MMU_ROOTS_ALL               (~0UL)
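
/*
 * Illustrative sketch, not part of the original header: the root masks
 * above feed the @roots_to_free argument of kvm_mmu_free_roots() (declared
 * below), e.g. dropping the active root plus the most recently cached
 * previous root.  The helper name is hypothetical.
 */
static inline ulong kvm_example_roots_to_free(void)
{
        return KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0);
}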

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                        ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                           struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
                              struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
                       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                                  struct x86_exception *exception)
{
        return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE                                                  \
        (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
        TASK_SWITCH_CALL = 0,
        TASK_SWITCH_IRET = 1,
        TASK_SWITCH_JMP = 2,
        TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK             (1 << 0)
#define HF_HIF_MASK             (1 << 1)
#define HF_VINTR_MASK           (1 << 2)
#define HF_NMI_MASK             (1 << 3)
#define HF_IRET_MASK            (1 << 4)
#define HF_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK             (1 << 6)
#define HF_SMM_INSIDE_NMI_MASK  (1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

asmlinkage void __noreturn kvm_spurious_fault(void);

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Usually after catching the fault we just panic; during reboot
 * instead the instruction is ignored.
 */
#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)              \
        "666: \n\t"                                                     \
        insn "\n\t"                                                     \
        "jmp    668f \n\t"                                              \
        "667: \n\t"                                                     \
        "call   kvm_spurious_fault \n\t"                                \
        "668: \n\t"                                                     \
        ".pushsection .fixup, \"ax\" \n\t"                              \
        "700: \n\t"                                                     \
        cleanup_insn "\n\t"                                             \
        "cmpb   $0, kvm_rebooting\n\t"                                  \
        "je     667b \n\t"                                              \
        "jmp    668b \n\t"                                              \
        ".popsection \n\t"                                              \
        _ASM_EXTABLE(666b, 700b)

#define __kvm_handle_fault_on_reboot(insn)              \
        ____kvm_handle_fault_on_reboot(insn, "")

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
                    unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
                             struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                     struct kvm_lapic_irq *irq);

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->vcpu_blocking)
                kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->vcpu_unblocking)
                kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
        return default_cpu_present_to_apicid(mps_cpu);
#else
        WARN_ON_ONCE(1);
        return BAD_APICID;
#endif
}

#define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val

#define GET_SMSTATE(type, buf, offset)          \
        (*(type *)((buf) + (offset) - 0x7e00))
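
/*
 * Illustrative sketch, not part of the original header: @buf points at a
 * copy of the SMRAM state-save area, offsets are given in their
 * architectural 0x7exx/0x7fxx form, and the macros above rebase them by
 * 0x7e00.  The offset and helper name below are purely hypothetical.
 */
static inline u64 kvm_example_smstate_roundtrip(char *buf)
{
        put_smstate(u64, buf, 0x7f00, 0x1234ULL);       /* hypothetical slot */
        return GET_SMSTATE(u64, buf, 0x7f00);           /* reads back 0x1234 */
}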

#endif /* _ASM_X86_KVM_HOST_H */