linux/arch/x86/kvm/svm/svm.h
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS  20
#define MSRPM_OFFSETS   16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;

/*
 * Clean bits in the VMCB.  If this enum is modified,
 * VMCB_ALL_CLEAN_MASK may also need to be updated.
 */
enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
        VMCB_ASID,       /* ASID */
        VMCB_INTR,       /* int_ctl, int_vector */
        VMCB_NPT,        /* npt_en, nCR3, gPAT */
        VMCB_CR,         /* CR0, CR3, CR4, EFER */
        VMCB_DR,         /* DR6, DR7 */
        VMCB_DT,         /* GDT, IDT */
        VMCB_SEG,        /* CS, DS, SS, ES, CPL */
        VMCB_CR2,        /* CR2 only */
        VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
                          * AVIC PHYSICAL_TABLE pointer,
                          * AVIC LOGICAL_TABLE pointer
                          */
        VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (                                   \
        (1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |       \
        (1U << VMCB_ASID) | (1U << VMCB_INTR) |                 \
        (1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |  \
        (1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \
        (1U << VMCB_LBR) | (1U << VMCB_AVIC) |                  \
        (1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK  ((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
        bool active;            /* SEV enabled guest */
        bool es_active;         /* SEV-ES enabled guest */
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
        unsigned long pages_locked; /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner; /* Owner of copied encryption context */
        unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
        struct misc_cg *misc_cg; /* For misc cgroup accounting */
        atomic_t migration_in_progress;
};

struct kvm_svm {
        struct kvm kvm;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
        struct vmcb *ptr;
        unsigned long pa;
        int cpu;
        uint64_t asid_generation;
};

struct vmcb_save_area_cached {
        u64 efer;
        u64 cr4;
        u64 cr3;
        u64 cr0;
        u64 dr7;
        u64 dr6;
};

struct vmcb_ctrl_area_cached {
        u32 intercepts[MAX_INTERCEPT];
        u16 pause_filter_thresh;
        u16 pause_filter_count;
        u64 iopm_base_pa;
        u64 msrpm_base_pa;
        u64 tsc_offset;
        u32 asid;
        u8 tlb_ctl;
        u32 int_ctl;
        u32 int_vector;
        u32 int_state;
        u32 exit_code;
        u32 exit_code_hi;
        u64 exit_info_1;
        u64 exit_info_2;
        u32 exit_int_info;
        u32 exit_int_info_err;
        u64 nested_ctl;
        u32 event_inj;
        u32 event_inj_err;
        u64 nested_cr3;
        u64 virt_ext;
};

struct svm_nested_state {
        struct kvm_vmcb_info vmcb02;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb12_gpa;
        u64 last_vmcb12_gpa;

        /* The MSR permission bitmap, merged from L1's and KVM's bitmaps */
        u32 *msrpm;

        /*
         * A VMRUN has started but has not yet been performed, so
         * we cannot inject a nested vmexit yet.
         */
        bool nested_run_pending;

        /* cache for control fields of the guest */
        struct vmcb_ctrl_area_cached ctl;

        /*
         * Note: this struct is not kept up-to-date while L2 runs; it is only
         * valid within nested_svm_vmrun.
         */
        struct vmcb_save_area_cached save;

        bool initialized;
};

struct vcpu_sev_es_state {
        /* SEV-ES support */
        struct vmcb_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;

        /* SEV-ES scratch area support */
        void *ghcb_sa;
        u32 ghcb_sa_len;
        bool ghcb_sa_sync;
        bool ghcb_sa_free;
};

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        /* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
        struct vmcb *vmcb;
        struct kvm_vmcb_info vmcb01;
        struct kvm_vmcb_info *current_vmcb;
        struct svm_cpu_data *svm_data;
        u32 asid;
        u32 sysenter_esp_hi;
        u32 sysenter_eip_hi;
        uint64_t tsc_aux;

        u64 msr_decfg;

        u64 next_rip;

        u64 spec_ctrl;

        u64 tsc_ratio_msr;
        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
         * translated into the appropriate LS_CFG bits on the host to
         * perform speculative control.
         */
        u64 virt_spec_ctrl;

        u32 *msrpm;

        ulong nmi_iret_rip;

        struct svm_nested_state nested;

        bool nmi_singlestep;
        u64 nmi_singlestep_guest_rflags;

        unsigned int3_injected;
        unsigned long int3_rip;

        /* cached guest cpuid flags for faster access */
        bool nrips_enabled                : 1;
        bool tsc_scaling_enabled          : 1;

        u32 ldr_reg;
        u32 dfr_reg;
        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;

        /*
         * Per-vcpu list of struct amd_svm_iommu_ir:
         * This is used mainly to store interrupt remapping information used
         * when updating the vcpu affinity.  This avoids the need to scan for
         * the IRTE and match ga_tag in the IOMMU driver.
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;

        /* Save desired MSR intercept (read: pass-through) state */
        struct {
                DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
                DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
        } shadow_msr_intercept;

        struct vcpu_sev_es_state sev_es;

        bool guest_state_loaded;
};

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
        return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
        vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
        vmcb->control.clean = VMCB_ALL_CLEAN_MASK
                               & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
        vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
        return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
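
/*
 * Illustrative usage sketch (not part of the upstream header): after KVM
 * writes a VMCB field covered by one of the clean bits above, it clears the
 * matching bit so that hardware reloads that state on the next VMRUN, e.g.:
 *
 *        svm->vmcb->control.int_ctl |= V_IRQ_MASK;
 *        vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
 */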

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET  (1 << VCPU_EXREG_PDPTR)
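
/*
 * Illustrative sketch (assumed caller, not part of this header): code that
 * needs the guest PDPTRs must make sure they have been pulled out of the
 * VMCB first, roughly:
 *
 *        if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
 *                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);
 *
 * which is approximately what kvm_pdptr_read() in kvm_cache_regs.h does.
 */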

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        if (!sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
        }

        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

        recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb->control.intercepts[INTERCEPT_DR] = 0;

        /* DR7 access must remain intercepted for an SEV-ES guest */
        if (sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
        }

        recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_set_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_clr_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
        return vmcb_is_intercept(&svm->vmcb->control, bit);
}
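
/*
 * Illustrative usage sketch (not part of the upstream header): callers pass an
 * INTERCEPT_* constant from asm/svm.h.  For example, NMI injection temporarily
 * intercepts IRET to detect when the guest leaves its NMI handler:
 *
 *        svm_set_intercept(svm, INTERCEPT_IRET);
 *        ...
 *        svm_clr_intercept(svm, INTERCEPT_IRET);
 */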

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
        return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl |= V_GIF_MASK;
        else
                svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
        else
                return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
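
/*
 * Illustrative only: when vGIF is unavailable the GIF is tracked in
 * vcpu->arch.hflags, so emulating STGI/CLGI boils down to enable_gif() and
 * disable_gif().  The real handlers go through svm_set_gif() so that pending
 * events are re-evaluated, roughly:
 *
 *        svm_set_gif(svm, true);         (STGI)
 *        svm_set_gif(svm, false);        (CLGI)
 */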

/* svm.c */
#define MSR_INVALID                             0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write);
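
/*
 * Illustrative note (not part of the upstream header): for
 * set_msr_interception(), read/write of 1 disables the intercept so the MSR
 * access is passed through to the guest, and 0 re-enables it, e.g.:
 *
 *        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 */
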
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
                                     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST        0       /* Exit handled on host level */
#define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */
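
/*
 * Illustrative dispatch sketch (assumed caller, not part of this header):
 * while L2 is running, the top-level exit handler first asks the nested code
 * whether the exit belongs to L1, roughly:
 *
 *        if (is_guest_mode(vcpu) &&
 *            nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
 *                return 1;        (exit was reflected to the L1 hypervisor)
 */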

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
                         u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
                          struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
        svm->vmcb->control.exit_code   = exit_code;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;
        return nested_svm_vmexit(svm);
}
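
/*
 * Illustrative example (not part of the upstream header): this helper is meant
 * for synthetic exits that carry no extra exit information, e.g. reflecting an
 * SMI to L1 when L1 intercepts it:
 *
 *        if (nested_exit_on_smi(svm))
 *                nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
 */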

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
                                       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
                                    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                       uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX        1ULL
#define GHCB_VERSION_MIN        1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
                            struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
                              struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
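
/*
 * Illustrative note (not from the upstream sources): svm_vcpu_run() hands the
 * assembly entry point the physical address of the current VMCB together with
 * the guest GPR array, roughly:
 *
 *        __svm_vcpu_run(svm->current_vmcb->pa,
 *                       (unsigned long *)&vcpu->arch.regs);
 */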

#endif