/* linux/arch/x86/include/asm/svm.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SVM_H
#define __SVM_H

#include <uapi/asm/svm.h>
#include <uapi/asm/kvm.h>
   8/*
   9 * 32-bit intercept words in the VMCB Control Area, starting
  10 * at Byte offset 000h.
  11 */
  12
  13enum intercept_words {
  14        INTERCEPT_CR = 0,
  15        INTERCEPT_DR,
  16        INTERCEPT_EXCEPTION,
  17        INTERCEPT_WORD3,
  18        INTERCEPT_WORD4,
  19        INTERCEPT_WORD5,
  20        MAX_INTERCEPT,
  21};
  22
  23enum {
  24        /* Byte offset 000h (word 0) */
  25        INTERCEPT_CR0_READ = 0,
  26        INTERCEPT_CR3_READ = 3,
  27        INTERCEPT_CR4_READ = 4,
  28        INTERCEPT_CR8_READ = 8,
  29        INTERCEPT_CR0_WRITE = 16,
  30        INTERCEPT_CR3_WRITE = 16 + 3,
  31        INTERCEPT_CR4_WRITE = 16 + 4,
  32        INTERCEPT_CR8_WRITE = 16 + 8,
  33        /* Byte offset 004h (word 1) */
  34        INTERCEPT_DR0_READ = 32,
  35        INTERCEPT_DR1_READ,
  36        INTERCEPT_DR2_READ,
  37        INTERCEPT_DR3_READ,
  38        INTERCEPT_DR4_READ,
  39        INTERCEPT_DR5_READ,
  40        INTERCEPT_DR6_READ,
  41        INTERCEPT_DR7_READ,
  42        INTERCEPT_DR0_WRITE = 48,
  43        INTERCEPT_DR1_WRITE,
  44        INTERCEPT_DR2_WRITE,
  45        INTERCEPT_DR3_WRITE,
  46        INTERCEPT_DR4_WRITE,
  47        INTERCEPT_DR5_WRITE,
  48        INTERCEPT_DR6_WRITE,
  49        INTERCEPT_DR7_WRITE,
  50        /* Byte offset 008h (word 2) */
  51        INTERCEPT_EXCEPTION_OFFSET = 64,
  52        /* Byte offset 00Ch (word 3) */
  53        INTERCEPT_INTR = 96,
  54        INTERCEPT_NMI,
  55        INTERCEPT_SMI,
  56        INTERCEPT_INIT,
  57        INTERCEPT_VINTR,
  58        INTERCEPT_SELECTIVE_CR0,
  59        INTERCEPT_STORE_IDTR,
  60        INTERCEPT_STORE_GDTR,
  61        INTERCEPT_STORE_LDTR,
  62        INTERCEPT_STORE_TR,
  63        INTERCEPT_LOAD_IDTR,
  64        INTERCEPT_LOAD_GDTR,
  65        INTERCEPT_LOAD_LDTR,
  66        INTERCEPT_LOAD_TR,
  67        INTERCEPT_RDTSC,
  68        INTERCEPT_RDPMC,
  69        INTERCEPT_PUSHF,
  70        INTERCEPT_POPF,
  71        INTERCEPT_CPUID,
  72        INTERCEPT_RSM,
  73        INTERCEPT_IRET,
  74        INTERCEPT_INTn,
  75        INTERCEPT_INVD,
  76        INTERCEPT_PAUSE,
  77        INTERCEPT_HLT,
  78        INTERCEPT_INVLPG,
  79        INTERCEPT_INVLPGA,
  80        INTERCEPT_IOIO_PROT,
  81        INTERCEPT_MSR_PROT,
  82        INTERCEPT_TASK_SWITCH,
  83        INTERCEPT_FERR_FREEZE,
  84        INTERCEPT_SHUTDOWN,
  85        /* Byte offset 010h (word 4) */
  86        INTERCEPT_VMRUN = 128,
  87        INTERCEPT_VMMCALL,
  88        INTERCEPT_VMLOAD,
  89        INTERCEPT_VMSAVE,
  90        INTERCEPT_STGI,
  91        INTERCEPT_CLGI,
  92        INTERCEPT_SKINIT,
  93        INTERCEPT_RDTSCP,
  94        INTERCEPT_ICEBP,
  95        INTERCEPT_WBINVD,
  96        INTERCEPT_MONITOR,
  97        INTERCEPT_MWAIT,
  98        INTERCEPT_MWAIT_COND,
  99        INTERCEPT_XSETBV,
 100        INTERCEPT_RDPRU,
 101        TRAP_EFER_WRITE,
 102        TRAP_CR0_WRITE,
 103        TRAP_CR1_WRITE,
 104        TRAP_CR2_WRITE,
 105        TRAP_CR3_WRITE,
 106        TRAP_CR4_WRITE,
 107        TRAP_CR5_WRITE,
 108        TRAP_CR6_WRITE,
 109        TRAP_CR7_WRITE,
 110        TRAP_CR8_WRITE,
 111        /* Byte offset 014h (word 5) */
 112        INTERCEPT_INVLPGB = 160,
 113        INTERCEPT_INVLPGB_ILLEGAL,
 114        INTERCEPT_INVPCID,
 115        INTERCEPT_MCOMMIT,
 116        INTERCEPT_TLBSYNC,
 117};
 118
 119
 120struct __attribute__ ((__packed__)) vmcb_control_area {
 121        u32 intercepts[MAX_INTERCEPT];
 122        u32 reserved_1[15 - MAX_INTERCEPT];
 123        u16 pause_filter_thresh;
 124        u16 pause_filter_count;
 125        u64 iopm_base_pa;
 126        u64 msrpm_base_pa;
 127        u64 tsc_offset;
 128        u32 asid;
 129        u8 tlb_ctl;
 130        u8 reserved_2[3];
 131        u32 int_ctl;
 132        u32 int_vector;
 133        u32 int_state;
 134        u8 reserved_3[4];
 135        u32 exit_code;
 136        u32 exit_code_hi;
 137        u64 exit_info_1;
 138        u64 exit_info_2;
 139        u32 exit_int_info;
 140        u32 exit_int_info_err;
 141        u64 nested_ctl;
 142        u64 avic_vapic_bar;
 143        u64 ghcb_gpa;
 144        u32 event_inj;
 145        u32 event_inj_err;
 146        u64 nested_cr3;
 147        u64 virt_ext;
 148        u32 clean;
 149        u32 reserved_5;
 150        u64 next_rip;
 151        u8 insn_len;
 152        u8 insn_bytes[15];
 153        u64 avic_backing_page;  /* Offset 0xe0 */
 154        u8 reserved_6[8];       /* Offset 0xe8 */
 155        u64 avic_logical_id;    /* Offset 0xf0 */
 156        u64 avic_physical_id;   /* Offset 0xf8 */
 157        u8 reserved_7[8];
 158        u64 vmsa_pa;            /* Used for an SEV-ES guest */
 159        u8 reserved_8[720];
 160        /*
 161         * Offset 0x3e0, 32 bytes reserved
 162         * for use by hypervisor/software.
 163         */
 164        u8 reserved_sw[32];
 165};
 166
 167
 168#define TLB_CONTROL_DO_NOTHING 0
 169#define TLB_CONTROL_FLUSH_ALL_ASID 1
 170#define TLB_CONTROL_FLUSH_ASID 3
 171#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
 172
 173#define V_TPR_MASK 0x0f
 174
 175#define V_IRQ_SHIFT 8
 176#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
 177
 178#define V_GIF_SHIFT 9
 179#define V_GIF_MASK (1 << V_GIF_SHIFT)
 180
 181#define V_INTR_PRIO_SHIFT 16
 182#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
 183
 184#define V_IGN_TPR_SHIFT 20
 185#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
 186
 187#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
 188
 189#define V_INTR_MASKING_SHIFT 24
 190#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
 191
 192#define V_GIF_ENABLE_SHIFT 25
 193#define V_GIF_ENABLE_MASK (1 << V_GIF_ENABLE_SHIFT)
 194
 195#define AVIC_ENABLE_SHIFT 31
 196#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
 197
 198#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
 199#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
 200
 201#define SVM_INTERRUPT_SHADOW_MASK       BIT_ULL(0)
 202#define SVM_GUEST_INTERRUPT_MASK        BIT_ULL(1)
 203
 204#define SVM_IOIO_STR_SHIFT 2
 205#define SVM_IOIO_REP_SHIFT 3
 206#define SVM_IOIO_SIZE_SHIFT 4
 207#define SVM_IOIO_ASIZE_SHIFT 7
 208
 209#define SVM_IOIO_TYPE_MASK 1
 210#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
 211#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
 212#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
 213#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
 214
 215#define SVM_VM_CR_VALID_MASK    0x001fULL
 216#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
 217#define SVM_VM_CR_SVM_DIS_MASK  0x0010ULL
 218
 219#define SVM_NESTED_CTL_NP_ENABLE        BIT(0)
 220#define SVM_NESTED_CTL_SEV_ENABLE       BIT(1)
 221#define SVM_NESTED_CTL_SEV_ES_ENABLE    BIT(2)
 222
 223struct vmcb_seg {
 224        u16 selector;
 225        u16 attrib;
 226        u32 limit;
 227        u64 base;
 228} __packed;
 229
 230struct vmcb_save_area {
 231        struct vmcb_seg es;
 232        struct vmcb_seg cs;
 233        struct vmcb_seg ss;
 234        struct vmcb_seg ds;
 235        struct vmcb_seg fs;
 236        struct vmcb_seg gs;
 237        struct vmcb_seg gdtr;
 238        struct vmcb_seg ldtr;
 239        struct vmcb_seg idtr;
 240        struct vmcb_seg tr;
 241        u8 reserved_1[43];
 242        u8 cpl;
 243        u8 reserved_2[4];
 244        u64 efer;
 245        u8 reserved_3[104];
 246        u64 xss;                /* Valid for SEV-ES only */
 247        u64 cr4;
 248        u64 cr3;
 249        u64 cr0;
 250        u64 dr7;
 251        u64 dr6;
 252        u64 rflags;
 253        u64 rip;
 254        u8 reserved_4[88];
 255        u64 rsp;
 256        u8 reserved_5[24];
 257        u64 rax;
 258        u64 star;
 259        u64 lstar;
 260        u64 cstar;
 261        u64 sfmask;
 262        u64 kernel_gs_base;
 263        u64 sysenter_cs;
 264        u64 sysenter_esp;
 265        u64 sysenter_eip;
 266        u64 cr2;
 267        u8 reserved_6[32];
 268        u64 g_pat;
 269        u64 dbgctl;
 270        u64 br_from;
 271        u64 br_to;
 272        u64 last_excp_from;
 273        u64 last_excp_to;
 274
 275        /*
 276         * The following part of the save area is valid only for
 277         * SEV-ES guests when referenced through the GHCB or for
 278         * saving to the host save area.
 279         */
 280        u8 reserved_7[72];
 281        u32 spec_ctrl;          /* Guest version of SPEC_CTRL at 0x2E0 */
 282        u8 reserved_7b[4];
 283        u32 pkru;
 284        u8 reserved_7a[20];
 285        u64 reserved_8;         /* rax already available at 0x01f8 */
 286        u64 rcx;
 287        u64 rdx;
 288        u64 rbx;
 289        u64 reserved_9;         /* rsp already available at 0x01d8 */
 290        u64 rbp;
 291        u64 rsi;
 292        u64 rdi;
 293        u64 r8;
 294        u64 r9;
 295        u64 r10;
 296        u64 r11;
 297        u64 r12;
 298        u64 r13;
 299        u64 r14;
 300        u64 r15;
 301        u8 reserved_10[16];
 302        u64 sw_exit_code;
 303        u64 sw_exit_info_1;
 304        u64 sw_exit_info_2;
 305        u64 sw_scratch;
 306        u8 reserved_11[56];
 307        u64 xcr0;
 308        u8 valid_bitmap[16];
 309        u64 x87_state_gpa;
 310} __packed;
 311
 312struct ghcb {
 313        struct vmcb_save_area save;
 314        u8 reserved_save[2048 - sizeof(struct vmcb_save_area)];
 315
 316        u8 shared_buffer[2032];
 317
 318        u8 reserved_1[10];
 319        u16 protocol_version;   /* negotiated SEV-ES/GHCB protocol version */
 320        u32 ghcb_usage;
 321} __packed;
 322
 323
 324#define EXPECTED_VMCB_SAVE_AREA_SIZE            1032
 325#define EXPECTED_VMCB_CONTROL_AREA_SIZE         1024
 326#define EXPECTED_GHCB_SIZE                      PAGE_SIZE
 327
 328static inline void __unused_size_checks(void)
 329{
 330        BUILD_BUG_ON(sizeof(struct vmcb_save_area)      != EXPECTED_VMCB_SAVE_AREA_SIZE);
 331        BUILD_BUG_ON(sizeof(struct vmcb_control_area)   != EXPECTED_VMCB_CONTROL_AREA_SIZE);
 332        BUILD_BUG_ON(sizeof(struct ghcb)                != EXPECTED_GHCB_SIZE);
 333}
 334
 335struct vmcb {
 336        struct vmcb_control_area control;
 337        struct vmcb_save_area save;
 338} __packed;
 339
 340#define SVM_CPUID_FUNC 0x8000000a
 341
 342#define SVM_VM_CR_SVM_DISABLE 4
 343
 344#define SVM_SELECTOR_S_SHIFT 4
 345#define SVM_SELECTOR_DPL_SHIFT 5
 346#define SVM_SELECTOR_P_SHIFT 7
 347#define SVM_SELECTOR_AVL_SHIFT 8
 348#define SVM_SELECTOR_L_SHIFT 9
 349#define SVM_SELECTOR_DB_SHIFT 10
 350#define SVM_SELECTOR_G_SHIFT 11
 351
 352#define SVM_SELECTOR_TYPE_MASK (0xf)
 353#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
 354#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
 355#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
 356#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
 357#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
 358#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
 359#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)
 360
 361#define SVM_SELECTOR_WRITE_MASK (1 << 1)
 362#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
 363#define SVM_SELECTOR_CODE_MASK (1 << 3)
 364
 365#define SVM_EVTINJ_VEC_MASK 0xff
 366
 367#define SVM_EVTINJ_TYPE_SHIFT 8
 368#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
 369
 370#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
 371#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
 372#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
 373#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
 374
 375#define SVM_EVTINJ_VALID (1 << 31)
 376#define SVM_EVTINJ_VALID_ERR (1 << 11)
 377
 378#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
 379#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK
 380
 381#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
 382#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
 383#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
 384#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
 385
 386#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
 387#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
 388
 389#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
 390#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 391#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 392
 393#define SVM_EXITINFO_REG_MASK 0x0F
 394
 395#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
 396
 397/* GHCB Accessor functions */
 398
 399#define GHCB_BITMAP_IDX(field)                                                  \
 400        (offsetof(struct vmcb_save_area, field) / sizeof(u64))
 401
 402#define DEFINE_GHCB_ACCESSORS(field)                                            \
 403        static inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb)     \
 404        {                                                                       \
 405                return test_bit(GHCB_BITMAP_IDX(field),                         \
 406                                (unsigned long *)&ghcb->save.valid_bitmap);     \
 407        }                                                                       \
 408                                                                                \
 409        static inline u64 ghcb_get_##field(struct ghcb *ghcb)                   \
 410        {                                                                       \
 411                return ghcb->save.field;                                        \
 412        }                                                                       \
 413                                                                                \
 414        static inline u64 ghcb_get_##field##_if_valid(struct ghcb *ghcb)        \
 415        {                                                                       \
 416                return ghcb_##field##_is_valid(ghcb) ? ghcb->save.field : 0;    \
 417        }                                                                       \
 418                                                                                \
 419        static inline void ghcb_set_##field(struct ghcb *ghcb, u64 value)       \
 420        {                                                                       \
 421                __set_bit(GHCB_BITMAP_IDX(field),                               \
 422                          (unsigned long *)&ghcb->save.valid_bitmap);           \
 423                ghcb->save.field = value;                                       \
 424        }
 425
 426DEFINE_GHCB_ACCESSORS(cpl)
 427DEFINE_GHCB_ACCESSORS(rip)
 428DEFINE_GHCB_ACCESSORS(rsp)
 429DEFINE_GHCB_ACCESSORS(rax)
 430DEFINE_GHCB_ACCESSORS(rcx)
 431DEFINE_GHCB_ACCESSORS(rdx)
 432DEFINE_GHCB_ACCESSORS(rbx)
 433DEFINE_GHCB_ACCESSORS(rbp)
 434DEFINE_GHCB_ACCESSORS(rsi)
 435DEFINE_GHCB_ACCESSORS(rdi)
 436DEFINE_GHCB_ACCESSORS(r8)
 437DEFINE_GHCB_ACCESSORS(r9)
 438DEFINE_GHCB_ACCESSORS(r10)
 439DEFINE_GHCB_ACCESSORS(r11)
 440DEFINE_GHCB_ACCESSORS(r12)
 441DEFINE_GHCB_ACCESSORS(r13)
 442DEFINE_GHCB_ACCESSORS(r14)
 443DEFINE_GHCB_ACCESSORS(r15)
 444DEFINE_GHCB_ACCESSORS(sw_exit_code)
 445DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
 446DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
 447DEFINE_GHCB_ACCESSORS(sw_scratch)
 448DEFINE_GHCB_ACCESSORS(xcr0)
 449
#endif /* __SVM_H */