linux/arch/x86/kvm/vmx/vmx_ops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>
#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
                                                         bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

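/*
 * Per the Intel SDM (Vol. 3, Appendix B), bits 14:13 of a VMCS field
 * encoding give the field width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
 * 3 = natural width) and bit 0 is the access type, which is non-zero
 * only for the "high" half of 64-bit fields.  The checks below mask
 * exactly those bits (0x6000/0x6001) to reject, at compile time, any
 * constant field whose width doesn't match the accessor being used.
 */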
static __always_inline void vmcs_check16(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "16-bit accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "16-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "16-bit accessor invalid for 32-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "32-bit accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "64-bit accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "64-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "64-bit accessor invalid for 32-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "Natural width accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "Natural width accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "Natural width accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "Natural width accessor invalid for 32-bit field");
}

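/*
 * Raw VMREAD wrapper.  VM-fail (ZF/CF set) and faults (e.g. #UD if VMX
 * is off) are both routed through vmread_error_trampoline(), which
 * preserves volatile registers around the call to vmread_error() and
 * zeros the on-stack result slot, so failed reads return '0'.
 */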
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile("1: vmread %2, %1\n\t"
                     ".byte 0x3e\n\t" /* branch taken hint */
                     "ja 3f\n\t"

                     /*
                      * VMREAD failed.  Push '0' for @fault, push the failing
                      * @field, and bounce through the trampoline to preserve
                      * volatile registers.
                      */
                     "push $0\n\t"
                     "push %2\n\t"
                     "2: call vmread_error_trampoline\n\t"

                     /*
                      * Unwind the stack.  Note, the trampoline zeros out the
                      * memory for @fault so that the result is '0' on error.
                      */
                     "pop %2\n\t"
                     "pop %1\n\t"
                     "3:\n\t"

                     /* VMREAD faulted.  As above, except push '1' for @fault. */
                     ".pushsection .fixup, \"ax\"\n\t"
                     "4: push $1\n\t"
                     "push %2\n\t"
                     "jmp 2b\n\t"
                     ".popsection\n\t"
                     _ASM_EXTABLE(1b, 4b)
                     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
        return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
        vmcs_check16(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read16(field);
        return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
        vmcs_check32(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read32(field);
        return __vmcs_readl(field);
}

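/*
 * On 32-bit kernels, unsigned long is only 32 bits, so a 64-bit field
 * is assembled from two VMREADs: the "full" encoding for the low half
 * and the "high" encoding (field + 1, i.e. access-type bit set) for
 * the upper half.  E.g. GUEST_IA32_EFER (0x2806) pairs with its high
 * encoding 0x2807.
 */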
static __always_inline u64 vmcs_read64(unsigned long field)
{
        vmcs_check64(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read64(field);
#ifdef CONFIG_X86_64
        return __vmcs_readl(field);
#else
        return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
        vmcs_checkl(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read64(field);
        return __vmcs_readl(field);
}

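/*
 * Emit a one- or two-operand VMX instruction via asm goto.  VM-fail
 * sets ZF or CF, so "jna" branches to the 'error' label, which calls
 * the instruction's <insn>_error() reporting helper; a hardware fault
 * instead unwinds through the exception table to the 'fault' label
 * and kvm_spurious_fault().
 */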
#define vmx_asm1(insn, op1, error_args...)                              \
do {                                                                    \
        asm_volatile_goto("1: " __stringify(insn) " %0\n\t"             \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          : : op1 : "cc" : error, fault);               \
        return;                                                         \
error:                                                                  \
        instrumentation_begin();                                        \
        insn##_error(error_args);                                       \
        instrumentation_end();                                          \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)                         \
do {                                                                    \
        asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"         \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          : : op1, op2 : "cc" : error, fault);          \
        return;                                                         \
error:                                                                  \
        instrumentation_begin();                                        \
        insn##_error(error_args);                                       \
        instrumentation_end();                                          \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
        vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
        vmcs_check16(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write16(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
        vmcs_check32(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, value);

        __vmcs_writel(field, value);
}

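/*
 * Mirror of vmcs_read64(): 32-bit kernels split a 64-bit write into a
 * VMWRITE of the low half to the "full" encoding and of the high half
 * to the "high" encoding at field + 1.
 */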
static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
        vmcs_check64(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write64(field, value);

        __vmcs_writel(field, value);
#ifndef CONFIG_X86_64
        __vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
        vmcs_checkl(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write64(field, value);

        __vmcs_writel(field, value);
}

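/*
 * Read-modify-write helpers.  Only 16-bit, 32-bit and natural width
 * fields are supported; 64-bit fields are rejected at build time since
 * @mask is 32 bits and 32-bit kernels would need a split access.
 */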
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_clear_bits does not support 64-bit fields");
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, evmcs_read32(field) & ~mask);

        __vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_set_bits does not support 64-bit fields");
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, evmcs_read32(field) | mask);

        __vmcs_writel(field, __vmcs_readl(field) | mask);
}

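/*
 * VMCLEAR flushes the VMCS's cached state to memory and marks it
 * inactive and "clear"; the instruction takes the physical address as
 * a 64-bit memory operand, hence the "m" constraint.
 */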
static inline void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);

        vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);

        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_load(phys_addr);

        vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

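/*
 * INVVPID takes a 128-bit in-memory descriptor: a 16-bit VPID, 48
 * reserved (must-be-zero) bits, and a linear address that is consulted
 * only for the individual-address extent.
 */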
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
        struct {
                u64 vpid : 16;
                u64 rsvd : 48;
                u64 gva;
        } operand = { vpid, 0, gva };

        vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

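/*
 * INVEPT's descriptor is two quadwords: the EPT pointer and a second,
 * reserved quadword that must be zero (passed here as @gpa).
 */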
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
        struct {
                u64 eptp, gpa;
        } operand = {eptp, gpa};

        vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

static inline void vpid_sync_vcpu_single(int vpid)
{
        if (vpid == 0)
                return;

        __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
        __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

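/*
 * Prefer a single-context flush; fall back to flushing all contexts if
 * the CPU lacks single-context INVVPID.  A vpid of 0 means the vCPU
 * has no dedicated VPID, in which case no flush is needed here.
 */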
static inline void vpid_sync_context(int vpid)
{
        if (cpu_has_vmx_invvpid_single())
                vpid_sync_vcpu_single(vpid);
        else if (vpid != 0)
                vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
        if (vpid == 0)
                return;

        if (cpu_has_vmx_invvpid_individual_addr())
                __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
        else
                vpid_sync_context(vpid);
}

static inline void ept_sync_global(void)
{
        __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

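/*
 * Flush EPT translations for a single EPTP when single-context INVEPT
 * is supported, otherwise flush every EPT context.
 */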
static inline void ept_sync_context(u64 eptp)
{
        if (cpu_has_vmx_invept_context())
                __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
        else
                ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */