linux/arch/x86/kvm/vmx/ops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>
#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)

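/*
 * Per the Intel SDM's VMCS field encoding, bits 14:13 of a field encode
 * its width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width) and
 * bit 0 encodes the access type (1 = high 32 bits of a 64-bit field).
 * The checks below mask those bits (0x6000/0x6001) and fire at build
 * time when a compile-time-constant field is passed to an accessor of
 * the wrong width.
 */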
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

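/*
 * VMREAD the given field from the current VMCS.  The __ex_clear() wrapper
 * zeroes the destination register in its fixup path, so a VMREAD that
 * faults (e.g. during an emergency reboot with VMX already disabled)
 * yields 0 rather than uninitialized data.
 */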
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex_clear("vmread %1, %0", "%k0")
		      : "=r"(value) : "r"(field));
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

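/*
 * On 32-bit kernels a 64-bit field is accessed as two natural-width
 * halves: the companion encoding at field + 1 (access type bit set)
 * selects the high 32 bits.  On 64-bit kernels one VMREAD suffices.
 */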
static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

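/*
 * VMWRITE reports failure via the flags: CF for VMfailInvalid, ZF for
 * VMfailValid.  CC_SET(na)/CC_OUT(na) ("not above", i.e. CF or ZF set)
 * fold both failure modes into a single error bool.
 */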
static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	bool error;

	asm volatile (__ex("vmwrite %2, %1") CC_SET(na)
		      : CC_OUT(na) (error) : "r"(field), "rm"(value));
	if (unlikely(error))
		vmwrite_error(field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

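/*
 * Mirror of vmcs_read64(): on 32-bit kernels the high half is written
 * through the companion "high" encoding at field + 1.
 */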
static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

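/*
 * The bit helpers take only a 32-bit mask and read-modify-write through
 * the natural-width (or 32-bit eVMCS) accessors, which would truncate a
 * 64-bit field; hence the build-time rejection of 64-bit encodings.
 */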
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

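/*
 * VMCLEAR takes the physical address of the VMCS as a memory operand; it
 * flushes any processor-cached VMCS data to memory and marks the region's
 * launch state as "clear" so it can be reloaded or reused.
 */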
static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	bool error;

	asm volatile (__ex("vmclear %1") CC_SET(na)
		      : CC_OUT(na) (error) : "m"(phys_addr));
	if (unlikely(error))
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

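/*
 * VMPTRLD makes @vmcs the current, active VMCS on this CPU.  With an
 * enlightened VMCS there is no hardware VMCS to load, so the request is
 * routed to evmcs_load() instead.
 */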
static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	bool error;

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	asm volatile (__ex("vmptrld %1") CC_SET(na)
		      : CC_OUT(na) (error) : "m"(phys_addr));
	if (unlikely(error))
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}

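/*
 * INVVPID takes a 128-bit in-memory descriptor: the VPID in bits 15:0,
 * reserved bits 63:16 (must be zero) and the guest linear address in
 * bits 127:64.  The extent type passed in the register operand selects
 * the invalidation scope.  Failure indicates a KVM bug (bad extent or
 * operands), hence the BUG_ON().
 */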
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };
	bool error;

	asm volatile (__ex("invvpid %2, %1") CC_SET(na)
		      : CC_OUT(na) (error) : "r"(ext), "m"(operand));
	BUG_ON(error);
}

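/*
 * INVEPT uses the same register + 128-bit descriptor form, with the EPT
 * pointer in the low 64 bits.  The second quadword is reserved for the
 * supported extent types; callers in this file always pass 0 for @gpa.
 */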
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};
	bool error;

	asm volatile (__ex("invept %2, %1") CC_SET(na)
		      : CC_OUT(na) (error) : "r"(ext), "m"(operand));
	BUG_ON(error);
}

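/*
 * Returns true if the address was invalidated, or if there was nothing
 * to do because VPIDs are not in use (vpid == 0).  Returns false when
 * the CPU lacks individual-address INVVPID, in which case the caller is
 * expected to fall back to a wider flush.
 */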
static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return true;

	if (cpu_has_vmx_invvpid_individual_addr()) {
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
		return true;
	}

	return false;
}

static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

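/*
 * Prefer a single-context invalidation, which only zaps translations
 * tagged with @vpid; fall back to a global flush of all VPIDs when
 * single-context INVVPID is unsupported.
 */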
static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else
		vpid_sync_vcpu_global();
}

static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

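/*
 * Likewise for EPT: invalidate only the mappings derived from @eptp when
 * single-context INVEPT is available, otherwise flush every EPT context.
 */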
static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */