linux/arch/x86/kvm/vmx/vmcs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_VMCS_H
#define __KVM_X86_VMX_VMCS_H

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/nospec.h>

#include <asm/kvm.h>
#include <asm/vmx.h>

#include "capabilities.h"

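/*
 * Software view of the hardware VMCS region (Intel SDM, "Format of the
 * VMCS Region"): a 31-bit revision identifier plus a shadow-VMCS indicator
 * bit, a 32-bit VMX-abort indicator, and implementation-specific data that
 * software may only touch through VMREAD/VMWRITE.
 */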
struct vmcs_hdr {
        u32 revision_id:31;
        u32 shadow_vmcs:1;
};

struct vmcs {
        struct vmcs_hdr hdr;
        u32 abort;
        char data[];
};

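/*
 * Per-CPU pointer to the VMCS that is currently active (VMPTRLDed) on this
 * CPU, i.e. the one VMREAD/VMWRITE operate on.
 */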
DECLARE_PER_CPU(struct vmcs *, current_vmcs);

/*
 * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
 * and whose values change infrequently, but are not constant.  I.e. this is
 * used as a write-through cache of the corresponding VMCS fields.
 */
struct vmcs_host_state {
        unsigned long cr3;      /* May not match real cr3 */
        unsigned long cr4;      /* May not match real cr4 */
        unsigned long gs_base;
        unsigned long fs_base;
        unsigned long rsp;

        u16           fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
        u16           ds_sel, es_sel;
#endif
};
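
/*
 * A minimal sketch of the write-through-cache pattern described above
 * (illustrative, not part of this header): callers compare the new value
 * against the cached copy and only pay for a VMWRITE when it changed, e.g.
 *
 *	if (unlikely(fs_base != host->fs_base)) {
 *		vmcs_writel(HOST_FS_BASE, fs_base);
 *		host->fs_base = fs_base;
 *	}
 */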

struct vmcs_controls_shadow {
        u32 vm_entry;
        u32 vm_exit;
        u32 pin;
        u32 exec;
        u32 secondary_exec;
};
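
/*
 * The controls shadow caches the last value written to each VM-entry/exit
 * and VM-execution control field so that readers avoid a VMREAD and writers
 * can skip redundant VMWRITEs.  Roughly (the real accessors are generated
 * elsewhere in the VMX code):
 *
 *	if (shadow->pin != val) {
 *		vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, val);
 *		shadow->pin = val;
 *	}
 */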

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
        struct vmcs *vmcs;
        struct vmcs *shadow_vmcs;
        int cpu;
        bool launched;
        bool nmi_known_unmasked;
        bool hv_timer_soft_disabled;
        /* Support for vnmi-less CPUs */
        int soft_vnmi_blocked;
        ktime_t entry_time;
        s64 vnmi_blocked_time;
        unsigned long *msr_bitmap;
        struct list_head loaded_vmcss_on_cpu_link;
        struct vmcs_host_state host_state;
        struct vmcs_controls_shadow controls_shadow;
};
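
/*
 * Illustrative use of loaded_vmcss_on_cpu_link (the actual teardown lives
 * in vmx.c): when a CPU goes down, every VMCS still loaded on it must be
 * VMCLEARed so no cached VMCS state is left behind, roughly:
 *
 *	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
 *			    loaded_vmcss_on_cpu_link)
 *		vmcs_clear(v->vmcs);
 */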
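/*
 * Helpers for decoding the interruption-information format used by the
 * VM-exit interruption info and IDT-vectoring info fields: bits 7:0 hold
 * the vector, bits 10:8 the event type, bit 11 the "error code delivered"
 * flag and bit 31 the valid bit.
 */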
static inline bool is_intr_type(u32 intr_info, u32 type)
{
        const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;

        return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
}

static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
{
        const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
                         INTR_INFO_VECTOR_MASK;

        return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
}

static inline bool is_exception_n(u32 intr_info, u8 vector)
{
        return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
}

static inline bool is_debug(u32 intr_info)
{
        return is_exception_n(intr_info, DB_VECTOR);
}

static inline bool is_breakpoint(u32 intr_info)
{
        return is_exception_n(intr_info, BP_VECTOR);
}

static inline bool is_page_fault(u32 intr_info)
{
        return is_exception_n(intr_info, PF_VECTOR);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
        return is_exception_n(intr_info, UD_VECTOR);
}

static inline bool is_gp_fault(u32 intr_info)
{
        return is_exception_n(intr_info, GP_VECTOR);
}

static inline bool is_alignment_check(u32 intr_info)
{
        return is_exception_n(intr_info, AC_VECTOR);
}

static inline bool is_machine_check(u32 intr_info)
{
        return is_exception_n(intr_info, MC_VECTOR);
}

/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
        return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
}

static inline bool is_nmi(u32 intr_info)
{
        return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
}

static inline bool is_external_intr(u32 intr_info)
{
        return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
}

static inline bool is_exception_with_error_code(u32 intr_info)
{
        const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK;

        return (intr_info & mask) == mask;
}

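/*
 * Helpers for picking apart a VMCS field encoding (Intel SDM, Appendix B):
 * bit 0 selects the high half of a 64-bit field, bits 9:1 are the field
 * index, bits 11:10 are the field type (1 == VM-exit information, which is
 * read-only), and bits 14:13 are the field width.
 */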
enum vmcs_field_width {
        VMCS_FIELD_WIDTH_U16 = 0,
        VMCS_FIELD_WIDTH_U64 = 1,
        VMCS_FIELD_WIDTH_U32 = 2,
        VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
};

static inline int vmcs_field_width(unsigned long field)
{
        if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
                return VMCS_FIELD_WIDTH_U32;
        return (field >> 13) & 0x3;
}

static inline int vmcs_field_readonly(unsigned long field)
{
        return (((field >> 10) & 0x3) == 1);
}

#define VMCS_FIELD_INDEX_SHIFT          (1)
#define VMCS_FIELD_INDEX_MASK           GENMASK(9, 1)

static inline unsigned int vmcs_field_index(unsigned long field)
{
        return (field & VMCS_FIELD_INDEX_MASK) >> VMCS_FIELD_INDEX_SHIFT;
}
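
/*
 * Worked example, using GUEST_RIP (encoded as 0x681e in asm/vmx.h):
 * vmcs_field_width() returns (0x681e >> 13) & 0x3 == 3 (natural width),
 * vmcs_field_readonly() sees type (0x681e >> 10) & 0x3 == 2 (guest state,
 * writable, so it returns 0), and vmcs_field_index() extracts index 15.
 */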

#endif /* __KVM_X86_VMX_VMCS_H */