/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_NESTED_H
#define __KVM_X86_VMX_NESTED_H

#include "kvm_cache_regs.h"
#include "vmcs12.h"
#include "vmx.h"

/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
};

void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
						bool from_vmentry);
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size);

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * In case we do two consecutive get/set_nested_state()s while L2 was
	 * running hv_evmcs may end up not being mapped (we map it from
	 * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
	 * have vmcs12 if it is true.
	 */
	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
		vmx->nested.hv_evmcs;
}

static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}

static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}

/*
 * Reflect a VM Exit into L1.
 */
static inline int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu,
					    u32 exit_reason)
{
	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	/*
	 * At this point, the exit interruption info in exit_intr_info
	 * is only valid for EXCEPTION_NMI exits.  For EXTERNAL_INTERRUPT
	 * we need to query the in-kernel LAPIC.
	 */
	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
	if ((exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}

	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}
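
/*
 * Sketch of the expected call site, assuming the usual exit-handling flow
 * in vmx.c (illustrative only, not defined by this header): the L0 exit
 * handler first asks nested_vmx_exit_reflected() whether L1 wants this
 * exit, and only then forwards it to L1:
 *
 *	if (is_guest_mode(vcpu) &&
 *	    nested_vmx_exit_reflected(vcpu, exit_reason))
 *		return nested_vmx_reflect_vmexit(vcpu, exit_reason);
 */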

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed
 * by its hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}
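
/*
 * Worked example for the read-shadow combination above, with made-up
 * values: if L1 owns CR0.TS (cr0_guest_host_mask has X86_CR0_TS set),
 * guest_cr0 has TS clear and cr0_read_shadow has TS set, then
 * nested_read_cr0() reports TS as set -- L2 reads the value L1 chose to
 * shadow, not the value the CPU is actually running with.
 */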

static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}

/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}

static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}

static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
		CPU_BASED_MONITOR_TRAP_FLAG;
}

static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}
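
/*
 * Example of why nested_cpu_has2() checks two fields (values made up for
 * illustration): if vmcs12 sets SECONDARY_EXEC_ENABLE_EPT in
 * secondary_vm_exec_control but leaves CPU_BASED_ACTIVATE_SECONDARY_CONTROLS
 * clear, then nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT) is false:
 * hardware ignores the secondary controls entirely when the activate bit is
 * clear, and the emulation matches that behavior.
 */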

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
{
	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}

static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}

/*
 * In nested virtualization, check if L1 asked to exit on external
 * interrupts. For most existing hypervisors this is always true -- they
 * set external-interrupt exiting in their VMCS so they can handle (or
 * re-inject) the interrupt themselves.
 */
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}
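
/*
 * Worked example with made-up masks: for fixed0 = 0x1 and fixed1 = ~0x10ull,
 * bit 0 must be 1 and bit 4 must be 0. val = 0x1 passes, since
 * ((0x1 & ~0x10ull) | 0x1) == 0x1; val = 0x10 violates both constraints,
 * since ((0x10 & ~0x10ull) | 0x1) == 0x1 != 0x10.
 */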

static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

#endif /* __KVM_X86_VMX_NESTED_H */