/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

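/*
 * MSR access types used when toggling interception in the MSR bitmaps;
 * MSR_TYPE_RW is simply the read and write bits combined.
 */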
#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

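/*
 * x2APIC MSRs live at APIC_BASE_MSR (0x800) plus the legacy MMIO register
 * offset divided by 16, hence the (r) >> 4 below.
 */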
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

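/*
 * 64-bit kernels track more user-return MSRs, largely because the SYSCALL
 * MSRs (e.g. LSTAR, CSTAR, SYSCALL_MASK) only exist there.
 */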
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE		4

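/*
 * Intel Processor Trace (PT) register state; pt_desc keeps one copy each
 * for the host and the guest so the MSRs can be swapped on VM transitions.
 */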
struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

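/*
 * VM-exit reason layout: the basic exit reason occupies the low 16 bits,
 * the remaining bits are modifier flags defined by the Intel SDM.
 */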
union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};

#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether the rarely-accessed vmcs02 fields still need to
	 * be copied back to vmcs12 before they can be read.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1 */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8		      x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool		      guest_state_loaded;

	unsigned long         exit_qualification;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware if those conditions aren't met.
	 */
	struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool                  guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64		      msr_host_kernel_gs_base;
	u64		      msr_guest_kernel_gs_base;
#endif

	u64		      spec_ctrl;
	u32		      msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

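/*
 * RVI (Requested Virtual Interrupt) is the low byte of the 16-bit guest
 * interrupt status VMCS field; the high byte holds SVI.
 */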
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

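/*
 * Generates {vm_entry,vm_exit,pin,exec,secondary_exec}_controls_{set,get,
 * setbit,clearbit}() helpers that shadow each VMX controls field in
 * loaded_vmcs, so reads avoid a VMREAD and writes skip the VMWRITE when
 * the value is unchanged.
 */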
#define BUILD_CONTROLS_SHADOW(lname, uname)				     \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	     \
{									     \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		     \
		vmcs_write32(uname, val);				     \
		vmx->loaded_vmcs->controls_shadow.lname = val;		     \
	}								     \
}									     \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs)	     \
{									     \
	return vmcs->controls_shadow.lname;				     \
}									     \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		     \
{									     \
	return __##lname##_controls_get(vmx->loaded_vmcs);		     \
}									     \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	     \
}									     \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	     \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)

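/*
 * On VM-exit the GPRs have already been saved to vcpu->arch.regs, so only
 * the registers that live in the VMCS (RIP, RSP, RFLAGS, the control
 * registers, exit info, etc.) are invalidated here and must be re-read
 * via VMREAD on first use.
 */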
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

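/*
 * The exit qualification and interrupt info fields are read from the VMCS
 * at most once per VM-exit; the VCPU_EXREG_EXIT_INFO_* availability bits
 * track whether the cached copy in vcpu_vmx is still valid.
 */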
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

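/*
 * True if the WAITPKG instructions (UMONITOR/UMWAIT/TPAUSE) are exposed
 * to the guest via the "enable user wait and pause" secondary control.
 */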
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

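/*
 * #PF must be intercepted when shadow paging is in use, and also when KVM
 * emulates a guest MAXPHYADDR smaller than the host's, as reserved-bit
 * page faults then need to be synthesized for the guest.
 */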
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

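/*
 * Unrestricted guest lets the CPU execute real mode and unpaged protected
 * mode without emulation.  While L2 is running, the feature only applies
 * if the unrestricted-guest control is also set in the current vmcs02.
 */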
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_H */