/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
extern u64 host_efer;

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#define NR_AUTOLOAD_MSRS 8

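/*
 * MSRs whose guest/host values are loaded and stored automatically by the
 * CPU at VM-entry and VM-exit, via the VMCS MSR-load/store areas.
 */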
struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

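/*
 * MSRs that are switched lazily between host and guest values (via the
 * user-return notifier) instead of through the VMCS autoload areas.
 */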
struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

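/* Posted-Interrupt Descriptor */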
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);

#define RTIT_ADDR_RANGE 4

/* Intel Processor Trace (PT) register context: one set of RTIT_* MSR values. */
struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

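/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */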
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;
	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_sync;
	bool dirty_vmcs12;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct page *virtual_apic_page;
	struct page *pi_desc_page;
	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* vmcs01 state saved across L2 entry, restored on nested VM-exit */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct page *hv_evmcs_page;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 msr_bitmap_mode;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;
	struct shared_msr_entry *guest_msrs;
	int nmsrs;
	int save_nmsrs;
	bool guest_msrs_dirty;
	unsigned long host_idt_base;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;

	u32 vm_entry_controls_shadow;
	u32 vm_exit_controls_shadow;
	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.  loaded_cpu_state points
	 * to the VMCS whose state is loaded into the CPU registers that only
	 * need to be switched when transitioning to/from the kernel; a NULL
	 * value indicates that host state is loaded.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;
	struct loaded_vmcs *loaded_cpu_state;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM 512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	u32 host_pkru;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

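/*
 * The posted-interrupt descriptor is also written by the CPU (and by other
 * CPUs posting interrupts), so the helpers below manipulate it only with
 * atomic bitops on the 64-bit control word.
 */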
static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}

/* RVI (Requesting Virtual Interrupt) is the low byte of GUEST_INTR_STATUS. */
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

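/*
 * VM_ENTRY_CONTROLS and VM_EXIT_CONTROLS are shadowed in vcpu_vmx so that
 * reads, and writes of an unchanged value, avoid a VMREAD/VMWRITE.
 */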
static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
{
	vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
}

static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_ENTRY_CONTROLS, val);
	vmx->vm_entry_controls_shadow = val;
}

static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_entry_controls_shadow != val)
		vm_entry_controls_init(vmx, val);
}

static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_entry_controls_shadow;
}

static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
}

static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
}

static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
{
	vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
}

static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_EXIT_CONTROLS, val);
	vmx->vm_exit_controls_shadow = val;
}

static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_exit_controls_shadow != val)
		vm_exit_controls_init(vmx, val);
}

static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_exit_controls_shadow;
}

static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
}

static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
}

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;

	if (pt_mode == PT_MODE_SYSTEM)
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;

	if (pt_mode == PT_MODE_SYSTEM)
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

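/* Allocate a VMCS on the node of the currently running CPU. */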
static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

/*
 * Flush guest TLB entries: when EPT is in use and either guest-physical
 * mappings must be invalidated or VPID is unavailable, flush the EPT
 * context; otherwise flush linear mappings by VPID.
 */
static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				bool invalidate_gpa)
{
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu,
						vcpu->arch.mmu->root_hpa));
	} else {
		vpid_sync_context(vpid);
	}
}

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}

static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */