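/*
 * Kernel-based Virtual Machine driver for Linux: Intel VT-x (VMX) support.
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */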
#include "irq.h"
#include "mmu.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/hrtimer.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/fpu/internal.h>
#include <asm/perf_event.h>
#include <asm/debugreg.h>
#include <asm/kexec.h>
#include <asm/apic.h>

#include "trace.h"
#include "pmu.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_VMX),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);

static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

static bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
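/*
 * If nested = 1, nested virtualization is supported, i.e., guests may use
 * VMX and be a hypervisor for their own guests. If nested = 0, guests may
 * not use VMX instructions.
 */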
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);

static u64 __read_mostly host_xss;

static bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);

#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
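/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop; also indicates whether PLE is
 *             enabled at all.
 * ple_window: upper bound on the amount of time a guest is allowed to
 *             execute in a PAUSE loop before a VM exit is taken.
 * Time is measured on a counter that runs at the same rate as the TSC.
 */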
#define KVM_VMX_DEFAULT_PLE_GAP 128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
#define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2
#define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \
	(INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW)

static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
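/* Default doubles the per-vcpu window on every PLE exit. */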
static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, int, S_IRUGO);
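/* Default resets the per-vcpu window every exit to ple_window. */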
static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, int, S_IRUGO);
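/* Default is to compute the maximum so we can never overflow. */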
static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, int, S_IRUGO);

extern const ulong vmx_return;

#define NR_AUTOLOAD_MSRS 8
#define VMCS02_POOL_SIZE 1

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};
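/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu != -1),
 * also remember whether it was VMLAUNCHed, and maintain a linked list of all
 * VMCSs loaded on this CPU (so we can clear them if the CPU goes down).
 */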
struct loaded_vmcs {
	struct vmcs *vmcs;
	int cpu;
	int launched;
	struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};
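/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for
 * a single nested guest (L2), hence the name vmcs12. Any VMX implementation
 * has a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure
 * is kept in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. More than
 * one of these structures may exist if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2. The structure is packed
 * so that its layout is identical across machines; since it lives in guest
 * memory, it is part of the guest ABI.
 */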
typedef u64 natural_width;
struct __packed vmcs12 {
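	/*
	 * According to the Intel spec, a VMCS region must start with the
	 * following two fields; implementation-specific data follows.
	 */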
	u32 revision_id;
	u32 abort;

	u32 launch_state;
	u32 padding[7];

	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 posted_intr_desc_addr;
	u64 ept_pointer;
	u64 eoi_exit_bitmap0;
	u64 eoi_exit_bitmap1;
	u64 eoi_exit_bitmap2;
	u64 eoi_exit_bitmap3;
	u64 xss_exit_bitmap;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 guest_bndcfgs;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 padding64[8];
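	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 always has 64-bit fields.
	 */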
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width cr3_target_value0;
	natural_width cr3_target_value1;
	natural_width cr3_target_value2;
	natural_width cr3_target_value3;
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8];
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 vmx_preemption_timer_value;
	u32 padding32[7];
	u16 virtual_processor_id;
	u16 posted_intr_nv;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 guest_intr_status;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
};
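/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content
 * or layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id,
 * and VMPTRLD verifies that the VMCS region that L1 is loading contains it.
 */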
#define VMCS12_REVISION 0x11e57ed0
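/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
 * the current implementation, a full 4K is reserved to avoid future
 * complications.
 */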
#define VMCS12_SIZE 0x1000
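/* Used to remember the last vmcs02 used for some recently used vmcs12s */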
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;
	struct loaded_vmcs vmcs02;
};
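/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we
 * need for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */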
struct nested_vmx {
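	/* Has the level-1 (L1) guest done vmxon? */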
	bool vmxon;
	gpa_t vmxon_ptr;

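	/* The guest-physical address of the current VMCS L1 keeps for L2 */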
	gpa_t current_vmptr;

	struct page *current_vmcs12_page;
	struct vmcs12 *current_vmcs12;
	struct vmcs *current_shadow_vmcs;
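	/*
	 * Indicates if the shadow vmcs must be updated with the data held
	 * by vmcs12.
	 */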
	bool sync_shadow_vmcs;

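	/* vmcs02_list cache of VMCSs recently used to run L2 guests */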
	struct list_head vmcs02_pool;
	int vmcs02_num;
	u64 vmcs01_tsc_offset;
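	/* L2 must run next, and mustn't decide to exit to L1. */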
	bool nested_run_pending;
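	/*
	 * Guest pages referred to in vmcs02 with host-physical pointers, so
	 * we must keep them pinned while L2 runs.
	 */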
	struct page *apic_access_page;
	struct page *virtual_apic_page;
	struct page *pi_desc_page;
	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;
	u64 msr_ia32_feature_control;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

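	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */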
	u64 vmcs01_debugctl;

	u32 nested_vmx_procbased_ctls_low;
	u32 nested_vmx_procbased_ctls_high;
	u32 nested_vmx_true_procbased_ctls_low;
	u32 nested_vmx_secondary_ctls_low;
	u32 nested_vmx_secondary_ctls_high;
	u32 nested_vmx_pinbased_ctls_low;
	u32 nested_vmx_pinbased_ctls_high;
	u32 nested_vmx_exit_ctls_low;
	u32 nested_vmx_exit_ctls_high;
	u32 nested_vmx_true_exit_ctls_low;
	u32 nested_vmx_entry_ctls_low;
	u32 nested_vmx_entry_ctls_high;
	u32 nested_vmx_true_entry_ctls_low;
	u32 nested_vmx_misc_low;
	u32 nested_vmx_misc_high;
	u32 nested_vmx_ept_caps;
};
#define POSTED_INTR_ON 0
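/* Posted-Interrupt Descriptor */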
struct pi_desc {
	u32 pir[8];
	u32 control;
	u32 rsvd[7];
} __aligned(64);

static bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	unsigned long host_rsp;
	u8 fail;
	bool nmi_known_unmasked;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;
	struct shared_msr_entry *guest_msrs;
	int nmsrs;
	int save_nmsrs;
	unsigned long host_idt_base;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif
	u32 vm_entry_controls_shadow;
	u32 vm_exit_controls_shadow;
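	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */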
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;
	bool __launched;
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
		u16 ds_sel, es_sel;
#endif
		int gs_ldt_reload_needed;
		int fs_reload_needed;
		u64 msr_host_bndcfgs;
		unsigned long vmcs_host_cr4;
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;
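	/* Support for vnmi-less CPUs */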
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;

	bool rdtscp_enabled;
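	/* Posted interrupt descriptor for this vcpu */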
	struct pi_desc pi_desc;
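	/* Support for a guest hypervisor (nested VMX) */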
	struct nested_vmx nested;
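	/* Dynamic PLE window. */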
	int ple_window;
	bool ple_window_dirty;
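	/* Support for PML (Page Modification Logging) */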
#define PML_ENTITY_NUM 512
	struct page *pml_pg;
};
enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name) [number] = VMCS12_OFFSET(name)
#define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4

static unsigned long shadow_read_only_fields[] = {
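	/*
	 * We do NOT shadow fields that are modified when L0 traps and
	 * emulates any vmx instruction (e.g. VMPTRLD, VMXON...) executed by
	 * L1. For example, VM_INSTRUCTION_ERROR is read by L1 if a vmx
	 * instruction fails (part of the error path). The code assumes this
	 * logic: if for some reason we start shadowing these fields, we must
	 * also force a shadow sync whenever L0 emulates a vmx instruction
	 * that modifies them.
	 */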
	VM_EXIT_REASON,
	VM_EXIT_INTR_INFO,
	VM_EXIT_INSTRUCTION_LEN,
	IDT_VECTORING_INFO_FIELD,
	IDT_VECTORING_ERROR_CODE,
	VM_EXIT_INTR_ERROR_CODE,
	EXIT_QUALIFICATION,
	GUEST_LINEAR_ADDRESS,
	GUEST_PHYSICAL_ADDRESS
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);
static unsigned long shadow_read_write_fields[] = {
	TPR_THRESHOLD,
	GUEST_RIP,
	GUEST_RSP,
	GUEST_CR0,
	GUEST_CR3,
	GUEST_CR4,
	GUEST_INTERRUPTIBILITY_INFO,
	GUEST_RFLAGS,
	GUEST_CS_SELECTOR,
	GUEST_CS_AR_BYTES,
	GUEST_CS_LIMIT,
	GUEST_CS_BASE,
	GUEST_ES_BASE,
	GUEST_BNDCFGS,
	CR0_GUEST_HOST_MASK,
	CR0_READ_SHADOW,
	CR4_READ_SHADOW,
	TSC_OFFSET,
	EXCEPTION_BITMAP,
	CPU_BASED_VM_EXEC_CONTROL,
	VM_ENTRY_EXCEPTION_ERROR_CODE,
	VM_ENTRY_INTR_INFO_FIELD,
	VM_ENTRY_INSTRUCTION_LEN,
	HOST_FS_BASE,
	HOST_GS_BASE,
	HOST_FS_SELECTOR,
	HOST_GS_SELECTOR
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);
static const unsigned short vmcs_field_to_offset_table[] = {
	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
	FIELD(POSTED_INTR_NV, posted_intr_nv),
	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
	FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
	FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
	FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
	FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
	FIELD(GUEST_INTR_STATUS, guest_intr_status),
	FIELD(HOST_ES_SELECTOR, host_es_selector),
	FIELD(HOST_CS_SELECTOR, host_cs_selector),
	FIELD(HOST_SS_SELECTOR, host_ss_selector),
	FIELD(HOST_DS_SELECTOR, host_ds_selector),
	FIELD(HOST_FS_SELECTOR, host_fs_selector),
	FIELD(HOST_GS_SELECTOR, host_gs_selector),
	FIELD(HOST_TR_SELECTOR, host_tr_selector),
	FIELD64(IO_BITMAP_A, io_bitmap_a),
	FIELD64(IO_BITMAP_B, io_bitmap_b),
	FIELD64(MSR_BITMAP, msr_bitmap),
	FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
	FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
	FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
	FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
	FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
	FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
	FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
	FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP, exception_bitmap),
	FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
	FIELD(CR3_TARGET_COUNT, cr3_target_count),
	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
	FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
	FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
	FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
	FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
	FIELD(TPR_THRESHOLD, tpr_threshold),
	FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
	FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
	FIELD(VM_EXIT_REASON, vm_exit_reason),
	FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
	FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
	FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
	FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
	FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
	FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
	FIELD(GUEST_ES_LIMIT, guest_es_limit),
	FIELD(GUEST_CS_LIMIT, guest_cs_limit),
	FIELD(GUEST_SS_LIMIT, guest_ss_limit),
	FIELD(GUEST_DS_LIMIT, guest_ds_limit),
	FIELD(GUEST_FS_LIMIT, guest_fs_limit),
	FIELD(GUEST_GS_LIMIT, guest_gs_limit),
	FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
	FIELD(GUEST_TR_LIMIT, guest_tr_limit),
	FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
	FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
	FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
	FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
	FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
	FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
	FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
	FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
	FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
	FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
	FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
	FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
	FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
	FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
	FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
	FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
	FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
	FIELD(CR0_READ_SHADOW, cr0_read_shadow),
	FIELD(CR4_READ_SHADOW, cr4_read_shadow),
	FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
	FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
	FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
	FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
	FIELD(EXIT_QUALIFICATION, exit_qualification),
	FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
	FIELD(GUEST_CR0, guest_cr0),
	FIELD(GUEST_CR3, guest_cr3),
	FIELD(GUEST_CR4, guest_cr4),
	FIELD(GUEST_ES_BASE, guest_es_base),
	FIELD(GUEST_CS_BASE, guest_cs_base),
	FIELD(GUEST_SS_BASE, guest_ss_base),
	FIELD(GUEST_DS_BASE, guest_ds_base),
	FIELD(GUEST_FS_BASE, guest_fs_base),
	FIELD(GUEST_GS_BASE, guest_gs_base),
	FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
	FIELD(GUEST_TR_BASE, guest_tr_base),
	FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
	FIELD(GUEST_IDTR_BASE, guest_idtr_base),
	FIELD(GUEST_DR7, guest_dr7),
	FIELD(GUEST_RSP, guest_rsp),
	FIELD(GUEST_RIP, guest_rip),
	FIELD(GUEST_RFLAGS, guest_rflags),
	FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
	FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
	FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
	FIELD(HOST_CR0, host_cr0),
	FIELD(HOST_CR3, host_cr3),
	FIELD(HOST_CR4, host_cr4),
	FIELD(HOST_FS_BASE, host_fs_base),
	FIELD(HOST_GS_BASE, host_gs_base),
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};
static inline short vmcs_field_to_offset(unsigned long field)
{
	BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);

	if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
	    vmcs_field_to_offset_table[field] == 0)
		return -ENOENT;

	return vmcs_field_to_offset_table[field];
}

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}

static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT);

	if (is_error_page(page))
		return NULL;

	return page;
}

static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

static void nested_release_page_clean(struct page *page)
{
	kvm_release_page_clean(page);
}
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static bool vmx_mpx_supported(void);
static bool vmx_xsaves_supported(void);
static int vmx_vm_has_apicv(struct kvm *kvm);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static int alloc_identity_pagetable(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
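/*
 * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is
 * needed when a CPU is brought down and we must VMCLEAR all VMCSs loaded
 * on it.
 */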
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
static unsigned long *vmx_msr_bitmap_nested;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;

static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)				\
	[VCPU_SREG_##seg] = {				\
		.selector = GUEST_##seg##_SELECTOR,	\
		.base = GUEST_##seg##_BASE,		\
		.limit = GUEST_##seg##_LIMIT,		\
		.ar_bytes = GUEST_##seg##_AR_BYTES,	\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
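/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */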
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};

static inline bool is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
	return cpu_has_vmx_tpr_shadow() && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
}

static inline bool cpu_has_vmx_apic_register_virt(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_APIC_REGISTER_VIRT;
}

static inline bool cpu_has_vmx_virtual_intr_delivery(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}

static inline bool cpu_has_vmx_posted_intr(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}

static inline bool cpu_has_vmx_apicv(void)
{
	return cpu_has_vmx_apic_register_virt() &&
		cpu_has_vmx_virtual_intr_delivery() &&
		cpu_has_vmx_posted_intr();
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_ept_ad_bits(void)
{
	return vmx_capability.ept & VMX_EPT_AD_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_vmx_invpcid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_INVPCID;
}

static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool cpu_has_vmx_shadow_vmcs(void)
{
	u64 vmx_msr;

	rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
	if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
		return false;

	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool cpu_has_vmx_pml(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES) &&
		vmx_xsaves_supported();
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool is_exception(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}

static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
			      u32 exit_intr_info,
			      unsigned long exit_qualification);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12,
				     u32 reason, unsigned long qualification);

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
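		/* CF==1 or ZF==1 --> rc = -1 */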
1208 "; ja 1f ; ud2 ; 1:"
1209 : : "a"(&operand), "c"(ext) : "cc", "memory");
1210}
1211
1212static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1213{
1214 struct {
1215 u64 eptp, gpa;
1216 } operand = {eptp, gpa};
1217
1218 asm volatile (__ex(ASM_VMX_INVEPT)
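		/* CF==1 or ZF==1 --> rc = -1 */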
1220 "; ja 1f ; ud2 ; 1:\n"
1221 : : "a" (&operand), "c" (ext) : "cc", "memory");
1222}
1223
1224static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1225{
1226 int i;
1227
1228 i = __find_msr_index(vmx, msr);
1229 if (i >= 0)
1230 return &vmx->guest_msrs[i];
1231 return NULL;
1232}
1233
1234static void vmcs_clear(struct vmcs *vmcs)
1235{
1236 u64 phys_addr = __pa(vmcs);
1237 u8 error;
1238
1239 asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1240 : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1241 : "cc", "memory");
1242 if (error)
1243 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1244 vmcs, phys_addr);
1245}
1246
1247static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1248{
1249 vmcs_clear(loaded_vmcs->vmcs);
1250 loaded_vmcs->cpu = -1;
1251 loaded_vmcs->launched = 0;
1252}
1253
1254static void vmcs_load(struct vmcs *vmcs)
1255{
1256 u64 phys_addr = __pa(vmcs);
1257 u8 error;
1258
1259 asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1260 : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1261 : "cc", "memory");
1262 if (error)
1263 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1264 vmcs, phys_addr);
1265}
1266
#ifdef CONFIG_KEXEC_CORE
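/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on all cpus. All disabled by
 * default.
 */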
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return;
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
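	/*
	 * Make sure the list deletion above is globally visible before
	 * loaded_vmcs_init() below marks the VMCS unloaded (cpu == -1);
	 * otherwise another cpu could observe cpu == -1 and re-add the
	 * VMCS to its own per-cpu list before it is deleted from this
	 * one. Pairs with the smp_rmb() in vmx_vcpu_load().
	 */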
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(struct vcpu_vmx *vmx)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vmx);
	else
		vpid_sync_vcpu_global();
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		      : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_ENTRY_CONTROLS, val);
	vmx->vm_entry_controls_shadow = val;
}

static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_entry_controls_shadow != val)
		vm_entry_controls_init(vmx, val);
}

static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_entry_controls_shadow;
}

static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
}

static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
}

static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_EXIT_CONTROLS, val);
	vmx->vm_exit_controls_shadow = val;
}

static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_exit_controls_shadow != val)
		vm_exit_controls_init(vmx, val);
}

static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_exit_controls_shadow;
}

static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
}

static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
}

static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
		vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << NM_VECTOR) | (1u << DB_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR);
	if (vcpu->fpu_active)
		eb &= ~(1u << NM_VECTOR);
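	/*
	 * When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and
	 * pass the same page-fault error-code values to L1, whether or not
	 * L0 wants to handle them itself (it can have L1 inject them if it
	 * wants).
	 */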
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;

	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit)
{
	vm_entry_controls_clearbit(vmx, entry);
	vm_exit_controls_clearbit(vmx, exit);
}

static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr)
		return;
	--m->nr;
	m->guest[i] = m->guest[m->nr];
	m->host[i] = m->host[m->nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}

static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit,
		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
		u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	vmcs_write64(host_val_vmcs, host_val);
	vm_entry_controls_setbit(vmx, entry);
	vm_exit_controls_setbit(vmx, exit);
}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	} else if (i == m->nr) {
		++m->nr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
	}

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}

static void reload_tss(void)
{
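	/*
	 * VT restores TR but not its size. Useful when kernel and TSS use
	 * different bases.
	 */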
	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
	struct desc_struct *descs;

	descs = (void *)gdt->address;
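	/* set type to "available TSS" (9) so TR can be reloaded */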
	descs[GDT_ENTRY_TSS].type = 9;
	load_TR_desc();
}

static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer;
	u64 ignore_bits;

	guest_efer = vmx->vcpu.arch.efer;
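	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode.
	 */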
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
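	/* SCE is meaningful only in long mode on Intel */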
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	vmx->guest_msrs[efer_offset].data = guest_efer;
	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

	clear_atomic_switch_msr(vmx, MSR_EFER);
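	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */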
	if (cpu_has_load_ia32_efer ||
	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
		guest_efer = vmx->vcpu.arch.efer;
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		if (guest_efer != host_efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
					      guest_efer, host_efer);
		return false;
	}

	return true;
}

static unsigned long segment_base(u16 selector)
{
	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (!(selector & ~3))
		return 0;

	table_base = gdt->address;

	if (selector & 4) {
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~3))
			return 0;

		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = get_desc_base(d);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;

	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
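	/*
	 * Set host fs and gs selectors. Unfortunately, the VMCS host-state
	 * checks do not allow segment selectors with cpl > 0 or ti == 1.
	 */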
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	savesegment(fs, vmx->host_state.fs_sel);
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	savesegment(gs, vmx->host_state.gs_sel);
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	savesegment(ds, vmx->host_state.ds_sel);
	savesegment(es, vmx->host_state.es_sel);
#endif

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (boot_cpu_has(X86_FEATURE_MPX))
		rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data,
				   vmx->guest_msrs[i].mask);
}

static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
	if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
		loadsegment(ds, vmx->host_state.ds_sel);
		loadsegment(es, vmx->host_state.es_sel);
	}
#endif
	reload_tss();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	if (vmx->host_state.msr_host_bndcfgs)
		wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
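	/*
	 * If the FPU is not active (through the host task or
	 * the guest vcpu), then restore the cr0.TS bit.
	 */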
	if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
		stts();
	load_gdt(this_cpu_ptr(&host_gdt));
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}
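/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * the vcpu mutex is already taken.
 */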
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));

	if (!vmm_exclusive)
		kvm_cpu_vmxon(phys_addr);
	else if (vmx->loaded_vmcs->cpu != cpu)
		loaded_vmcs_clear(vmx->loaded_vmcs);

	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}

	if (vmx->loaded_vmcs->cpu != cpu) {
		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		local_irq_disable();
		crash_disable_local_vmclear(cpu);
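		/*
		 * Read loaded_vmcs->cpu before adding the VMCS to this
		 * cpu's loaded_vmcss_on_cpu list; pairs with the smp_wmb()
		 * in __loaded_vmcs_clear().
		 */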
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		crash_enable_local_vmclear(cpu);
		local_irq_enable();
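		/*
		 * Linux uses per-cpu TSS and GDT, so set these when
		 * switching processors.
		 */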
		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base());
		vmcs_writel(HOST_GDTR_BASE, gdt->address);

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp);
		vmx->loaded_vmcs->cpu = cpu;
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	__vmx_load_host_state(to_vmx(vcpu));
	if (!vmm_exclusive) {
		__loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
		vcpu->cpu = -1;
		kvm_cpu_vmxoff();
	}
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	ulong cr0;

	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	cr0 = vmcs_readl(GUEST_CR0);
	cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
	cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
	vmcs_writel(GUEST_CR0, cr0);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	if (is_guest_mode(vcpu))
		vcpu->arch.cr0_guest_owned_bits &=
			~get_vmcs12(vcpu)->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
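/*
 * Return the cr0/cr4 value that a nested guest would read. This is a
 * combination of the real register used to run the guest (guest_cr0 or
 * guest_cr4) and the bits shadowed by the L1 hypervisor (the read shadow),
 * selected by the guest/host mask.
 */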
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}

static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
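	/*
	 * Note that vcpu->fpu_active is not cleared here; the caller does
	 * that. We only make CR0.TS/MP trap again and stop owning CR0.TS.
	 */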
	vmx_decache_cr0_guest_bits(vcpu);
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = 0;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
	if (is_guest_mode(vcpu)) {
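		/*
		 * L1's specified read shadow might not contain the TS bit,
		 * so now that we turned on shadowing of this bit, we need
		 * to set this bit of the shadow; guest_cr0 must also
		 * contain the TS bit.
		 */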
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
			(vcpu->arch.cr0 & X86_CR0_TS);
		vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
	} else
		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags, save_rflags;

	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (to_vmx(vcpu)->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		to_vmx(vcpu)->rflags = rflags;
	}
	return to_vmx(vcpu)->rflags;
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
	to_vmx(vcpu)->rflags = rflags;
	if (to_vmx(vcpu)->rmode.vm86_active) {
		to_vmx(vcpu)->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret;
}

static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if (interruptibility != interruptibility_old)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);
2088
2089
2090 vmx_set_interrupt_shadow(vcpu, 0);
2091}
2092
2093
2094
2095
2096
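/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */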
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (!(vmcs12->exception_bitmap & (1u << nr)))
		return 0;

	nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
			  vmcs_read32(VM_EXIT_INTR_INFO),
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (!reinject && is_guest_mode(vcpu) &&
	    nested_vmx_check_exception(vcpu, nr))
		return;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}

static bool vmx_rdtscp_supported(void)
{
	return cpu_has_vmx_rdtscp();
}

static bool vmx_invpcid_supported(void)
{
	return cpu_has_vmx_invpcid() && enable_ept;
}

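/*
 * Swap MSR entry in host/guest MSR entry array.
 */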
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct shared_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
}

static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
{
	unsigned long *msr_bitmap;

	if (is_guest_mode(vcpu))
		msr_bitmap = vmx_msr_bitmap_nested;
	else if (vcpu->arch.apic_base & X2APIC_ENABLE) {
		if (is_long_mode(vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
		else
			msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
	} else {
		if (is_long_mode(vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode;
		else
			msr_bitmap = vmx_msr_bitmap_legacy;
	}

	vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
}

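/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */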
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs, index;

	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_TSC_AUX);
		if (index >= 0 && vmx->rdtscp_enabled)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	index = __find_msr_index(vmx, MSR_EFER);
	if (index >= 0 && update_transition_efer(vmx, index))
		move_msr_up(vmx, index, save_nmsrs++);

	vmx->save_nmsrs = save_nmsrs;

	if (cpu_has_vmx_msr_bitmap())
		vmx_set_msr_bitmap(&vmx->vcpu);
}

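/*
 * Reads and returns the guest's view of the timestamp counter:
 *
 *	guest_tsc = host_tsc + TSC_OFFSET
 *
 * where TSC_OFFSET is the offset field of the current VMCS.
 */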
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	host_tsc = rdtsc();
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

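/*
 * Like guest_read_tsc, but always returns L1's notion of the timestamp
 * counter, even if a nested guest (L2) is currently running.
 */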
static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	u64 tsc_offset;

	tsc_offset = is_guest_mode(vcpu) ?
		to_vmx(vcpu)->nested.vmcs01_tsc_offset :
		vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

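/*
 * Engage any workarounds for mis-matched TSC rates.  Currently limited to
 * software catchup for faster rates on slower CPUs.
 */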
static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	if (!scale)
		return;

	if (user_tsc_khz > tsc_khz) {
		vcpu->arch.tsc_catchup = 1;
		vcpu->arch.tsc_always_catchup = 1;
	} else
		WARN(1, "user requested TSC rate below hardware speed\n");
}

static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
{
	return vmcs_read64(TSC_OFFSET);
}

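/* Writes 'offset' into the guest's timestamp counter offset register. */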
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	if (is_guest_mode(vcpu)) {
		/*
		 * We're here if L1 chose not to trap WRMSR to TSC. According
		 * to the spec, this should set L1's TSC; the offset that L1
		 * set for L2 remains unchanged, and still needs to be added
		 * to the newly set TSC to get L2's TSC.
		 */
		struct vmcs12 *vmcs12;
		to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
		/* recalculate vmcs02.TSC_OFFSET: */
		vmcs12 = get_vmcs12(vcpu);
		vmcs_write64(TSC_OFFSET, offset +
			(nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
			 vmcs12->tsc_offset : 0));
	} else {
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   vmcs_read64(TSC_OFFSET), offset);
		vmcs_write64(TSC_OFFSET, offset);
	}
}

static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{
	u64 offset = vmcs_read64(TSC_OFFSET);

	vmcs_write64(TSC_OFFSET, offset + adjustment);
	if (is_guest_mode(vcpu)) {
		/* Even when running L2, the adjustment needs to apply to L1 */
		to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
					   offset + adjustment);
}

static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	return target_tsc - rdtsc();
}

static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);

	return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
}

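/*
 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
 * all guests if the "nested" module option is off, and can also be disabled
 * for a single guest if its CPUID doesn't have the VMX feature bit set.
 */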
static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
{
	return nested && guest_cpuid_has_vmx(vcpu);
}

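/*
 * Set up the VMX capability MSRs that we report to a nested (L1)
 * hypervisor.  As a rule, the "high" halves (bits that may be 1) are the
 * intersection of what the hardware supports and what this code knows how
 * to handle for L2, and the "low" halves (bits that must be 1) are kept as
 * small as possible so that L1 may turn features off.
 */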
static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
{
	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
	      vmx->nested.nested_vmx_pinbased_ctls_low,
	      vmx->nested.nested_vmx_pinbased_ctls_high);
	vmx->nested.nested_vmx_pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS;
	vmx->nested.nested_vmx_pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;
	if (vmx_vm_has_apicv(vmx->vcpu.kvm))
		vmx->nested.nested_vmx_pinbased_ctls_high |=
			PIN_BASED_POSTED_INTR;

	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
	      vmx->nested.nested_vmx_exit_ctls_low,
	      vmx->nested.nested_vmx_exit_ctls_high);
	vmx->nested.nested_vmx_exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	vmx->nested.nested_vmx_exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
	vmx->nested.nested_vmx_exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	if (vmx_mpx_supported())
		vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;

	/* We support free control of debug control saving. */
	vmx->nested.nested_vmx_true_exit_ctls_low =
		vmx->nested.nested_vmx_exit_ctls_low &
		~VM_EXIT_SAVE_DEBUG_CONTROLS;

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
	      vmx->nested.nested_vmx_entry_ctls_low,
	      vmx->nested.nested_vmx_entry_ctls_high);
	vmx->nested.nested_vmx_entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT;
	vmx->nested.nested_vmx_entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
	if (vmx_mpx_supported())
		vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;

	/* We support free control of debug control loading. */
	vmx->nested.nested_vmx_true_entry_ctls_low =
		vmx->nested.nested_vmx_entry_ctls_low &
		~VM_ENTRY_LOAD_DEBUG_CONTROLS;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
	      vmx->nested.nested_vmx_procbased_ctls_low,
	      vmx->nested.nested_vmx_procbased_ctls_high);
	vmx->nested.nested_vmx_procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_procbased_ctls_high &=
		CPU_BASED_VIRTUAL_INTR_PENDING |
		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	vmx->nested.nested_vmx_procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	vmx->nested.nested_vmx_true_procbased_ctls_low =
		vmx->nested.nested_vmx_procbased_ctls_low &
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);

	/* secondary cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
	      vmx->nested.nested_vmx_secondary_ctls_low,
	      vmx->nested.nested_vmx_secondary_ctls_high);
	vmx->nested.nested_vmx_secondary_ctls_low = 0;
	vmx->nested.nested_vmx_secondary_ctls_high &=
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		SECONDARY_EXEC_RDTSCP |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_WBINVD_EXITING |
		SECONDARY_EXEC_XSAVES;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		vmx->nested.nested_vmx_secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_INVEPT_BIT;
		vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
		/*
		 * For nested guests, we don't do anything specific
		 * for single context invalidation. Hence, only advertise
		 * support for global context invalidation.
		 */
		vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
	} else
		vmx->nested.nested_vmx_ept_caps = 0;

	if (enable_unrestricted_guest)
		vmx->nested.nested_vmx_secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
	      vmx->nested.nested_vmx_misc_low,
	      vmx->nested.nested_vmx_misc_high);
	vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
	vmx->nested.nested_vmx_misc_low |=
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT;
	vmx->nested.nested_vmx_misc_high = 0;
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	/*
	 * Bits 0 in high must be 0, and bits 1 in low must be 1.
	 */
	return ((control & high) | low) == control;
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

/* Returns the value of a nested VMX capability MSR as seen by an L1 guest. */
static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		/*
		 * This MSR reports some information about VMX support. We
		 * should return information about the VMX we emulate for the
		 * guest, and the VMCS structure we give it - not about the
		 * VMX support of the underlying hardware.
		 */
		*pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
			   ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
			   (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_pinbased_ctls_low,
			vmx->nested.nested_vmx_pinbased_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_true_procbased_ctls_low,
			vmx->nested.nested_vmx_procbased_ctls_high);
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_procbased_ctls_low,
			vmx->nested.nested_vmx_procbased_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_true_exit_ctls_low,
			vmx->nested.nested_vmx_exit_ctls_high);
		break;
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_exit_ctls_low,
			vmx->nested.nested_vmx_exit_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_true_entry_ctls_low,
			vmx->nested.nested_vmx_entry_ctls_high);
		break;
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_entry_ctls_low,
			vmx->nested.nested_vmx_entry_ctls_high);
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_misc_low,
			vmx->nested.nested_vmx_misc_high);
		break;
	/*
	 * These MSRs specify bits which the guest must keep fixed (on or off)
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = VMXON_CR0_ALWAYSON;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = -1ULL;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = VMXON_CR4_ALWAYSON;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = -1ULL;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_secondary_ctls_low,
			vmx->nested.nested_vmx_secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		/* Currently, no nested vpid support */
		*pdata = vmx->nested.nested_vmx_ept_caps;
		break;
	default:
		return 1;
	}

	return 0;
}

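/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */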
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct shared_msr_entry *msr;

	switch (msr_info->index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		msr_info->data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		msr_info->data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(to_vmx(vcpu));
		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_info);
	case MSR_IA32_TSC:
		msr_info->data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
		if (!vmx_mpx_supported())
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
	case MSR_IA32_FEATURE_CONTROL:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		msr_info->data = vcpu->arch.ia32_xss;
		break;
	case MSR_TSC_AUX:
		if (!to_vmx(vcpu)->rdtscp_enabled)
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}

	return 0;
}

static void vmx_leave_nested(struct kvm_vcpu *vcpu);

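/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */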
static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;
	int ret = 0;
	u32 msr_index = msr_info->index;
	u64 data = msr_info->data;

	switch (msr_index) {
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(vmx);
		vmx->msr_guest_kernel_gs_base = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_BNDCFGS:
		if (!vmx_mpx_supported())
			return 1;
		vmcs_write64(GUEST_BNDCFGS, data);
		break;
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr_info);
		break;
	case MSR_IA32_CR_PAT:
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
				return 1;
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_TSC_ADJUST:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_FEATURE_CONTROL:
		if (!nested_vmx_allowed(vcpu) ||
		    (to_vmx(vcpu)->nested.msr_ia32_feature_control &
		     FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
			return 1;
		vmx->nested.msr_ia32_feature_control = data;
		if (msr_info->host_initiated && data == 0)
			vmx_leave_nested(vcpu);
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		return 1; /* they are read-only */
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		/*
		 * The only supported bit as of Skylake is bit 8, but
		 * it is not supported on KVM.
		 */
		if (data != 0)
			return 1;
		vcpu->arch.ia32_xss = data;
		if (vcpu->arch.ia32_xss != host_xss)
			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
				vcpu->arch.ia32_xss, host_xss);
		else
			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
		break;
	case MSR_TSC_AUX:
		if (!vmx->rdtscp_enabled)
			return 1;
		/* Check reserved bit, higher 32 bits should be zero */
		if ((data >> 32) != 0)
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			u64 old_msr_data = msr->data;
			msr->data = data;
			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
				preempt_disable();
				ret = kvm_set_shared_msr(msr->index, msr->data,
							 msr->mask);
				preempt_enable();
				if (ret)
					msr->data = old_msr_data;
			}
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
	}

	return ret;
}

static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	default:
		break;
	}
}

static __init int cpu_has_kvm_support(void)
{
	return cpu_has_vmx();
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	if (msr & FEATURE_CONTROL_LOCKED) {
		/* launched w/ TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& tboot_enabled())
			return 1;
		/* launched w/o TXT and VMX only enabled w/ TXT */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& !tboot_enabled()) {
			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
				"activate TXT before enabling KVM\n");
			return 1;
		}
		/* launched w/o TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& !tboot_enabled())
			return 1;
	}

	return 0;
}

static void kvm_cpu_vmxon(u64 addr)
{
	asm volatile (ASM_VMX_VMXON_RAX
			: : "a"(&addr), "m"(addr)
			: "memory", "cc");
}

static int hardware_enable(void)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old, test_bits;

	if (cr4_read_shadow() & X86_CR4_VMXE)
		return -EBUSY;

	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));

	/*
	 * Now we can enable the vmclear operation in kdump
	 * since the loaded_vmcss_on_cpu list on this cpu
	 * has been initialized.
	 *
	 * Though the cpu is not in VMX operation now, there
	 * is no problem to enable the vmclear operation
	 * for the loaded_vmcss_on_cpu list is empty!
	 */
	crash_enable_local_vmclear(cpu);

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);

	test_bits = FEATURE_CONTROL_LOCKED;
	test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	if (tboot_enabled())
		test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;

	if ((old & test_bits) != test_bits) {
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
	}
	cr4_set_bits(X86_CR4_VMXE);

	if (vmm_exclusive) {
		kvm_cpu_vmxon(phys_addr);
		ept_sync_global();
	}

	native_store_gdt(this_cpu_ptr(&host_gdt));

	return 0;
}

static void vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v, *n;

	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
				 loaded_vmcss_on_cpu_link)
		__loaded_vmcs_clear(v);
}

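/*
 * Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */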
static void kvm_cpu_vmxoff(void)
{
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
}

static void hardware_disable(void)
{
	if (vmm_exclusive) {
		vmclear_local_loaded_vmcss();
		kvm_cpu_vmxoff();
	}
	cr4_clear_bits(X86_CR4_VMXE);
}

static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

static __init bool allow_1_setting(u32 msr, u32 ctl)
{
	u32 vmx_msr_low, vmx_msr_high;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);
	return vmx_msr_high & ctl;
}

static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING |
	      CPU_BASED_MWAIT_EXITING |
	      CPU_BASED_MONITOR_EXITING |
	      CPU_BASED_INVLPG_EXITING |
	      CPU_BASED_RDPMC_EXITING;

	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if (_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_UNRESTRICTED_GUEST |
			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
			SECONDARY_EXEC_RDTSCP |
			SECONDARY_EXEC_ENABLE_INVPCID |
			SECONDARY_EXEC_APIC_REGISTER_VIRT |
			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
			SECONDARY_EXEC_SHADOW_VMCS |
			SECONDARY_EXEC_XSAVES |
			SECONDARY_EXEC_ENABLE_PML;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_2nd_exec_control &= ~(
				SECONDARY_EXEC_APIC_REGISTER_VIRT |
				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);

	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when
		   EPT is enabled */
		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
					     CPU_BASED_CR3_STORE_EXITING |
					     CPU_BASED_INVLPG_EXITING);
		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
		      vmx_capability.ept, vmx_capability.vpid);
	}

	min = VM_EXIT_SAVE_DEBUG_CONTROLS;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
		VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	if (!(_cpu_based_2nd_exec_control &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) ||
		!(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;

	min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
	opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_conf->size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	cpu_has_load_ia32_efer =
		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
				VM_ENTRY_LOAD_IA32_EFER)
		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
				   VM_EXIT_LOAD_IA32_EFER);

	cpu_has_load_perf_global_ctrl =
		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
				   VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);

	/*
	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
	 * but due to the errata below it can't be used. The workaround is
	 * to use the msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
	 *
	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
	 *
	 * AAK155             (model 26)
	 * AAP115             (model 30)
	 * AAT100             (model 37)
	 * BC86,AAY89,BD102   (model 44)
	 * BA97               (model 46)
	 */
	if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
		switch (boot_cpu_data.x86_model) {
		case 26:
		case 30:
		case 37:
		case 44:
		case 46:
			cpu_has_load_perf_global_ctrl = false;
			printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
					"does not work properly. Using workaround\n");
			break;
		default:
			break;
		}
	}

	if (cpu_has_xsaves)
		rdmsrl(MSR_IA32_XSS, host_xss);

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

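/*
 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last
 * loaded.
 */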
static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	if (!loaded_vmcs->vmcs)
		return;
	loaded_vmcs_clear(loaded_vmcs);
	free_vmcs(loaded_vmcs->vmcs);
	loaded_vmcs->vmcs = NULL;
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	/* No checks for read only fields yet */

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		switch (shadow_read_write_fields[i]) {
		case GUEST_BNDCFGS:
			if (!vmx_mpx_supported())
				continue;
			break;
		default:
			break;
		}

		if (j < i)
			shadow_read_write_fields[j] =
				shadow_read_write_fields[i];
		j++;
	}
	max_shadow_read_write_fields = j;

	/* shadowed fields guest access without vmexit */
	for (i = 0; i < max_shadow_read_write_fields; i++) {
		clear_bit(shadow_read_write_fields[i],
			  vmx_vmwrite_bitmap);
		clear_bit(shadow_read_write_fields[i],
			  vmx_vmread_bitmap);
	}
	for (i = 0; i < max_shadow_read_only_fields; i++)
		clear_bit(shadow_read_only_fields[i],
			  vmx_vmread_bitmap);
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static bool emulation_required(struct kvm_vcpu *vcpu)
{
	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
}

static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
			  struct kvm_segment *save)
{
	if (!emulate_invalid_guest_state) {
		/*
		 * CS and SS RPL should be equal during guest entry according
		 * to VMX spec, but in reality it is not always so. Since vcpu
		 * is in the middle of the transition from real mode to
		 * protected mode it is safe to assume that RPL 0 is a good
		 * default value.
		 */
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
			save->selector &= ~SEGMENT_RPL_MASK;
		save->dpl = save->selector & SEGMENT_RPL_MASK;
		save->s = 1;
	}
	vmx_set_segment(vcpu, save, seg);
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Update the real mode segment cache. It may be not up-to-date if a
	 * segment register was written while the vcpu was in guest mode.
	 */
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);

	vmx->rmode.vm86_active = 0;

	vmx_segment_cache_clear(vmx);

	vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
}

static void fix_rmode_seg(int seg, struct kvm_segment *save)
{
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	struct kvm_segment var = *save;

	var.dpl = 0x3;
	if (seg == VCPU_SREG_CS)
		var.type = 0x3;

	if (!emulate_invalid_guest_state) {
		var.selector = var.base >> 4;
		var.base = var.base & 0xffff0;
		var.limit = 0xffff;
		var.g = 0;
		var.db = 0;
		var.present = 1;
		var.s = 1;
		var.l = 0;
		var.unusable = 0;
		var.type = 0x3;
		var.avl = 0;
		if (save->base & 0xf)
			printk_once(KERN_WARNING "kvm: segment base is not "
					"paragraph aligned when entering "
					"protected mode (seg=%d)", seg);
	}

	vmcs_write16(sf->selector, var.selector);
	vmcs_write32(sf->base, var.base);
	vmcs_write32(sf->limit, var.limit);
	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);

	vmx->rmode.vm86_active = 1;

	/*
	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
	 * the vcpu. Warn the user that an update is overdue.
	 */
	if (!vcpu->kvm->arch.tss_addr)
		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
			     "called before entering vcpu\n");

	vmx_segment_cache_clear(vmx);

	vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vmx->rmode.save_rflags = flags;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);

	kvm_mmu_reset_context(vcpu);
}

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	if (!msr)
		return;

	/*
	 * Force kernel_gs_base reloading before EFER changes, as control
	 * of this msr depends on is_long_mode().
	 */
	vmx_load_host_state(to_vmx(vcpu));
	vcpu->arch.efer = efer;
	if (efer & EFER_LMA) {
		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
		msr->data = efer;
	} else {
		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vmx);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	vmx_segment_cache_clear(to_vmx(vcpu));

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
		pr_debug_ratelimited("%s: tss fixup for long mode.\n",
				     __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
			     | VMX_AR_TYPE_BUSY_64_TSS);
	}
	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}

#endif

static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	vpid_sync_context(to_vmx(vcpu));
	if (enable_ept) {
		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
	}
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;

	vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
}

static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
{
	if (enable_ept && is_paging(vcpu))
		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
}

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;

	vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
}

static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_dirty))
		return;

	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
	}
}

static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
		mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
		mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
		mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
	}

	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
}

static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);

static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
					unsigned long cr0,
					struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		vmx_decache_cr3(vcpu);
	if (!(cr0 & X86_CR0_PG)) {
		/* From paging/starting to nonpaging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
			     (CPU_BASED_CR3_LOAD_EXITING |
			      CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
	} else if (!is_paging(vcpu)) {
		/* From nonpaging to paging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
			     ~(CPU_BASED_CR3_LOAD_EXITING |
			       CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
	}

	if (!(cr0 & X86_CR0_WP))
		*hw_cr0 &= ~X86_CR0_WP;
}

static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long hw_cr0;

	hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
	if (enable_unrestricted_guest)
		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
	else {
		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;

		if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
			enter_pmode(vcpu);

		if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
			enter_rmode(vcpu);
	}

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	if (enable_ept)
		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);

	if (!vcpu->fpu_active)
		hw_cr0 |= X86_CR0_TS | X86_CR0_MP;

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0, hw_cr0);
	vcpu->arch.cr0 = cr0;

	/* depends on vcpu->arch.cr0 to be set to a new value */
	vmx->emulation_required = emulation_required(vcpu);
}

static u64 construct_eptp(unsigned long root_hpa)
{
	u64 eptp;

	/* TODO write the value reading from MSR */
	eptp = VMX_EPT_DEFAULT_MT |
		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
	if (enable_ept_ad_bits)
		eptp |= VMX_EPT_AD_ENABLE_BIT;
	eptp |= (root_hpa & PAGE_MASK);

	return eptp;
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	unsigned long guest_cr3;
	u64 eptp;

	guest_cr3 = cr3;
	if (enable_ept) {
		eptp = construct_eptp(cr3);
		vmcs_write64(EPT_POINTER, eptp);
		if (is_paging(vcpu) || is_guest_mode(vcpu))
			guest_cr3 = kvm_read_cr3(vcpu);
		else
			guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr;
		ept_load_pdptrs(vcpu);
	}

	vmx_flush_tlb(vcpu);
	vmcs_writel(GUEST_CR3, guest_cr3);
}

static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	/*
	 * Pass through host's Machine Check Enable value to hw_cr4, which
	 * is in force while we are in guest mode.  Do not let guests control
	 * this bit, even if host CR4.MCE == 0.
	 */
	unsigned long hw_cr4 =
		(cr4_read_shadow() & X86_CR4_MCE) |
		(cr4 & ~X86_CR4_MCE) |
		(to_vmx(vcpu)->rmode.vm86_active ?
		 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

	if (cr4 & X86_CR4_VMXE) {
		/*
		 * To use VMXON (and later other VMX instructions), a guest
		 * must first be able to turn on cr4.VMXE (see handle_vmon()).
		 * So basically the check on whether to allow nested VMX
		 * is here.
		 */
		if (!nested_vmx_allowed(vcpu))
			return 1;
	}
	if (to_vmx(vcpu)->nested.vmxon &&
	    ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
		return 1;

	vcpu->arch.cr4 = cr4;
	if (enable_ept) {
		if (!is_paging(vcpu)) {
			hw_cr4 &= ~X86_CR4_PAE;
			hw_cr4 |= X86_CR4_PSE;
			/*
			 * SMEP/SMAP is disabled if CPU is in non-paging mode
			 * in hardware. However KVM always uses paging mode
			 * without unrestricted guest.
			 * To emulate this behavior, SMEP/SMAP needs to be
			 * manually disabled when guest switches to non-paging
			 * mode.
			 */
			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
		} else if (!(cr4 & X86_CR4_PAE)) {
			hw_cr4 &= ~X86_CR4_PAE;
		}
	}

	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, hw_cr4);
	return 0;
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 ar;

	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
		*var = vmx->rmode.segs[seg];
		if (seg == VCPU_SREG_TR
		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
			return;
		var->base = vmx_read_guest_seg_base(vmx, seg);
		var->selector = vmx_read_guest_seg_selector(vmx, seg);
		return;
	}
	var->base = vmx_read_guest_seg_base(vmx, seg);
	var->limit = vmx_read_guest_seg_limit(vmx, seg);
	var->selector = vmx_read_guest_seg_selector(vmx, seg);
	ar = vmx_read_guest_seg_ar(vmx, seg);
	var->unusable = (ar >> 16) & 1;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	/*
	 * Some userspaces do not preserve unusable property. Since usable
	 * segment has to be present according to VMX spec we can use present
	 * property to amend userspace bug by making unusable segment always
	 * nonpresent. vmx_segment_access_rights() already marks nonpresent
	 * segment as unusable.
	 */
	var->present = !var->unusable;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
}

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment s;

	if (to_vmx(vcpu)->rmode.vm86_active) {
		vmx_get_segment(vcpu, &s, seg);
		return s.base;
	}
	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}

static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (unlikely(vmx->rmode.vm86_active))
		return 0;
	else {
		int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
		return VMX_AR_DPL(ar);
	}
}

static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	if (var->unusable || !var->present)
		ar = 1 << 16;
	else {
		ar = var->type & 15;
		ar |= (var->s & 1) << 4;
		ar |= (var->dpl & 3) << 5;
		ar |= (var->present & 1) << 7;
		ar |= (var->avl & 1) << 12;
		ar |= (var->l & 1) << 13;
		ar |= (var->db & 1) << 14;
		ar |= (var->g & 1) << 15;
	}

	return ar;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	vmx_segment_cache_clear(vmx);

	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
		vmx->rmode.segs[seg] = *var;
		if (seg == VCPU_SREG_TR)
			vmcs_write16(sf->selector, var->selector);
		else if (var->s)
			fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
		goto out;
	}

	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);

	/*
	 * Fix the "Accessed" bit in the AR field of segment registers for
	 * older qemu binaries. The IA32 architecture specifies that at the
	 * time of processor reset the "Accessed" bit in the AR field of
	 * segment registers is 1, but old qemu sets it to 0 in userland
	 * code. This causes an invalid-guest-state vmexit when
	 * "unrestricted guest" mode is turned on, so force the bit here.
	 */
	if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
		var->type |= 0x1; /* Accessed */

	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));

out:
	vmx->emulation_required = emulation_required(vcpu);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_IDTR_BASE, dt->address);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_GDTR_BASE, dt->address);
}

static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment var;
	u32 ar;

	vmx_get_segment(vcpu, &var, seg);
	var.dpl = 0x3;
	if (seg == VCPU_SREG_CS)
		var.type = 0x3;
	ar = vmx_segment_access_rights(&var);

	if (var.base != (var.selector << 4))
		return false;
	if (var.limit != 0xffff)
		return false;
	if (ar != 0xf3)
		return false;

	return true;
}

static bool code_segment_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs;
	unsigned int cs_rpl;

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs_rpl = cs.selector & SEGMENT_RPL_MASK;

	if (cs.unusable)
		return false;
	if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
		return false;
	if (!cs.s)
		return false;
	if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
		if (cs.dpl > cs_rpl)
			return false;
	} else {
		if (cs.dpl != cs_rpl)
			return false;
	}
	if (!cs.present)
		return false;

	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
	return true;
}

static bool stack_segment_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment ss;
	unsigned int ss_rpl;

	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
	ss_rpl = ss.selector & SEGMENT_RPL_MASK;

	if (ss.unusable)
		return true;
	if (ss.type != 3 && ss.type != 7)
		return false;
	if (!ss.s)
		return false;
	if (ss.dpl != ss_rpl) /* DPL != RPL */
		return false;
	if (!ss.present)
		return false;

	return true;
}

static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment var;
	unsigned int rpl;

	vmx_get_segment(vcpu, &var, seg);
	rpl = var.selector & SEGMENT_RPL_MASK;

	if (var.unusable)
		return true;
	if (!var.s)
		return false;
	if (!var.present)
		return false;
	if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
		if (var.dpl < rpl) /* DPL < RPL */
			return false;
	}

	/* TODO: Add other members to kvm_segment_field to allow checking for other access rights flags */
	return true;
}

static bool tr_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment tr;

	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);

	if (tr.unusable)
		return false;
	if (tr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
		return false;
	if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
		return false;
	if (!tr.present)
		return false;

	return true;
}

static bool ldtr_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment ldtr;

	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);

	if (ldtr.unusable)
		return true;
	if (ldtr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
		return false;
	if (ldtr.type != 2)
		return false;
	if (!ldtr.present)
		return false;

	return true;
}

static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ss;

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);

	return ((cs.selector & SEGMENT_RPL_MASK) ==
		 (ss.selector & SEGMENT_RPL_MASK));
}

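/*
 * Check if guest state is valid. Returns true if valid, false if
 * not.
 * We assume that registers are always usable.
 */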
static bool guest_state_valid(struct kvm_vcpu *vcpu)
{
	if (enable_unrestricted_guest)
		return true;

	/* real mode guest state checks */
	if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
			return false;
	} else {
		/* protected mode guest state checks */
		if (!cs_ss_rpl_check(vcpu))
			return false;
		if (!code_segment_valid(vcpu))
			return false;
		if (!stack_segment_valid(vcpu))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
			return false;
		if (!tr_valid(vcpu))
			return false;
		if (!ldtr_valid(vcpu))
			return false;
	}
	/*
	 * TODO:
	 * - Add checks on RIP
	 * - Add checks on RFLAGS
	 */

	return true;
}

static int init_rmode_tss(struct kvm *kvm)
{
	gfn_t fn;
	u16 data = 0;
	int idx, r;

	idx = srcu_read_lock(&kvm->srcu);
	fn = kvm->arch.tss_addr >> PAGE_SHIFT;
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	r = kvm_write_guest_page(kvm, fn++, &data,
			TSS_IOPB_BASE_OFFSET, sizeof(u16));
	if (r < 0)
		goto out;
	r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	data = ~0;
	r = kvm_write_guest_page(kvm, fn, &data,
				 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
				 sizeof(u8));
out:
	srcu_read_unlock(&kvm->srcu, idx);
	return r;
}

static int init_rmode_identity_map(struct kvm *kvm)
{
	int i, idx, r = 0;
	pfn_t identity_map_pfn;
	u32 tmp;

	if (!enable_ept)
		return 0;

	/* Protect kvm->arch.ept_identity_pagetable_done. */
	mutex_lock(&kvm->slots_lock);

	if (likely(kvm->arch.ept_identity_pagetable_done))
		goto out2;

	identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;

	r = alloc_identity_pagetable(kvm);
	if (r < 0)
		goto out2;

	idx = srcu_read_lock(&kvm->srcu);
	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	/* Set up identity-mapping pagetable for EPT in real mode */
	for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
		r = kvm_write_guest_page(kvm, identity_map_pfn,
				&tmp, i * sizeof(tmp), sizeof(tmp));
		if (r < 0)
			goto out;
	}
	kvm->arch.ept_identity_pagetable_done = true;

out:
	srcu_read_unlock(&kvm->srcu, idx);

out2:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void seg_setup(int seg)
{
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	unsigned int ar;

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	ar = 0x93;
	if (seg == VCPU_SREG_CS)
		ar |= 0x08; /* code segment */

	vmcs_write32(sf->ar_bytes, ar);
}

static int alloc_apic_access_page(struct kvm *kvm)
{
	struct page *page;
	int r = 0;

	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.apic_access_page_done)
		goto out;
	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
	if (r)
		goto out;

	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (is_error_page(page)) {
		r = -EFAULT;
		goto out;
	}

	/*
	 * Do not pin the page in memory, so that memory hot-unplug
	 * is able to migrate it.
	 */
	put_page(page);
	kvm->arch.apic_access_page_done = true;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int alloc_identity_pagetable(struct kvm *kvm)
{
	/* Called with kvm->slots_lock held. */

	int r = 0;

	BUG_ON(kvm->arch.ept_identity_pagetable_done);

	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);

	return r;
}

static void allocate_vpid(struct vcpu_vmx *vmx)
{
	int vpid;

	vmx->vpid = 0;
	if (!enable_vpid)
		return;
	spin_lock(&vmx_vpid_lock);
	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
	if (vpid < VMX_NR_VPIDS) {
		vmx->vpid = vpid;
		__set_bit(vpid, vmx_vpid_bitmap);
	}
	spin_unlock(&vmx_vpid_lock);
}

static void free_vpid(struct vcpu_vmx *vmx)
{
	if (!enable_vpid)
		return;
	spin_lock(&vmx_vpid_lock);
	if (vmx->vpid != 0)
		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
}

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
						u32 msr, int type)
{
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return;

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way
	 * round. We can control MSRs 0x00000000-0x00001fff and
	 * 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R)
			/* read-low */
			__clear_bit(msr, msr_bitmap + 0x000 / f);

		if (type & MSR_TYPE_W)
			/* write-low */
			__clear_bit(msr, msr_bitmap + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R)
			/* read-high */
			__clear_bit(msr, msr_bitmap + 0x400 / f);

		if (type & MSR_TYPE_W)
			/* write-high */
			__clear_bit(msr, msr_bitmap + 0xc00 / f);

	}
}

static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
						u32 msr, int type)
{
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return;

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way
	 * round. We can control MSRs 0x00000000-0x00001fff and
	 * 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R)
			/* read-low */
			__set_bit(msr, msr_bitmap + 0x000 / f);

		if (type & MSR_TYPE_W)
			/* write-low */
			__set_bit(msr, msr_bitmap + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R)
			/* read-high */
			__set_bit(msr, msr_bitmap + 0x400 / f);

		if (type & MSR_TYPE_W)
			/* write-high */
			__set_bit(msr, msr_bitmap + 0xc00 / f);

	}
}

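/*
 * If a msr is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both of L0 and L1 allow it.
 */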
4251static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
4252 unsigned long *msr_bitmap_nested,
4253 u32 msr, int type)
4254{
4255 int f = sizeof(unsigned long);
4256
4257 if (!cpu_has_vmx_msr_bitmap()) {
4258 WARN_ON(1);
4259 return;
4260 }
4261
4262
4263
4264
4265
4266
4267 if (msr <= 0x1fff) {
4268 if (type & MSR_TYPE_R &&
4269 !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
4270
4271 __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
4272
4273 if (type & MSR_TYPE_W &&
4274 !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
4275
4276 __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
4277
4278 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
4279 msr &= 0x1fff;
4280 if (type & MSR_TYPE_R &&
4281 !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
4282
4283 __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
4284
4285 if (type & MSR_TYPE_W &&
4286 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
4287
4288 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
4289
4290 }
4291}
4292
4293static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
4294{
4295 if (!longmode_only)
4296 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
4297 msr, MSR_TYPE_R | MSR_TYPE_W);
4298 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
4299 msr, MSR_TYPE_R | MSR_TYPE_W);
4300}
4301
4302static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
4303{
4304 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4305 msr, MSR_TYPE_R);
4306 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4307 msr, MSR_TYPE_R);
4308}
4309
4310static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
4311{
4312 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4313 msr, MSR_TYPE_R);
4314 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4315 msr, MSR_TYPE_R);
4316}
4317
4318static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
4319{
4320 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4321 msr, MSR_TYPE_W);
4322 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4323 msr, MSR_TYPE_W);
4324}
4325
4326static int vmx_vm_has_apicv(struct kvm *kvm)
4327{
4328 return enable_apicv && irqchip_in_kernel(kvm);
4329}
4330
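/*
 * Completion of a nested posted interrupt: if L1's posted-interrupt
 * descriptor has its "outstanding notification" (ON) bit set, any bits set
 * in its PIR are merged into the virtual-APIC page's vIRR, and RVI (the
 * low byte of GUEST_INTR_STATUS) is raised so that virtual interrupt
 * delivery can inject the highest pending vector into L2.
 */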
4331static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
4332{
4333 struct vcpu_vmx *vmx = to_vmx(vcpu);
4334 int max_irr;
4335 void *vapic_page;
4336 u16 status;
4337
4338 if (vmx->nested.pi_desc &&
4339 vmx->nested.pi_pending) {
4340 vmx->nested.pi_pending = false;
4341 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
4342 return 0;
4343
4344 max_irr = find_last_bit(
4345 (unsigned long *)vmx->nested.pi_desc->pir, 256);
4346
4347 if (max_irr == 256)
4348 return 0;
4349
4350 vapic_page = kmap(vmx->nested.virtual_apic_page);
4351 if (!vapic_page) {
4352 WARN_ON(1);
4353 return -ENOMEM;
4354 }
4355 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
4356 kunmap(vmx->nested.virtual_apic_page);
4357
4358 status = vmcs_read16(GUEST_INTR_STATUS);
4359 if ((u8)max_irr > ((u8)status & 0xff)) {
4360 status &= ~0xff;
4361 status |= (u8)max_irr;
4362 vmcs_write16(GUEST_INTR_STATUS, status);
4363 }
4364 }
4365 return 0;
4366}
4367
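/*
 * If the target vCPU is currently executing in non-root mode, sending the
 * posted-interrupt notification IPI makes the CPU itself merge the PIR
 * into the vIRR without causing a VM exit. Returns true if the IPI was
 * sent; callers fall back to kicking the vCPU otherwise.
 */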
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SMP
	if (vcpu->mode == IN_GUEST_MODE) {
		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
				POSTED_INTR_VECTOR);
		return true;
	}
#endif
	return false;
}

static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
						int vector)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (is_guest_mode(vcpu) &&
	    vector == vmx->nested.posted_intr_nv) {
		/* the PIR and ON have been set by L1. */
		kvm_vcpu_trigger_posted_interrupt(vcpu);
		/*
		 * If a posted intr is not recognized by hardware,
		 * we will accomplish it in the next vmentry.
		 */
		vmx->nested.pi_pending = true;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		return 0;
	}
	return -1;
}

/*
 * Send interrupt to vcpu via posted interrupt way.
 * 1. If target vcpu is running(non-root mode), send posted interrupt
 * notification to vcpu and hardware will sync PIR to vIRR atomically.
 * 2. If target vcpu isn't running(root mode), kick it and let vcpu pick
 * up the interrupt from PIR in next vmentry.
 */
static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int r;

	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
	if (!r)
		return;

	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
		return;

	r = pi_test_and_set_on(&vmx->pi_desc);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
		kvm_vcpu_kick(vcpu);
}

static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!pi_test_and_clear_on(&vmx->pi_desc))
		return;

	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
}

static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
{
	return;
}


/*
 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
 * will not change in the lifetime of the guest.
 * Note that host-state that does change is set elsewhere. E.g., host-state
 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
 */
static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
{
	u32 low32, high32;
	unsigned long tmpl;
	struct desc_ptr dt;
	unsigned long cr4;

	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);
	vmcs_writel(HOST_CR3, read_cr3());

	/* Save the most likely value for this task's CR4 in the VMCS. */
	cr4 = cr4_read_shadow();
	vmcs_writel(HOST_CR4, cr4);
	vmx->host_state.vmcs_host_cr4 = cr4;

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);
#ifdef CONFIG_X86_64
	/*
	 * Load null selectors, so that we can avoid reloading them in
	 * __vmx_load_host_state(), in case userspace uses the null selectors
	 * too (the expected case).
	 */
	vmcs_write16(HOST_DS_SELECTOR, 0);
	vmcs_write16(HOST_ES_SELECTOR, 0);
#else
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);
#endif
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);
	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);

	native_store_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.address);
	vmx->host_idt_base = dt.address;

	vmcs_writel(HOST_RIP, vmx_return);

	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);

	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
		rdmsr(MSR_IA32_CR_PAT, low32, high32);
		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
	}
}

static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
	if (enable_ept)
		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
	if (is_guest_mode(&vmx->vcpu))
		vmx->vcpu.arch.cr4_guest_owned_bits &=
			~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
}

static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
{
	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;

	if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
	return pin_based_exec_ctrl;
}

static u32 vmx_exec_control(struct vcpu_vmx *vmx)
{
	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;

	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
		exec_control &= ~CPU_BASED_MOV_DR_EXITING;

	if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
		exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	}
	if (!enable_ept)
		exec_control |= CPU_BASED_CR3_STORE_EXITING |
				CPU_BASED_CR3_LOAD_EXITING  |
				CPU_BASED_INVLPG_EXITING;
	return exec_control;
}

static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
{
	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
	if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	if (vmx->vpid == 0)
		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
	if (!enable_ept) {
		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
		enable_unrestricted_guest = 0;
		/* Enable INVPCID for non-ept guests may cause performance regression. */
		exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
	}
	if (!enable_unrestricted_guest)
		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
	if (!ple_gap)
		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
	/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
	   (handle_vmptrld).
	   We can NOT enable shadow_vmcs here because we don't have yet
	   a current VMCS12 */
	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
	/* PML is enabled/disabled in creating/destroying vcpu */
	exec_control &= ~SECONDARY_EXEC_ENABLE_PML;

	return exec_control;
}

static void ept_set_mmio_spte_mask(void)
{
	/*
	 * EPT Misconfigurations can be generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 * Also, magic bits (0x3ull << 62) is set to quickly identify mmio
	 * spte.
	 */
	kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
}

#define VMX_XSS_EXIT_BITMAP 0

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
#ifdef CONFIG_X86_64
	unsigned long a;
#endif
	int i;

	/* I/O */
	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
	vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));

	if (enable_shadow_vmcs) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}
	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));

	vmcs_write64(VMCS_LINK_POINTER, -1ull);

	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));

	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));

	if (cpu_has_secondary_exec_ctrls()) {
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
				vmx_secondary_exec_control(vmx));
	}

	if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
		vmcs_write64(EOI_EXIT_BITMAP0, 0);
		vmcs_write64(EOI_EXIT_BITMAP1, 0);
		vmcs_write64(EOI_EXIT_BITMAP2, 0);
		vmcs_write64(EOI_EXIT_BITMAP3, 0);

		vmcs_write16(GUEST_INTR_STATUS, 0);

		vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
	}

	if (ple_gap) {
		vmcs_write32(PLE_GAP, ple_gap);
		vmx->ple_window = ple_window;
		vmx->ple_window_dirty = true;
	}

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0);

	vmcs_write16(HOST_FS_SELECTOR, 0);
	vmcs_write16(HOST_GS_SELECTOR, 0);
	vmx_set_constant_host_state(vmx);
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a);
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a);
#else
	vmcs_writel(HOST_FS_BASE, 0);
	vmcs_writel(HOST_GS_BASE, 0);
#endif

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));

	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);

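	/*
	 * Populate the guest MSR save area: probe each candidate MSR with
	 * rdmsr_safe()/wrmsr_safe() so that MSRs the host CPU does not
	 * implement are silently skipped rather than faulting at runtime.
	 */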
	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		vmx->guest_msrs[j].index = i;
		vmx->guest_msrs[j].data = 0;
		vmx->guest_msrs[j].mask = -1ull;
		++vmx->nmsrs;
	}

	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);

	vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);

	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
	set_cr4_guest_host_mask(vmx);

	if (vmx_xsaves_supported())
		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);

	return 0;
}

static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct msr_data apic_base_msr;
	u64 cr0;

	vmx->rmode.vm86_active = 0;

	vmx->soft_vnmi_blocked = 0;

	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
	kvm_set_cr8(vcpu, 0);

	if (!init_event) {
		apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
				     MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))
			apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
		apic_base_msr.host_initiated = true;
		kvm_set_apic_base(vcpu, &apic_base_msr);
	}

	vmx_segment_cache_clear(vmx);

	seg_setup(VCPU_SREG_CS);
	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
	vmcs_write32(GUEST_CS_BASE, 0xffff0000);

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);

	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	if (!init_event) {
		vmcs_write32(GUEST_SYSENTER_CS, 0);
		vmcs_writel(GUEST_SYSENTER_ESP, 0);
		vmcs_writel(GUEST_SYSENTER_EIP, 0);
		vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
	}

	vmcs_writel(GUEST_RFLAGS, 0x02);
	kvm_rip_write(vcpu, 0xfff0);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

	setup_msrs(vmx);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);

	if (cpu_has_vmx_tpr_shadow() && !init_event) {
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
		if (vm_need_tpr_shadow(vcpu->kvm))
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
				     __pa(vcpu->arch.apic->regs));
		vmcs_write32(TPR_THRESHOLD, 0);
	}

	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

	if (vmx_vm_has_apicv(vcpu->kvm))
		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));

	if (vmx->vpid != 0)
		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);

	cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
	vmx_set_cr0(vcpu, cr0); /* enter rmode */
	vmx->vcpu.arch.cr0 = cr0;
	vmx_set_cr4(vcpu, 0);
	if (!init_event)
		vmx_set_efer(vcpu, 0);
	vmx_fpu_activate(vcpu);
	update_exception_bitmap(vcpu);

	vpid_sync_context(vmx);
}

/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */
static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_NMI_EXITING;
}
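
/*
 * Window exiting: setting CPU_BASED_VIRTUAL_INTR_PENDING (or the NMI
 * variant below) asks the CPU to VM-exit as soon as the guest becomes able
 * to accept an interrupt (or NMI), so KVM can inject a pending event at
 * the first safe opportunity instead of polling.
 */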

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	if (!cpu_has_virtual_nmis() ||
	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
		enable_irq_window(vcpu);
		return;
	}

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void vmx_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	uint32_t intr;
	int irq = vcpu->arch.interrupt.nr;

	trace_kvm_inj_virq(irq);

	++vcpu->stat.irq_injections;
	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (vcpu->arch.interrupt.soft)
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	intr = irq | INTR_INFO_VALID_MASK;
	if (vcpu->arch.interrupt.soft) {
		intr |= INTR_TYPE_SOFT_INTR;
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
	} else
		intr |= INTR_TYPE_EXT_INTR;
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
}

static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (is_guest_mode(vcpu))
		return;

	if (!cpu_has_virtual_nmis()) {
		/*
		 * Tracking the NMI-blocked state in software is built upon
		 * finding the next open IRQ window. This, in turn, depends on
		 * well-behaving guests: They have to keep IRQs disabled at
		 * least as long as the NMI handler runs. Otherwise we may
		 * cause NMI nesting, maybe breaking the guest. But as this is
		 * highly unlikely, we can live with the residual risk.
		 */
		vmx->soft_vnmi_blocked = 1;
		vmx->vnmi_blocked_time = 0;
	}

	++vcpu->stat.nmi_injections;
	vmx->nmi_known_unmasked = false;
	if (vmx->rmode.vm86_active) {
		if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
}

static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	if (!cpu_has_virtual_nmis())
		return to_vmx(vcpu)->soft_vnmi_blocked;
	if (to_vmx(vcpu)->nmi_known_unmasked)
		return false;
	return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
}

static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!cpu_has_virtual_nmis()) {
		if (vmx->soft_vnmi_blocked != masked) {
			vmx->soft_vnmi_blocked = masked;
			vmx->vnmi_blocked_time = 0;
		}
	} else {
		vmx->nmi_known_unmasked = !masked;
		if (masked)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
					GUEST_INTR_STATE_NMI);
	}
}

static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
{
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return 0;

	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
		return 0;

	return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
		   | GUEST_INTR_STATE_NMI));
}

static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return (!to_vmx(vcpu)->nested.nested_run_pending &&
		vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}

static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	int ret;

	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
				    PAGE_SIZE * 3);
	if (ret)
		return ret;
	kvm->arch.tss_addr = addr;
	return init_rmode_tss(kvm);
}

static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
{
	switch (vec) {
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject the exception
		 * from user space while in guest debugging mode.
		 */
		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return false;
		/* fall through */
	case DB_VECTOR:
		if (vcpu->guest_debug &
			(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return false;
		/* fall through */
	case DE_VECTOR:
	case OF_VECTOR:
	case BR_VECTOR:
	case UD_VECTOR:
	case DF_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
	case MF_VECTOR:
		return true;
	}
	return false;
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	/*
	 * Instruction with address size override prefix opcode 0x67
	 * Cause the #SS fault with 0 error code in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
			if (vcpu->arch.halt_request) {
				vcpu->arch.halt_request = 0;
				return kvm_vcpu_halt(vcpu);
			}
			return 1;
		}
		return 0;
	}

	/*
	 * Forward all other exceptions that are valid in real mode.
	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
	 *        the required debugging infrastructure rework.
	 */
	kvm_queue_exception(vcpu, vec);
	return 1;
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs, 0);
#endif
}

static int handle_machine_check(struct kvm_vcpu *vcpu)
{
	/* already handled by vcpu_run */
	return 1;
}

static int handle_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 intr_info, ex_no, error_code;
	unsigned long cr2, rip, dr6;
	u32 vect_info;
	enum emulation_result er;

	vect_info = vmx->idt_vectoring_info;
	intr_info = vmx->exit_intr_info;

	if (is_machine_check(intr_info))
		return handle_machine_check(vcpu);

	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
		return 1;  /* already handled by vmx_vcpu_run() */

	if (is_no_device(intr_info)) {
		vmx_fpu_activate(vcpu);
		return 1;
	}

	if (is_invalid_opcode(intr_info)) {
		if (is_guest_mode(vcpu)) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
		if (er != EMULATE_DONE)
			kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	error_code = 0;
	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);

	/*
	 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
	 * MMIO, it is better to report an internal error.
	 * See the comments in vmx_handle_exit.
	 */
	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
	    !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
		vcpu->run->internal.ndata = 3;
		vcpu->run->internal.data[0] = vect_info;
		vcpu->run->internal.data[1] = intr_info;
		vcpu->run->internal.data[2] = error_code;
		return 0;
	}

	if (is_page_fault(intr_info)) {
		/* EPT won't cause page fault directly */
		BUG_ON(enable_ept);
		cr2 = vmcs_readl(EXIT_QUALIFICATION);
		trace_kvm_page_fault(cr2, error_code);

		if (kvm_event_needs_reinjection(vcpu))
			kvm_mmu_unprotect_page_virt(vcpu, cr2);
		return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
	}

	ex_no = intr_info & INTR_INFO_VECTOR_MASK;

	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
		return handle_rmode_exception(vcpu, ex_no, error_code);

	switch (ex_no) {
	case DB_VECTOR:
		dr6 = vmcs_readl(EXIT_QUALIFICATION);
		if (!(vcpu->guest_debug &
		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= dr6 | DR6_RTM;
			if (!(dr6 & ~DR6_RESERVED)) /* icebp */
				skip_emulated_instruction(vcpu);

			kvm_queue_exception(vcpu, DB_VECTOR);
			return 1;
		}
		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
		/* fall through */
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject #BP from
		 * user space while in guest debugging mode. Reading it for
		 * #DB as well causes no harm, it is not used in that case.
		 */
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		rip = kvm_rip_read(vcpu);
		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
		kvm_run->debug.arch.exception = ex_no;
		break;
	default:
		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
		kvm_run->ex.exception = ex_no;
		kvm_run->ex.error_code = error_code;
		break;
	}
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.irq_exits;
	return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_io(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int size, in, string;
	unsigned port;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	string = (exit_qualification & 16) != 0;
	in = (exit_qualification & 8) != 0;

	++vcpu->stat.io_exits;

	if (string || in)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;
	skip_emulated_instruction(vcpu);

	return kvm_fast_pio_out(vcpu, size, port);
}

static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMCALL instruction (0f 01 c1):
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
}

static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long always_on = VMXON_CR0_ALWAYSON;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		always_on &= ~(X86_CR0_PE | X86_CR0_PG);
	return (val & always_on) == always_on;
}

/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (is_guest_mode(vcpu)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		unsigned long orig_val = val;

		/*
		 * We get here when L2 changed cr0 in a way that did not change
		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
		 * but did change L0 shadowed bits. So we first calculate the
		 * effective cr0 value that L1 would like to write into the
		 * hardware. It consists of the L2-owned bits from the new
		 * value combined with the L1-owned bits from L1's guest_cr0.
		 */
		val = (val & ~vmcs12->cr0_guest_host_mask) |
			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);

		if (!nested_cr0_valid(vcpu, val))
			return 1;

		if (kvm_set_cr0(vcpu, val))
			return 1;
		vmcs_writel(CR0_READ_SHADOW, orig_val);
		return 0;
	} else {
		if (to_vmx(vcpu)->nested.vmxon &&
		    ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
			return 1;
		return kvm_set_cr0(vcpu, val);
	}
}

static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (is_guest_mode(vcpu)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		unsigned long orig_val = val;

		/* analogously to handle_set_cr0 */
		val = (val & ~vmcs12->cr4_guest_host_mask) |
			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
		if (kvm_set_cr4(vcpu, val))
			return 1;
		vmcs_writel(CR4_READ_SHADOW, orig_val);
		return 0;
	} else
		return kvm_set_cr4(vcpu, val);
}

/* called to set cr0 as appropriate for clts instruction exit. */
static void handle_clts(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		/*
		 * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
		 * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
		 * just pretend it's off (also in arch.cr0 for fpu_active).
		 */
		vmcs_writel(CR0_READ_SHADOW,
			vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
		vcpu->arch.cr0 &= ~X86_CR0_TS;
	} else
		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
}

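/*
 * CR-access exit qualification layout (Intel SDM, "Exit Qualification for
 * Control-Register Accesses"), as decoded by handle_cr() below:
 *   bits 3:0   - number of the control register
 *   bits 5:4   - access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS,
 *                3 = LMSW)
 *   bits 11:8  - general-purpose register operand
 *   bits 31:16 - LMSW source data (LMSW only)
 */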
static int handle_cr(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification, val;
	int cr;
	int reg;
	int err;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		val = kvm_register_readl(vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
			err = handle_set_cr0(vcpu, val);
			kvm_complete_insn_gp(vcpu, err);
			return 1;
		case 3:
			err = kvm_set_cr3(vcpu, val);
			kvm_complete_insn_gp(vcpu, err);
			return 1;
		case 4:
			err = handle_set_cr4(vcpu, val);
			kvm_complete_insn_gp(vcpu, err);
			return 1;
		case 8: {
				u8 cr8_prev = kvm_get_cr8(vcpu);
				u8 cr8 = (u8)val;
				err = kvm_set_cr8(vcpu, cr8);
				kvm_complete_insn_gp(vcpu, err);
				if (irqchip_in_kernel(vcpu->kvm))
					return 1;
				if (cr8_prev <= cr8)
					return 1;
				vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
				return 0;
			}
		}
		break;
	case 2: /* clts */
		handle_clts(vcpu);
		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
		skip_emulated_instruction(vcpu);
		vmx_fpu_activate(vcpu);
		return 1;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			val = kvm_read_cr3(vcpu);
			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			skip_emulated_instruction(vcpu);
			return 1;
		case 8:
			val = kvm_get_cr8(vcpu);
			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		break;
	case 3: /* lmsw */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
		kvm_lmsw(vcpu, val);

		skip_emulated_instruction(vcpu);
		return 1;
	default:
		break;
	}
	vcpu->run->exit_reason = 0;
	vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
	       (int)(exit_qualification >> 4) & 3, cr);
	return 0;
}

static int handle_dr(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int dr, dr7, reg;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;

	/* First, if DR does not exist, trigger UD */
	if (!kvm_require_dr(vcpu, dr))
		return 1;

	/* Do not handle if the CPL > 0, will trigger GP on re-entry */
	if (!kvm_require_cpl(vcpu, 0))
		return 1;
	dr7 = vmcs_readl(GUEST_DR7);
	if (dr7 & DR7_GD) {
		/*
		 * As the vm-exit takes precedence over the debug trap, we
		 * need to emulate the latter, either for the host or the
		 * guest debugging itself.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
			vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
			vcpu->run->debug.arch.dr7 = dr7;
			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
			vcpu->run->debug.arch.exception = DB_VECTOR;
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			return 0;
		} else {
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
			kvm_queue_exception(vcpu, DB_VECTOR);
			return 1;
		}
	}

	if (vcpu->guest_debug == 0) {
		u32 cpu_based_vm_exec_control;

		cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
		cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING;
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);

		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
	if (exit_qualification & TYPE_MOV_FROM_DR) {
		unsigned long val;

		if (kvm_get_dr(vcpu, dr, &val))
			return 1;
		kvm_register_write(vcpu, reg, val);
	} else
		if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
			return 1;

	skip_emulated_instruction(vcpu);
	return 1;
}

static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.dr6;
}

static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
{
}

static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	get_debugreg(vcpu->arch.dr6, 6);
	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);

	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
	vmcs_writel(GUEST_DR7, val);
}

static int handle_cpuid(struct kvm_vcpu *vcpu)
{
	kvm_emulate_cpuid(vcpu);
	return 1;
}

static int handle_rdmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	struct msr_data msr_info;

	msr_info.index = ecx;
	msr_info.host_initiated = false;
	if (vmx_get_msr(vcpu, &msr_info)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	trace_kvm_msr_read(ecx, msr_info.data);

	/* FIXME: handling of bits 32:63 of rax, rdx */
	vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
	vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
	struct msr_data msr;
	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	msr.data = data;
	msr.index = ecx;
	msr.host_initiated = false;
	if (kvm_set_msr(vcpu, &msr) != 0) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	trace_kvm_msr_write(ecx, data);
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 1;
}

static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	/* clear pending irq */
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	++vcpu->stat.irq_window_exits;

	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(vcpu->kvm) &&
	    vcpu->run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}
	return 1;
}

static int handle_halt(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu)
{
	kvm_emulate_hypercall(vcpu);
	return 1;
}

static int handle_invd(struct kvm_vcpu *vcpu)
{
	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}

static int handle_invlpg(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	kvm_mmu_invlpg(vcpu, exit_qualification);
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_rdpmc(struct kvm_vcpu *vcpu)
{
	int err;

	err = kvm_rdpmc(vcpu);
	kvm_complete_insn_gp(vcpu, err);

	return 1;
}

static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
	kvm_emulate_wbinvd(vcpu);
	return 1;
}

static int handle_xsetbv(struct kvm_vcpu *vcpu)
{
	u64 new_bv = kvm_read_edx_eax(vcpu);
	u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(vcpu, index, new_bv) == 0)
		skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_xsaves(struct kvm_vcpu *vcpu)
{
	skip_emulated_instruction(vcpu);
	WARN(1, "this should never happen\n");
	return 1;
}

static int handle_xrstors(struct kvm_vcpu *vcpu)
{
	skip_emulated_instruction(vcpu);
	WARN(1, "this should never happen\n");
	return 1;
}
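
/*
 * With VMX_XSS_EXIT_BITMAP set to 0 in vmx_vcpu_setup(), no XSS-managed
 * state component ever triggers an XSAVES/XRSTORS VM exit, so reaching the
 * two handlers above would indicate a hardware or setup problem; they only
 * skip the instruction and warn.
 */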

static int handle_apic_access(struct kvm_vcpu *vcpu)
{
	if (likely(fasteoi)) {
		unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
		int access_type, offset;

		access_type = exit_qualification & APIC_ACCESS_TYPE;
		offset = exit_qualification & APIC_ACCESS_OFFSET;
		/*
		 * Sane guest uses MOV to write EOI, with written value
		 * not cared. So make a short-circuit here by avoiding
		 * heavy instruction emulation.
		 */
		if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
		    (offset == APIC_EOI)) {
			kvm_lapic_set_eoi(vcpu);
			skip_emulated_instruction(vcpu);
			return 1;
		}
	}
	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}

static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	int vector = exit_qualification & 0xff;

	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
	kvm_apic_set_eoi_accelerated(vcpu, vector);
	return 1;
}

static int handle_apic_write(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 offset = exit_qualification & 0xfff;

	/* APIC-write VM exit is trap-like and thus no need to adjust IP */
	kvm_apic_write_nodecode(vcpu, offset);
	return 1;
}

static int handle_task_switch(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qualification;
	bool has_error_code = false;
	u32 error_code = 0;
	u16 tss_selector;
	int reason, type, idt_v, idt_index;

	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	reason = (u32)exit_qualification >> 30;
	if (reason == TASK_SWITCH_GATE && idt_v) {
		switch (type) {
		case INTR_TYPE_NMI_INTR:
			vcpu->arch.nmi_injected = false;
			vmx_set_nmi_mask(vcpu, true);
			break;
		case INTR_TYPE_EXT_INTR:
		case INTR_TYPE_SOFT_INTR:
			kvm_clear_interrupt_queue(vcpu);
			break;
		case INTR_TYPE_HARD_EXCEPTION:
			if (vmx->idt_vectoring_info &
			    VECTORING_INFO_DELIVER_CODE_MASK) {
				has_error_code = true;
				error_code =
					vmcs_read32(IDT_VECTORING_ERROR_CODE);
			}
			/* fall through */
		case INTR_TYPE_SOFT_EXCEPTION:
			kvm_clear_exception_queue(vcpu);
			break;
		default:
			break;
		}
	}
	tss_selector = exit_qualification;

	if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
		       type != INTR_TYPE_EXT_INTR &&
		       type != INTR_TYPE_NMI_INTR))
		skip_emulated_instruction(vcpu);

	if (kvm_task_switch(vcpu, tss_selector,
			    type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
			    has_error_code, error_code) == EMULATE_FAIL) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return 0;
	}

	/*
	 * TODO: What about debug traps on tss switch?
	 *       Are we supposed to inject them and update dr6?
	 */
	return 1;
}

static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	gpa_t gpa;
	u32 error_code;
	int gla_validity;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	gla_validity = (exit_qualification >> 7) & 0x3;
	if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
		printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
		printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
			(long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
			vmcs_readl(GUEST_LINEAR_ADDRESS));
		printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
			(long unsigned int)exit_qualification);
		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
		vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
		return 0;
	}

	/*
	 * EPT violation happened while executing iret from NMI,
	 * "blocked by NMI" bit has to be set before next VM entry.
	 * There are errata that may cause this bit to not be set:
	 * AAK134, BY25.
	 */
	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
			cpu_has_virtual_nmis() &&
			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);

	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	trace_kvm_page_fault(gpa, exit_qualification);

	/* Is it a write fault? */
	error_code = exit_qualification & PFERR_WRITE_MASK;
	/* Is it a fetch fault? */
	error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
	/* Is the ept page table present? */
	error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK;

	vcpu->arch.exit_qualification = exit_qualification;

	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
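
/*
 * The bit shuffling above maps EPT-violation exit-qualification bits onto
 * x86 #PF error-code bits so the generic MMU fault path can be reused:
 * qualification bit 1 (write access) lines up with PFERR_WRITE_MASK
 * (bit 1) directly, bit 2 (instruction fetch) shifts left twice onto
 * PFERR_FETCH_MASK (bit 4), and bit 3 (the GPA was at least readable)
 * shifts right onto PFERR_PRESENT_MASK (bit 0).
 */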

static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
	int ret;
	gpa_t gpa;

	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
		skip_emulated_instruction(vcpu);
		return 1;
	}

	ret = handle_mmio_page_fault_common(vcpu, gpa, true);
	if (likely(ret == RET_MMIO_PF_EMULATE))
		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
					      EMULATE_DONE;

	if (unlikely(ret == RET_MMIO_PF_INVALID))
		return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);

	if (unlikely(ret == RET_MMIO_PF_RETRY))
		return 1;

	/* It is the real ept misconfig */
	WARN_ON(1);

	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
	vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;

	return 0;
}

static int handle_nmi_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	/* clear NMI pending flag */
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
	++vcpu->stat.nmi_window_exits;
	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 1;
}

static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	enum emulation_result err = EMULATE_DONE;
	int ret = 1;
	u32 cpu_exec_ctrl;
	bool intr_window_requested;
	unsigned count = 130;

	cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;

	while (vmx->emulation_required && count-- != 0) {
		if (intr_window_requested && vmx_interrupt_allowed(vcpu))
			return handle_interrupt_window(&vmx->vcpu);

		if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
			return 1;

		err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);

		if (err == EMULATE_USER_EXIT) {
			++vcpu->stat.mmio_exits;
			ret = 0;
			goto out;
		}

		if (err != EMULATE_DONE) {
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
			vcpu->run->internal.ndata = 0;
			return 0;
		}

		if (vcpu->arch.halt_request) {
			vcpu->arch.halt_request = 0;
			ret = kvm_vcpu_halt(vcpu);
			goto out;
		}

		if (signal_pending(current))
			goto out;
		if (need_resched())
			schedule();
	}

out:
	return ret;
}

static int __grow_ple_window(int val)
{
	if (ple_window_grow < 1)
		return ple_window;

	val = min(val, ple_window_actual_max);

	if (ple_window_grow < ple_window)
		val *= ple_window_grow;
	else
		val += ple_window_grow;

	return val;
}

static int __shrink_ple_window(int val, int modifier, int minimum)
{
	if (modifier < 1)
		return ple_window;

	if (modifier < ple_window)
		val /= modifier;
	else
		val -= modifier;

	return max(val, minimum);
}
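
/*
 * Worked example with the module defaults (ple_window = 4096,
 * ple_window_grow = 2, ple_window_shrink = 0): since the grow modifier (2)
 * is below ple_window, growing multiplies, so repeated PAUSE-loop exits
 * double the window 4096 -> 8192 -> 16384 ... capped at
 * ple_window_actual_max; a shrink modifier below 1 resets the window
 * straight back to ple_window. Modifiers larger than ple_window are
 * treated as additive/subtractive steps instead, so huge tuning values
 * cannot multiply the window into an overflow.
 */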

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int old = vmx->ple_window;

	vmx->ple_window = __grow_ple_window(old);

	if (vmx->ple_window != old)
		vmx->ple_window_dirty = true;

	trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int old = vmx->ple_window;

	vmx->ple_window = __shrink_ple_window(old,
	                                      ple_window_shrink, ple_window);

	if (vmx->ple_window != old)
		vmx->ple_window_dirty = true;

	trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
}

/*
 * ple_window_actual_max is computed to be one grow_ple_window() below
 * ple_window_max, which prevents overflows in __grow_ple_window(), because
 * ple_window_max is an int. The ple_window_* parameters are expected to be
 * tuned rarely; the cached actual_max is refreshed here when that happens.
 */
static void update_ple_window_actual_max(void)
{
	ple_window_actual_max =
			__shrink_ple_window(max(ple_window_max, ple_window),
			                    ple_window_grow, INT_MIN);
}
static __init int hardware_setup(void)
{
	int r = -ENOMEM, i, msr;

	rdmsrl_safe(MSR_EFER, &host_efer);

	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
		kvm_define_shared_msr(i, vmx_msr_index[i]);

	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_io_bitmap_a)
		return r;

	vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_io_bitmap_b)
		goto out;

	vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_legacy)
		goto out1;

	vmx_msr_bitmap_legacy_x2apic =
				(unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_legacy_x2apic)
		goto out2;

	vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_longmode)
		goto out3;

	vmx_msr_bitmap_longmode_x2apic =
				(unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_longmode_x2apic)
		goto out4;

	if (nested) {
		vmx_msr_bitmap_nested =
			(unsigned long *)__get_free_page(GFP_KERNEL);
		if (!vmx_msr_bitmap_nested)
			goto out5;
	}

	vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_vmread_bitmap)
		goto out6;

	vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_vmwrite_bitmap)
		goto out7;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	/*
	 * Allow direct access to the PC debug port (it is often used for I/O
	 * delays, but the vmexits simply slow things down).
	 */
	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
	clear_bit(0x80, vmx_io_bitmap_a);

	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);

	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
	if (nested)
		memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);

	if (setup_vmcs_config(&vmcs_config) < 0) {
		r = -EIO;
		goto out8;
	}

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (!cpu_has_vmx_vpid())
		enable_vpid = 0;
	if (!cpu_has_vmx_shadow_vmcs())
		enable_shadow_vmcs = 0;
	if (enable_shadow_vmcs)
		init_vmcs_shadow_fields();

	if (!cpu_has_vmx_ept() ||
	    !cpu_has_vmx_ept_4levels()) {
		enable_ept = 0;
		enable_unrestricted_guest = 0;
		enable_ept_ad_bits = 0;
	}

	if (!cpu_has_vmx_ept_ad_bits())
		enable_ept_ad_bits = 0;

	if (!cpu_has_vmx_unrestricted_guest())
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	/*
	 * set_apic_access_page_addr() is used to reload apic access
	 * page upon invalidation.  No need to do anything if not
	 * using the APIC_ACCESS_ADDRESS VMCS field.
	 */
	if (!flexpriority_enabled)
		kvm_x86_ops->set_apic_access_page_addr = NULL;

	if (!cpu_has_vmx_tpr_shadow())
		kvm_x86_ops->update_cr8_intercept = NULL;

	if (enable_ept && !cpu_has_vmx_ept_2m_page())
		kvm_disable_largepages();

	if (!cpu_has_vmx_ple())
		ple_gap = 0;

	if (!cpu_has_vmx_apicv())
		enable_apicv = 0;

	if (enable_apicv)
		kvm_x86_ops->update_cr8_intercept = NULL;
	else {
		kvm_x86_ops->hwapic_irr_update = NULL;
		kvm_x86_ops->hwapic_isr_update = NULL;
		kvm_x86_ops->deliver_posted_interrupt = NULL;
		kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
	}

	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
	vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);

	memcpy(vmx_msr_bitmap_legacy_x2apic,
			vmx_msr_bitmap_legacy, PAGE_SIZE);
	memcpy(vmx_msr_bitmap_longmode_x2apic,
			vmx_msr_bitmap_longmode, PAGE_SIZE);

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	if (enable_apicv) {
		for (msr = 0x800; msr <= 0x8ff; msr++)
			vmx_disable_intercept_msr_read_x2apic(msr);

		/* According SDM, in x2apic mode, the whole id reg is used.
		 * But in KVM, it only use the highest eight bits. Need to
		 * intercept it */
		vmx_enable_intercept_msr_read_x2apic(0x802);
		/* TMCCT */
		vmx_enable_intercept_msr_read_x2apic(0x839);
		/* TPR */
		vmx_disable_intercept_msr_write_x2apic(0x808);
		/* EOI */
		vmx_disable_intercept_msr_write_x2apic(0x80b);
		/* SELF-IPI */
		vmx_disable_intercept_msr_write_x2apic(0x83f);
	}

	if (enable_ept) {
		kvm_mmu_set_mask_ptes(0ull,
			(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
			(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
			0ull, VMX_EPT_EXECUTABLE_MASK);
		ept_set_mmio_spte_mask();
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	update_ple_window_actual_max();

	/*
	 * Only enable PML when hardware supports PML feature, and both EPT
	 * and EPT A/D bit features are enabled -- PML depends on them to work.
	 */
	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
		enable_pml = 0;

	if (!enable_pml) {
		kvm_x86_ops->slot_enable_log_dirty = NULL;
		kvm_x86_ops->slot_disable_log_dirty = NULL;
		kvm_x86_ops->flush_log_dirty = NULL;
		kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
	}

	return alloc_kvm_area();

out8:
	free_page((unsigned long)vmx_vmwrite_bitmap);
out7:
	free_page((unsigned long)vmx_vmread_bitmap);
out6:
	if (nested)
		free_page((unsigned long)vmx_msr_bitmap_nested);
out5:
	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
out4:
	free_page((unsigned long)vmx_msr_bitmap_longmode);
out3:
	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
out2:
	free_page((unsigned long)vmx_msr_bitmap_legacy);
out1:
	free_page((unsigned long)vmx_io_bitmap_b);
out:
	free_page((unsigned long)vmx_io_bitmap_a);

	return r;
}

static __exit void hardware_unsetup(void)
{
	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
	free_page((unsigned long)vmx_msr_bitmap_legacy);
	free_page((unsigned long)vmx_msr_bitmap_longmode);
	free_page((unsigned long)vmx_io_bitmap_b);
	free_page((unsigned long)vmx_io_bitmap_a);
	free_page((unsigned long)vmx_vmwrite_bitmap);
	free_page((unsigned long)vmx_vmread_bitmap);
	if (nested)
		free_page((unsigned long)vmx_msr_bitmap_nested);

	free_kvm_area();
}

/*
 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
 * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
 */
static int handle_pause(struct kvm_vcpu *vcpu)
{
	if (ple_gap)
		grow_ple_window(vcpu);

	skip_emulated_instruction(vcpu);
	kvm_vcpu_on_spin(vcpu);

	return 1;
}

static int handle_nop(struct kvm_vcpu *vcpu)
{
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_mwait(struct kvm_vcpu *vcpu)
{
	printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
	return handle_nop(vcpu);
}

static int handle_monitor_trap(struct kvm_vcpu *vcpu)
{
	return 1;
}

static int handle_monitor(struct kvm_vcpu *vcpu)
{
	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
	return handle_nop(vcpu);
}

/*
 * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
 * Rather than allocating a fresh VMCS for every VMPTRLD that L1 performs,
 * we keep a small, most-recently-used pool of vmcs02s, capped at
 * VMCS02_POOL_SIZE entries. A pool hit moves the entry to the front; on a
 * full pool the least recently used entry is recycled for the current
 * vmcs12.
 */
static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
{
	struct vmcs02_list *item;
	list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
		if (item->vmptr == vmx->nested.current_vmptr) {
			list_move(&item->list, &vmx->nested.vmcs02_pool);
			return &item->vmcs02;
		}

	if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
		/* Recycle the least recently used VMCS. */
		item = list_entry(vmx->nested.vmcs02_pool.prev,
			struct vmcs02_list, list);
		item->vmptr = vmx->nested.current_vmptr;
		list_move(&item->list, &vmx->nested.vmcs02_pool);
		return &item->vmcs02;
	}

	/* Create a new VMCS */
	item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
	if (!item)
		return NULL;
	item->vmcs02.vmcs = alloc_vmcs();
	if (!item->vmcs02.vmcs) {
		kfree(item);
		return NULL;
	}
	loaded_vmcs_init(&item->vmcs02);
	item->vmptr = vmx->nested.current_vmptr;
	list_add(&(item->list), &(vmx->nested.vmcs02_pool));
	vmx->nested.vmcs02_num++;
	return &item->vmcs02;
}

/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	struct vmcs02_list *item;
	list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
		if (item->vmptr == vmptr) {
			free_loaded_vmcs(&item->vmcs02);
			list_del(&item->list);
			kfree(item);
			vmx->nested.vmcs02_num--;
			return;
		}
}

/*
 * Free all VMCSs saved for this vcpu, except the one pointed by
 * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
 * must be &vmx->vmcs01.
 */
static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
{
	struct vmcs02_list *item, *n;

	WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
	list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
		/*
		 * Something will leak if the above WARN triggers.  Better
		 * than a use-after-free.
		 */
		if (vmx->loaded_vmcs == &item->vmcs02)
			continue;

		free_loaded_vmcs(&item->vmcs02);
		list_del(&item->list);
		kfree(item);
		vmx->nested.vmcs02_num--;
	}
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or failure of the emulated VMX instruction, as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions".
 */
static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
}

static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
}

static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
					u32 vm_instruction_error)
{
	if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
		/*
		 * failValid writes the error number to the current VMCS, which
		 * can't be done if there isn't a current VMCS.
		 */
		nested_vmx_failInvalid(vcpu);
		return;
	}
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: not to reset guest simply here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
}

static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}

/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and throws
 * #UD or #GP.
 */
static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
				 unsigned long exit_qualification,
				 u32 vmx_instruction_info, bool wr, gva_t *ret)
{
	gva_t off;
	bool exn;
	struct kvm_segment s;

	/*
	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
	 * Execution", on an exit, vmx_instruction_info holds most of the
	 * addressing components of the operand. Only the displacement part
	 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
	 * For how an actual address is calculated from all these components,
	 * refer to Vol. 1, "Operand Addressing".
	 */
	int  scaling = vmx_instruction_info & 3;
	int  addr_size = (vmx_instruction_info >> 7) & 7;
	bool is_reg = vmx_instruction_info & (1u << 10);
	int  seg_reg = (vmx_instruction_info >> 15) & 7;
	int  index_reg = (vmx_instruction_info >> 18) & 0xf;
	bool index_is_valid = !(vmx_instruction_info & (1u << 22));
	int  base_reg = (vmx_instruction_info >> 23) & 0xf;
	bool base_is_valid = !(vmx_instruction_info & (1u << 27));

	if (is_reg) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* Addr = segment_base + offset */
	/* offset = base + [index * scale] + displacement */
	off = exit_qualification; /* holds the displacement */
	if (base_is_valid)
		off += kvm_register_read(vcpu, base_reg);
	if (index_is_valid)
		off += kvm_register_read(vcpu, index_reg)<<scaling;
	vmx_get_segment(vcpu, &s, seg_reg);
	*ret = s.base + off;

	if (addr_size == 1) /* 32 bit */
		*ret &= 0xffffffff;

	/* Checks for #GP/#SS exceptions. */
	exn = false;
	if (is_protmode(vcpu)) {
		/* Protected mode: apply checks for segment validity in the
		 * following order:
		 * - segment type check (#GP(0) may be thrown)
		 * - usability check (#GP(0)/#SS(0))
		 * - limit check (#GP(0)/#SS(0))
		 */
		if (wr)
			/* #GP(0) if the destination operand is located in a
			 * read-only data segment or any code segment.
			 */
			exn = ((s.type & 0xa) == 0 || (s.type & 8));
		else
			/* #GP(0) if the source operand is located in an
			 * execute-only code segment */
			exn = ((s.type & 0xa) == 8);
	}
	if (exn) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return 1;
	}
	if (is_long_mode(vcpu)) {
		/* long mode: #GP(0)/#SS(0) if the memory address is in a
		 * non-canonical form.
		 */
		exn = is_noncanonical_address(*ret);
	} else if (is_protmode(vcpu)) {
		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
		 */
		exn = (s.unusable != 0);
		/* Protected mode: #GP(0)/#SS(0) if the memory
		 * operand is outside the segment limit.
		 */
		exn = exn || (off + sizeof(u64) > s.limit);
	}
	if (exn) {
		kvm_queue_exception_e(vcpu,
				      seg_reg == VCPU_SREG_SS ?
						SS_VECTOR : GP_VECTOR,
				      0);
		return 1;
	}

	return 0;
}
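
/*
 * Decoding example (hypothetical operand, for illustration only): for a
 * VMREAD memory operand like 8(%rax,%rbx,4) with DS as the segment,
 * hardware would report vmx_instruction_info with scaling = 2 (scale
 * factor 4), base and index valid, base_reg = RAX, index_reg = RBX,
 * seg_reg = DS, and exit_qualification = 8 (the displacement); the routine
 * above then computes DS.base + RAX + (RBX << 2) + 8.
 */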
6423
6424
6425
6426
6427
6428
6429
6430
6431static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
6432 gpa_t *vmpointer)
6433{
6434 gva_t gva;
6435 gpa_t vmptr;
6436 struct x86_exception e;
6437 struct page *page;
6438 struct vcpu_vmx *vmx = to_vmx(vcpu);
6439 int maxphyaddr = cpuid_maxphyaddr(vcpu);
6440
6441 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6442 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
6443 return 1;
6444
6445 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
6446 sizeof(vmptr), &e)) {
6447 kvm_inject_page_fault(vcpu, &e);
6448 return 1;
6449 }
6450
6451 switch (exit_reason) {
6452 case EXIT_REASON_VMON:
6453
6454
6455
6456
6457
6458
6459
6460
6461
6462
6463 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6464 nested_vmx_failInvalid(vcpu);
6465 skip_emulated_instruction(vcpu);
6466 return 1;
6467 }
6468
6469 page = nested_get_page(vcpu, vmptr);
6470 if (page == NULL ||
6471 *(u32 *)kmap(page) != VMCS12_REVISION) {
6472 nested_vmx_failInvalid(vcpu);
6473 kunmap(page);
6474 skip_emulated_instruction(vcpu);
6475 return 1;
6476 }
6477 kunmap(page);
6478 vmx->nested.vmxon_ptr = vmptr;
6479 break;
6480 case EXIT_REASON_VMCLEAR:
6481 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6482 nested_vmx_failValid(vcpu,
6483 VMXERR_VMCLEAR_INVALID_ADDRESS);
6484 skip_emulated_instruction(vcpu);
6485 return 1;
6486 }
6487
6488 if (vmptr == vmx->nested.vmxon_ptr) {
6489 nested_vmx_failValid(vcpu,
6490 VMXERR_VMCLEAR_VMXON_POINTER);
6491 skip_emulated_instruction(vcpu);
6492 return 1;
6493 }
6494 break;
6495 case EXIT_REASON_VMPTRLD:
6496 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6497 nested_vmx_failValid(vcpu,
6498 VMXERR_VMPTRLD_INVALID_ADDRESS);
6499 skip_emulated_instruction(vcpu);
6500 return 1;
6501 }
6502
		if (vmptr == vmx->nested.vmxon_ptr) {
			nested_vmx_failValid(vcpu,
					     VMXERR_VMPTRLD_VMXON_POINTER);
6506 skip_emulated_instruction(vcpu);
6507 return 1;
6508 }
6509 break;
	default:
		return 1; /* shouldn't happen */
6512 }
6513
6514 if (vmpointer)
6515 *vmpointer = vmptr;
6516 return 0;
6517}
6518
/*
 * Emulate the VMXON instruction.
 * We check the various prerequisites (CR4.VMXE, protected mode without
 * RFLAGS.VM, CPL 0, the IA32_FEATURE_CONTROL MSR, and a valid VMXON
 * pointer), allocate the shadow VMCS if shadowing is enabled, and record
 * that the vcpu is now in VMX root operation.
 */
6527static int handle_vmon(struct kvm_vcpu *vcpu)
6528{
6529 struct kvm_segment cs;
6530 struct vcpu_vmx *vmx = to_vmx(vcpu);
6531 struct vmcs *shadow_vmcs;
6532 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
6533 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
6534
	/*
	 * The Intel VMX Instruction Reference lists a bunch of bits that are
	 * prerequisite to running VMXON, most notably cr4.VMXE must be set.
	 * Failing any of these checks raises #UD before the VMXON pointer is
	 * even looked at.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
	    !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
	    (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
6543 kvm_queue_exception(vcpu, UD_VECTOR);
6544 return 1;
6545 }
6546
6547 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
6548 if (is_long_mode(vcpu) && !cs.l) {
6549 kvm_queue_exception(vcpu, UD_VECTOR);
6550 return 1;
6551 }
6552
6553 if (vmx_get_cpl(vcpu)) {
6554 kvm_inject_gp(vcpu, 0);
6555 return 1;
6556 }
6557
6558 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
6559 return 1;
6560
6561 if (vmx->nested.vmxon) {
6562 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
6563 skip_emulated_instruction(vcpu);
6564 return 1;
6565 }
6566
6567 if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
6568 != VMXON_NEEDED_FEATURES) {
6569 kvm_inject_gp(vcpu, 0);
6570 return 1;
6571 }
6572
6573 if (enable_shadow_vmcs) {
6574 shadow_vmcs = alloc_vmcs();
6575 if (!shadow_vmcs)
6576 return -ENOMEM;

		/* mark vmcs as shadow */
		shadow_vmcs->revision_id |= (1u << 31);
		/* init shadow vmcs */
		vmcs_clear(shadow_vmcs);
6581 vmx->nested.current_shadow_vmcs = shadow_vmcs;
6582 }
6583
6584 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
6585 vmx->nested.vmcs02_num = 0;
6586
6587 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
6588 HRTIMER_MODE_REL);
6589 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
6590
6591 vmx->nested.vmxon = true;
6592
6593 skip_emulated_instruction(vcpu);
6594 nested_vmx_succeed(vcpu);
6595 return 1;
6596}
6597
/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for correct execution of VMX instructions (except VMXON, whose prerequisites
 * are slightly different). It also specifies what exception to inject
 * otherwise.
 */
6603static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
6604{
6605 struct kvm_segment cs;
6606 struct vcpu_vmx *vmx = to_vmx(vcpu);
6607
6608 if (!vmx->nested.vmxon) {
6609 kvm_queue_exception(vcpu, UD_VECTOR);
6610 return 0;
6611 }
6612
6613 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
6614 if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
6615 (is_long_mode(vcpu) && !cs.l)) {
6616 kvm_queue_exception(vcpu, UD_VECTOR);
6617 return 0;
6618 }
6619
6620 if (vmx_get_cpl(vcpu)) {
6621 kvm_inject_gp(vcpu, 0);
6622 return 0;
6623 }
6624
6625 return 1;
6626}
6627
6628static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
6629{
6630 u32 exec_control;
6631 if (vmx->nested.current_vmptr == -1ull)
6632 return;

	/* current_vmptr and current_vmcs12 are always set/reset together */
	if (WARN_ON(vmx->nested.current_vmcs12 == NULL))
6636 return;
6637
	if (enable_shadow_vmcs) {
		/* copy to memory all shadowed fields in case
		   they were modified */
		copy_shadow_to_vmcs12(vmx);
6642 vmx->nested.sync_shadow_vmcs = false;
6643 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6644 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
6645 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
6646 vmcs_write64(VMCS_LINK_POINTER, -1ull);
6647 }
6648 vmx->nested.posted_intr_nv = -1;
6649 kunmap(vmx->nested.current_vmcs12_page);
6650 nested_release_page(vmx->nested.current_vmcs12_page);
6651 vmx->nested.current_vmptr = -1ull;
6652 vmx->nested.current_vmcs12 = NULL;
6653}
6654
/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
6659static void free_nested(struct vcpu_vmx *vmx)
6660{
6661 if (!vmx->nested.vmxon)
6662 return;
6663
6664 vmx->nested.vmxon = false;
6665 nested_release_vmcs12(vmx);
6666 if (enable_shadow_vmcs)
6667 free_vmcs(vmx->nested.current_shadow_vmcs);

	/* Unpin physical memory we referred to in current vmcs02 */
6669 if (vmx->nested.apic_access_page) {
6670 nested_release_page(vmx->nested.apic_access_page);
6671 vmx->nested.apic_access_page = NULL;
6672 }
6673 if (vmx->nested.virtual_apic_page) {
6674 nested_release_page(vmx->nested.virtual_apic_page);
6675 vmx->nested.virtual_apic_page = NULL;
6676 }
6677 if (vmx->nested.pi_desc_page) {
6678 kunmap(vmx->nested.pi_desc_page);
6679 nested_release_page(vmx->nested.pi_desc_page);
6680 vmx->nested.pi_desc_page = NULL;
6681 vmx->nested.pi_desc = NULL;
6682 }
6683
6684 nested_free_all_saved_vmcss(vmx);
6685}
6686
/* Emulate the VMXOFF instruction */
6688static int handle_vmoff(struct kvm_vcpu *vcpu)
6689{
6690 if (!nested_vmx_check_permission(vcpu))
6691 return 1;
6692 free_nested(to_vmx(vcpu));
6693 skip_emulated_instruction(vcpu);
6694 nested_vmx_succeed(vcpu);
6695 return 1;
6696}
6697
/* Emulate the VMCLEAR instruction */
6699static int handle_vmclear(struct kvm_vcpu *vcpu)
6700{
6701 struct vcpu_vmx *vmx = to_vmx(vcpu);
6702 gpa_t vmptr;
6703 struct vmcs12 *vmcs12;
6704 struct page *page;
6705
6706 if (!nested_vmx_check_permission(vcpu))
6707 return 1;
6708
6709 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
6710 return 1;
6711
6712 if (vmptr == vmx->nested.current_vmptr)
6713 nested_release_vmcs12(vmx);
6714
6715 page = nested_get_page(vcpu, vmptr);
	if (page == NULL) {
		/*
		 * For accurate processor emulation, VMCLEAR beyond available
		 * physical memory should do nothing at all. However, it is
		 * possible that a nested vmx bug, not a guest hypervisor bug,
		 * resulted in this case, so let's shut down before doing any
		 * more damage:
		 */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return 1;
6725 return 1;
6726 }
6727 vmcs12 = kmap(page);
6728 vmcs12->launch_state = 0;
6729 kunmap(page);
6730 nested_release_page(page);
6731
6732 nested_free_vmcs02(vmx, vmptr);
6733
6734 skip_emulated_instruction(vcpu);
6735 nested_vmx_succeed(vcpu);
6736 return 1;
6737}
6738
6739static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
6740
/* Emulate the VMLAUNCH instruction */
6742static int handle_vmlaunch(struct kvm_vcpu *vcpu)
6743{
6744 return nested_vmx_run(vcpu, true);
6745}
6746
/* Emulate the VMRESUME instruction */
6748static int handle_vmresume(struct kvm_vcpu *vcpu)
6749{
6751 return nested_vmx_run(vcpu, false);
6752}
6753
6754enum vmcs_field_type {
6755 VMCS_FIELD_TYPE_U16 = 0,
6756 VMCS_FIELD_TYPE_U64 = 1,
6757 VMCS_FIELD_TYPE_U32 = 2,
6758 VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
6759};
6760
6761static inline int vmcs_field_type(unsigned long field)
6762{
	if (field & 0x1)
		return VMCS_FIELD_TYPE_U32;
	return (field >> 13) & 0x3;
6766}
6767
6768static inline int vmcs_field_readonly(unsigned long field)
6769{
6770 return (((field >> 10) & 0x3) == 1);
6771}
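
/*
 * For reference (Intel SDM Vol. 3, "VMCS Field Encoding"): bits 14:13 of a
 * field encoding give its width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
 * 3 = natural width), bit 0 selects the high half of a 64-bit field, and
 * bits 11:10 give the field type, where type 1 means VM-exit information,
 * i.e. read-only. For example, GUEST_CR0 (0x6800) has
 * ((0x6800 >> 13) & 0x3) == 3, natural width, while VM_EXIT_REASON (0x4402)
 * has ((0x4402 >> 10) & 0x3) == 1 and is therefore rejected by
 * handle_vmwrite() below.
 */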
6772
/*
 * Read a vmcs12 field. Since these can have varying lengths and we return
 * one type, we chose to take the biggest type (u64) and zero-extend all
 * smaller fields.
 */
6780static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
6781 unsigned long field, u64 *ret)
6782{
6783 short offset = vmcs_field_to_offset(field);
6784 char *p;
6785
6786 if (offset < 0)
6787 return offset;
6788
6789 p = ((char *)(get_vmcs12(vcpu))) + offset;
6790
6791 switch (vmcs_field_type(field)) {
6792 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6793 *ret = *((natural_width *)p);
6794 return 0;
6795 case VMCS_FIELD_TYPE_U16:
6796 *ret = *((u16 *)p);
6797 return 0;
6798 case VMCS_FIELD_TYPE_U32:
6799 *ret = *((u32 *)p);
6800 return 0;
6801 case VMCS_FIELD_TYPE_U64:
6802 *ret = *((u64 *)p);
6803 return 0;
6804 default:
6805 WARN_ON(1);
6806 return -ENOENT;
6807 }
6808}

static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
				   unsigned long field, u64 field_value)
{
	short offset = vmcs_field_to_offset(field);
	char *p;

	if (offset < 0)
		return offset;

	p = ((char *)get_vmcs12(vcpu)) + offset;
6817
6818 switch (vmcs_field_type(field)) {
6819 case VMCS_FIELD_TYPE_U16:
6820 *(u16 *)p = field_value;
6821 return 0;
6822 case VMCS_FIELD_TYPE_U32:
6823 *(u32 *)p = field_value;
6824 return 0;
6825 case VMCS_FIELD_TYPE_U64:
6826 *(u64 *)p = field_value;
6827 return 0;
6828 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6829 *(natural_width *)p = field_value;
6830 return 0;
6831 default:
6832 WARN_ON(1);
6833 return -ENOENT;
6834 }
6836}
6837
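/*
 * With shadow VMCS enabled, L1's VMREAD/VMWRITE of shadowed fields do not
 * cause VM exits; the writes land in the shadow VMCS instead. Pull those
 * values back into the software vmcs12 before L0 looks at it.
 */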
6838static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
6839{
6840 int i;
6841 unsigned long field;
6842 u64 field_value;
6843 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
6844 const unsigned long *fields = shadow_read_write_fields;
6845 const int num_fields = max_shadow_read_write_fields;
6846
6847 preempt_disable();
6848
6849 vmcs_load(shadow_vmcs);
6850
6851 for (i = 0; i < num_fields; i++) {
6852 field = fields[i];
6853 switch (vmcs_field_type(field)) {
6854 case VMCS_FIELD_TYPE_U16:
6855 field_value = vmcs_read16(field);
6856 break;
6857 case VMCS_FIELD_TYPE_U32:
6858 field_value = vmcs_read32(field);
6859 break;
6860 case VMCS_FIELD_TYPE_U64:
6861 field_value = vmcs_read64(field);
6862 break;
6863 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6864 field_value = vmcs_readl(field);
6865 break;
6866 default:
6867 WARN_ON(1);
6868 continue;
6869 }
6870 vmcs12_write_any(&vmx->vcpu, field, field_value);
6871 }
6872
6873 vmcs_clear(shadow_vmcs);
6874 vmcs_load(vmx->loaded_vmcs->vmcs);
6875
6876 preempt_enable();
6877}
6878
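/*
 * Mirror direction of the helper above: prime the shadow VMCS from the
 * software vmcs12 before resuming L1, so that L1's exit-free VMREADs
 * observe up-to-date values.
 */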
6879static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
6880{
6881 const unsigned long *fields[] = {
6882 shadow_read_write_fields,
6883 shadow_read_only_fields
6884 };
6885 const int max_fields[] = {
6886 max_shadow_read_write_fields,
6887 max_shadow_read_only_fields
6888 };
6889 int i, q;
6890 unsigned long field;
6891 u64 field_value = 0;
6892 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
6893
6894 vmcs_load(shadow_vmcs);
6895
6896 for (q = 0; q < ARRAY_SIZE(fields); q++) {
6897 for (i = 0; i < max_fields[q]; i++) {
6898 field = fields[q][i];
6899 vmcs12_read_any(&vmx->vcpu, field, &field_value);
6900
6901 switch (vmcs_field_type(field)) {
6902 case VMCS_FIELD_TYPE_U16:
6903 vmcs_write16(field, (u16)field_value);
6904 break;
6905 case VMCS_FIELD_TYPE_U32:
6906 vmcs_write32(field, (u32)field_value);
6907 break;
6908 case VMCS_FIELD_TYPE_U64:
6909 vmcs_write64(field, (u64)field_value);
6910 break;
6911 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6912 vmcs_writel(field, (long)field_value);
6913 break;
6914 default:
6915 WARN_ON(1);
6916 break;
6917 }
6918 }
6919 }
6920
6921 vmcs_clear(shadow_vmcs);
6922 vmcs_load(vmx->loaded_vmcs->vmcs);
6923}
6924
/*
 * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
 * used before) all generate the same failure when it is missing.
 */
6929static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
6930{
6931 struct vcpu_vmx *vmx = to_vmx(vcpu);
6932 if (vmx->nested.current_vmptr == -1ull) {
6933 nested_vmx_failInvalid(vcpu);
6934 skip_emulated_instruction(vcpu);
6935 return 0;
6936 }
6937 return 1;
6938}
6939
6940static int handle_vmread(struct kvm_vcpu *vcpu)
6941{
6942 unsigned long field;
6943 u64 field_value;
6944 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6945 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6946 gva_t gva = 0;
6947
6948 if (!nested_vmx_check_permission(vcpu) ||
6949 !nested_vmx_check_vmcs12(vcpu))
6950 return 1;
6951
	/* Decode instruction info and find the field to read */
	field = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
6954
6955 if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
6956 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
6957 skip_emulated_instruction(vcpu);
6958 return 1;
6959 }
6960
	/*
	 * Now copy part of this value to register or memory, as requested.
	 * Note that the number of bits actually copied is 32 or 64 depending
	 * on the guest's mode (32 or 64 bit), not on the given field's length.
	 */
	if (vmx_instruction_info & (1u << 10)) {
6966 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
6967 field_value);
6968 } else {
6969 if (get_vmx_mem_address(vcpu, exit_qualification,
6970 vmx_instruction_info, true, &gva))
6971 return 1;
6972
6973 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
			&field_value, (is_64_bit_mode(vcpu) ? 8 : 4), NULL);
6975 }
6976
6977 nested_vmx_succeed(vcpu);
6978 skip_emulated_instruction(vcpu);
6979 return 1;
6980}
6981
6982
6983static int handle_vmwrite(struct kvm_vcpu *vcpu)
6984{
6985 unsigned long field;
6986 gva_t gva;
6987 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6988 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6989
	/* The value to write might be 32 or 64 bits, depending on L1's long
	 * mode, and eventually we need to write that into a field of several
	 * possible lengths. The code below first zero-extends the value to 64
	 * bit (field_value), and then copies only the appropriate number of
	 * bits into the vmcs12 field.
	 */
	u64 field_value = 0;
6996 struct x86_exception e;
6997
6998 if (!nested_vmx_check_permission(vcpu) ||
6999 !nested_vmx_check_vmcs12(vcpu))
7000 return 1;
7001
7002 if (vmx_instruction_info & (1u << 10))
7003 field_value = kvm_register_readl(vcpu,
7004 (((vmx_instruction_info) >> 3) & 0xf));
7005 else {
7006 if (get_vmx_mem_address(vcpu, exit_qualification,
7007 vmx_instruction_info, false, &gva))
7008 return 1;
7009 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
7010 &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
7011 kvm_inject_page_fault(vcpu, &e);
7012 return 1;
7013 }
7014 }
7015
	/* Decode instruction info and find the field to write */
	field = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
7018 if (vmcs_field_readonly(field)) {
7019 nested_vmx_failValid(vcpu,
7020 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
7021 skip_emulated_instruction(vcpu);
7022 return 1;
7023 }
7024
7025 if (vmcs12_write_any(vcpu, field, field_value) < 0) {
7026 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
7027 skip_emulated_instruction(vcpu);
7028 return 1;
7029 }
7030
7031 nested_vmx_succeed(vcpu);
7032 skip_emulated_instruction(vcpu);
7033 return 1;
7034}
7035
/* Emulate the VMPTRLD instruction */
7037static int handle_vmptrld(struct kvm_vcpu *vcpu)
7038{
7039 struct vcpu_vmx *vmx = to_vmx(vcpu);
7040 gpa_t vmptr;
7041 u32 exec_control;
7042
7043 if (!nested_vmx_check_permission(vcpu))
7044 return 1;
7045
7046 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
7047 return 1;
7048
7049 if (vmx->nested.current_vmptr != vmptr) {
7050 struct vmcs12 *new_vmcs12;
7051 struct page *page;
7052 page = nested_get_page(vcpu, vmptr);
7053 if (page == NULL) {
7054 nested_vmx_failInvalid(vcpu);
7055 skip_emulated_instruction(vcpu);
7056 return 1;
7057 }
7058 new_vmcs12 = kmap(page);
7059 if (new_vmcs12->revision_id != VMCS12_REVISION) {
7060 kunmap(page);
7061 nested_release_page_clean(page);
7062 nested_vmx_failValid(vcpu,
7063 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
7064 skip_emulated_instruction(vcpu);
7065 return 1;
7066 }
7067
7068 nested_release_vmcs12(vmx);
7069 vmx->nested.current_vmptr = vmptr;
7070 vmx->nested.current_vmcs12 = new_vmcs12;
7071 vmx->nested.current_vmcs12_page = page;
7072 if (enable_shadow_vmcs) {
7073 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7074 exec_control |= SECONDARY_EXEC_SHADOW_VMCS;
7075 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
7076 vmcs_write64(VMCS_LINK_POINTER,
7077 __pa(vmx->nested.current_shadow_vmcs));
7078 vmx->nested.sync_shadow_vmcs = true;
7079 }
7080 }
7081
7082 nested_vmx_succeed(vcpu);
7083 skip_emulated_instruction(vcpu);
7084 return 1;
7085}
7086
/* Emulate the VMPTRST instruction */
7088static int handle_vmptrst(struct kvm_vcpu *vcpu)
7089{
7090 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7091 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7092 gva_t vmcs_gva;
7093 struct x86_exception e;
7094
7095 if (!nested_vmx_check_permission(vcpu))
7096 return 1;
7097
7098 if (get_vmx_mem_address(vcpu, exit_qualification,
7099 vmx_instruction_info, true, &vmcs_gva))
7100 return 1;
7101
7102 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
7103 (void *)&to_vmx(vcpu)->nested.current_vmptr,
7104 sizeof(u64), &e)) {
7105 kvm_inject_page_fault(vcpu, &e);
7106 return 1;
7107 }
7108 nested_vmx_succeed(vcpu);
7109 skip_emulated_instruction(vcpu);
7110 return 1;
7111}
7112
/* Emulate the INVEPT instruction */
7114static int handle_invept(struct kvm_vcpu *vcpu)
7115{
7116 struct vcpu_vmx *vmx = to_vmx(vcpu);
7117 u32 vmx_instruction_info, types;
7118 unsigned long type;
7119 gva_t gva;
7120 struct x86_exception e;
7121 struct {
7122 u64 eptp, gpa;
7123 } operand;
7124
7125 if (!(vmx->nested.nested_vmx_secondary_ctls_high &
7126 SECONDARY_EXEC_ENABLE_EPT) ||
7127 !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
7128 kvm_queue_exception(vcpu, UD_VECTOR);
7129 return 1;
7130 }
7131
7132 if (!nested_vmx_check_permission(vcpu))
7133 return 1;
7134
7135 if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
7136 kvm_queue_exception(vcpu, UD_VECTOR);
7137 return 1;
7138 }
7139
7140 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7141 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
7142
7143 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
7144
7145 if (!(types & (1UL << type))) {
7146 nested_vmx_failValid(vcpu,
7147 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7148 return 1;
7149 }
7150
	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
7154 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
7155 vmx_instruction_info, false, &gva))
7156 return 1;
7157 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
7158 sizeof(operand), &e)) {
7159 kvm_inject_page_fault(vcpu, &e);
7160 return 1;
7161 }
7162
7163 switch (type) {
7164 case VMX_EPT_EXTENT_GLOBAL:
7165 kvm_mmu_sync_roots(vcpu);
7166 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
7167 nested_vmx_succeed(vcpu);
7168 break;
	default:
		/* Trap single context invalidation invept calls */
		BUG_ON(1);
7172 break;
7173 }
7174
7175 skip_emulated_instruction(vcpu);
7176 return 1;
7177}
7178
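/*
 * Nested VPID is not yet supported, so INVVPID is never advertised to L1
 * and any attempt by L1 to execute it is reflected back as #UD.
 */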
7179static int handle_invvpid(struct kvm_vcpu *vcpu)
7180{
7181 kvm_queue_exception(vcpu, UD_VECTOR);
7182 return 1;
7183}
7184
7185static int handle_pml_full(struct kvm_vcpu *vcpu)
7186{
7187 unsigned long exit_qualification;
7188
7189 trace_kvm_pml_full(vcpu->vcpu_id);
7190
7191 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7192
	/*
	 * PML buffer FULL happened while executing iret from NMI,
	 * "blocked by NMI" bit has to be set before next VM entry.
	 */
7197 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
7198 cpu_has_virtual_nmis() &&
7199 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
7200 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7201 GUEST_INTR_STATE_NMI);
7202
	/*
	 * PML buffer already flushed at beginning of VMEXIT. Nothing to do
	 * here, and there is no userspace involvement needed for PML.
	 */
	return 1;
7208}
7209
/*
 * The exit handlers return 1 if the exit was handled fully and guest
 * execution may resume.  Otherwise they set the kvm_run parameter to
 * indicate what needs to be done to userspace and return 0.
 */
7215static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
7216 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
7217 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
7218 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
7219 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
7220 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
7221 [EXIT_REASON_CR_ACCESS] = handle_cr,
7222 [EXIT_REASON_DR_ACCESS] = handle_dr,
7223 [EXIT_REASON_CPUID] = handle_cpuid,
7224 [EXIT_REASON_MSR_READ] = handle_rdmsr,
7225 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
7226 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
7227 [EXIT_REASON_HLT] = handle_halt,
7228 [EXIT_REASON_INVD] = handle_invd,
7229 [EXIT_REASON_INVLPG] = handle_invlpg,
7230 [EXIT_REASON_RDPMC] = handle_rdpmc,
7231 [EXIT_REASON_VMCALL] = handle_vmcall,
7232 [EXIT_REASON_VMCLEAR] = handle_vmclear,
7233 [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
7234 [EXIT_REASON_VMPTRLD] = handle_vmptrld,
7235 [EXIT_REASON_VMPTRST] = handle_vmptrst,
7236 [EXIT_REASON_VMREAD] = handle_vmread,
7237 [EXIT_REASON_VMRESUME] = handle_vmresume,
7238 [EXIT_REASON_VMWRITE] = handle_vmwrite,
7239 [EXIT_REASON_VMOFF] = handle_vmoff,
7240 [EXIT_REASON_VMON] = handle_vmon,
7241 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
7242 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
7243 [EXIT_REASON_APIC_WRITE] = handle_apic_write,
7244 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
7245 [EXIT_REASON_WBINVD] = handle_wbinvd,
7246 [EXIT_REASON_XSETBV] = handle_xsetbv,
7247 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
7248 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
7249 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
7250 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
7251 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
7252 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
7253 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap,
7254 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
7255 [EXIT_REASON_INVEPT] = handle_invept,
7256 [EXIT_REASON_INVVPID] = handle_invvpid,
7257 [EXIT_REASON_XSAVES] = handle_xsaves,
7258 [EXIT_REASON_XRSTORS] = handle_xrstors,
7259 [EXIT_REASON_PML_FULL] = handle_pml_full,
7260};
7261
7262static const int kvm_vmx_max_exit_handlers =
7263 ARRAY_SIZE(kvm_vmx_exit_handlers);
7264
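/*
 * Return true if an I/O instruction executed by L2 should cause a VM exit
 * to L1 according to the I/O bitmaps in vmcs12: one bit per port, with
 * bitmap A covering ports 0x0000-0x7fff and bitmap B ports 0x8000-0xffff.
 */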
7265static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
7266 struct vmcs12 *vmcs12)
7267{
7268 unsigned long exit_qualification;
7269 gpa_t bitmap, last_bitmap;
7270 unsigned int port;
7271 int size;
7272 u8 b;
7273
7274 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
7275 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
7276
7277 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7278
7279 port = exit_qualification >> 16;
7280 size = (exit_qualification & 7) + 1;
7281
7282 last_bitmap = (gpa_t)-1;
7283 b = -1;
7284
7285 while (size > 0) {
7286 if (port < 0x8000)
7287 bitmap = vmcs12->io_bitmap_a;
7288 else if (port < 0x10000)
7289 bitmap = vmcs12->io_bitmap_b;
7290 else
7291 return true;
7292 bitmap += (port & 0x7fff) / 8;
7293
7294 if (last_bitmap != bitmap)
7295 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
7296 return true;
7297 if (b & (1 << (port & 7)))
7298 return true;
7299
7300 port++;
7301 size--;
7302 last_bitmap = bitmap;
7303 }
7304
7305 return false;
7306}
7307
/*
 * Return true if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
 * disinterest in the current event (read or write a specific MSR) by using an
 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
 */
7314static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
7315 struct vmcs12 *vmcs12, u32 exit_reason)
7316{
7317 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
7318 gpa_t bitmap;
7319
7320 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
7321 return true;
7322
	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
7328 bitmap = vmcs12->msr_bitmap;
7329 if (exit_reason == EXIT_REASON_MSR_WRITE)
7330 bitmap += 2048;
7331 if (msr_index >= 0xc0000000) {
7332 msr_index -= 0xc0000000;
7333 bitmap += 1024;
7334 }
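
	/*
	 * Example: an L2 write to MSR_LSTAR (0xc0000082) ends up at bit
	 * (0x82 & 7) of byte (0x82 / 8) in the "write, high MSR" quarter,
	 * i.e. at bitmap + 2048 + 1024 + 16.
	 */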
7335
	/* Then read the msr_index'th bit from this bitmap: */
	if (msr_index < 1024*8) {
7338 unsigned char b;
7339 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
7340 return true;
7341 return 1 & (b >> (msr_index & 7));
7342 } else
7343 return true;
7344}
7345
/*
 * Return true if we should exit from L2 to L1 to handle a CR access exit,
 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
 * intercept (via guest_host_mask etc.) the current event.
 */
7351static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
7352 struct vmcs12 *vmcs12)
7353{
7354 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7355 int cr = exit_qualification & 15;
7356 int reg = (exit_qualification >> 8) & 15;
7357 unsigned long val = kvm_register_readl(vcpu, reg);
7358
7359 switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
7361 switch (cr) {
7362 case 0:
7363 if (vmcs12->cr0_guest_host_mask &
7364 (val ^ vmcs12->cr0_read_shadow))
7365 return true;
7366 break;
7367 case 3:
7368 if ((vmcs12->cr3_target_count >= 1 &&
7369 vmcs12->cr3_target_value0 == val) ||
7370 (vmcs12->cr3_target_count >= 2 &&
7371 vmcs12->cr3_target_value1 == val) ||
7372 (vmcs12->cr3_target_count >= 3 &&
7373 vmcs12->cr3_target_value2 == val) ||
7374 (vmcs12->cr3_target_count >= 4 &&
7375 vmcs12->cr3_target_value3 == val))
7376 return false;
7377 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
7378 return true;
7379 break;
7380 case 4:
7381 if (vmcs12->cr4_guest_host_mask &
7382 (vmcs12->cr4_read_shadow ^ val))
7383 return true;
7384 break;
7385 case 8:
7386 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
7387 return true;
7388 break;
7389 }
7390 break;
	case 2: /* clts */
7392 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
7393 (vmcs12->cr0_read_shadow & X86_CR0_TS))
7394 return true;
7395 break;
	case 1: /* mov from cr */
7397 switch (cr) {
7398 case 3:
7399 if (vmcs12->cpu_based_vm_exec_control &
7400 CPU_BASED_CR3_STORE_EXITING)
7401 return true;
7402 break;
7403 case 8:
7404 if (vmcs12->cpu_based_vm_exec_control &
7405 CPU_BASED_CR8_STORE_EXITING)
7406 return true;
7407 break;
7408 }
7409 break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0. Other attempted changes are ignored, with no exit.
		 */
7415 if (vmcs12->cr0_guest_host_mask & 0xe &
7416 (val ^ vmcs12->cr0_read_shadow))
7417 return true;
7418 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
7419 !(vmcs12->cr0_read_shadow & 0x1) &&
7420 (val & 0x1))
7421 return true;
7422 break;
7423 }
7424 return false;
7425}
7426
/*
 * Return true if we should exit from L2 to L1 to handle an exit, or false if
 * we should handle it ourselves in L0 (and then continue L2). Only call this
 * when in is_guest_mode (L2).
 */
7432static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
7433{
7434 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
7435 struct vcpu_vmx *vmx = to_vmx(vcpu);
7436 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7437 u32 exit_reason = vmx->exit_reason;
7438
7439 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
7440 vmcs_readl(EXIT_QUALIFICATION),
7441 vmx->idt_vectoring_info,
7442 intr_info,
7443 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
7444 KVM_ISA_VMX);
7445
7446 if (vmx->nested.nested_run_pending)
7447 return false;
7448
7449 if (unlikely(vmx->fail)) {
7450 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
7451 vmcs_read32(VM_INSTRUCTION_ERROR));
7452 return true;
7453 }
7454
7455 switch (exit_reason) {
7456 case EXIT_REASON_EXCEPTION_NMI:
7457 if (!is_exception(intr_info))
7458 return false;
7459 else if (is_page_fault(intr_info))
7460 return enable_ept;
7461 else if (is_no_device(intr_info) &&
7462 !(vmcs12->guest_cr0 & X86_CR0_TS))
7463 return false;
7464 return vmcs12->exception_bitmap &
7465 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
7466 case EXIT_REASON_EXTERNAL_INTERRUPT:
7467 return false;
7468 case EXIT_REASON_TRIPLE_FAULT:
7469 return true;
7470 case EXIT_REASON_PENDING_INTERRUPT:
7471 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
7472 case EXIT_REASON_NMI_WINDOW:
7473 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
7474 case EXIT_REASON_TASK_SWITCH:
7475 return true;
7476 case EXIT_REASON_CPUID:
7477 if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
7478 return false;
7479 return true;
7480 case EXIT_REASON_HLT:
7481 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
7482 case EXIT_REASON_INVD:
7483 return true;
7484 case EXIT_REASON_INVLPG:
7485 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
7486 case EXIT_REASON_RDPMC:
7487 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
7488 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
7489 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
7490 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
7491 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
7492 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
7493 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
7494 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
7495 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
		/*
		 * VMX instructions executed by L2 trap unconditionally, and
		 * are always reflected to L1 for emulation.
		 */
		return true;
7501 case EXIT_REASON_CR_ACCESS:
7502 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
7503 case EXIT_REASON_DR_ACCESS:
7504 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
7505 case EXIT_REASON_IO_INSTRUCTION:
7506 return nested_vmx_exit_handled_io(vcpu, vmcs12);
7507 case EXIT_REASON_MSR_READ:
7508 case EXIT_REASON_MSR_WRITE:
7509 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
7510 case EXIT_REASON_INVALID_STATE:
7511 return true;
7512 case EXIT_REASON_MWAIT_INSTRUCTION:
7513 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
7514 case EXIT_REASON_MONITOR_TRAP_FLAG:
7515 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
7516 case EXIT_REASON_MONITOR_INSTRUCTION:
7517 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
7518 case EXIT_REASON_PAUSE_INSTRUCTION:
7519 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
7520 nested_cpu_has2(vmcs12,
7521 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
7522 case EXIT_REASON_MCE_DURING_VMENTRY:
7523 return false;
7524 case EXIT_REASON_TPR_BELOW_THRESHOLD:
7525 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
7526 case EXIT_REASON_APIC_ACCESS:
7527 return nested_cpu_has2(vmcs12,
7528 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
7529 case EXIT_REASON_APIC_WRITE:
7530 case EXIT_REASON_EOI_INDUCED:
		/* apic_write and eoi_induced should exit unconditionally. */
		return true;
7533 case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault()
		 */
		return false;
7541 case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never uses directly L1's EPT, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table is L0's fault.
		 */
		return false;
7549 case EXIT_REASON_WBINVD:
7550 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
7551 case EXIT_REASON_XSETBV:
7552 return true;
7553 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value---neither in L1 nor in L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
		 */
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
7561 default:
7562 return true;
7563 }
7564}
7565
7566static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
7567{
7568 *info1 = vmcs_readl(EXIT_QUALIFICATION);
7569 *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
7570}
7571
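/*
 * Page Modification Logging: the CPU records the GPA of each page it
 * dirties into a 512-entry buffer (PML_ENTITY_NUM). GUEST_PML_INDEX counts
 * down from PML_ENTITY_NUM - 1, and a full buffer raises an
 * EXIT_REASON_PML_FULL exit, handled by handle_pml_full() above.
 */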
7572static int vmx_enable_pml(struct vcpu_vmx *vmx)
7573{
7574 struct page *pml_pg;
7575 u32 exec_control;
7576
7577 pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
7578 if (!pml_pg)
7579 return -ENOMEM;
7580
7581 vmx->pml_pg = pml_pg;
7582
7583 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
7584 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
7585
7586 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7587 exec_control |= SECONDARY_EXEC_ENABLE_PML;
7588 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
7589
7590 return 0;
7591}
7592
7593static void vmx_disable_pml(struct vcpu_vmx *vmx)
7594{
7595 u32 exec_control;
7596
7597 ASSERT(vmx->pml_pg);
7598 __free_page(vmx->pml_pg);
7599 vmx->pml_pg = NULL;
7600
7601 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7602 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
7603 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
7604}
7605
7606static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
7607{
7608 struct vcpu_vmx *vmx = to_vmx(vcpu);
7609 u64 *pml_buf;
7610 u16 pml_idx;
7611
7612 pml_idx = vmcs_read16(GUEST_PML_INDEX);

	/* Do nothing if PML buffer is empty */
	if (pml_idx == (PML_ENTITY_NUM - 1))
7616 return;
7617
	/* PML index always points to next available PML buffer entity */
	if (pml_idx >= PML_ENTITY_NUM)
7620 pml_idx = 0;
7621 else
7622 pml_idx++;
7623
7624 pml_buf = page_address(vmx->pml_pg);
7625 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
7626 u64 gpa;
7627
7628 gpa = pml_buf[pml_idx];
7629 WARN_ON(gpa & (PAGE_SIZE - 1));
7630 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
7631 }
7632
	/* reset PML index */
	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
7635}
7636
/*
 * Flush all vcpus' PML buffers and update the logged GPAs in dirty_bitmap;
 * the actual flushing happens in vmx_flush_pml_buffer() at the start of
 * each vcpu's next VM exit.
 */
7641static void kvm_flush_pml_buffers(struct kvm *kvm)
7642{
7643 int i;
7644 struct kvm_vcpu *vcpu;
7645
	/*
	 * We only need to kick each vcpu out of guest mode here: the PML
	 * buffer is flushed at the beginning of every VMEXIT, so only vcpus
	 * currently running in guest mode can have unflushed GPAs in their
	 * PML buffers.
	 */
7651 kvm_for_each_vcpu(i, vcpu, kvm)
7652 kvm_vcpu_kick(vcpu);
7653}
7654
7655static void vmx_dump_sel(char *name, uint32_t sel)
7656{
7657 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
7658 name, vmcs_read32(sel),
7659 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
7660 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
7661 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
7662}
7663
7664static void vmx_dump_dtsel(char *name, uint32_t limit)
7665{
7666 pr_err("%s limit=0x%08x, base=0x%016lx\n",
7667 name, vmcs_read32(limit),
7668 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
7669}
7670
7671static void dump_vmcs(void)
7672{
7673 u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
7674 u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
7675 u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
7676 u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
7677 u32 secondary_exec_control = 0;
7678 unsigned long cr4 = vmcs_readl(GUEST_CR4);
7679 u64 efer = vmcs_readl(GUEST_IA32_EFER);
7680 int i, n;
7681
7682 if (cpu_has_secondary_exec_ctrls())
7683 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7684
7685 pr_err("*** Guest State ***\n");
7686 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
7687 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
7688 vmcs_readl(CR0_GUEST_HOST_MASK));
7689 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
7690 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
7691 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
	    (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) {
7695 pr_err("PDPTR0 = 0x%016lx PDPTR1 = 0x%016lx\n",
7696 vmcs_readl(GUEST_PDPTR0), vmcs_readl(GUEST_PDPTR1));
7697 pr_err("PDPTR2 = 0x%016lx PDPTR3 = 0x%016lx\n",
7698 vmcs_readl(GUEST_PDPTR2), vmcs_readl(GUEST_PDPTR3));
7699 }
7700 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
7701 vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
7702 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
7703 vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
7704 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
7705 vmcs_readl(GUEST_SYSENTER_ESP),
7706 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
7707 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR);
7708 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR);
7709 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR);
7710 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR);
7711 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR);
7712 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR);
7713 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
7714 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
7715 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
7716 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
7717 if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
7718 (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
7719 pr_err("EFER = 0x%016llx PAT = 0x%016lx\n",
7720 efer, vmcs_readl(GUEST_IA32_PAT));
7721 pr_err("DebugCtl = 0x%016lx DebugExceptions = 0x%016lx\n",
7722 vmcs_readl(GUEST_IA32_DEBUGCTL),
7723 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
7724 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
7725 pr_err("PerfGlobCtl = 0x%016lx\n",
7726 vmcs_readl(GUEST_IA32_PERF_GLOBAL_CTRL));
7727 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
7728 pr_err("BndCfgS = 0x%016lx\n", vmcs_readl(GUEST_BNDCFGS));
7729 pr_err("Interruptibility = %08x ActivityState = %08x\n",
7730 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
7731 vmcs_read32(GUEST_ACTIVITY_STATE));
7732 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
7733 pr_err("InterruptStatus = %04x\n",
7734 vmcs_read16(GUEST_INTR_STATUS));
7735
7736 pr_err("*** Host State ***\n");
7737 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
7738 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
7739 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
7740 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
7741 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
7742 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
7743 vmcs_read16(HOST_TR_SELECTOR));
7744 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
7745 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
7746 vmcs_readl(HOST_TR_BASE));
7747 pr_err("GDTBase=%016lx IDTBase=%016lx\n",
7748 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
7749 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
7750 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
7751 vmcs_readl(HOST_CR4));
7752 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
7753 vmcs_readl(HOST_IA32_SYSENTER_ESP),
7754 vmcs_read32(HOST_IA32_SYSENTER_CS),
7755 vmcs_readl(HOST_IA32_SYSENTER_EIP));
7756 if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
7757 pr_err("EFER = 0x%016lx PAT = 0x%016lx\n",
7758 vmcs_readl(HOST_IA32_EFER), vmcs_readl(HOST_IA32_PAT));
7759 if (vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
7760 pr_err("PerfGlobCtl = 0x%016lx\n",
7761 vmcs_readl(HOST_IA32_PERF_GLOBAL_CTRL));
7762
7763 pr_err("*** Control State ***\n");
7764 pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
7765 pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
7766 pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
7767 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
7768 vmcs_read32(EXCEPTION_BITMAP),
7769 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
7770 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
7771 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
7772 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
7773 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
7774 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
7775 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
7776 vmcs_read32(VM_EXIT_INTR_INFO),
7777 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
7778 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
7779 pr_err(" reason=%08x qualification=%016lx\n",
7780 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
7781 pr_err("IDTVectoring: info=%08x errcode=%08x\n",
7782 vmcs_read32(IDT_VECTORING_INFO_FIELD),
7783 vmcs_read32(IDT_VECTORING_ERROR_CODE));
7784 pr_err("TSC Offset = 0x%016lx\n", vmcs_readl(TSC_OFFSET));
7785 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
7786 pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
7787 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
7788 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
7789 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
7790 pr_err("EPT pointer = 0x%016lx\n", vmcs_readl(EPT_POINTER));
7791 n = vmcs_read32(CR3_TARGET_COUNT);
	for (i = 0; i + 1 < n; i += 2)
7793 pr_err("CR3 target%u=%016lx target%u=%016lx\n",
7794 i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
7795 i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
7796 if (i < n)
7797 pr_err("CR3 target%u=%016lx\n",
7798 i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
7799 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
7800 pr_err("PLE Gap=%08x Window=%08x\n",
7801 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
7802 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
7803 pr_err("Virtual processor ID = 0x%04x\n",
7804 vmcs_read16(VIRTUAL_PROCESSOR_ID));
7805}
7806
/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
7811static int vmx_handle_exit(struct kvm_vcpu *vcpu)
7812{
7813 struct vcpu_vmx *vmx = to_vmx(vcpu);
7814 u32 exit_reason = vmx->exit_reason;
7815 u32 vectoring_info = vmx->idt_vectoring_info;
7816
	/*
	 * Flush the logged GPAs in the PML buffer so dirty_bitmap is kept up
	 * to date. A nice side effect: in kvm_vm_ioctl_get_dirty_log, before
	 * querying dirty_bitmap we only need to kick all vcpus out of guest
	 * mode, since a vcpu in root mode has already flushed its PML buffer.
	 */
7824 if (enable_pml)
7825 vmx_flush_pml_buffer(vcpu);
7826
	/* If guest state is invalid, start emulating */
7828 if (vmx->emulation_required)
7829 return handle_invalid_guest_state(vcpu);
7830
7831 if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
7832 nested_vmx_vmexit(vcpu, exit_reason,
7833 vmcs_read32(VM_EXIT_INTR_INFO),
7834 vmcs_readl(EXIT_QUALIFICATION));
7835 return 1;
7836 }
7837
7838 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
7839 dump_vmcs();
7840 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
7841 vcpu->run->fail_entry.hardware_entry_failure_reason
7842 = exit_reason;
7843 return 0;
7844 }
7845
7846 if (unlikely(vmx->fail)) {
7847 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
7848 vcpu->run->fail_entry.hardware_entry_failure_reason
7849 = vmcs_read32(VM_INSTRUCTION_ERROR);
7850 return 0;
7851 }
7852
	/*
	 * Note:
	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a
	 * delivery event, since it indicates the guest is accessing MMIO.
	 * The vm-exit can be triggered again after return to the guest, which
	 * would cause an infinite loop.
	 */
7860 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
7861 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
7862 exit_reason != EXIT_REASON_EPT_VIOLATION &&
7863 exit_reason != EXIT_REASON_TASK_SWITCH)) {
7864 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7865 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
7866 vcpu->run->internal.ndata = 2;
7867 vcpu->run->internal.data[0] = vectoring_info;
7868 vcpu->run->internal.data[1] = exit_reason;
7869 return 0;
7870 }
7871
7872 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
7873 !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
7874 get_vmcs12(vcpu))))) {
7875 if (vmx_interrupt_allowed(vcpu)) {
7876 vmx->soft_vnmi_blocked = 0;
7877 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
7878 vcpu->arch.nmi_pending) {
			/*
			 * This CPU doesn't support us in finding the end of
			 * an NMI-blocked window if the guest runs with IRQs
			 * disabled. So we pull the trigger after 1 s of
			 * futile waiting, but inform the user about this.
			 */
7885 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
7886 "state on VCPU %d after 1 s timeout\n",
7887 __func__, vcpu->vcpu_id);
7888 vmx->soft_vnmi_blocked = 0;
7889 }
7890 }
7891
7892 if (exit_reason < kvm_vmx_max_exit_handlers
7893 && kvm_vmx_exit_handlers[exit_reason])
7894 return kvm_vmx_exit_handlers[exit_reason](vcpu);
7895 else {
7896 WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
7897 kvm_queue_exception(vcpu, UD_VECTOR);
7898 return 1;
7899 }
7900}
7901
7902static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
7903{
7904 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7905
7906 if (is_guest_mode(vcpu) &&
7907 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
7908 return;
7909
7910 if (irr == -1 || tpr < irr) {
7911 vmcs_write32(TPR_THRESHOLD, 0);
7912 return;
7913 }
7914
7915 vmcs_write32(TPR_THRESHOLD, irr);
7916}
7917
7918static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
7919{
7920 u32 sec_exec_control;
7921
	/*
	 * There is no point in enabling virtualized x2apic mode if APIC
	 * virtualization (apicv) is not enabled for this VM.
	 */
7926 if (!cpu_has_vmx_virtualize_x2apic_mode() ||
7927 !vmx_vm_has_apicv(vcpu->kvm))
7928 return;
7929
7930 if (!vm_need_tpr_shadow(vcpu->kvm))
7931 return;
7932
7933 sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7934
7935 if (set) {
7936 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
7937 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
7938 } else {
7939 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
7940 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
7941 }
7942 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
7943
7944 vmx_set_msr_bitmap(vcpu);
7945}
7946
7947static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
7948{
7949 struct vcpu_vmx *vmx = to_vmx(vcpu);
7950
	/*
	 * Currently we do not handle the nested case where L2 has an
	 * APIC access page of its own; that page is still pinned.
	 * Hence, we skip the case where the VCPU is in guest mode _and_
	 * L1 prepared an APIC access page for L2.
	 *
	 * For the case where L1 and L2 share the same APIC access page
	 * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
	 * in the vmcs12), this function will only update either the vmcs01
	 * or the vmcs02.  If the former, the vmcs02 will be updated by
	 * prepare_vmcs02.  If the latter, the vmcs01 will be updated in
	 * the next L2->L1 exit.
	 */
7964 if (!is_guest_mode(vcpu) ||
7965 !nested_cpu_has2(vmx->nested.current_vmcs12,
7966 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
7967 vmcs_write64(APIC_ACCESS_ADDR, hpa);
7968}
7969
7970static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
7971{
7972 u16 status;
7973 u8 old;
7974
7975 if (isr == -1)
7976 isr = 0;
7977
7978 status = vmcs_read16(GUEST_INTR_STATUS);
7979 old = status >> 8;
7980 if (isr != old) {
7981 status &= 0xff;
7982 status |= isr << 8;
7983 vmcs_write16(GUEST_INTR_STATUS, status);
7984 }
7985}
7986
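/*
 * GUEST_INTR_STATUS packs two 8-bit values: RVI (requesting virtual
 * interrupt) in the low byte, set below, and SVI (servicing virtual
 * interrupt) in the high byte, set by vmx_hwapic_isr_update() above.
 */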
7987static void vmx_set_rvi(int vector)
7988{
7989 u16 status;
7990 u8 old;
7991
7992 if (vector == -1)
7993 vector = 0;
7994
7995 status = vmcs_read16(GUEST_INTR_STATUS);
7996 old = (u8)status & 0xff;
7997 if ((u8)vector != old) {
7998 status &= ~0xff;
7999 status |= (u8)vector;
8000 vmcs_write16(GUEST_INTR_STATUS, status);
8001 }
8002}
8003
8004static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
8005{
8006 if (!is_guest_mode(vcpu)) {
8007 vmx_set_rvi(max_irr);
8008 return;
8009 }
8010
8011 if (max_irr == -1)
8012 return;
8013
	/*
	 * If a vmexit is needed, vmx_check_nested_events handles it.
	 */
	if (nested_exit_on_intr(vcpu))
		return;
8020
	/*
	 * Else, fall back to pre-APICv interrupt injection since L2
	 * is run without virtual interrupt delivery.
	 */
	if (!kvm_event_needs_reinjection(vcpu) &&
	    vmx_interrupt_allowed(vcpu)) {
8027 kvm_queue_interrupt(vcpu, max_irr, false);
8028 vmx_inject_irq(vcpu);
8029 }
8030}
8031
8032static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
8033{
8034 if (!vmx_vm_has_apicv(vcpu->kvm))
8035 return;
8036
8037 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
8038 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
8039 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
8040 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
8041}
8042
8043static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
8044{
8045 u32 exit_intr_info;
8046
8047 if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
8048 || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
8049 return;
8050
8051 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
8052 exit_intr_info = vmx->exit_intr_info;
8053
	/* Handle machine checks before interrupts are enabled */
	if (is_machine_check(exit_intr_info))
		kvm_machine_check();
8057
	/* We need to handle NMIs before interrupts are enabled */
8059 if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
8060 (exit_intr_info & INTR_INFO_VALID_MASK)) {
8061 kvm_before_handle_nmi(&vmx->vcpu);
8062 asm("int $2");
8063 kvm_after_handle_nmi(&vmx->vcpu);
8064 }
8065}
8066
8067static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
8068{
8069 u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
8070
	/*
	 * If an external interrupt caused this exit, dispatch it by hand:
	 * build an interrupt stack frame (with IF set in the saved flags, so
	 * the handler returns with interrupts enabled) and call the host IDT
	 * entry for that vector directly.
	 */
8076 if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
8077 == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
8078 unsigned int vector;
8079 unsigned long entry;
8080 gate_desc *desc;
8081 struct vcpu_vmx *vmx = to_vmx(vcpu);
8082#ifdef CONFIG_X86_64
8083 unsigned long tmp;
8084#endif
8085
8086 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
8087 desc = (gate_desc *)vmx->host_idt_base + vector;
8088 entry = gate_offset(*desc);
8089 asm volatile(
8090#ifdef CONFIG_X86_64
8091 "mov %%" _ASM_SP ", %[sp]\n\t"
8092 "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
8093 "push $%c[ss]\n\t"
8094 "push %[sp]\n\t"
8095#endif
8096 "pushf\n\t"
8097 "orl $0x200, (%%" _ASM_SP ")\n\t"
8098 __ASM_SIZE(push) " $%c[cs]\n\t"
8099 "call *%[entry]\n\t"
8100 :
8101#ifdef CONFIG_X86_64
8102 [sp]"=&r"(tmp)
8103#endif
8104 :
8105 [entry]"r"(entry),
8106 [ss]"i"(__KERNEL_DS),
8107 [cs]"i"(__KERNEL_CS)
8108 );
8109 } else
8110 local_irq_enable();
8111}
8112
8113static bool vmx_has_high_real_mode_segbase(void)
8114{
8115 return enable_unrestricted_guest || emulate_invalid_guest_state;
8116}
8117
8118static bool vmx_mpx_supported(void)
8119{
8120 return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
8121 (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
8122}
8123
8124static bool vmx_xsaves_supported(void)
8125{
8126 return vmcs_config.cpu_based_2nd_exec_ctrl &
8127 SECONDARY_EXEC_XSAVES;
8128}
8129
8130static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
8131{
8132 u32 exit_intr_info;
8133 bool unblock_nmi;
8134 u8 vector;
8135 bool idtv_info_valid;
8136
8137 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
8138
8139 if (cpu_has_virtual_nmis()) {
8140 if (vmx->nmi_known_unmasked)
8141 return;

		/*
		 * Can't use vmx->exit_intr_info since we're not sure what
		 * the exit reason is.
		 */
8146 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
8147 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
8148 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
8149
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
		 * a guest IRET fault.
		 * SDM 3: 23.2.2 (September 2008)
		 * Bit 12 is undefined in any of the following cases:
		 *  If the VM exit sets the valid bit in the IDT-vectoring
		 *   information field.
		 *  If the VM exit is due to a double fault.
		 */
8159 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
8160 vector != DF_VECTOR && !idtv_info_valid)
8161 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
8162 GUEST_INTR_STATE_NMI);
8163 else
8164 vmx->nmi_known_unmasked =
8165 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
8166 & GUEST_INTR_STATE_NMI);
8167 } else if (unlikely(vmx->soft_vnmi_blocked))
8168 vmx->vnmi_blocked_time +=
8169 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
8170}
8171
8172static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
8173 u32 idt_vectoring_info,
8174 int instr_len_field,
8175 int error_code_field)
8176{
8177 u8 vector;
8178 int type;
8179 bool idtv_info_valid;
8180
8181 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
8182
8183 vcpu->arch.nmi_injected = false;
8184 kvm_clear_exception_queue(vcpu);
8185 kvm_clear_interrupt_queue(vcpu);
8186
8187 if (!idtv_info_valid)
8188 return;
8189
8190 kvm_make_request(KVM_REQ_EVENT, vcpu);
8191
8192 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
8193 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
8194
8195 switch (type) {
8196 case INTR_TYPE_NMI_INTR:
8197 vcpu->arch.nmi_injected = true;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Clear bit "block by NMI" before VM entry if a NMI
		 * delivery faulted.
		 */
8203 vmx_set_nmi_mask(vcpu, false);
8204 break;
8205 case INTR_TYPE_SOFT_EXCEPTION:
8206 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
		/* fall through */
8208 case INTR_TYPE_HARD_EXCEPTION:
8209 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
8210 u32 err = vmcs_read32(error_code_field);
8211 kvm_requeue_exception_e(vcpu, vector, err);
8212 } else
8213 kvm_requeue_exception(vcpu, vector);
8214 break;
8215 case INTR_TYPE_SOFT_INTR:
8216 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
		/* fall through */
8218 case INTR_TYPE_EXT_INTR:
8219 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
8220 break;
8221 default:
8222 break;
8223 }
8224}
8225
8226static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
8227{
8228 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
8229 VM_EXIT_INSTRUCTION_LEN,
8230 IDT_VECTORING_ERROR_CODE);
8231}
8232
8233static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
8234{
8235 __vmx_complete_interrupts(vcpu,
8236 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
8237 VM_ENTRY_INSTRUCTION_LEN,
8238 VM_ENTRY_EXCEPTION_ERROR_CODE);
8239
8240 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
8241}
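
/*
 * Note the symmetry between the two callers of __vmx_complete_interrupts():
 * vmx_complete_interrupts() re-queues an event that was being delivered
 * when the VM exit occurred (IDT-vectoring information), while
 * vmx_cancel_injection() re-queues an event that had been programmed for
 * an aborted VM entry (VM-entry interruption information) and clears it.
 */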
8242
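/*
 * Keep perf-managed MSRs (e.g. the global perf event control MSRs reported
 * by perf_guest_get_msrs()) consistent across the world switch by placing
 * them on the VMCS atomic MSR-switch lists; MSRs whose host and guest
 * values match need no list slot at all.
 */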
8243static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
8244{
8245 int i, nr_msrs;
8246 struct perf_guest_switch_msr *msrs;
8247
8248 msrs = perf_guest_get_msrs(&nr_msrs);
8249
8250 if (!msrs)
8251 return;
8252
8253 for (i = 0; i < nr_msrs; i++)
8254 if (msrs[i].host == msrs[i].guest)
8255 clear_atomic_switch_msr(vmx, msrs[i].msr);
8256 else
8257 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
8258 msrs[i].host);
8259}
8260
8261static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
8262{
8263 struct vcpu_vmx *vmx = to_vmx(vcpu);
8264 unsigned long debugctlmsr, cr4;
8265
	/* Record the guest's net vcpu time for enforced NMI injections. */
8267 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
8268 vmx->entry_time = ktime_get();
8269
	/* Don't enter VMX if guest state is invalid, let the exit handler
	   start emulation until we arrive back to a valid state */
8272 if (vmx->emulation_required)
8273 return;
8274
8275 if (vmx->ple_window_dirty) {
8276 vmx->ple_window_dirty = false;
8277 vmcs_write32(PLE_WINDOW, vmx->ple_window);
8278 }
8279
8280 if (vmx->nested.sync_shadow_vmcs) {
8281 copy_vmcs12_to_shadow(vmx);
8282 vmx->nested.sync_shadow_vmcs = false;
8283 }
8284
8285 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
8286 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
8287 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
8288 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
8289
8290 cr4 = cr4_read_shadow();
8291 if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
8292 vmcs_writel(HOST_CR4, cr4);
8293 vmx->host_state.vmcs_host_cr4 = cr4;
8294 }
8295
	/* When single-stepping over STI and MOV SS, we must clear the
	 * corresponding interruptibility bits in the guest state. Otherwise
	 * vmentry fails as it then expects bit 14 (BS) in pending debug
	 * exceptions being set, but that's not correct for the guest debugging
	 * case. */
8301 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
8302 vmx_set_interrupt_shadow(vcpu, 0);
8303
8304 atomic_switch_perf_msrs(vmx);
8305 debugctlmsr = get_debugctlmsr();
8306
8307 vmx->__launched = vmx->loaded_vmcs->launched;
8308 asm(
		/* Store host registers */
		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
		"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
		"push %%" _ASM_CX " \n\t"
8313 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
8314 "je 1f \n\t"
8315 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
8316 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
8317 "1: \n\t"
		/* Reload cr2 if changed */
8319 "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
8320 "mov %%cr2, %%" _ASM_DX " \n\t"
8321 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
8322 "je 2f \n\t"
8323 "mov %%" _ASM_AX", %%cr2 \n\t"
8324 "2: \n\t"
		/* Check if vmlaunch or vmresume is needed */
8326 "cmpl $0, %c[launched](%0) \n\t"
		/* Load guest registers.  Don't clobber flags. */
8328 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
8329 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
8330 "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
8331 "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
8332 "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
8333 "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
8334#ifdef CONFIG_X86_64
8335 "mov %c[r8](%0), %%r8 \n\t"
8336 "mov %c[r9](%0), %%r9 \n\t"
8337 "mov %c[r10](%0), %%r10 \n\t"
8338 "mov %c[r11](%0), %%r11 \n\t"
8339 "mov %c[r12](%0), %%r12 \n\t"
8340 "mov %c[r13](%0), %%r13 \n\t"
8341 "mov %c[r14](%0), %%r14 \n\t"
8342 "mov %c[r15](%0), %%r15 \n\t"
8343#endif
		"mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */

		/* Enter guest mode */
8347 "jne 1f \n\t"
8348 __ex(ASM_VMX_VMLAUNCH) "\n\t"
8349 "jmp 2f \n\t"
8350 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
8351 "2: "
		/* Save guest registers, load host registers, keep flags */
8353 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
8354 "pop %0 \n\t"
8355 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
8356 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
8357 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
8358 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
8359 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
8360 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
8361 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
8362#ifdef CONFIG_X86_64
8363 "mov %%r8, %c[r8](%0) \n\t"
8364 "mov %%r9, %c[r9](%0) \n\t"
8365 "mov %%r10, %c[r10](%0) \n\t"
8366 "mov %%r11, %c[r11](%0) \n\t"
8367 "mov %%r12, %c[r12](%0) \n\t"
8368 "mov %%r13, %c[r13](%0) \n\t"
8369 "mov %%r14, %c[r14](%0) \n\t"
8370 "mov %%r15, %c[r15](%0) \n\t"
8371#endif
8372 "mov %%cr2, %%" _ASM_AX " \n\t"
8373 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
8374
8375 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
8376 "setbe %c[fail](%0) \n\t"
8377 ".pushsection .rodata \n\t"
8378 ".global vmx_return \n\t"
8379 "vmx_return: " _ASM_PTR " 2b \n\t"
8380 ".popsection"
8381 : : "c"(vmx), "d"((unsigned long)HOST_RSP),
8382 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
8383 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
8384 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
8385 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
8386 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
8387 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
8388 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
8389 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
8390 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
8391 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
8392#ifdef CONFIG_X86_64
8393 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
8394 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
8395 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
8396 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
8397 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
8398 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
8399 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
8400 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
8401#endif
8402 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
8403 [wordsize]"i"(sizeof(ulong))
8404 : "cc", "memory"
8405#ifdef CONFIG_X86_64
8406 , "rax", "rbx", "rdi", "rsi"
8407 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
8408#else
8409 , "eax", "ebx", "edi", "esi"
8410#endif
8411 );
8412
	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
8414 if (debugctlmsr)
8415 update_debugctlmsr(debugctlmsr);
8416
8417#ifndef CONFIG_X86_64
	/*
	 * The sysexit path does not restore ds/es, so we must set them to
	 * a reasonable value ourselves.
	 *
	 * We can't defer this to vmx_load_host_state() since that function
	 * may be executed in interrupt context, which saves and restores
	 * segments around it, nullifying its effect.
	 */
8426 loadsegment(ds, __USER_DS);
8427 loadsegment(es, __USER_DS);
8428#endif
8429
8430 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
8431 | (1 << VCPU_EXREG_RFLAGS)
8432 | (1 << VCPU_EXREG_PDPTR)
8433 | (1 << VCPU_EXREG_SEGMENTS)
8434 | (1 << VCPU_EXREG_CR3));
8435 vcpu->arch.regs_dirty = 0;
8436
8437 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
8438
8439 vmx->loaded_vmcs->launched = 1;
8440
8441 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
8442 trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
8443
	/*
	 * The KVM_REQ_EVENT optimization bit is only on for one entry, and if
	 * we did not inject a still-pending event to L1 now because of
	 * nested_run_pending, we need to re-enable this bit.
	 */
8449 if (vmx->nested.nested_run_pending)
8450 kvm_make_request(KVM_REQ_EVENT, vcpu);
8451
8452 vmx->nested.nested_run_pending = 0;
8453
8454 vmx_complete_atomic_exit(vmx);
8455 vmx_recover_nmi_blocking(vmx);
8456 vmx_complete_interrupts(vmx);
8457}
8458
8459static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
8460{
8461 struct vcpu_vmx *vmx = to_vmx(vcpu);
8462 int cpu;
8463
8464 if (vmx->loaded_vmcs == &vmx->vmcs01)
8465 return;
8466
8467 cpu = get_cpu();
8468 vmx->loaded_vmcs = &vmx->vmcs01;
8469 vmx_vcpu_put(vcpu);
8470 vmx_vcpu_load(vcpu, cpu);
8471 vcpu->cpu = cpu;
8472 put_cpu();
8473}
8474
8475static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
8476{
8477 struct vcpu_vmx *vmx = to_vmx(vcpu);
8478
8479 if (enable_pml)
8480 vmx_disable_pml(vmx);
8481 free_vpid(vmx);
8482 leave_guest_mode(vcpu);
8483 vmx_load_vmcs01(vcpu);
8484 free_nested(vmx);
8485 free_loaded_vmcs(vmx->loaded_vmcs);
8486 kfree(vmx->guest_msrs);
8487 kvm_vcpu_uninit(vcpu);
8488 kmem_cache_free(kvm_vcpu_cache, vmx);
8489}
8490
8491static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
8492{
8493 int err;
8494 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
8495 int cpu;
8496
8497 if (!vmx)
8498 return ERR_PTR(-ENOMEM);
8499
8500 allocate_vpid(vmx);
8501
8502 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
8503 if (err)
8504 goto free_vcpu;
8505
8506 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
8507 BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
8508 > PAGE_SIZE);
8509
	err = -ENOMEM;
	if (!vmx->guest_msrs)
		goto uninit_vcpu;
8514
8515 vmx->loaded_vmcs = &vmx->vmcs01;
8516 vmx->loaded_vmcs->vmcs = alloc_vmcs();
8517 if (!vmx->loaded_vmcs->vmcs)
8518 goto free_msrs;
8519 if (!vmm_exclusive)
8520 kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
8521 loaded_vmcs_init(vmx->loaded_vmcs);
8522 if (!vmm_exclusive)
8523 kvm_cpu_vmxoff();
8524
8525 cpu = get_cpu();
8526 vmx_vcpu_load(&vmx->vcpu, cpu);
8527 vmx->vcpu.cpu = cpu;
8528 err = vmx_vcpu_setup(vmx);
8529 vmx_vcpu_put(&vmx->vcpu);
8530 put_cpu();
8531 if (err)
8532 goto free_vmcs;
8533 if (vm_need_virtualize_apic_accesses(kvm)) {
8534 err = alloc_apic_access_page(kvm);
8535 if (err)
8536 goto free_vmcs;
8537 }
8538
8539 if (enable_ept) {
8540 if (!kvm->arch.ept_identity_map_addr)
8541 kvm->arch.ept_identity_map_addr =
8542 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
8543 err = init_rmode_identity_map(kvm);
8544 if (err)
8545 goto free_vmcs;
8546 }
8547
8548 if (nested)
8549 nested_vmx_setup_ctls_msrs(vmx);
8550
8551 vmx->nested.posted_intr_nv = -1;
8552 vmx->nested.current_vmptr = -1ull;
8553 vmx->nested.current_vmcs12 = NULL;
8554
	/*
	 * If PML is turned on, failure on enabling PML just results in
	 * failure of creating the vcpu, therefore we can simplify PML
	 * logic (by creating initial PML buffer in vmx_vcpu_reset).
	 */
8561 if (enable_pml) {
8562 err = vmx_enable_pml(vmx);
8563 if (err)
8564 goto free_vmcs;
8565 }
8566
8567 return &vmx->vcpu;
8568
8569free_vmcs:
8570 free_loaded_vmcs(vmx->loaded_vmcs);
8571free_msrs:
8572 kfree(vmx->guest_msrs);
8573uninit_vcpu:
8574 kvm_vcpu_uninit(&vmx->vcpu);
8575free_vcpu:
8576 free_vpid(vmx);
8577 kmem_cache_free(kvm_vcpu_cache, vmx);
8578 return ERR_PTR(err);
8579}
8580
8581static void __init vmx_check_processor_compat(void *rtn)
8582{
8583 struct vmcs_config vmcs_conf;
8584
8585 *(int *)rtn = 0;
8586 if (setup_vmcs_config(&vmcs_conf) < 0)
8587 *(int *)rtn = -EIO;
8588 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
8589 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
8590 smp_processor_id());
8591 *(int *)rtn = -EIO;
8592 }
8593}
8594
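/* EPT uses a 4-level page walk: the default guest address width (GAW) + 1. */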
8595static int get_ept_level(void)
8596{
8597 return VMX_EPT_DEFAULT_GAW + 1;
8598}
8599
8600static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
8601{
8602 u8 cache;
8603 u64 ipat = 0;
8604
	/*
	 * MMIO is always mapped uncacheable. When the VM has no
	 * noncoherent DMA, the guest MTRRs can be ignored: map everything
	 * write-back and set the "ignore PAT" (IPAT) bit. If CR0.CD is
	 * set, the result depends on the KVM_X86_QUIRK_CD_NW_CLEARED
	 * quirk. Otherwise, honor the guest MTRR type for this gfn.
	 */
8616 if (is_mmio) {
8617 cache = MTRR_TYPE_UNCACHABLE;
8618 goto exit;
8619 }
8620
8621 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
8622 ipat = VMX_EPT_IPAT_BIT;
8623 cache = MTRR_TYPE_WRBACK;
8624 goto exit;
8625 }
8626
8627 if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
8628 ipat = VMX_EPT_IPAT_BIT;
8629 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
8630 cache = MTRR_TYPE_WRBACK;
8631 else
8632 cache = MTRR_TYPE_UNCACHABLE;
8633 goto exit;
8634 }
8635
8636 cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
8637
8638exit:
8639 return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
8640}
8641
8642static int vmx_get_lpage_level(void)
8643{
8644 if (enable_ept && !cpu_has_vmx_ept_1g_page())
8645 return PT_DIRECTORY_LEVEL;
8646 else
		/* For 1GB page support (shadow paging, or EPT with 1GB pages) */
8648 return PT_PDPE_LEVEL;
8649}
8650
8651static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
8652{
8653 struct kvm_cpuid_entry2 *best;
8654 struct vcpu_vmx *vmx = to_vmx(vcpu);
8655 u32 exec_control;
8656
8657 vmx->rdtscp_enabled = false;
8658 if (vmx_rdtscp_supported()) {
8659 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
8660 if (exec_control & SECONDARY_EXEC_RDTSCP) {
8661 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
8662 if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
8663 vmx->rdtscp_enabled = true;
8664 else {
8665 exec_control &= ~SECONDARY_EXEC_RDTSCP;
8666 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
8667 exec_control);
8668 }
8669 }
8670 if (nested && !vmx->rdtscp_enabled)
8671 vmx->nested.nested_vmx_secondary_ctls_high &=
8672 ~SECONDARY_EXEC_RDTSCP;
8673 }
8674
	/* Exposing INVPCID only when PCID is exposed */
8676 best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
8677 if (vmx_invpcid_supported() &&
8678 best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
8679 guest_cpuid_has_pcid(vcpu)) {
8680 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
8681 exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
8682 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
8683 exec_control);
8684 } else {
8685 if (cpu_has_secondary_exec_ctrls()) {
8686 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
8687 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
8688 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
8689 exec_control);
8690 }
8691 if (best)
8692 best->ebx &= ~bit(X86_FEATURE_INVPCID);
8693 }
8694}
8695
8696static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
8697{
8698 if (func == 1 && nested)
8699 entry->ecx |= bit(X86_FEATURE_VMX);
8700}
8701
8702static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
8703 struct x86_exception *fault)
8704{
8705 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8706 u32 exit_reason;
8707
8708 if (fault->error_code & PFERR_RSVD_MASK)
8709 exit_reason = EXIT_REASON_EPT_MISCONFIG;
8710 else
8711 exit_reason = EXIT_REASON_EPT_VIOLATION;
8712 nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
8713 vmcs12->guest_physical_address = fault->address;
8714}
8715
/* Callbacks for nested_ept_init_mmu_context: */

8718static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
8719{
	/* return the page table to be shadowed - in our case, EPT12 */
8721 return get_vmcs12(vcpu)->ept_pointer;
8722}
8723
8724static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
8725{
8726 WARN_ON(mmu_is_nested(vcpu));
8727 kvm_init_shadow_ept_mmu(vcpu,
8728 to_vmx(vcpu)->nested.nested_vmx_ept_caps &
8729 VMX_EPT_EXECUTE_ONLY_BIT);
8730 vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
8731 vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
8732 vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
8733
8734 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
8735}
8736
8737static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
8738{
8739 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
8740}
8741
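/*
 * A #PF in L2 goes to L1 iff the error code matches the PFEC mask/match
 * pair and EB.PF is set, or mismatches it and EB.PF is clear - hence the
 * XOR below.
 */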
8742static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
8743 u16 error_code)
8744{
8745 bool inequality, bit;
8746
8747 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
8748 inequality =
8749 (error_code & vmcs12->page_fault_error_code_mask) !=
8750 vmcs12->page_fault_error_code_match;
8751 return inequality ^ bit;
8752}
8753
8754static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
8755 struct x86_exception *fault)
8756{
8757 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8758
8759 WARN_ON(!is_guest_mode(vcpu));
8760
8761 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code))
8762 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
8763 vmcs_read32(VM_EXIT_INTR_INFO),
8764 vmcs_readl(EXIT_QUALIFICATION));
8765 else
8766 kvm_inject_page_fault(vcpu, fault);
8767}
8768
8769static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
8770 struct vmcs12 *vmcs12)
8771{
8772 struct vcpu_vmx *vmx = to_vmx(vcpu);
8773 int maxphyaddr = cpuid_maxphyaddr(vcpu);
8774
8775 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
8776 if (!PAGE_ALIGNED(vmcs12->apic_access_addr) ||
8777 vmcs12->apic_access_addr >> maxphyaddr)
8778 return false;
8779
		/*
		 * Translate L1 physical address to host physical
		 * address for vmcs02. Keep the page pinned, so this
		 * physical address remains valid. We keep a reference
		 * to it so we can release it later.
		 */
8786 if (vmx->nested.apic_access_page)
8787 nested_release_page(vmx->nested.apic_access_page);
8788 vmx->nested.apic_access_page =
8789 nested_get_page(vcpu, vmcs12->apic_access_addr);
8790 }
8791
8792 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
8793 if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) ||
8794 vmcs12->virtual_apic_page_addr >> maxphyaddr)
8795 return false;
8796
8797 if (vmx->nested.virtual_apic_page)
8798 nested_release_page(vmx->nested.virtual_apic_page);
8799 vmx->nested.virtual_apic_page =
8800 nested_get_page(vcpu, vmcs12->virtual_apic_page_addr);
8801
		/*
		 * Failing the vm entry is _not_ what the processor does
		 * but it's basically the only possibility we have.
		 * We could still enter the guest if CR8 load exits are
		 * enabled, CR8 store exits are enabled, and virtualize APIC
		 * access is disabled; in this case the processor would never
		 * use the TPR shadow and we could simply clear the bit from
		 * the execution control.  But such a configuration is useless,
		 * so let's keep the code simple.
		 */
8812 if (!vmx->nested.virtual_apic_page)
8813 return false;
8814 }
8815
8816 if (nested_cpu_has_posted_intr(vmcs12)) {
8817 if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) ||
8818 vmcs12->posted_intr_desc_addr >> maxphyaddr)
8819 return false;
8820
8821 if (vmx->nested.pi_desc_page) {
8822 kunmap(vmx->nested.pi_desc_page);
8823 nested_release_page(vmx->nested.pi_desc_page);
8824 }
8825 vmx->nested.pi_desc_page =
8826 nested_get_page(vcpu, vmcs12->posted_intr_desc_addr);
8827 if (!vmx->nested.pi_desc_page)
8828 return false;
8829
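		/*
		 * Map the descriptor page and point pi_desc at the offset of
		 * the 64-byte descriptor within it.
		 */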
8830 vmx->nested.pi_desc =
8831 (struct pi_desc *)kmap(vmx->nested.pi_desc_page);
8832 if (!vmx->nested.pi_desc) {
8833 nested_release_page_clean(vmx->nested.pi_desc_page);
8834 return false;
8835 }
8836 vmx->nested.pi_desc =
8837 (struct pi_desc *)((void *)vmx->nested.pi_desc +
8838 (unsigned long)(vmcs12->posted_intr_desc_addr &
8839 (PAGE_SIZE - 1)));
8840 }
8841
8842 return true;
8843}
8844
8845static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
8846{
8847 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
8848 struct vcpu_vmx *vmx = to_vmx(vcpu);
8849
8850 if (vcpu->arch.virtual_tsc_khz == 0)
8851 return;
8852
	/* Make sure short timeouts reliably trigger an immediate vmexit.
	 * hrtimer_start does not guarantee this. */
8855 if (preemption_timeout <= 1) {
8856 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
8857 return;
8858 }
8859
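	/*
	 * The value is in units of 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE
	 * TSC cycles; convert to nanoseconds: cycles * 10^6 / tsc_khz.
	 */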
8860 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
8861 preemption_timeout *= 1000000;
8862 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
8863 hrtimer_start(&vmx->nested.preemption_timer,
8864 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
8865}
8866
8867static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
8868 struct vmcs12 *vmcs12)
8869{
8870 int maxphyaddr;
8871 u64 addr;
8872
8873 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
8874 return 0;
8875
8876 if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
8877 WARN_ON(1);
8878 return -EINVAL;
8879 }
8880 maxphyaddr = cpuid_maxphyaddr(vcpu);
8881
8882 if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
8883 ((addr + PAGE_SIZE) >> maxphyaddr))
8884 return -EINVAL;
8885
8886 return 0;
8887}
8888
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
8893static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
8894 struct vmcs12 *vmcs12)
8895{
8896 int msr;
8897 struct page *page;
8898 unsigned long *msr_bitmap;
8899
8900 if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
8901 return false;
8902
8903 page = nested_get_page(vcpu, vmcs12->msr_bitmap);
8904 if (!page) {
8905 WARN_ON(1);
8906 return false;
8907 }
8908 msr_bitmap = (unsigned long *)kmap(page);
8909 if (!msr_bitmap) {
8910 nested_release_page_clean(page);
8911 WARN_ON(1);
8912 return false;
8913 }
8914
8915 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
8916 if (nested_cpu_has_apic_reg_virt(vmcs12))
8917 for (msr = 0x800; msr <= 0x8ff; msr++)
8918 nested_vmx_disable_intercept_for_msr(
8919 msr_bitmap,
8920 vmx_msr_bitmap_nested,
8921 msr, MSR_TYPE_R);
		/* TPR is allowed */
8923 nested_vmx_disable_intercept_for_msr(msr_bitmap,
8924 vmx_msr_bitmap_nested,
8925 APIC_BASE_MSR + (APIC_TASKPRI >> 4),
8926 MSR_TYPE_R | MSR_TYPE_W);
8927 if (nested_cpu_has_vid(vmcs12)) {
			/* EOI and self-IPI are allowed */
8929 nested_vmx_disable_intercept_for_msr(
8930 msr_bitmap,
8931 vmx_msr_bitmap_nested,
8932 APIC_BASE_MSR + (APIC_EOI >> 4),
8933 MSR_TYPE_W);
8934 nested_vmx_disable_intercept_for_msr(
8935 msr_bitmap,
8936 vmx_msr_bitmap_nested,
8937 APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
8938 MSR_TYPE_W);
8939 }
8940 } else {
		/*
		 * Enable reading intercept of all the x2apic
		 * MSRs. We should not rely on vmcs12 to do any
		 * optimizations here, it may have been modified
		 * by L1.
		 */
8947 for (msr = 0x800; msr <= 0x8ff; msr++)
8948 __vmx_enable_intercept_for_msr(
8949 vmx_msr_bitmap_nested,
8950 msr,
8951 MSR_TYPE_R);
8952
8953 __vmx_enable_intercept_for_msr(
8954 vmx_msr_bitmap_nested,
8955 APIC_BASE_MSR + (APIC_TASKPRI >> 4),
8956 MSR_TYPE_W);
8957 __vmx_enable_intercept_for_msr(
8958 vmx_msr_bitmap_nested,
8959 APIC_BASE_MSR + (APIC_EOI >> 4),
8960 MSR_TYPE_W);
8961 __vmx_enable_intercept_for_msr(
8962 vmx_msr_bitmap_nested,
8963 APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
8964 MSR_TYPE_W);
8965 }
8966 kunmap(page);
8967 nested_release_page_clean(page);
8968
8969 return true;
8970}
8971
8972static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
8973 struct vmcs12 *vmcs12)
8974{
8975 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
8976 !nested_cpu_has_apic_reg_virt(vmcs12) &&
8977 !nested_cpu_has_vid(vmcs12) &&
8978 !nested_cpu_has_posted_intr(vmcs12))
8979 return 0;
8980
	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
8985 if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
8986 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
8987 return -EINVAL;
8988
	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
8993 if (nested_cpu_has_vid(vmcs12) &&
8994 !nested_exit_on_intr(vcpu))
8995 return -EINVAL;
8996
	/*
	 * bits 15:8 of posted_intr_nv must be zero; the descriptor
	 * address itself has already been checked in
	 * nested_get_vmcs12_pages().
	 */
9002 if (nested_cpu_has_posted_intr(vmcs12) &&
9003 (!nested_cpu_has_vid(vmcs12) ||
9004 !nested_exit_intr_ack_set(vcpu) ||
9005 vmcs12->posted_intr_nv & 0xff00))
9006 return -EINVAL;
9007
	/* tpr shadow is needed by all apicv features. */
9009 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
9010 return -EINVAL;
9011
9012 return 0;
9013}
9014
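/*
 * Validate one MSR switch area: a zero count is always valid; otherwise
 * the area must be 16-byte aligned and must lie entirely below the
 * guest's maximum physical address.
 */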
9015static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
9016 unsigned long count_field,
9017 unsigned long addr_field)
9018{
9019 int maxphyaddr;
9020 u64 count, addr;
9021
9022 if (vmcs12_read_any(vcpu, count_field, &count) ||
9023 vmcs12_read_any(vcpu, addr_field, &addr)) {
9024 WARN_ON(1);
9025 return -EINVAL;
9026 }
9027 if (count == 0)
9028 return 0;
9029 maxphyaddr = cpuid_maxphyaddr(vcpu);
9030 if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
9031 (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
9032 pr_warn_ratelimited(
9033 "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
9034 addr_field, maxphyaddr, count, addr);
9035 return -EINVAL;
9036 }
9037 return 0;
9038}
9039
9040static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
9041 struct vmcs12 *vmcs12)
9042{
9043 if (vmcs12->vm_exit_msr_load_count == 0 &&
9044 vmcs12->vm_exit_msr_store_count == 0 &&
9045 vmcs12->vm_entry_msr_load_count == 0)
9046 return 0;
9047 if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
9048 VM_EXIT_MSR_LOAD_ADDR) ||
9049 nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
9050 VM_EXIT_MSR_STORE_ADDR) ||
9051 nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
9052 VM_ENTRY_MSR_LOAD_ADDR))
9053 return -EINVAL;
9054 return 0;
9055}
9056
9057static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
9058 struct vmx_msr_entry *e)
9059{
	/* x2APIC MSR accesses are not allowed */
9061 if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
9062 return -EINVAL;
9063 if (e->index == MSR_IA32_UCODE_WRITE ||
9064 e->index == MSR_IA32_UCODE_REV)
9065 return -EINVAL;
9066 if (e->reserved != 0)
9067 return -EINVAL;
9068 return 0;
9069}
9070
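/*
 * On top of the common checks, VM-entry MSR loads must not include
 * FS/GS base or IA32_SMM_MONITOR_CTL, per the SDM's VM-entry checks.
 */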
9071static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
9072 struct vmx_msr_entry *e)
9073{
9074 if (e->index == MSR_FS_BASE ||
9075 e->index == MSR_GS_BASE ||
9076 e->index == MSR_IA32_SMM_MONITOR_CTL ||
9077 nested_vmx_msr_check_common(vcpu, e))
9078 return -EINVAL;
9079 return 0;
9080}
9081
9082static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
9083 struct vmx_msr_entry *e)
9084{
9085 if (e->index == MSR_IA32_SMBASE ||
9086 nested_vmx_msr_check_common(vcpu, e))
9087 return -EINVAL;
9088 return 0;
9089}
9090
/*
 * Load guest's/host's msr at nested entry/exit.
 * return 0 for success, entry index for failure.
 */
9095static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9096{
9097 u32 i;
9098 struct vmx_msr_entry e;
9099 struct msr_data msr;
9100
9101 msr.host_initiated = false;
9102 for (i = 0; i < count; i++) {
9103 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
9104 &e, sizeof(e))) {
9105 pr_warn_ratelimited(
9106 "%s cannot read MSR entry (%u, 0x%08llx)\n",
9107 __func__, i, gpa + i * sizeof(e));
9108 goto fail;
9109 }
9110 if (nested_vmx_load_msr_check(vcpu, &e)) {
9111 pr_warn_ratelimited(
9112 "%s check failed (%u, 0x%x, 0x%x)\n",
9113 __func__, i, e.index, e.reserved);
9114 goto fail;
9115 }
9116 msr.index = e.index;
9117 msr.data = e.value;
9118 if (kvm_set_msr(vcpu, &msr)) {
9119 pr_warn_ratelimited(
9120 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
9121 __func__, i, e.index, e.value);
9122 goto fail;
9123 }
9124 }
9125 return 0;
9126fail:
9127 return i + 1;
9128}
9129
9130static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9131{
9132 u32 i;
9133 struct vmx_msr_entry e;
9134
9135 for (i = 0; i < count; i++) {
9136 struct msr_data msr_info;
9137 if (kvm_vcpu_read_guest(vcpu,
9138 gpa + i * sizeof(e),
9139 &e, 2 * sizeof(u32))) {
9140 pr_warn_ratelimited(
9141 "%s cannot read MSR entry (%u, 0x%08llx)\n",
9142 __func__, i, gpa + i * sizeof(e));
9143 return -EINVAL;
9144 }
9145 if (nested_vmx_store_msr_check(vcpu, &e)) {
9146 pr_warn_ratelimited(
9147 "%s check failed (%u, 0x%x, 0x%x)\n",
9148 __func__, i, e.index, e.reserved);
9149 return -EINVAL;
9150 }
9151 msr_info.host_initiated = false;
9152 msr_info.index = e.index;
9153 if (kvm_get_msr(vcpu, &msr_info)) {
9154 pr_warn_ratelimited(
9155 "%s cannot read MSR (%u, 0x%x)\n",
9156 __func__, i, e.index);
9157 return -EINVAL;
9158 }
9159 if (kvm_vcpu_write_guest(vcpu,
9160 gpa + i * sizeof(e) +
9161 offsetof(struct vmx_msr_entry, value),
9162 &msr_info.data, sizeof(msr_info.data))) {
9163 pr_warn_ratelimited(
9164 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
9165 __func__, i, e.index, msr_info.data);
9166 return -EINVAL;
9167 }
9168 }
9169 return 0;
9170}
9171
/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that will both be appropriate to L1's requests, and our
 * needs. In addition to modifying the active vmcs (which is vmcs02), this
 * function also has additional necessary side-effects, like setting various
 * vcpu->arch fields.
 */
9181static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
9182{
9183 struct vcpu_vmx *vmx = to_vmx(vcpu);
9184 u32 exec_control;
9185
9186 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
9187 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
9188 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
9189 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
9190 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
9191 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
9192 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
9193 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
9194 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
9195 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
9196 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
9197 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
9198 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
9199 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
9200 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
9201 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
9202 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
9203 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
9204 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
9205 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
9206 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
9207 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
9208 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
9209 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
9210 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
9211 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
9212 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
9213 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
9214 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
9215 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
9216 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
9217 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
9218 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
9219 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
9220 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
9221 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
9222
9223 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
9224 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
9225 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
9226 } else {
9227 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
9228 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
9229 }
9230 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
9231 vmcs12->vm_entry_intr_info_field);
9232 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
9233 vmcs12->vm_entry_exception_error_code);
9234 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
9235 vmcs12->vm_entry_instruction_len);
9236 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
9237 vmcs12->guest_interruptibility_info);
9238 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
9239 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
9240 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
9241 vmcs12->guest_pending_dbg_exceptions);
9242 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
9243 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
9244
9245 if (nested_cpu_has_xsaves(vmcs12))
9246 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
9247 vmcs_write64(VMCS_LINK_POINTER, -1ull);
9248
9249 exec_control = vmcs12->pin_based_vm_exec_control;
9250 exec_control |= vmcs_config.pin_based_exec_ctrl;
9251 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
9252
9253 if (nested_cpu_has_posted_intr(vmcs12)) {
		/*
		 * Note that we use L0's vector here and in
		 * vmx_deliver_nested_posted_interrupt.
		 */
9258 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
9259 vmx->nested.pi_pending = false;
9260 vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
9261 vmcs_write64(POSTED_INTR_DESC_ADDR,
9262 page_to_phys(vmx->nested.pi_desc_page) +
9263 (unsigned long)(vmcs12->posted_intr_desc_addr &
9264 (PAGE_SIZE - 1)));
9265 } else
9266 exec_control &= ~PIN_BASED_POSTED_INTR;
9267
9268 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
9269
9270 vmx->nested.preemption_timer_expired = false;
9271 if (nested_cpu_has_preemption_timer(vmcs12))
9272 vmx_start_preemption_timer(vcpu);
9273
	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
	 * If enable_ept, L0 doesn't care about page faults and we should
	 * set all of these to L1's desires. However, if !enable_ept, L0 does
	 * care about (at least some) page faults, and because it is not easy
	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
	 * to exit on each and every L2 page fault. This is done by setting
	 * MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 */
9294 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
9295 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
9296 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
9297 enable_ept ? vmcs12->page_fault_error_code_match : 0);
9298
9299 if (cpu_has_secondary_exec_ctrls()) {
9300 exec_control = vmx_secondary_exec_control(vmx);
9301 if (!vmx->rdtscp_enabled)
9302 exec_control &= ~SECONDARY_EXEC_RDTSCP;
9303
9304 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
9305 SECONDARY_EXEC_RDTSCP |
9306 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
9307 SECONDARY_EXEC_APIC_REGISTER_VIRT);
9308 if (nested_cpu_has(vmcs12,
9309 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
9310 exec_control |= vmcs12->secondary_vm_exec_control;
9311
9312 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
			/*
			 * If translation failed, no matter: This feature asks
			 * to exit when accessing the given address, and if it
			 * can never be accessed, this feature won't do
			 * anything anyway.
			 */
9319 if (!vmx->nested.apic_access_page)
9320 exec_control &=
9321 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
9322 else
9323 vmcs_write64(APIC_ACCESS_ADDR,
9324 page_to_phys(vmx->nested.apic_access_page));
9325 } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
9326 (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) {
9327 exec_control |=
9328 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
9329 kvm_vcpu_reload_apic_access_page(vcpu);
9330 }
9331
9332 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
9333 vmcs_write64(EOI_EXIT_BITMAP0,
9334 vmcs12->eoi_exit_bitmap0);
9335 vmcs_write64(EOI_EXIT_BITMAP1,
9336 vmcs12->eoi_exit_bitmap1);
9337 vmcs_write64(EOI_EXIT_BITMAP2,
9338 vmcs12->eoi_exit_bitmap2);
9339 vmcs_write64(EOI_EXIT_BITMAP3,
9340 vmcs12->eoi_exit_bitmap3);
9341 vmcs_write16(GUEST_INTR_STATUS,
9342 vmcs12->guest_intr_status);
9343 }
9344
9345 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
9346 }
9347
	/*
	 * Set host-state according to L0's settings (vmcs12 is irrelevant here)
	 * Some constant fields are set here by vmx_set_constant_host_state().
	 * Other fields are different per CPU, and will be set later when
	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
	 */
9355 vmx_set_constant_host_state(vmx);
9356
	/*
	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
	 * entry, but only if the current (host) sp changed from the value
	 * we wrote last (vmx->host_rsp). This cache is no longer relevant
	 * if we switch vmcs, and rather than hold a separate cache per vmcs,
	 * here we just force the write to happen on entry.
	 */
9364 vmx->host_rsp = 0;
9365
9366 exec_control = vmx_exec_control(vmx);
9367 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
9368 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
9369 exec_control &= ~CPU_BASED_TPR_SHADOW;
9370 exec_control |= vmcs12->cpu_based_vm_exec_control;
9371
9372 if (exec_control & CPU_BASED_TPR_SHADOW) {
9373 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
9374 page_to_phys(vmx->nested.virtual_apic_page));
9375 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
9376 }
9377
9378 if (cpu_has_vmx_msr_bitmap() &&
9379 exec_control & CPU_BASED_USE_MSR_BITMAPS) {
9380 nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
		/* MSR_BITMAP will be set by the following vmx_set_efer. */
9382 } else
9383 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
9384
	/*
	 * Merging of IO bitmap not currently supported.
	 * Rather, exit every time.
	 */
9389 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
9390 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
9391
9392 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
9393
	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
9398 update_exception_bitmap(vcpu);
9399 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
9400 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
9401
	/* L2->L1 exit controls are emulated - the hardware exit is to L0 so
	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
	 * bits are further modified by vmx_set_efer() below.
	 */
9406 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
9407
	/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
	 * emulated by vmx_set_efer(), below.
	 */
9411 vm_entry_controls_init(vmx,
9412 (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
9413 ~VM_ENTRY_IA32E_MODE) |
9414 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
9415
9416 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
9417 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
9418 vcpu->arch.pat = vmcs12->guest_ia32_pat;
9419 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
9420 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
9421
9422
9423 set_cr4_guest_host_mask(vmx);
9424
9425 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
9426 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
9427
9428 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
9429 vmcs_write64(TSC_OFFSET,
9430 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
9431 else
9432 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
9433
9434 if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
9440 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
9441 vmx_flush_tlb(vcpu);
9442 }
9443
9444 if (nested_cpu_has_ept(vmcs12)) {
9445 kvm_mmu_unload(vcpu);
9446 nested_ept_init_mmu_context(vcpu);
9447 }
9448
9449 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
9450 vcpu->arch.efer = vmcs12->guest_ia32_efer;
9451 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
9452 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
9453 else
9454 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
9455
9456 vmx_set_efer(vcpu, vcpu->arch.efer);
9457
	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
	 * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; It's not enough to take
	 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we have
	 * more bits than L1 expected.
	 */
9466 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
9467 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
9468
9469 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
9470 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
9471
	/* kvm_set_cr3() handles guest cr3 for both EPT and shadow paging */
9473 kvm_set_cr3(vcpu, vmcs12->guest_cr3);
9474 kvm_mmu_reset_context(vcpu);
9475
9476 if (!enable_ept)
9477 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
9478
	/*
	 * L1 may access the L2's PDPTR, so save them to construct vmcs12
	 */
9482 if (enable_ept) {
9483 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
9484 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
9485 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
9486 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
9487 }
9488
9489 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
9490 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
9491}
9492
/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
9497static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
9498{
9499 struct vmcs12 *vmcs12;
9500 struct vcpu_vmx *vmx = to_vmx(vcpu);
9501 int cpu;
9502 struct loaded_vmcs *vmcs02;
9503 bool ia32e;
9504 u32 msr_entry_idx;
9505
9506 if (!nested_vmx_check_permission(vcpu) ||
9507 !nested_vmx_check_vmcs12(vcpu))
9508 return 1;
9509
9510 skip_emulated_instruction(vcpu);
9511 vmcs12 = get_vmcs12(vcpu);
9512
9513 if (enable_shadow_vmcs)
9514 copy_shadow_to_vmcs12(vmx);
9515
	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acts appropriately when
	 * they fail: As the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an exit reason of EXIT_REASON_INVALID_STATE
	 * via a "failed VM entry" back to L1.
	 */
9526 if (vmcs12->launch_state == launch) {
9527 nested_vmx_failValid(vcpu,
9528 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
9529 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
9530 return 1;
9531 }
9532
9533 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
9534 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
9535 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
9536 return 1;
9537 }
9538
9539 if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
9540 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
9541 return 1;
9542 }
9543
9544 if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
9545 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
9546 return 1;
9547 }
9548
9549 if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
9550 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
9551 return 1;
9552 }
9553
9554 if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
9555 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
9556 return 1;
9557 }
9558
9559 if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
9560 vmx->nested.nested_vmx_true_procbased_ctls_low,
9561 vmx->nested.nested_vmx_procbased_ctls_high) ||
9562 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
9563 vmx->nested.nested_vmx_secondary_ctls_low,
9564 vmx->nested.nested_vmx_secondary_ctls_high) ||
9565 !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
9566 vmx->nested.nested_vmx_pinbased_ctls_low,
9567 vmx->nested.nested_vmx_pinbased_ctls_high) ||
9568 !vmx_control_verify(vmcs12->vm_exit_controls,
9569 vmx->nested.nested_vmx_true_exit_ctls_low,
9570 vmx->nested.nested_vmx_exit_ctls_high) ||
9571 !vmx_control_verify(vmcs12->vm_entry_controls,
9572 vmx->nested.nested_vmx_true_entry_ctls_low,
9573 vmx->nested.nested_vmx_entry_ctls_high))
9574 {
9575 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
9576 return 1;
9577 }
9578
9579 if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
9580 ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
9581 nested_vmx_failValid(vcpu,
9582 VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
9583 return 1;
9584 }
9585
9586 if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
9587 ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
9588 nested_vmx_entry_failure(vcpu, vmcs12,
9589 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
9590 return 1;
9591 }
9592 if (vmcs12->vmcs_link_pointer != -1ull) {
9593 nested_vmx_entry_failure(vcpu, vmcs12,
9594 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
9595 return 1;
9596 }
9597
	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest VM-entry control. It must also be identical
	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 *   CR0.PG) is 1.
	 */
9607 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
9608 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
9609 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
9610 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
9611 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
9612 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
9613 nested_vmx_entry_failure(vcpu, vmcs12,
9614 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
9615 return 1;
9616 }
9617 }
9618
	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
9625 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
9626 ia32e = (vmcs12->vm_exit_controls &
9627 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
9628 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
9629 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
9630 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
9631 nested_vmx_entry_failure(vcpu, vmcs12,
9632 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
9633 return 1;
9634 }
9635 }
9636
	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
9642 vmcs02 = nested_get_current_vmcs02(vmx);
9643 if (!vmcs02)
9644 return -ENOMEM;
9645
9646 enter_guest_mode(vcpu);
9647
9648 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
9649
9650 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
9651 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
9652
9653 cpu = get_cpu();
9654 vmx->loaded_vmcs = vmcs02;
9655 vmx_vcpu_put(vcpu);
9656 vmx_vcpu_load(vcpu, cpu);
9657 vcpu->cpu = cpu;
9658 put_cpu();
9659
9660 vmx_segment_cache_clear(vmx);
9661
9662 prepare_vmcs02(vcpu, vmcs12);
9663
9664 msr_entry_idx = nested_vmx_load_msr(vcpu,
9665 vmcs12->vm_entry_msr_load_addr,
9666 vmcs12->vm_entry_msr_load_count);
9667 if (msr_entry_idx) {
9668 leave_guest_mode(vcpu);
9669 vmx_load_vmcs01(vcpu);
9670 nested_vmx_entry_failure(vcpu, vmcs12,
9671 EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
9672 return 1;
9673 }
9674
9675 vmcs12->launch_state = 1;
9676
9677 if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
9678 return kvm_vcpu_halt(vcpu);
9679
9680 vmx->nested.nested_run_pending = 1;
9681
	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH doesn't return to L1 on
	 * success; the flags are only set on the eventual vmexit back to L1.
	 */
9688 return 1;
9689}
9690
/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *     didn't trap the bit, because if L1 did, so would L0).
 *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *     been modified by L2, and L1 knows it. So just leave the old value of
 *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *     isn't relevant, because if L0 traps this bit it can set it to anything.
 *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *     changed these bits, and therefore they need to be updated, but L0
 *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
9708static inline unsigned long
9709vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
9710{
9711 return
9712 (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
9713 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
9714 (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
9715 vcpu->arch.cr0_guest_owned_bits));
9716}
9717
9718static inline unsigned long
9719vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
9720{
9721 return
9722 (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
9723 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
9724 (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
9725 vcpu->arch.cr4_guest_owned_bits));
9726}
9727
9728static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
9729 struct vmcs12 *vmcs12)
9730{
9731 u32 idt_vectoring;
9732 unsigned int nr;
9733
9734 if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) {
9735 nr = vcpu->arch.exception.nr;
9736 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
9737
9738 if (kvm_exception_is_soft(nr)) {
9739 vmcs12->vm_exit_instruction_len =
9740 vcpu->arch.event_exit_inst_len;
9741 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
9742 } else
9743 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
9744
9745 if (vcpu->arch.exception.has_error_code) {
9746 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
9747 vmcs12->idt_vectoring_error_code =
9748 vcpu->arch.exception.error_code;
9749 }
9750
9751 vmcs12->idt_vectoring_info_field = idt_vectoring;
9752 } else if (vcpu->arch.nmi_injected) {
9753 vmcs12->idt_vectoring_info_field =
9754 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
9755 } else if (vcpu->arch.interrupt.pending) {
9756 nr = vcpu->arch.interrupt.nr;
9757 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
9758
9759 if (vcpu->arch.interrupt.soft) {
9760 idt_vectoring |= INTR_TYPE_SOFT_INTR;
9761 vmcs12->vm_entry_instruction_len =
9762 vcpu->arch.event_exit_inst_len;
9763 } else
9764 idt_vectoring |= INTR_TYPE_EXT_INTR;
9765
9766 vmcs12->idt_vectoring_info_field = idt_vectoring;
9767 }
9768}
9769
9770static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
9771{
9772 struct vcpu_vmx *vmx = to_vmx(vcpu);
9773
9774 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
9775 vmx->nested.preemption_timer_expired) {
9776 if (vmx->nested.nested_run_pending)
9777 return -EBUSY;
9778 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
9779 return 0;
9780 }
9781
9782 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
9783 if (vmx->nested.nested_run_pending ||
9784 vcpu->arch.interrupt.pending)
9785 return -EBUSY;
9786 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
9787 NMI_VECTOR | INTR_TYPE_NMI_INTR |
9788 INTR_INFO_VALID_MASK, 0);
		/*
		 * The NMI-triggered VM exit counts as injection:
		 * clear this one and block further NMIs.
		 */
9793 vcpu->arch.nmi_pending = 0;
9794 vmx_set_nmi_mask(vcpu, true);
9795 return 0;
9796 }
9797
9798 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
9799 nested_exit_on_intr(vcpu)) {
9800 if (vmx->nested.nested_run_pending)
9801 return -EBUSY;
9802 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
9803 return 0;
9804 }
9805
9806 return vmx_complete_nested_posted_interrupt(vcpu);
9807}
9808
9809static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
9810{
9811 ktime_t remaining =
9812 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
9813 u64 value;
9814
9815 if (ktime_to_ns(remaining) <= 0)
9816 return 0;
9817
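	/* Inverse of vmx_start_preemption_timer(): ns back to timer units. */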
9818 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
9819 do_div(value, 1000000);
9820 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
9821}
9822
/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have changed by the L2 guest or the exit - i.e., the guest-state and
 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
 * which already writes to vmcs12 directly.
 */
9834static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
9835 u32 exit_reason, u32 exit_intr_info,
9836 unsigned long exit_qualification)
9837{
	/* update guest state fields: */
9839 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
9840 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
9841
9842 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
9843 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
9844 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
9845
9846 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
9847 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
9848 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
9849 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
9850 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
9851 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
9852 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
9853 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
9854 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
9855 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
9856 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
9857 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
9858 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
9859 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
9860 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
9861 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
9862 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
9863 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
9864 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
9865 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
9866 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
9867 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
9868 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
9869 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
9870 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
9871 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
9872 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
9873 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
9874 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
9875 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
9876 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
9877 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
9878 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
9879 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
9880 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
9881 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
9882
9883 vmcs12->guest_interruptibility_info =
9884 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
9885 vmcs12->guest_pending_dbg_exceptions =
9886 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
9887 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
9888 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
9889 else
9890 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
9891
9892 if (nested_cpu_has_preemption_timer(vmcs12)) {
9893 if (vmcs12->vm_exit_controls &
9894 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
9895 vmcs12->vmx_preemption_timer_value =
9896 vmx_get_preemption_timer_value(vcpu);
9897 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
9898 }
9899
	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, restore L2's PDPTR to vmcs12.
	 */
9908 if (enable_ept) {
9909 vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3);
9910 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
9911 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
9912 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
9913 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
9914 }
9915
9916 if (nested_cpu_has_vid(vmcs12))
9917 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
9918
9919 vmcs12->vm_entry_controls =
9920 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
9921 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
9922
9923 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
9924 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
9925 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
9926 }
9927
	/* TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change */
9930 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
9931 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
9932 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
9933 vmcs12->guest_ia32_efer = vcpu->arch.efer;
9934 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
9935 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
9936 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
9937 if (vmx_mpx_supported())
9938 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
9939 if (nested_cpu_has_xsaves(vmcs12))
9940 vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
9941
	/* update exit information fields: */

9944 vmcs12->vm_exit_reason = exit_reason;
9945 vmcs12->exit_qualification = exit_qualification;
9946
9947 vmcs12->vm_exit_intr_info = exit_intr_info;
9948 if ((vmcs12->vm_exit_intr_info &
9949 (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
9950 (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
9951 vmcs12->vm_exit_intr_error_code =
9952 vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
9953 vmcs12->idt_vectoring_info_field = 0;
9954 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
9955 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
9956
9957 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
9960 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
9961
		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
9966 vmcs12_save_pending_event(vcpu, vmcs12);
9967 }
9968
	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
9973 vcpu->arch.nmi_injected = false;
9974 kvm_clear_exception_queue(vcpu);
9975 kvm_clear_interrupt_queue(vcpu);
9976}
9977
/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is called on a normal nested exit and also on a nested entry
 * failure, as explained in Intel's spec, 26.7 (VM-entry Failures During or
 * After Loading Guest State).
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
9987static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
9988 struct vmcs12 *vmcs12)
9989{
9990 struct kvm_segment seg;
9991
9992 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
9993 vcpu->arch.efer = vmcs12->host_ia32_efer;
9994 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
9995 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
9996 else
9997 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
9998 vmx_set_efer(vcpu, vcpu->arch.efer);
9999
10000 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
10001 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
10002 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
10003
	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because vmx_set_cr0 refers to efer set above.
	 */
10009 vmx_set_cr0(vcpu, vmcs12->host_cr0);
10010
	/*
	 * Switch the exception bitmap and the CR0 guest/host mask back to
	 * vmcs01's values; with lazy FPU, only CR0.TS stays guest-owned.
	 */
10015 update_exception_bitmap(vcpu);
10016 vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
10017 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
10018
	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
10023 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
10024 kvm_set_cr4(vcpu, vmcs12->host_cr4);
10025
10026 nested_ept_uninit_mmu_context(vcpu);
10027
10028 kvm_set_cr3(vcpu, vmcs12->host_cr3);
10029 kvm_mmu_reset_context(vcpu);
10030
10031 if (!enable_ept)
10032 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
10033
10034 if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
10040 vmx_flush_tlb(vcpu);
10041 }
10042
10043
10044 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
10045 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
10046 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
10047 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
10048 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
10049
	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
10051 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
10052 vmcs_write64(GUEST_BNDCFGS, 0);
10053
10054 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
10055 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
10056 vcpu->arch.pat = vmcs12->host_ia32_pat;
10057 }
10058 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
10059 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
10060 vmcs12->host_ia32_perf_global_ctrl);
10061
	/* Set L1 segment info according to Intel SDM
	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
10064 seg = (struct kvm_segment) {
10065 .base = 0,
10066 .limit = 0xFFFFFFFF,
10067 .selector = vmcs12->host_cs_selector,
10068 .type = 11,
10069 .present = 1,
10070 .s = 1,
10071 .g = 1
10072 };
10073 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
10074 seg.l = 1;
10075 else
10076 seg.db = 1;
10077 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
10078 seg = (struct kvm_segment) {
10079 .base = 0,
10080 .limit = 0xFFFFFFFF,
10081 .type = 3,
10082 .present = 1,
10083 .s = 1,
10084 .db = 1,
10085 .g = 1
10086 };
10087 seg.selector = vmcs12->host_ds_selector;
10088 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
10089 seg.selector = vmcs12->host_es_selector;
10090 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
10091 seg.selector = vmcs12->host_ss_selector;
10092 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
10093 seg.selector = vmcs12->host_fs_selector;
10094 seg.base = vmcs12->host_fs_base;
10095 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
10096 seg.selector = vmcs12->host_gs_selector;
10097 seg.base = vmcs12->host_gs_base;
10098 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
10099 seg = (struct kvm_segment) {
10100 .base = vmcs12->host_tr_base,
10101 .limit = 0x67,
10102 .selector = vmcs12->host_tr_selector,
10103 .type = 11,
10104 .present = 1
10105 };
10106 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
10107
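	/* Per the SDM, a VM exit loads DR7 with 400H and clears DEBUGCTL. */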
10108 kvm_set_dr(vcpu, 7, 0x400);
10109 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
10110
10111 if (cpu_has_vmx_msr_bitmap())
10112 vmx_set_msr_bitmap(vcpu);
10113
10114 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
10115 vmcs12->vm_exit_msr_load_count))
10116 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
10117}
10118
/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
 */
10124static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
10125 u32 exit_intr_info,
10126 unsigned long exit_qualification)
10127{
10128 struct vcpu_vmx *vmx = to_vmx(vcpu);
10129 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
10130
	/* trying to cancel vmlaunch/vmresume is a bug */
10132 WARN_ON_ONCE(vmx->nested.nested_run_pending);
10133
10134 leave_guest_mode(vcpu);
10135 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
10136 exit_qualification);
10137
10138 if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
10139 vmcs12->vm_exit_msr_store_count))
10140 nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
10141
10142 vmx_load_vmcs01(vcpu);
10143
10144 if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
10145 && nested_exit_intr_ack_set(vcpu)) {
10146 int irq = kvm_cpu_get_interrupt(vcpu);
10147 WARN_ON(irq < 0);
10148 vmcs12->vm_exit_intr_info = irq |
10149 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
10150 }
10151
10152 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
10153 vmcs12->exit_qualification,
10154 vmcs12->idt_vectoring_info_field,
10155 vmcs12->vm_exit_intr_info,
10156 vmcs12->vm_exit_intr_error_code,
10157 KVM_ISA_VMX);
10158
10159 vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
10160 vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
10161 vmx_segment_cache_clear(vmx);
10162
	/* if no vmcs02 cache was requested, remove the one we used */
10164 if (VMCS02_POOL_SIZE == 0)
10165 nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
10166
10167 load_vmcs12_host_state(vcpu, vmcs12);
10168
	/* Update TSC_OFFSET if TSC was changed while L2 ran */
10170 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
10171
	/* This is needed for same reason as it was needed in prepare_vmcs02 */
10173 vmx->host_rsp = 0;
10174
	/* Unpin physical memory we referred to in vmcs02 */
10176 if (vmx->nested.apic_access_page) {
10177 nested_release_page(vmx->nested.apic_access_page);
10178 vmx->nested.apic_access_page = NULL;
10179 }
10180 if (vmx->nested.virtual_apic_page) {
10181 nested_release_page(vmx->nested.virtual_apic_page);
10182 vmx->nested.virtual_apic_page = NULL;
10183 }
10184 if (vmx->nested.pi_desc_page) {
10185 kunmap(vmx->nested.pi_desc_page);
10186 nested_release_page(vmx->nested.pi_desc_page);
10187 vmx->nested.pi_desc_page = NULL;
10188 vmx->nested.pi_desc = NULL;
10189 }
10190
	/*
	 * The APIC access page was switched for L2; make sure L1's mapping
	 * is reloaded before we re-enter L1.
	 */
10195 kvm_vcpu_reload_apic_access_page(vcpu);
10196
	/*
	 * Exiting from L2 to L1, we're now back to L1 which thinks it just
	 * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
	 * success or failure flag accordingly.
	 */
10202 if (unlikely(vmx->fail)) {
10203 vmx->fail = 0;
10204 nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
10205 } else
10206 nested_vmx_succeed(vcpu);
10207 if (enable_shadow_vmcs)
10208 vmx->nested.sync_shadow_vmcs = true;
10209
	/* in case we halted in L2 */
10211 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
10212}
10213
/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
10217static void vmx_leave_nested(struct kvm_vcpu *vcpu)
10218{
10219 if (is_guest_mode(vcpu))
10220 nested_vmx_vmexit(vcpu, -1, 0, 0);
10221 free_nested(to_vmx(vcpu));
10222}
10223
/*
 * L1's failure to enter L2 is a subset of a normal exit, as explained in
 * 23.7 "VM-entry failures during or after loading guest state" (this also
 * lists the acceptable exit-reason and exit-qualification parameters).
 * It should only be called before L2 actually succeeded to run, and when
 * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
 */
10231static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
10232 struct vmcs12 *vmcs12,
10233 u32 reason, unsigned long qualification)
10234{
10235 load_vmcs12_host_state(vcpu, vmcs12);
10236 vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
10237 vmcs12->exit_qualification = qualification;
10238 nested_vmx_succeed(vcpu);
10239 if (enable_shadow_vmcs)
10240 to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
10241}
10242
10243static int vmx_check_intercept(struct kvm_vcpu *vcpu,
10244 struct x86_instruction_info *info,
10245 enum x86_intercept_stage stage)
10246{
10247 return X86EMUL_CONTINUE;
10248}
10249
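/*
 * Counterpart to the PLE-window growth on PLE exits: each sched_in
 * shrinks the window back toward ple_window.
 */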
10250static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
10251{
10252 if (ple_gap)
10253 shrink_ple_window(vcpu);
10254}
10255
10256static void vmx_slot_enable_log_dirty(struct kvm *kvm,
10257 struct kvm_memory_slot *slot)
10258{
10259 kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
10260 kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
10261}
10262
10263static void vmx_slot_disable_log_dirty(struct kvm *kvm,
10264 struct kvm_memory_slot *slot)
10265{
10266 kvm_mmu_slot_set_dirty(kvm, slot);
10267}
10268
10269static void vmx_flush_log_dirty(struct kvm *kvm)
10270{
10271 kvm_flush_pml_buffers(kvm);
10272}
10273
10274static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
10275 struct kvm_memory_slot *memslot,
10276 gfn_t offset, unsigned long mask)
10277{
10278 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
10279}
10280
10281static struct kvm_x86_ops vmx_x86_ops = {
10282 .cpu_has_kvm_support = cpu_has_kvm_support,
10283 .disabled_by_bios = vmx_disabled_by_bios,
10284 .hardware_setup = hardware_setup,
10285 .hardware_unsetup = hardware_unsetup,
10286 .check_processor_compatibility = vmx_check_processor_compat,
10287 .hardware_enable = hardware_enable,
10288 .hardware_disable = hardware_disable,
10289 .cpu_has_accelerated_tpr = report_flexpriority,
10290 .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
10291
10292 .vcpu_create = vmx_create_vcpu,
10293 .vcpu_free = vmx_free_vcpu,
10294 .vcpu_reset = vmx_vcpu_reset,
10295
10296 .prepare_guest_switch = vmx_save_host_state,
10297 .vcpu_load = vmx_vcpu_load,
10298 .vcpu_put = vmx_vcpu_put,
10299
10300 .update_db_bp_intercept = update_exception_bitmap,
10301 .get_msr = vmx_get_msr,
10302 .set_msr = vmx_set_msr,
10303 .get_segment_base = vmx_get_segment_base,
10304 .get_segment = vmx_get_segment,
10305 .set_segment = vmx_set_segment,
10306 .get_cpl = vmx_get_cpl,
10307 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
10308 .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
10309 .decache_cr3 = vmx_decache_cr3,
10310 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
10311 .set_cr0 = vmx_set_cr0,
10312 .set_cr3 = vmx_set_cr3,
10313 .set_cr4 = vmx_set_cr4,
10314 .set_efer = vmx_set_efer,
10315 .get_idt = vmx_get_idt,
10316 .set_idt = vmx_set_idt,
10317 .get_gdt = vmx_get_gdt,
10318 .set_gdt = vmx_set_gdt,
10319 .get_dr6 = vmx_get_dr6,
10320 .set_dr6 = vmx_set_dr6,
10321 .set_dr7 = vmx_set_dr7,
10322 .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
10323 .cache_reg = vmx_cache_reg,
10324 .get_rflags = vmx_get_rflags,
10325 .set_rflags = vmx_set_rflags,
10326 .fpu_activate = vmx_fpu_activate,
10327 .fpu_deactivate = vmx_fpu_deactivate,
10328
10329 .tlb_flush = vmx_flush_tlb,
10330
10331 .run = vmx_vcpu_run,
10332 .handle_exit = vmx_handle_exit,
10333 .skip_emulated_instruction = skip_emulated_instruction,
10334 .set_interrupt_shadow = vmx_set_interrupt_shadow,
10335 .get_interrupt_shadow = vmx_get_interrupt_shadow,
10336 .patch_hypercall = vmx_patch_hypercall,
10337 .set_irq = vmx_inject_irq,
10338 .set_nmi = vmx_inject_nmi,
10339 .queue_exception = vmx_queue_exception,
10340 .cancel_injection = vmx_cancel_injection,
10341 .interrupt_allowed = vmx_interrupt_allowed,
10342 .nmi_allowed = vmx_nmi_allowed,
10343 .get_nmi_mask = vmx_get_nmi_mask,
10344 .set_nmi_mask = vmx_set_nmi_mask,
10345 .enable_nmi_window = enable_nmi_window,
10346 .enable_irq_window = enable_irq_window,
10347 .update_cr8_intercept = update_cr8_intercept,
10348 .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
10349 .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
10350 .vm_has_apicv = vmx_vm_has_apicv,
10351 .load_eoi_exitmap = vmx_load_eoi_exitmap,
10352 .hwapic_irr_update = vmx_hwapic_irr_update,
10353 .hwapic_isr_update = vmx_hwapic_isr_update,
10354 .sync_pir_to_irr = vmx_sync_pir_to_irr,
10355 .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
10356
10357 .set_tss_addr = vmx_set_tss_addr,
10358 .get_tdp_level = get_ept_level,
10359 .get_mt_mask = vmx_get_mt_mask,
10360
10361 .get_exit_info = vmx_get_exit_info,
10362
10363 .get_lpage_level = vmx_get_lpage_level,
10364
10365 .cpuid_update = vmx_cpuid_update,
10366
10367 .rdtscp_supported = vmx_rdtscp_supported,
10368 .invpcid_supported = vmx_invpcid_supported,
10369
10370 .set_supported_cpuid = vmx_set_supported_cpuid,
10371
10372 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
10373
10374 .set_tsc_khz = vmx_set_tsc_khz,
10375 .read_tsc_offset = vmx_read_tsc_offset,
10376 .write_tsc_offset = vmx_write_tsc_offset,
10377 .adjust_tsc_offset = vmx_adjust_tsc_offset,
10378 .compute_tsc_offset = vmx_compute_tsc_offset,
10379 .read_l1_tsc = vmx_read_l1_tsc,
10380
10381 .set_tdp_cr3 = vmx_set_cr3,
10382
10383 .check_intercept = vmx_check_intercept,
10384 .handle_external_intr = vmx_handle_external_intr,
10385 .mpx_supported = vmx_mpx_supported,
10386 .xsaves_supported = vmx_xsaves_supported,
10387
10388 .check_nested_events = vmx_check_nested_events,
10389
10390 .sched_in = vmx_sched_in,
10391
10392 .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
10393 .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
10394 .flush_log_dirty = vmx_flush_log_dirty,
10395 .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
10396
10397 .pmu_ops = &intel_pmu_ops,
10398};
10399
10400static int __init vmx_init(void)
10401{
10402 int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
10403 __alignof__(struct vcpu_vmx), THIS_MODULE);
10404 if (r)
10405 return r;
10406
10407#ifdef CONFIG_KEXEC_CORE
10408 rcu_assign_pointer(crash_vmclear_loaded_vmcss,
10409 crash_vmclear_local_loaded_vmcss);
10410#endif
10411
10412 return 0;
10413}
10414
10415static void __exit vmx_exit(void)
10416{
10417#ifdef CONFIG_KEXEC_CORE
10418 RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
10419 synchronize_rcu();
10420#endif
10421
10422 kvm_exit();
10423}
10424
10425module_init(vmx_init)
10426module_exit(vmx_exit)
10427