/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"

#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/*
 * A 4096-byte buffer can hold the 8-byte kvm_msrs header, followed by
 * 255 kvm_msr_entry structs.
 */
#define MSR_BUF_SIZE 4096

static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_vmx_procbased_ctls2;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;
static int has_exception_payload;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })
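
/*
 * Note on MEMORIZE: the macro expands to a GCC/clang statement expression
 * whose embedded "return _result;" exits the *enclosing function*, so it
 * may only be used inside a function whose return type matches _result.
 * Illustrative sketch (probe_once()/expensive_probe() are hypothetical):
 *
 *     static bool cached;
 *     bool probe_once(void)
 *     {
 *         return MEMORIZE(expensive_probe(), cached);
 *     }
 *
 * The first call evaluates expensive_probe() once and caches the result;
 * subsequent calls return the cached value, which is how
 * kvm_enable_x2apic() below uses it with has_x2apic_api.
 */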

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run the KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large
 * enough for all entries, and cache the result.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}
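
/*
 * Sizing note: KVM_GET_SUPPORTED_CPUID reports -E2BIG when the supplied
 * buffer is too small (try_get_cpuid() also converts a suspiciously full
 * buffer into -E2BIG), so get_supported_cpuid() retries with a doubled
 * entry count (1, 2, 4, ...) until the ioctl succeeds, then caches the
 * result in cpuid_cache for all later callers.
 */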

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

static bool host_tsx_blacklisted(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find a matching entry for function/index in a kvm_cpuid2 struct */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }

    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /*
         * We can set the hypervisor flag, even if KVM does not return it
         * on GET_SUPPORTED_CPUID.
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /*
         * The tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but
         * it can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /*
         * x2apic is reported by GET_SUPPORTED_CPUID, but it can't be
         * enabled without the in-kernel irqchip.
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_blacklisted()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_ECX) {
        if (enable_cpu_pm) {
            ret |= CPUID_7_0_ECX_WAITPKG;
        } else {
            ret &= ~CPUID_7_0_ECX_WAITPKG;
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM
         * hosts.  We can detect the bug by checking whether
         * MSR_IA32_ARCH_CAPABILITIES is in KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here
         * keeps CPU models that include TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /*
         * On Intel, KVM returns cpuid according to the Intel spec, so add
         * the missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /*
         * kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip.
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
        found = true;
    }

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}
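
/*
 * Usage sketch for kvm_arch_get_supported_cpuid(), mirroring the call
 * sites elsewhere in this file: probe a single feature bit of a
 * function/index/register triple, e.g.
 *
 *     if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
 *         CPUID_7_0_EBX_INVPCID) {
 *         ... the host/KVM combination can expose INVPCID ...
 *     }
 */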

uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t can_be_one, must_be_one;
    int i, ret;

    if (kvm_feature_msrs == NULL) {
        return 0;
    }

    /* Check if the requested MSR is a supported feature MSR */
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation when writing the MSR
         * value back.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}
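
/*
 * Worked example for the TRUE_*_CTLS transformation above (the value is
 * hypothetical): if KVM reports value = 0x0000005600000016, then
 * must_be_one = 0x00000016 (low half: settings that are always enforced)
 * and can_be_one = 0x00000056 (high half: settings that may be enabled),
 * so the function returns 0x56 & ~0x16 = 0x40, i.e. only the controls
 * that are truly optional for the guest.
 */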

typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /*
     * Sync register state from KVM so that env->mcg_ext_ctl reflects what
     * the guest has written before we check for LMCE below.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void hardware_memory_error(void *host_addr)
{
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /*
     * If we get an action required MCE, it has been injected by KVM
     * while the VM was running.  An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on the error type: an
             * action required error is fatal to the guest, while an action
             * optional one only merits a warning.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                "for memory used by QEMU itself instead of guest system!",
                addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE */
}

static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}
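
/*
 * Note on the two paths above: with KVM_CAP_EXCEPTION_PAYLOAD the
 * exception stays "pending" and its payload (the would-be DR6 for #DB,
 * CR2 for #PF) travels to the kernel, which applies it only when the
 * exception is actually delivered.  Without the capability the exception
 * is queued as already "injected" and QEMU applies the payload itself
 * right away via the dr[6]/cr[2] writes, so the value can become guest
 * visible slightly earlier than on real hardware.
 */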

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r;

    if (!env->tsc_khz) {
        return 0;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;
    if (r < 0) {
        /*
         * When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                       -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

static struct {
    const char *desc;
    struct {
        uint32_t fw;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE},
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE},
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_APIC_ACCESS_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE |
             HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
};
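
/*
 * Reading the table above: each feature names up to two (fw, bits) pairs,
 * i.e. a CPUID feature word plus the bits that must be set in it, and an
 * optional 'dependencies' bitmask of HYPERV_FEAT_* values that have to be
 * enabled first.  For example, hv-stimer sets HV_SYNTIMERS_AVAILABLE in
 * HV_CPUID_FEATURES.EAX and depends on both hv-synic and hv-time;
 * hv_cpuid_check_and_set() below enforces these rules.
 */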

static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run the KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large
 * enough for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    int max = 7; /* 0x40000000..0x40000005, 0x4000000A */

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG without reporting the required size back, so keep increasing
     * the entry count and retrying until the call succeeds.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max)) == NULL) {
        max++;
    }
    return cpuid;
}

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported, synthesize the CPUID
 * feature leaves from the older KVM_CAP_HYPERV* extensions and the set of
 * Hyper-V MSRs reported by the kernel.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* Two leaves: HV_CPUID_FEATURES and HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* The feature and recommendation bits are filled in below */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r)
{
    struct kvm_cpuid_entry2 *entry;
    uint32_t func;
    int reg;

    switch (fw) {
    case FEAT_HYPERV_EAX:
        reg = R_EAX;
        func = HV_CPUID_FEATURES;
        break;
    case FEAT_HYPERV_EDX:
        reg = R_EDX;
        func = HV_CPUID_FEATURES;
        break;
    case FEAT_HV_RECOMM_EAX:
        reg = R_EAX;
        func = HV_CPUID_ENLIGHTMENT_INFO;
        break;
    default:
        return -EINVAL;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return -ENOENT;
    }

    switch (reg) {
    case R_EAX:
        *r = entry->eax;
        break;
    case R_EDX:
        *r = entry->edx;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
                                  int feature)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t r, fw, bits;
    uint64_t deps;
    int i, dep_feat;

    if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
        return 0;
    }

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            fprintf(stderr,
                    "Hyper-V %s requires Hyper-V %s\n",
                    kvm_hyperv_properties[feature].desc,
                    kvm_hyperv_properties[dep_feat].desc);
            return 1;
        }
        deps &= ~(1ull << dep_feat);
    }

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
        fw = kvm_hyperv_properties[feature].flags[i].fw;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!fw) {
            continue;
        }

        if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) {
            if (hyperv_feat_enabled(cpu, feature)) {
                fprintf(stderr,
                        "Hyper-V %s is not supported by kernel\n",
                        kvm_hyperv_properties[feature].desc);
                return 1;
            } else {
                return 0;
            }
        }

        env->features[fw] |= bits;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_features |= BIT(feature);
    }

    return 0;
}

/*
 * Fill in the Hyper-V CPUIDs.  Returns the number of entries filled in
 * cpuid_ent on success, errno < 0 on failure, and 0 when no Hyper-V
 * enlightenments are enabled.
 */
static int hyperv_handle_properties(CPUState *cs,
                                    struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0;
    int r;

    if (!hyperv_enabled(cpu)) {
        return 0;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ||
        cpu->hyperv_passthrough) {
        uint16_t evmcs_version;

        r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                (uintptr_t)&evmcs_version);

        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) {
            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
                    kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return -ENOSYS;
        }

        if (!r) {
            env->features[FEAT_HV_RECOMM_EAX] |=
                HV_ENLIGHTENED_VMCS_RECOMMENDED;
            env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
        }
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
        cpuid = get_supported_hv_cpuid(cs);
    } else {
        cpuid = get_supported_hv_cpuid_legacy(cs);
    }

    if (cpu->hyperv_passthrough) {
        memcpy(cpuid_ent, &cpuid->entries[0],
               cpuid->nent * sizeof(cpuid->entries[0]));

        c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
        if (c) {
            env->features[FEAT_HYPERV_EAX] = c->eax;
            env->features[FEAT_HYPERV_EBX] = c->ebx;
            env->features[FEAT_HYPERV_EDX] = c->edx;
        }
        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
        if (c) {
            env->features[FEAT_HV_RECOMM_EAX] = c->eax;

            /* hv-spinlocks may have been overridden */
            if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) {
                c->ebx = cpu->hyperv_spinlock_attempts;
            }
        }
        c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
        if (c) {
            env->features[FEAT_HV_NESTED_EAX] = c->eax;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        env->features[FEAT_HV_RECOMM_EAX] |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
        if (c) {
            env->features[FEAT_HV_RECOMM_EAX] |=
                c->eax & HV_NO_NONARCH_CORESHARING;
        }
    }

    /* Features */
    r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT);

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        fprintf(stderr, "Hyper-V %s requires Hyper-V %s\n",
                kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        r |= 1;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    if (r) {
        r = -ENOSYS;
        goto free;
    }

    if (cpu->hyperv_passthrough) {
        /* We already copied all feature words from KVM as is */
        r = cpuid->nent;
        goto free;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    if (!cpu->hyperv_vendor_id) {
        memcpy(signature, "Microsoft Hv", 12);
    } else {
        size_t len = strlen(cpu->hyperv_vendor_id);

        if (len > 12) {
            error_report("hv-vendor-id truncated to 12 characters");
            len = 12;
        }
        memset(signature, 0, 12);
        memcpy(signature, cpu->hyperv_vendor_id, len);
    }
    c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
        HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
    c->eax = signature[0];
    c->ebx = 0;
    c->ecx = 0;
    c->edx = 0;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = 0x00001bbc;
    c->ebx = 0x00060001;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = env->features[FEAT_HYPERV_EAX];
    c->ebx = env->features[FEAT_HYPERV_EBX];
    c->edx = env->features[FEAT_HYPERV_EDX];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = env->features[FEAT_HV_RECOMM_EAX];
    c->ebx = cpu->hyperv_spinlock_attempts;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = 0x40;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        __u32 function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = env->features[FEAT_HV_NESTED_EAX];
    }
    r = cpuid_i;

free:
    g_free(cpuid);

    return r;
}

static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_passthrough_mig_blocker);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration,"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_no_nonarch_cs_mig_blocker);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * The kernel doesn't support setting vp_index; assert that its
         * value is in sync.
         */
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[1];
        } msr_data = {
            .info.nmsrs = 1,
            .entries[0].index = HV_X64_MSR_VP_INDEX,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
        if (ret < 0) {
            return ret;
        }
        assert(ret == 1);

        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES 100

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } cpuid_data;
    /*
     * The kernel defines these structs with padding fields so there
     * should be no extra padding in our cpuid_data struct.
     */
    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
                      sizeof(struct kvm_cpuid2) +
                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int max_nested_state_len;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /*
     * The vcpu's TSC frequency is either specified by the user, or follows
     * the value used by KVM if the former is not present.  In the latter
     * case, we query it from KVM and record it in env->tsc_khz, so that
     * the vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    /* Paravirtualization CPUIDs */
    r = hyperv_handle_properties(cs, cpuid_data.entries);
    if (r < 0) {
        return r;
    } else if (r > 0) {
        cpuid_i = r;
        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
        c->edx = env->features[FEAT_KVM_HINTS];
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (env->nr_dies < 2) {
                break;
            }
            /* fall through */
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                if (i == 0x1f && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        case 0x7:
        case 0x14: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is
                 * missing, so we can omit it and avoid hitting KVM's
                 * entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /*
             * Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is
                 * missing, so we can omit it and avoid hitting KVM's
                 * entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    /* Call Centaur's CPUID instruction if it is supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
                        unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }

    if (!env->user_tsc_khz) {
        if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
            invtsc_mig_blocker == NULL) {
            error_setg(&invtsc_mig_blocker,
                       "State blocked by non-migratable CPU device"
                       " (invtsc flag)");
            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
            if (local_err) {
                error_report_err(local_err);
                error_free(invtsc_mig_blocker);
                return r;
            }
        }
    }

    if (cpu->vmware_cpuid_freq
        /*
         * Guests depend on 0x40000000 to detect this feature, so only
         * expose it if KVM exposes leaf 0x40000000.  (Conflicts with
         * Hyper-V.)
         */
        && cpu->expose_kvm
        && kvm_base == KVM_CPUID_SIGNATURE
        /* TSC clock must be stable and known for this to work. */
        && tsc_is_stable_and_known(env)) {

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | 0x10;
        c->eax = env->tsc_khz;
        /*
         * A LAPIC resolution of 1ns (frequency: 1GHz) is hardcoded in
         * KVM's APIC_BUS_CYCLE_NS.
         */
        c->ebx = 1000000;
        c->ecx = c->edx = 0;

        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        goto fail;
    }

    if (has_xsave) {
        env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
        memset(env->xsave_buf, 0, sizeof(struct kvm_xsave));
    }

    max_nested_state_len = kvm_max_nested_state_length();
    if (max_nested_state_len > 0) {
        assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));

        if (cpu_has_vmx(env)) {
            struct kvm_vmx_nested_state_hdr *vmx_hdr;

            env->nested_state = g_malloc0(max_nested_state_len);
            env->nested_state->size = max_nested_state_len;
            env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;

            vmx_hdr = &env->nested_state->hdr.vmx;
            vmx_hdr->vmxon_pa = -1ull;
            vmx_hdr->vmcs12_pa = -1ull;
        }
    }

    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    kvm_init_msrs(cpu);

    r = hyperv_init_vcpu(cpu);
    if (r) {
        goto fail;
    }

    return 0;

 fail:
    migrate_del_blocker(invtsc_mig_blocker);

    return r;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cpu->kvm_msr_buf) {
        g_free(cpu->kvm_msr_buf);
        cpu->kvm_msr_buf = NULL;
    }

    if (env->nested_state) {
        g_free(env->nested_state);
        env->nested_state = NULL;
    }

    return 0;
}

void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        int i;
        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
        }

        hyperv_x86_synic_reset(cpu);
    }
    /* enabled by default */
    env->poll_control_msr = 1;
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state.  */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}

static int kvm_get_supported_feature_msrs(KVMState *s)
{
    int ret = 0;

    if (kvm_feature_msrs != NULL) {
        return 0;
    }

    if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
        return 0;
    }

    struct kvm_msr_list msr_list;

    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        return ret;
    }

    assert(msr_list.nmsrs > 0);
    kvm_feature_msrs = (struct kvm_msr_list *) \
        g_malloc0(sizeof(msr_list) +
                  msr_list.nmsrs * sizeof(msr_list.indices[0]));

    kvm_feature_msrs->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);

    if (ret < 0) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        g_free(kvm_feature_msrs);
        kvm_feature_msrs = NULL;
        return ret;
    }

    return 0;
}
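
/*
 * Protocol note: KVM_GET_MSR_FEATURE_INDEX_LIST is issued twice above.
 * The first call passes nmsrs = 0; the kernel fails it with -E2BIG but
 * fills in msr_list.nmsrs with the required count.  The second call, on
 * a buffer sized from that count, fetches the actual indices.  The same
 * two-call pattern is used with KVM_GET_MSR_INDEX_LIST below.
 */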

static int kvm_get_supported_msrs(KVMState *s)
{
    int ret = 0;
    struct kvm_msr_list msr_list, *kvm_msr_list;

    /*
     * Obtain the MSR list from KVM.  These are the MSRs that we must
     * save/restore.
     */
    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        return ret;
    }
    /*
     * Old kernel modules had a bug and could write beyond the provided
     * memory.  Allocate at least a safe amount of 1K.
     */
    kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                       msr_list.nmsrs *
                                       sizeof(msr_list.indices[0])));

    kvm_msr_list->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
    if (ret >= 0) {
        int i;

        for (i = 0; i < kvm_msr_list->nmsrs; i++) {
            switch (kvm_msr_list->indices[i]) {
            case MSR_STAR:
                has_msr_star = true;
                break;
            case MSR_VM_HSAVE_PA:
                has_msr_hsave_pa = true;
                break;
            case MSR_TSC_AUX:
                has_msr_tsc_aux = true;
                break;
            case MSR_TSC_ADJUST:
                has_msr_tsc_adjust = true;
                break;
            case MSR_IA32_TSCDEADLINE:
                has_msr_tsc_deadline = true;
                break;
            case MSR_IA32_SMBASE:
                has_msr_smbase = true;
                break;
            case MSR_SMI_COUNT:
                has_msr_smi_count = true;
                break;
            case MSR_IA32_MISC_ENABLE:
                has_msr_misc_enable = true;
                break;
            case MSR_IA32_BNDCFGS:
                has_msr_bndcfgs = true;
                break;
            case MSR_IA32_XSS:
                has_msr_xss = true;
                break;
            case MSR_IA32_UMWAIT_CONTROL:
                has_msr_umwait = true;
                break;
            case HV_X64_MSR_CRASH_CTL:
                has_msr_hv_crash = true;
                break;
            case HV_X64_MSR_RESET:
                has_msr_hv_reset = true;
                break;
            case HV_X64_MSR_VP_INDEX:
                has_msr_hv_vpindex = true;
                break;
            case HV_X64_MSR_VP_RUNTIME:
                has_msr_hv_runtime = true;
                break;
            case HV_X64_MSR_SCONTROL:
                has_msr_hv_synic = true;
                break;
            case HV_X64_MSR_STIMER0_CONFIG:
                has_msr_hv_stimer = true;
                break;
            case HV_X64_MSR_TSC_FREQUENCY:
                has_msr_hv_frequencies = true;
                break;
            case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                has_msr_hv_reenlightenment = true;
                break;
            case MSR_IA32_SPEC_CTRL:
                has_msr_spec_ctrl = true;
                break;
            case MSR_IA32_TSX_CTRL:
                has_msr_tsx_ctrl = true;
                break;
            case MSR_VIRT_SSBD:
                has_msr_virt_ssbd = true;
                break;
            case MSR_IA32_ARCH_CAPABILITIES:
                has_msr_arch_capabs = true;
                break;
            case MSR_IA32_CORE_CAPABILITY:
                has_msr_core_capabs = true;
                break;
            case MSR_IA32_VMX_VMFUNC:
                has_msr_vmx_vmfunc = true;
                break;
            case MSR_IA32_VMX_PROCBASED_CTLS2:
                has_msr_vmx_procbased_ctls2 = true;
                break;
            }
        }
    }

    g_free(kvm_msr_list);

    return ret;
}

static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container spanning the whole 64-bit address space */
    memory_region_init(&smram_as_root, OBJECT(kvm_state),
                       "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /*
     * The SMM address space defaults to an alias of regular system
     * memory, added at the lowest priority.
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* The machine's SMRAM region, if any, overlays it at higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);

    hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);

    has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
    if (has_exception_payload) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
        if (ret < 0) {
            error_report("kvm: Failed to enable exception payload cap: %s",
                         strerror(-ret));
            return ret;
        }
    }

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    kvm_get_supported_feature_msrs(s);

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, an EPT identity map and a TSS
     * are needed.  Since these must be part of guest physical memory, we
     * need to allocate them, both by setting their start addresses in the
     * kernel and by creating a corresponding e820 entry.  We need 4 pages
     * before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base.
     * In that case we need to stick with the default, i.e. a 256K maximum
     * BIOS size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = machine_kvm_shadow_mem(ms);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
        object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
        pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }

    if (enable_cpu_pm) {
        int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
        int ret;

/* Work around a kernel header with a typo.  TODO: fix the header and drop. */
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
#endif
        if (disable_exits) {
            disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
                              KVM_X86_DISABLE_EXITS_HLT |
                              KVM_X86_DISABLE_EXITS_PAUSE |
                              KVM_X86_DISABLE_EXITS_CSTATE);
        }

        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
                                disable_exits);
        if (ret < 0) {
            error_report("kvm: guest stopping CPU not supported: %s",
                         strerror(-ret));
        }
    }

    return 0;
}

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
2271
2272static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
2273{
2274 if (set) {
2275 *kvm_reg = *qemu_reg;
2276 } else {
2277 *qemu_reg = *kvm_reg;
2278 }
2279}
2280
2281static int kvm_getput_regs(X86CPU *cpu, int set)
2282{
2283 CPUX86State *env = &cpu->env;
2284 struct kvm_regs regs;
2285 int ret = 0;
2286
2287 if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
2289 if (ret < 0) {
2290 return ret;
2291 }
2292 }
2293
    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);
2315
2316 if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
2318 }
2319
2320 return ret;
2321}
2322
2323static int kvm_put_fpu(X86CPU *cpu)
2324{
2325 CPUX86State *env = &cpu->env;
2326 struct kvm_fpu fpu;
2327 int i;
2328
2329 memset(&fpu, 0, sizeof fpu);
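    /* The FPU status word keeps the top-of-stack index in bits 11-13. */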
2330 fpu.fsw = env->fpus & ~(7 << 11);
2331 fpu.fsw |= (env->fpstt & 7) << 11;
2332 fpu.fcw = env->fpuc;
2333 fpu.last_opcode = env->fpop;
2334 fpu.last_ip = env->fpip;
2335 fpu.last_dp = env->fpdp;
2336 for (i = 0; i < 8; ++i) {
2337 fpu.ftwx |= (!env->fptags[i]) << i;
2338 }
2339 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
2340 for (i = 0; i < CPU_NB_REGS; i++) {
2341 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
2342 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
2343 }
2344 fpu.mxcsr = env->mxcsr;
2345
2346 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
2347}
2348
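/* Offsets into struct kvm_xsave, in units of its 32-bit region[] words. */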
2349#define XSAVE_FCW_FSW 0
2350#define XSAVE_FTW_FOP 1
2351#define XSAVE_CWD_RIP 2
2352#define XSAVE_CWD_RDP 4
2353#define XSAVE_MXCSR 6
2354#define XSAVE_ST_SPACE 8
2355#define XSAVE_XMM_SPACE 40
2356#define XSAVE_XSTATE_BV 128
2357#define XSAVE_YMMH_SPACE 144
2358#define XSAVE_BNDREGS 240
2359#define XSAVE_BNDCSR 256
2360#define XSAVE_OPMASK 272
2361#define XSAVE_ZMM_Hi256 288
2362#define XSAVE_Hi16_ZMM 416
2363#define XSAVE_PKRU 672
2364
2365#define XSAVE_BYTE_OFFSET(word_offset) \
2366 ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))
2367
2368#define ASSERT_OFFSET(word_offset, field) \
2369 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
2370 offsetof(X86XSaveArea, field))
2371
2372ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
2373ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
2374ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
2375ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
2376ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
2377ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
2378ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
2379ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
2380ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
2381ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
2382ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
2383ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
2384ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
2385ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
2386ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
2387
2388static int kvm_put_xsave(X86CPU *cpu)
2389{
2390 CPUX86State *env = &cpu->env;
2391 X86XSaveArea *xsave = env->xsave_buf;
2392
2393 if (!has_xsave) {
2394 return kvm_put_fpu(cpu);
2395 }
2396 x86_cpu_xsave_all_areas(cpu, xsave);
2397
2398 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
2399}
2400
2401static int kvm_put_xcrs(X86CPU *cpu)
2402{
2403 CPUX86State *env = &cpu->env;
2404 struct kvm_xcrs xcrs = {};
2405
2406 if (!has_xcrs) {
2407 return 0;
2408 }
2409
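    /* XCR0 is currently the only extended control register we write. */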
2410 xcrs.nr_xcrs = 1;
2411 xcrs.flags = 0;
2412 xcrs.xcrs[0].xcr = 0;
2413 xcrs.xcrs[0].value = env->xcr0;
2414 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
2415}
2416
2417static int kvm_put_sregs(X86CPU *cpu)
2418{
2419 CPUX86State *env = &cpu->env;
2420 struct kvm_sregs sregs;
2421
2422 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
2423 if (env->interrupt_injected >= 0) {
2424 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
2425 (uint64_t)1 << (env->interrupt_injected % 64);
2426 }
2427
2428 if ((env->eflags & VM_MASK)) {
2429 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2430 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2431 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2432 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2433 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2434 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2435 } else {
2436 set_seg(&sregs.cs, &env->segs[R_CS]);
2437 set_seg(&sregs.ds, &env->segs[R_DS]);
2438 set_seg(&sregs.es, &env->segs[R_ES]);
2439 set_seg(&sregs.fs, &env->segs[R_FS]);
2440 set_seg(&sregs.gs, &env->segs[R_GS]);
2441 set_seg(&sregs.ss, &env->segs[R_SS]);
2442 }
2443
2444 set_seg(&sregs.tr, &env->tr);
2445 set_seg(&sregs.ldt, &env->ldt);
2446
2447 sregs.idt.limit = env->idt.limit;
2448 sregs.idt.base = env->idt.base;
2449 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2450 sregs.gdt.limit = env->gdt.limit;
2451 sregs.gdt.base = env->gdt.base;
2452 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2453
2454 sregs.cr0 = env->cr[0];
2455 sregs.cr2 = env->cr[2];
2456 sregs.cr3 = env->cr[3];
2457 sregs.cr4 = env->cr[4];
2458
2459 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2460 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2461
2462 sregs.efer = env->efer;
2463
2464 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
2465}
2466
2467static void kvm_msr_buf_reset(X86CPU *cpu)
2468{
2469 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
2470}
2471
2472static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
2473{
2474 struct kvm_msrs *msrs = cpu->kvm_msr_buf;
2475 void *limit = ((void *)msrs) + MSR_BUF_SIZE;
2476 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
2477
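    /* The buffer is a fixed MSR_BUF_SIZE bytes; catch overruns early. */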
2478 assert((void *)(entry + 1) <= limit);
2479
2480 entry->index = index;
2481 entry->reserved = 0;
2482 entry->data = value;
2483 msrs->nmsrs++;
2484}
2485
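/* Write a single MSR; returns the number of MSRs written (1) on success. */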
2486static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
2487{
2488 kvm_msr_buf_reset(cpu);
2489 kvm_msr_entry_add(cpu, index, value);
2490
2491 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2492}
2493
2494void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
2495{
2496 int ret;
2497
2498 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
2499 assert(ret == 1);
2500}
2501
2502static int kvm_put_tscdeadline_msr(X86CPU *cpu)
2503{
2504 CPUX86State *env = &cpu->env;
2505 int ret;
2506
2507 if (!has_msr_tsc_deadline) {
2508 return 0;
2509 }
2510
2511 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
2512 if (ret < 0) {
2513 return ret;
2514 }
2515
2516 assert(ret == 1);
2517 return 0;
2518}
2519
/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
2526static int kvm_put_msr_feature_control(X86CPU *cpu)
2527{
2528 int ret;
2529
2530 if (!has_msr_feature_control) {
2531 return 0;
2532 }
2533
2534 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
2535 cpu->env.msr_ia32_feature_control);
2536 if (ret < 0) {
2537 return ret;
2538 }
2539
2540 assert(ret == 1);
2541 return 0;
2542}
2543
2544static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
2545{
2546 uint32_t default1, can_be_one, can_be_zero;
2547 uint32_t must_be_one;
2548
2549 switch (index) {
2550 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2551 default1 = 0x00000016;
2552 break;
2553 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2554 default1 = 0x0401e172;
2555 break;
2556 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2557 default1 = 0x000011ff;
2558 break;
2559 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2560 default1 = 0x00036dff;
2561 break;
2562 case MSR_IA32_VMX_PROCBASED_CTLS2:
2563 default1 = 0;
2564 break;
2565 default:
2566 abort();
2567 }
2568
    /*
     * Bits set in 'features' may be either 0 or 1; 'default1' bits that
     * are not in 'features' must stay 1.
     */
2572 can_be_one = features | default1;
2573 can_be_zero = features | ~default1;
2574 must_be_one = ~can_be_zero;
2575
    /*
     * Compose the MSR value: the low 32 bits are the allowed-0 settings
     * (a set bit must be one), the high 32 bits are the allowed-1 settings
     * (a set bit may be one).
     */
2580 return must_be_one | (((uint64_t)can_be_one) << 32);
2581}
2582
2583#define VMCS12_MAX_FIELD_INDEX (0x17)
2584
2585static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
2586{
2587 uint64_t kvm_vmx_basic =
2588 kvm_arch_get_supported_msr_feature(kvm_state,
2589 MSR_IA32_VMX_BASIC);
2590
2591 if (!kvm_vmx_basic) {
        /*
         * The kernel does not expose VMX MSR features (e.g. the module was
         * loaded with kvm_intel.nested=0), so there is nothing to add.
         */
2595 return;
2596 }
2597
2598 uint64_t kvm_vmx_misc =
2599 kvm_arch_get_supported_msr_feature(kvm_state,
2600 MSR_IA32_VMX_MISC);
2601 uint64_t kvm_vmx_ept_vpid =
2602 kvm_arch_get_supported_msr_feature(kvm_state,
2603 MSR_IA32_VMX_EPT_VPID_CAP);
2604
    /*
     * If the guest is 64-bit, a value of 1 is allowed for the host address
     * space size vmexit control.
     */
2609 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
2610 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
2611
    /*
     * The VMCS revision, the VMXON region size and the VMCS memory type
     * come straight from the host and must not change.
     */
2616 uint64_t fixed_vmx_basic = kvm_vmx_basic &
2617 (MSR_VMX_BASIC_VMCS_REVISION_MASK |
2618 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
2619 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
2620
    /*
     * Likewise, the preemption timer rate and the maximum MSR list size
     * are host properties.  The CR3 target count and the dual-monitor
     * treatment bits are masked out here.
     */
2628 uint64_t fixed_vmx_misc = kvm_vmx_misc &
2629 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
2630 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
2631
    /*
     * EPT memory types are host properties too; keep the host's UC and WB
     * bits whenever EPT is enabled for the guest.
     */
2636 uint64_t fixed_vmx_ept_mask =
2637 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
2638 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
2639 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
2640
2641 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2642 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2643 f[FEAT_VMX_PROCBASED_CTLS]));
2644 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2645 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2646 f[FEAT_VMX_PINBASED_CTLS]));
2647 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
2648 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
2649 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
2650 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2651 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2652 f[FEAT_VMX_ENTRY_CTLS]));
2653 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
2654 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
2655 f[FEAT_VMX_SECONDARY_CTLS]));
2656 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
2657 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
2658 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
2659 f[FEAT_VMX_BASIC] | fixed_vmx_basic);
2660 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
2661 f[FEAT_VMX_MISC] | fixed_vmx_misc);
2662 if (has_msr_vmx_vmfunc) {
2663 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
2664 }
    /*
     * Just to be safe, write these with constant values.  The CR0_FIXED1
     * and CR4_FIXED1 MSRs are generated by KVM based on the vCPU's CPUID.
     */
2670 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
2671 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
2672 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
2673 CR4_VMXE_MASK);
2674 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM,
2675 VMCS12_MAX_FIELD_INDEX << 1);
2676}
2677
2678static int kvm_buf_set_msrs(X86CPU *cpu)
2679{
2680 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2681 if (ret < 0) {
2682 return ret;
2683 }
2684
2685 if (ret < cpu->kvm_msr_buf->nmsrs) {
2686 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
2687 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
2688 (uint32_t)e->index, (uint64_t)e->data);
2689 }
2690
2691 assert(ret == cpu->kvm_msr_buf->nmsrs);
2692 return 0;
2693}
2694
2695static void kvm_init_msrs(X86CPU *cpu)
2696{
2697 CPUX86State *env = &cpu->env;
2698
2699 kvm_msr_buf_reset(cpu);
2700 if (has_msr_arch_capabs) {
2701 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
2702 env->features[FEAT_ARCH_CAPABILITIES]);
2703 }
2704
2705 if (has_msr_core_capabs) {
2706 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
2707 env->features[FEAT_CORE_CAPABILITY]);
2708 }
2709
    /*
     * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
     * all kernels with MSR features should have them.
     */
2714 if (kvm_feature_msrs && cpu_has_vmx(env)) {
2715 kvm_msr_entry_add_vmx(cpu, env->features);
2716 }
2717
2718 assert(kvm_buf_set_msrs(cpu) == 0);
2719}
2720
2721static int kvm_put_msrs(X86CPU *cpu, int level)
2722{
2723 CPUX86State *env = &cpu->env;
2724 int i;
2725
2726 kvm_msr_buf_reset(cpu);
2727
2728 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
2729 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
2730 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
2731 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
2732 if (has_msr_star) {
2733 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
2734 }
2735 if (has_msr_hsave_pa) {
2736 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
2737 }
2738 if (has_msr_tsc_aux) {
2739 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
2740 }
2741 if (has_msr_tsc_adjust) {
2742 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
2743 }
2744 if (has_msr_misc_enable) {
2745 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
2746 env->msr_ia32_misc_enable);
2747 }
2748 if (has_msr_smbase) {
2749 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
2750 }
2751 if (has_msr_smi_count) {
2752 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
2753 }
2754 if (has_msr_bndcfgs) {
2755 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
2756 }
2757 if (has_msr_xss) {
2758 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
2759 }
2760 if (has_msr_umwait) {
2761 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
2762 }
2763 if (has_msr_spec_ctrl) {
2764 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
2765 }
2766 if (has_msr_tsx_ctrl) {
2767 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
2768 }
2769 if (has_msr_virt_ssbd) {
2770 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
2771 }
2772
2773#ifdef TARGET_X86_64
2774 if (lm_capable_kernel) {
2775 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
2776 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
2777 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
2778 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
2779 }
2780#endif
2781
    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
2786 if (level >= KVM_PUT_RESET_STATE) {
2787 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
2788 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
2789 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
2790 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
2791 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
2792 }
2793 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
2794 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
2795 }
2796 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
2797 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
2798 }
2799
2800 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
2801 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
2802 }
2803
2804 if (has_architectural_pmu_version > 0) {
2805 if (has_architectural_pmu_version > 1) {
            /* Stop the counter.  */
2807 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
2808 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
2809 }
        /* Set the counter values.  */
2812 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
2813 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
2814 env->msr_fixed_counters[i]);
2815 }
2816 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
2817 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
2818 env->msr_gp_counters[i]);
2819 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
2820 env->msr_gp_evtsel[i]);
2821 }
2822 if (has_architectural_pmu_version > 1) {
2823 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
2824 env->msr_global_status);
2825 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
2826 env->msr_global_ovf_ctrl);
            /* Now start the PMU.  */
2829 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
2830 env->msr_fixed_ctr_ctrl);
2831 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
2832 env->msr_global_ctrl);
2833 }
2834 }
2835
        /*
         * Hyper-V partition-wide MSRs: to avoid clearing them on CPU hot-add,
         * only sync them to KVM on the first cpu.
         */
2839 if (current_cpu == first_cpu) {
2840 if (has_msr_hv_hypercall) {
2841 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
2842 env->msr_hv_guest_os_id);
2843 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
2844 env->msr_hv_hypercall);
2845 }
2846 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
2847 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
2848 env->msr_hv_tsc);
2849 }
2850 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
2851 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
2852 env->msr_hv_reenlightenment_control);
2853 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
2854 env->msr_hv_tsc_emulation_control);
2855 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
2856 env->msr_hv_tsc_emulation_status);
2857 }
2858 }
2859 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
2860 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
2861 env->msr_hv_vapic);
2862 }
2863 if (has_msr_hv_crash) {
2864 int j;
2865
            for (j = 0; j < HV_CRASH_PARAMS; j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);
            }
2869
2870 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
2871 }
2872 if (has_msr_hv_runtime) {
2873 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
2874 }
2875 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
2876 && hv_vpindex_settable) {
2877 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
2878 hyperv_vp_index(CPU(cpu)));
2879 }
2880 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
2881 int j;
2882
2883 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
2884
2885 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
2886 env->msr_hv_synic_control);
2887 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
2888 env->msr_hv_synic_evt_page);
2889 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
2890 env->msr_hv_synic_msg_page);
2891
2892 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
2893 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
2894 env->msr_hv_synic_sint[j]);
2895 }
2896 }
2897 if (has_msr_hv_stimer) {
2898 int j;
2899
2900 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
2901 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
2902 env->msr_hv_stimer_config[j]);
2903 }
2904
2905 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
2906 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
2907 env->msr_hv_stimer_count[j]);
2908 }
2909 }
2910 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
2911 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
2912
2913 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
2914 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
2915 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
2916 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
2917 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
2918 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
2919 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
2920 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
2921 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
2922 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
2923 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
2924 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
2925 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /*
                 * The CPU GPs if we write to a bit above the physical limit of
                 * the host CPU (and KVM emulates that).
                 */
2929 uint64_t mask = env->mtrr_var[i].mask;
2930 mask &= phys_mask;
2931
2932 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
2933 env->mtrr_var[i].base);
2934 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
2935 }
2936 }
2937 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
2938 int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
2939 0x14, 1, R_EAX) & 0x7;
2940
2941 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
2942 env->msr_rtit_ctrl);
2943 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
2944 env->msr_rtit_status);
2945 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
2946 env->msr_rtit_output_base);
2947 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
2948 env->msr_rtit_output_mask);
2949 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
2950 env->msr_rtit_cr3_match);
2951 for (i = 0; i < addr_num; i++) {
2952 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
2953 env->msr_rtit_addrs[i]);
2954 }
2955 }
        /*
         * Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control.
         */
2959 }
2960
2961 if (env->mcg_cap) {
2962 int i;
2963
2964 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
2965 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
2966 if (has_msr_mcg_ext_ctl) {
2967 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
2968 }
2969 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
2970 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
2971 }
2972 }
2973
2974 return kvm_buf_set_msrs(cpu);
2975}
2976
2978static int kvm_get_fpu(X86CPU *cpu)
2979{
2980 CPUX86State *env = &cpu->env;
2981 struct kvm_fpu fpu;
2982 int i, ret;
2983
2984 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
2985 if (ret < 0) {
2986 return ret;
2987 }
2988
2989 env->fpstt = (fpu.fsw >> 11) & 7;
2990 env->fpus = fpu.fsw;
2991 env->fpuc = fpu.fcw;
2992 env->fpop = fpu.last_opcode;
2993 env->fpip = fpu.last_ip;
2994 env->fpdp = fpu.last_dp;
2995 for (i = 0; i < 8; ++i) {
2996 env->fptags[i] = !((fpu.ftwx >> i) & 1);
2997 }
2998 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
2999 for (i = 0; i < CPU_NB_REGS; i++) {
3000 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
3001 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
3002 }
3003 env->mxcsr = fpu.mxcsr;
3004
3005 return 0;
3006}
3007
3008static int kvm_get_xsave(X86CPU *cpu)
3009{
3010 CPUX86State *env = &cpu->env;
3011 X86XSaveArea *xsave = env->xsave_buf;
3012 int ret;
3013
3014 if (!has_xsave) {
3015 return kvm_get_fpu(cpu);
3016 }
3017
3018 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
3019 if (ret < 0) {
3020 return ret;
3021 }
3022 x86_cpu_xrstor_all_areas(cpu, xsave);
3023
3024 return 0;
3025}
3026
3027static int kvm_get_xcrs(X86CPU *cpu)
3028{
3029 CPUX86State *env = &cpu->env;
3030 int i, ret;
3031 struct kvm_xcrs xcrs;
3032
3033 if (!has_xcrs) {
3034 return 0;
3035 }
3036
3037 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
3038 if (ret < 0) {
3039 return ret;
3040 }
3041
3042 for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
3044 if (xcrs.xcrs[i].xcr == 0) {
3045 env->xcr0 = xcrs.xcrs[i].value;
3046 break;
3047 }
3048 }
3049 return 0;
3050}
3051
3052static int kvm_get_sregs(X86CPU *cpu)
3053{
3054 CPUX86State *env = &cpu->env;
3055 struct kvm_sregs sregs;
3056 int bit, i, ret;
3057
3058 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
3059 if (ret < 0) {
3060 return ret;
3061 }
3062
    /*
     * There can only be one pending IRQ set in the bitmap at a time, so try
     * to find it and save its number instead (-1 for none).
     */
3065 env->interrupt_injected = -1;
3066 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
3067 if (sregs.interrupt_bitmap[i]) {
3068 bit = ctz64(sregs.interrupt_bitmap[i]);
3069 env->interrupt_injected = i * 64 + bit;
3070 break;
3071 }
3072 }
3073
3074 get_seg(&env->segs[R_CS], &sregs.cs);
3075 get_seg(&env->segs[R_DS], &sregs.ds);
3076 get_seg(&env->segs[R_ES], &sregs.es);
3077 get_seg(&env->segs[R_FS], &sregs.fs);
3078 get_seg(&env->segs[R_GS], &sregs.gs);
3079 get_seg(&env->segs[R_SS], &sregs.ss);
3080
3081 get_seg(&env->tr, &sregs.tr);
3082 get_seg(&env->ldt, &sregs.ldt);
3083
3084 env->idt.limit = sregs.idt.limit;
3085 env->idt.base = sregs.idt.base;
3086 env->gdt.limit = sregs.gdt.limit;
3087 env->gdt.base = sregs.gdt.base;
3088
3089 env->cr[0] = sregs.cr0;
3090 env->cr[2] = sregs.cr2;
3091 env->cr[3] = sregs.cr3;
3092 env->cr[4] = sregs.cr4;
3093
3094 env->efer = sregs.efer;
3095
3096
3097 x86_update_hflags(env);
3098
3099 return 0;
3100}
3101
3102static int kvm_get_msrs(X86CPU *cpu)
3103{
3104 CPUX86State *env = &cpu->env;
3105 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
3106 int ret, i;
3107 uint64_t mtrr_top_bits;
3108
3109 kvm_msr_buf_reset(cpu);
3110
3111 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
3112 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
3113 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
3114 kvm_msr_entry_add(cpu, MSR_PAT, 0);
3115 if (has_msr_star) {
3116 kvm_msr_entry_add(cpu, MSR_STAR, 0);
3117 }
3118 if (has_msr_hsave_pa) {
3119 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
3120 }
3121 if (has_msr_tsc_aux) {
3122 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
3123 }
3124 if (has_msr_tsc_adjust) {
3125 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
3126 }
3127 if (has_msr_tsc_deadline) {
3128 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
3129 }
3130 if (has_msr_misc_enable) {
3131 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
3132 }
3133 if (has_msr_smbase) {
3134 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
3135 }
3136 if (has_msr_smi_count) {
3137 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
3138 }
3139 if (has_msr_feature_control) {
3140 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
3141 }
3142 if (has_msr_bndcfgs) {
3143 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
3144 }
3145 if (has_msr_xss) {
3146 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
3147 }
3148 if (has_msr_umwait) {
3149 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
3150 }
3151 if (has_msr_spec_ctrl) {
3152 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
3153 }
3154 if (has_msr_tsx_ctrl) {
3155 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
3156 }
3157 if (has_msr_virt_ssbd) {
3158 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
3159 }
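    /*
     * Re-read the TSC on every sync while the guest runs; once the VM is
     * stopped, the cached value stays valid.
     */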
3160 if (!env->tsc_valid) {
3161 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
3162 env->tsc_valid = !runstate_is_running();
3163 }
3164
3165#ifdef TARGET_X86_64
3166 if (lm_capable_kernel) {
3167 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
3168 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
3169 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
3170 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
3171 }
3172#endif
3173 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
3174 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
3175 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3176 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
3177 }
3178 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3179 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
3180 }
3181 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3182 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
3183 }
3184 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3185 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
3186 }
3187 if (has_architectural_pmu_version > 0) {
3188 if (has_architectural_pmu_version > 1) {
3189 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3190 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3191 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
3192 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
3193 }
3194 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3195 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
3196 }
3197 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3198 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
3199 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
3200 }
3201 }
3202
3203 if (env->mcg_cap) {
3204 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
3205 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
3206 if (has_msr_mcg_ext_ctl) {
3207 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
3208 }
3209 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3210 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
3211 }
3212 }
3213
3214 if (has_msr_hv_hypercall) {
3215 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
3216 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
3217 }
3218 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3219 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
3220 }
3221 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3222 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
3223 }
3224 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3225 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
3226 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
3227 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
3228 }
3229 if (has_msr_hv_crash) {
3230 int j;
3231
3232 for (j = 0; j < HV_CRASH_PARAMS; j++) {
3233 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
3234 }
3235 }
3236 if (has_msr_hv_runtime) {
3237 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
3238 }
3239 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3240 uint32_t msr;
3241
3242 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
3243 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
3244 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
3245 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
3246 kvm_msr_entry_add(cpu, msr, 0);
3247 }
3248 }
3249 if (has_msr_hv_stimer) {
3250 uint32_t msr;
3251
3252 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
3253 msr++) {
3254 kvm_msr_entry_add(cpu, msr, 0);
3255 }
3256 }
3257 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3258 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
3259 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
3260 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
3261 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
3262 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
3263 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
3264 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
3265 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
3266 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
3267 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
3268 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
3269 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
3270 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3271 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
3272 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
3273 }
3274 }
3275
3276 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3277 int addr_num =
3278 kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
3279
3280 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
3281 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
3282 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
3283 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
3284 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
3285 for (i = 0; i < addr_num; i++) {
3286 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
3287 }
3288 }
3289
3290 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
3291 if (ret < 0) {
3292 return ret;
3293 }
3294
3295 if (ret < cpu->kvm_msr_buf->nmsrs) {
3296 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3297 error_report("error: failed to get MSR 0x%" PRIx32,
3298 (uint32_t)e->index);
3299 }
3300
3301 assert(ret == cpu->kvm_msr_buf->nmsrs);
3302
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0: must be zero
     * b  11   : valid bit
     * c n-1.12: actual mask bits
     * d  51..n: reserved must be zero
     * e  63.52: reserved must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52.  We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * it lets through (d) on loading. So we mask the destination's
     * 'n' with our own, both c and d.
     */
3318 if (cpu->fill_mtrr_mask) {
3319 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
3320 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
3321 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
3322 } else {
3323 mtrr_top_bits = 0;
3324 }
3325
3326 for (i = 0; i < ret; i++) {
3327 uint32_t index = msrs[i].index;
3328 switch (index) {
3329 case MSR_IA32_SYSENTER_CS:
3330 env->sysenter_cs = msrs[i].data;
3331 break;
3332 case MSR_IA32_SYSENTER_ESP:
3333 env->sysenter_esp = msrs[i].data;
3334 break;
3335 case MSR_IA32_SYSENTER_EIP:
3336 env->sysenter_eip = msrs[i].data;
3337 break;
3338 case MSR_PAT:
3339 env->pat = msrs[i].data;
3340 break;
3341 case MSR_STAR:
3342 env->star = msrs[i].data;
3343 break;
3344#ifdef TARGET_X86_64
3345 case MSR_CSTAR:
3346 env->cstar = msrs[i].data;
3347 break;
3348 case MSR_KERNELGSBASE:
3349 env->kernelgsbase = msrs[i].data;
3350 break;
3351 case MSR_FMASK:
3352 env->fmask = msrs[i].data;
3353 break;
3354 case MSR_LSTAR:
3355 env->lstar = msrs[i].data;
3356 break;
3357#endif
3358 case MSR_IA32_TSC:
3359 env->tsc = msrs[i].data;
3360 break;
3361 case MSR_TSC_AUX:
3362 env->tsc_aux = msrs[i].data;
3363 break;
3364 case MSR_TSC_ADJUST:
3365 env->tsc_adjust = msrs[i].data;
3366 break;
3367 case MSR_IA32_TSCDEADLINE:
3368 env->tsc_deadline = msrs[i].data;
3369 break;
3370 case MSR_VM_HSAVE_PA:
3371 env->vm_hsave = msrs[i].data;
3372 break;
3373 case MSR_KVM_SYSTEM_TIME:
3374 env->system_time_msr = msrs[i].data;
3375 break;
3376 case MSR_KVM_WALL_CLOCK:
3377 env->wall_clock_msr = msrs[i].data;
3378 break;
3379 case MSR_MCG_STATUS:
3380 env->mcg_status = msrs[i].data;
3381 break;
3382 case MSR_MCG_CTL:
3383 env->mcg_ctl = msrs[i].data;
3384 break;
3385 case MSR_MCG_EXT_CTL:
3386 env->mcg_ext_ctl = msrs[i].data;
3387 break;
3388 case MSR_IA32_MISC_ENABLE:
3389 env->msr_ia32_misc_enable = msrs[i].data;
3390 break;
3391 case MSR_IA32_SMBASE:
3392 env->smbase = msrs[i].data;
3393 break;
3394 case MSR_SMI_COUNT:
3395 env->msr_smi_count = msrs[i].data;
3396 break;
3397 case MSR_IA32_FEATURE_CONTROL:
3398 env->msr_ia32_feature_control = msrs[i].data;
3399 break;
3400 case MSR_IA32_BNDCFGS:
3401 env->msr_bndcfgs = msrs[i].data;
3402 break;
3403 case MSR_IA32_XSS:
3404 env->xss = msrs[i].data;
3405 break;
3406 case MSR_IA32_UMWAIT_CONTROL:
3407 env->umwait = msrs[i].data;
3408 break;
3409 default:
3410 if (msrs[i].index >= MSR_MC0_CTL &&
3411 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
3412 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
3413 }
3414 break;
3415 case MSR_KVM_ASYNC_PF_EN:
3416 env->async_pf_en_msr = msrs[i].data;
3417 break;
3418 case MSR_KVM_PV_EOI_EN:
3419 env->pv_eoi_en_msr = msrs[i].data;
3420 break;
3421 case MSR_KVM_STEAL_TIME:
3422 env->steal_time_msr = msrs[i].data;
3423 break;
3424 case MSR_KVM_POLL_CONTROL: {
3425 env->poll_control_msr = msrs[i].data;
3426 break;
3427 }
3428 case MSR_CORE_PERF_FIXED_CTR_CTRL:
3429 env->msr_fixed_ctr_ctrl = msrs[i].data;
3430 break;
3431 case MSR_CORE_PERF_GLOBAL_CTRL:
3432 env->msr_global_ctrl = msrs[i].data;
3433 break;
3434 case MSR_CORE_PERF_GLOBAL_STATUS:
3435 env->msr_global_status = msrs[i].data;
3436 break;
3437 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
3438 env->msr_global_ovf_ctrl = msrs[i].data;
3439 break;
3440 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
3441 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
3442 break;
3443 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
3444 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
3445 break;
3446 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
3447 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
3448 break;
3449 case HV_X64_MSR_HYPERCALL:
3450 env->msr_hv_hypercall = msrs[i].data;
3451 break;
3452 case HV_X64_MSR_GUEST_OS_ID:
3453 env->msr_hv_guest_os_id = msrs[i].data;
3454 break;
3455 case HV_X64_MSR_APIC_ASSIST_PAGE:
3456 env->msr_hv_vapic = msrs[i].data;
3457 break;
3458 case HV_X64_MSR_REFERENCE_TSC:
3459 env->msr_hv_tsc = msrs[i].data;
3460 break;
3461 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3462 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
3463 break;
3464 case HV_X64_MSR_VP_RUNTIME:
3465 env->msr_hv_runtime = msrs[i].data;
3466 break;
3467 case HV_X64_MSR_SCONTROL:
3468 env->msr_hv_synic_control = msrs[i].data;
3469 break;
3470 case HV_X64_MSR_SIEFP:
3471 env->msr_hv_synic_evt_page = msrs[i].data;
3472 break;
3473 case HV_X64_MSR_SIMP:
3474 env->msr_hv_synic_msg_page = msrs[i].data;
3475 break;
3476 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
3477 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
3478 break;
3479 case HV_X64_MSR_STIMER0_CONFIG:
3480 case HV_X64_MSR_STIMER1_CONFIG:
3481 case HV_X64_MSR_STIMER2_CONFIG:
3482 case HV_X64_MSR_STIMER3_CONFIG:
3483 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
3484 msrs[i].data;
3485 break;
3486 case HV_X64_MSR_STIMER0_COUNT:
3487 case HV_X64_MSR_STIMER1_COUNT:
3488 case HV_X64_MSR_STIMER2_COUNT:
3489 case HV_X64_MSR_STIMER3_COUNT:
3490 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
3491 msrs[i].data;
3492 break;
3493 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3494 env->msr_hv_reenlightenment_control = msrs[i].data;
3495 break;
3496 case HV_X64_MSR_TSC_EMULATION_CONTROL:
3497 env->msr_hv_tsc_emulation_control = msrs[i].data;
3498 break;
3499 case HV_X64_MSR_TSC_EMULATION_STATUS:
3500 env->msr_hv_tsc_emulation_status = msrs[i].data;
3501 break;
3502 case MSR_MTRRdefType:
3503 env->mtrr_deftype = msrs[i].data;
3504 break;
3505 case MSR_MTRRfix64K_00000:
3506 env->mtrr_fixed[0] = msrs[i].data;
3507 break;
3508 case MSR_MTRRfix16K_80000:
3509 env->mtrr_fixed[1] = msrs[i].data;
3510 break;
3511 case MSR_MTRRfix16K_A0000:
3512 env->mtrr_fixed[2] = msrs[i].data;
3513 break;
3514 case MSR_MTRRfix4K_C0000:
3515 env->mtrr_fixed[3] = msrs[i].data;
3516 break;
3517 case MSR_MTRRfix4K_C8000:
3518 env->mtrr_fixed[4] = msrs[i].data;
3519 break;
3520 case MSR_MTRRfix4K_D0000:
3521 env->mtrr_fixed[5] = msrs[i].data;
3522 break;
3523 case MSR_MTRRfix4K_D8000:
3524 env->mtrr_fixed[6] = msrs[i].data;
3525 break;
3526 case MSR_MTRRfix4K_E0000:
3527 env->mtrr_fixed[7] = msrs[i].data;
3528 break;
3529 case MSR_MTRRfix4K_E8000:
3530 env->mtrr_fixed[8] = msrs[i].data;
3531 break;
3532 case MSR_MTRRfix4K_F0000:
3533 env->mtrr_fixed[9] = msrs[i].data;
3534 break;
3535 case MSR_MTRRfix4K_F8000:
3536 env->mtrr_fixed[10] = msrs[i].data;
3537 break;
3538 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
3539 if (index & 1) {
3540 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
3541 mtrr_top_bits;
3542 } else {
3543 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
3544 }
3545 break;
3546 case MSR_IA32_SPEC_CTRL:
3547 env->spec_ctrl = msrs[i].data;
3548 break;
3549 case MSR_IA32_TSX_CTRL:
3550 env->tsx_ctrl = msrs[i].data;
3551 break;
3552 case MSR_VIRT_SSBD:
3553 env->virt_ssbd = msrs[i].data;
3554 break;
3555 case MSR_IA32_RTIT_CTL:
3556 env->msr_rtit_ctrl = msrs[i].data;
3557 break;
3558 case MSR_IA32_RTIT_STATUS:
3559 env->msr_rtit_status = msrs[i].data;
3560 break;
3561 case MSR_IA32_RTIT_OUTPUT_BASE:
3562 env->msr_rtit_output_base = msrs[i].data;
3563 break;
3564 case MSR_IA32_RTIT_OUTPUT_MASK:
3565 env->msr_rtit_output_mask = msrs[i].data;
3566 break;
3567 case MSR_IA32_RTIT_CR3_MATCH:
3568 env->msr_rtit_cr3_match = msrs[i].data;
3569 break;
3570 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
3571 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
3572 break;
3573 }
3574 }
3575
3576 return 0;
3577}
3578
3579static int kvm_put_mp_state(X86CPU *cpu)
3580{
3581 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
3582
3583 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
3584}
3585
3586static int kvm_get_mp_state(X86CPU *cpu)
3587{
3588 CPUState *cs = CPU(cpu);
3589 CPUX86State *env = &cpu->env;
3590 struct kvm_mp_state mp_state;
3591 int ret;
3592
3593 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
3594 if (ret < 0) {
3595 return ret;
3596 }
3597 env->mp_state = mp_state.mp_state;
3598 if (kvm_irqchip_in_kernel()) {
3599 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
3600 }
3601 return 0;
3602}
3603
3604static int kvm_get_apic(X86CPU *cpu)
3605{
3606 DeviceState *apic = cpu->apic_state;
3607 struct kvm_lapic_state kapic;
3608 int ret;
3609
3610 if (apic && kvm_irqchip_in_kernel()) {
3611 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
3612 if (ret < 0) {
3613 return ret;
3614 }
3615
3616 kvm_get_apic_state(apic, &kapic);
3617 }
3618 return 0;
3619}
3620
3621static int kvm_put_vcpu_events(X86CPU *cpu, int level)
3622{
3623 CPUState *cs = CPU(cpu);
3624 CPUX86State *env = &cpu->env;
3625 struct kvm_vcpu_events events = {};
3626
3627 if (!kvm_has_vcpu_events()) {
3628 return 0;
3629 }
3630
3631 events.flags = 0;
3632
3633 if (has_exception_payload) {
3634 events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
3635 events.exception.pending = env->exception_pending;
3636 events.exception_has_payload = env->exception_has_payload;
3637 events.exception_payload = env->exception_payload;
3638 }
3639 events.exception.nr = env->exception_nr;
3640 events.exception.injected = env->exception_injected;
3641 events.exception.has_error_code = env->has_error_code;
3642 events.exception.error_code = env->error_code;
3643
3644 events.interrupt.injected = (env->interrupt_injected >= 0);
3645 events.interrupt.nr = env->interrupt_injected;
3646 events.interrupt.soft = env->soft_interrupt;
3647
3648 events.nmi.injected = env->nmi_injected;
3649 events.nmi.pending = env->nmi_pending;
3650 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
3651
3652 events.sipi_vector = env->sipi_vector;
3653
3654 if (has_msr_smbase) {
3655 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
3656 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
3657 if (kvm_irqchip_in_kernel()) {
            /*
             * As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
3661 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
3662 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
3663 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
3664 } else {
            /* Keep these in cs->interrupt_request.  */
3666 events.smi.pending = 0;
3667 events.smi.latched_init = 0;
3668 }
3669
        /*
         * Stop SMI delivery on old machine types to avoid a reboot on an
         * inward migration of an old VM.
         */
3672 if (!cpu->kvm_no_smi_migration) {
3673 events.flags |= KVM_VCPUEVENT_VALID_SMM;
3674 }
3675 }
3676
3677 if (level >= KVM_PUT_RESET_STATE) {
3678 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
3679 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
3680 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
3681 }
3682 }
3683
3684 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
3685}
3686
3687static int kvm_get_vcpu_events(X86CPU *cpu)
3688{
3689 CPUX86State *env = &cpu->env;
3690 struct kvm_vcpu_events events;
3691 int ret;
3692
3693 if (!kvm_has_vcpu_events()) {
3694 return 0;
3695 }
3696
3697 memset(&events, 0, sizeof(events));
3698 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
3699 if (ret < 0) {
3700 return ret;
3701 }
3702
3703 if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
3704 env->exception_pending = events.exception.pending;
3705 env->exception_has_payload = events.exception_has_payload;
3706 env->exception_payload = events.exception_payload;
3707 } else {
3708 env->exception_pending = 0;
3709 env->exception_has_payload = false;
3710 }
3711 env->exception_injected = events.exception.injected;
3712 env->exception_nr =
3713 (env->exception_pending || env->exception_injected) ?
3714 events.exception.nr : -1;
3715 env->has_error_code = events.exception.has_error_code;
3716 env->error_code = events.exception.error_code;
3717
3718 env->interrupt_injected =
3719 events.interrupt.injected ? events.interrupt.nr : -1;
3720 env->soft_interrupt = events.interrupt.soft;
3721
3722 env->nmi_injected = events.nmi.injected;
3723 env->nmi_pending = events.nmi.pending;
3724 if (events.nmi.masked) {
3725 env->hflags2 |= HF2_NMI_MASK;
3726 } else {
3727 env->hflags2 &= ~HF2_NMI_MASK;
3728 }
3729
3730 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
3731 if (events.smi.smm) {
3732 env->hflags |= HF_SMM_MASK;
3733 } else {
3734 env->hflags &= ~HF_SMM_MASK;
3735 }
3736 if (events.smi.pending) {
3737 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
3738 } else {
3739 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
3740 }
3741 if (events.smi.smm_inside_nmi) {
3742 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
3743 } else {
3744 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
3745 }
3746 if (events.smi.latched_init) {
3747 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
3748 } else {
3749 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
3750 }
3751 }
3752
3753 env->sipi_vector = events.sipi_vector;
3754
3755 return 0;
3756}
3757
3758static int kvm_guest_debug_workarounds(X86CPU *cpu)
3759{
3760 CPUState *cs = CPU(cpu);
3761 CPUX86State *env = &cpu->env;
3762 int ret = 0;
3763 unsigned long reinject_trap = 0;
3764
3765 if (!kvm_has_vcpu_events()) {
3766 if (env->exception_nr == EXCP01_DB) {
3767 reinject_trap = KVM_GUESTDBG_INJECT_DB;
3768 } else if (env->exception_injected == EXCP03_INT3) {
3769 reinject_trap = KVM_GUESTDBG_INJECT_BP;
3770 }
3771 kvm_reset_exception(env);
3772 }
3773
    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
3782 if (reinject_trap ||
3783 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
3784 ret = kvm_update_guest_debug(cs, reinject_trap);
3785 }
3786 return ret;
3787}
3788
3789static int kvm_put_debugregs(X86CPU *cpu)
3790{
3791 CPUX86State *env = &cpu->env;
3792 struct kvm_debugregs dbgregs;
3793 int i;
3794
3795 if (!kvm_has_debugregs()) {
3796 return 0;
3797 }
3798
3799 memset(&dbgregs, 0, sizeof(dbgregs));
3800 for (i = 0; i < 4; i++) {
3801 dbgregs.db[i] = env->dr[i];
3802 }
3803 dbgregs.dr6 = env->dr[6];
3804 dbgregs.dr7 = env->dr[7];
3805 dbgregs.flags = 0;
3806
3807 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
3808}
3809
3810static int kvm_get_debugregs(X86CPU *cpu)
3811{
3812 CPUX86State *env = &cpu->env;
3813 struct kvm_debugregs dbgregs;
3814 int i, ret;
3815
3816 if (!kvm_has_debugregs()) {
3817 return 0;
3818 }
3819
3820 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
3821 if (ret < 0) {
3822 return ret;
3823 }
3824 for (i = 0; i < 4; i++) {
3825 env->dr[i] = dbgregs.db[i];
3826 }
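    /* DR4 and DR5 are architectural aliases of DR6 and DR7. */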
3827 env->dr[4] = env->dr[6] = dbgregs.dr6;
3828 env->dr[5] = env->dr[7] = dbgregs.dr7;
3829
3830 return 0;
3831}
3832
3833static int kvm_put_nested_state(X86CPU *cpu)
3834{
3835 CPUX86State *env = &cpu->env;
3836 int max_nested_state_len = kvm_max_nested_state_length();
3837
3838 if (!env->nested_state) {
3839 return 0;
3840 }
3841
3842 assert(env->nested_state->size <= max_nested_state_len);
3843 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
3844}
3845
3846static int kvm_get_nested_state(X86CPU *cpu)
3847{
3848 CPUX86State *env = &cpu->env;
3849 int max_nested_state_len = kvm_max_nested_state_length();
3850 int ret;
3851
3852 if (!env->nested_state) {
3853 return 0;
3854 }
3855
    /*
     * It is possible that migration restored a smaller size into
     * nested_state->size than what our kernel supports, so always reset it
     * to the maximum before KVM_GET_NESTED_STATE to avoid a failure due to
     * an insufficient buffer size.
     */
3863 env->nested_state->size = max_nested_state_len;
3864
3865 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
3866 if (ret < 0) {
3867 return ret;
3868 }
3869
3870 if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
3871 env->hflags |= HF_GUEST_MASK;
3872 } else {
3873 env->hflags &= ~HF_GUEST_MASK;
3874 }
3875
3876 return ret;
3877}
3878
3879int kvm_arch_put_registers(CPUState *cpu, int level)
3880{
3881 X86CPU *x86_cpu = X86_CPU(cpu);
3882 int ret;
3883
3884 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
3885
3886 if (level >= KVM_PUT_RESET_STATE) {
3887 ret = kvm_put_nested_state(x86_cpu);
3888 if (ret < 0) {
3889 return ret;
3890 }
3891
3892 ret = kvm_put_msr_feature_control(x86_cpu);
3893 if (ret < 0) {
3894 return ret;
3895 }
3896 }
3897
3898 if (level == KVM_PUT_FULL_STATE) {
        /*
         * We don't check for kvm_arch_set_tsc_khz() errors here, because TSC
         * frequency mismatch shouldn't abort migration, unless the user
         * explicitly asked for a more strict TSC setting (e.g. using an
         * explicit "tsc-freq" option).
         */
3904 kvm_arch_set_tsc_khz(cpu);
3905 }
3906
3907 ret = kvm_getput_regs(x86_cpu, 1);
3908 if (ret < 0) {
3909 return ret;
3910 }
3911 ret = kvm_put_xsave(x86_cpu);
3912 if (ret < 0) {
3913 return ret;
3914 }
3915 ret = kvm_put_xcrs(x86_cpu);
3916 if (ret < 0) {
3917 return ret;
3918 }
3919 ret = kvm_put_sregs(x86_cpu);
3920 if (ret < 0) {
3921 return ret;
3922 }
3923
3924 ret = kvm_inject_mce_oldstyle(x86_cpu);
3925 if (ret < 0) {
3926 return ret;
3927 }
3928 ret = kvm_put_msrs(x86_cpu, level);
3929 if (ret < 0) {
3930 return ret;
3931 }
3932 ret = kvm_put_vcpu_events(x86_cpu, level);
3933 if (ret < 0) {
3934 return ret;
3935 }
3936 if (level >= KVM_PUT_RESET_STATE) {
3937 ret = kvm_put_mp_state(x86_cpu);
3938 if (ret < 0) {
3939 return ret;
3940 }
3941 }
3942
3943 ret = kvm_put_tscdeadline_msr(x86_cpu);
3944 if (ret < 0) {
3945 return ret;
3946 }
3947 ret = kvm_put_debugregs(x86_cpu);
3948 if (ret < 0) {
3949 return ret;
3950 }
3951
3952 ret = kvm_guest_debug_workarounds(x86_cpu);
3953 if (ret < 0) {
3954 return ret;
3955 }
3956 return 0;
3957}
3958
3959int kvm_arch_get_registers(CPUState *cs)
3960{
3961 X86CPU *cpu = X86_CPU(cs);
3962 int ret;
3963
3964 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
3965
3966 ret = kvm_get_vcpu_events(cpu);
3967 if (ret < 0) {
3968 goto out;
3969 }
3970
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
3974 ret = kvm_get_mp_state(cpu);
3975 if (ret < 0) {
3976 goto out;
3977 }
3978 ret = kvm_getput_regs(cpu, 0);
3979 if (ret < 0) {
3980 goto out;
3981 }
3982 ret = kvm_get_xsave(cpu);
3983 if (ret < 0) {
3984 goto out;
3985 }
3986 ret = kvm_get_xcrs(cpu);
3987 if (ret < 0) {
3988 goto out;
3989 }
3990 ret = kvm_get_sregs(cpu);
3991 if (ret < 0) {
3992 goto out;
3993 }
3994 ret = kvm_get_msrs(cpu);
3995 if (ret < 0) {
3996 goto out;
3997 }
3998 ret = kvm_get_apic(cpu);
3999 if (ret < 0) {
4000 goto out;
4001 }
4002 ret = kvm_get_debugregs(cpu);
4003 if (ret < 0) {
4004 goto out;
4005 }
4006 ret = kvm_get_nested_state(cpu);
4007 if (ret < 0) {
4008 goto out;
4009 }
4010 ret = 0;
4011 out:
4012 cpu_sync_bndcs_hflags(&cpu->env);
4013 return ret;
4014}
4015
4016void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
4017{
4018 X86CPU *x86_cpu = X86_CPU(cpu);
4019 CPUX86State *env = &x86_cpu->env;
4020 int ret;
4021
    /* Inject NMI and SMI, if requested. */
4023 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
4024 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
4025 qemu_mutex_lock_iothread();
4026 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
4027 qemu_mutex_unlock_iothread();
4028 DPRINTF("injected NMI\n");
4029 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
4030 if (ret < 0) {
4031 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
4032 strerror(-ret));
4033 }
4034 }
4035 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
4036 qemu_mutex_lock_iothread();
4037 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
4038 qemu_mutex_unlock_iothread();
4039 DPRINTF("injected SMI\n");
4040 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
4041 if (ret < 0) {
4042 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
4043 strerror(-ret));
4044 }
4045 }
4046 }
4047
4048 if (!kvm_pic_in_kernel()) {
4049 qemu_mutex_lock_iothread();
4050 }
4051
    /*
     * Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
4056 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
4057 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
4058 !(env->hflags & HF_SMM_MASK)) {
4059 cpu->exit_request = 1;
4060 }
4061 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
4062 cpu->exit_request = 1;
4063 }
4064 }
4065
4066 if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it. */
4068 if (run->ready_for_interrupt_injection &&
4069 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
4070 (env->eflags & IF_MASK)) {
4071 int irq;
4072
4073 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
4074 irq = cpu_get_pic_interrupt(env);
4075 if (irq >= 0) {
4076 struct kvm_interrupt intr;
4077
4078 intr.irq = irq;
4079 DPRINTF("injected interrupt %d\n", irq);
4080 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
4081 if (ret < 0) {
4082 fprintf(stderr,
4083 "KVM: injection failed, interrupt lost (%s)\n",
4084 strerror(-ret));
4085 }
4086 }
4087 }
4088
        /*
         * If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will cause a
         * return to userspace as soon as the guest is ready to receive
         * interrupts.
         */
4093 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
4094 run->request_interrupt_window = 1;
4095 } else {
4096 run->request_interrupt_window = 0;
4097 }
4098
4099 DPRINTF("setting tpr\n");
4100 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
4101
4102 qemu_mutex_unlock_iothread();
4103 }
4104}
4105
4106MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
4107{
4108 X86CPU *x86_cpu = X86_CPU(cpu);
4109 CPUX86State *env = &x86_cpu->env;
4110
4111 if (run->flags & KVM_RUN_X86_SMM) {
4112 env->hflags |= HF_SMM_MASK;
4113 } else {
4114 env->hflags &= ~HF_SMM_MASK;
4115 }
4116 if (run->if_flag) {
4117 env->eflags |= IF_MASK;
4118 } else {
4119 env->eflags &= ~IF_MASK;
4120 }
4121
    /*
     * We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used.
     */
4124 if (!kvm_irqchip_in_kernel()) {
4125 qemu_mutex_lock_iothread();
4126 }
4127 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
4128 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
4129 if (!kvm_irqchip_in_kernel()) {
4130 qemu_mutex_unlock_iothread();
4131 }
4132 return cpu_get_mem_attrs(env);
4133}
4134
4135int kvm_arch_process_async_events(CPUState *cs)
4136{
4137 X86CPU *cpu = X86_CPU(cs);
4138 CPUX86State *env = &cpu->env;
4139
4140 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
4142 assert(env->mcg_cap);
4143
4144 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
4145
4146 kvm_cpu_synchronize_state(cs);
4147
4148 if (env->exception_nr == EXCP08_DBLE) {
            /* This means a triple fault; request a system reset. */
4150 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
4151 cs->exit_request = 1;
4152 return 0;
4153 }
4154 kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
4155 env->has_error_code = 0;
4156
4157 cs->halted = 0;
4158 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
4159 env->mp_state = KVM_MP_STATE_RUNNABLE;
4160 }
4161 }
4162
4163 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
4164 !(env->hflags & HF_SMM_MASK)) {
4165 kvm_cpu_synchronize_state(cs);
4166 do_cpu_init(cpu);
4167 }
4168
4169 if (kvm_irqchip_in_kernel()) {
4170 return 0;
4171 }
4172
4173 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
4174 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
4175 apic_poll_irq(cpu->apic_state);
4176 }
4177 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4178 (env->eflags & IF_MASK)) ||
4179 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4180 cs->halted = 0;
4181 }
4182 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
4183 kvm_cpu_synchronize_state(cs);
4184 do_cpu_sipi(cpu);
4185 }
4186 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
4187 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
4188 kvm_cpu_synchronize_state(cs);
4189 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
4190 env->tpr_access_type);
4191 }
4192
4193 return cs->halted;
4194}
4195
4196static int kvm_handle_halt(X86CPU *cpu)
4197{
4198 CPUState *cs = CPU(cpu);
4199 CPUX86State *env = &cpu->env;
4200
4201 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4202 (env->eflags & IF_MASK)) &&
4203 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4204 cs->halted = 1;
4205 return EXCP_HLT;
4206 }
4207
4208 return 0;
4209}
4210
4211static int kvm_handle_tpr_access(X86CPU *cpu)
4212{
4213 CPUState *cs = CPU(cpu);
4214 struct kvm_run *run = cs->kvm_run;
4215
4216 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
4217 run->tpr_access.is_write ? TPR_ACCESS_WRITE
4218 : TPR_ACCESS_READ);
4219 return 1;
4220}
4221
4222int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4223{
4224 static const uint8_t int3 = 0xcc;
4225
4226 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
4227 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
4228 return -EINVAL;
4229 }
4230 return 0;
4231}
4232
4233int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4234{
4235 uint8_t int3;
4236
4237 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
4238 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
4239 return -EINVAL;
4240 }
4241 return 0;
4242}
4243
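/* x86 has four debug address registers (DR0-DR3), hence at most four slots. */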
4244static struct {
4245 target_ulong addr;
4246 int len;
4247 int type;
4248} hw_breakpoint[4];
4249
4250static int nb_hw_breakpoint;
4251
4252static int find_hw_breakpoint(target_ulong addr, int len, int type)
4253{
4254 int n;
4255
4256 for (n = 0; n < nb_hw_breakpoint; n++) {
4257 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
4258 (hw_breakpoint[n].len == len || len == -1)) {
4259 return n;
4260 }
4261 }
4262 return -1;
4263}
4264
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

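/*
 * Decode a KVM_EXIT_DEBUG event: DR6.BS set means a single-step trap,
 * the low four DR6 bits identify the hardware breakpoint or watchpoint
 * that fired, and anything else is matched against our software
 * breakpoint list.  Debug exceptions we did not cause ourselves are
 * reinjected into the guest.
 */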
static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == EXCP01_DB) {
        if (arch_info->dr6 & DR6_BS) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n * 4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_nr == -1);

        /* pass to guest */
        kvm_queue_exception(env, arch_info->exception,
                            arch_info->exception == EXCP01_DB,
                            arch_info->dr6);
        env->has_error_code = 0;
    }

    return ret;
}

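/*
 * Translate QEMU's breakpoint state into the kvm_guest_debug layout:
 * DR0-DR3 receive the breakpoint addresses, DR7 the per-slot global
 * enable, type and length bits.
 */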
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n * 4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n * 4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

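/* Dispatch the KVM exit reasons that need x86-specific handling. */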
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on older Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

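/*
 * Only stop on an emulation failure that happened in guest kernel
 * context, i.e. in real mode or at CPL != 3; a failure triggered from
 * guest user mode is left for the guest OS to deal with.
 */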
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If the kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}

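/*
 * Returns 1 when the split irqchip was created here, so that the common
 * code skips creating the fully in-kernel irqchip; returns 0 otherwise.
 */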
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;
    if (machine_kernel_irqchip_split(ms)) {
        /* args[0] is the number of IRQ routes reserved for the
         * userspace IOAPIC (24 pins) */
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}

/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}

int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}

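/*
 * Newer kernels expose KVM_ASSIGN_DEV_IRQ for assigned-device IRQ
 * routing; fall back to the older KVM_ASSIGN_IRQ ioctl where the
 * capability is missing.
 */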
static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}

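/*
 * When a vIOMMU with interrupt remapping is present, MSI messages must
 * be translated from the guest-programmed (source) form into the
 * remapped (destination) form before KVM can route them.
 */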
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        if (!class->int_remap) {
            return 0;
        }

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst,
                               dev ? pci_requester_id(dev)
                                   : X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}

typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);

static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSI-X is disabled for the device, or the
             * specific message was masked out.  Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /*
         * These are (possibly) IOAPIC routes only used for dirty
         * tracking, but nothing will actually use them, so there is
         * no need to remember the route.
         */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* For the first time we do add route, add ourselves into
         * IOMMU's IEC notify list if any. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;
    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

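/*
 * Never reached on x86: MSI data words are not translated back into
 * GSIs here, so any call into this hook indicates a bug.
 */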
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
