/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"

#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_xss;
static bool has_msr_spec_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;
static int has_exception_payload;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

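/*
 * Evaluate fn only on the first call and cache its result in _result;
 * on later calls the macro expands to a "return _result;" from the
 * calling function, so the cached value is returned directly.
 */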
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

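/*
 * Read the current TSC MSR from KVM into env->tsc.  While the VM is not
 * running the value is cached (env->tsc_valid), so repeated reads between
 * two resumes are served from memory.
 */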
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

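/*
 * Refresh the cached TSC of every vCPU, running the read on each vCPU's
 * own thread via run_on_cpu().
 */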
void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

static bool host_tsx_blacklisted(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }

    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_blacklisted()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
        found = true;
    }

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}

uint32_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    return msr_data.entries[0].data;
}
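/*
 * Track guest pages that were reported as hardware-poisoned; they are
 * remapped (and thus cleared) on VM reset by kvm_unpoison_all().
 */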
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

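/*
 * Build and inject a machine check event for the given guest physical
 * address: Action Required for BUS_MCEERR_AR, Action Optional otherwise.
 * The event is broadcast to all vCPUs when the guest supports MCA
 * broadcast and LMCE is not enabled.
 */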
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;

    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running.  An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);
            return;
        }

        fprintf(stderr, "Hardware memory error for memory used by "
                "QEMU itself instead of guest system!\n");
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error();
    }

    /* Hope we are lucky for AO MCE */
}

static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}

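/*
 * Queue an exception for injection on the next vCPU entry.  With
 * KVM_CAP_EXCEPTION_PAYLOAD the payload (DR6/CR2) is handed to the kernel
 * as such; on older kernels it is applied to the register state directly.
 */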
static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) ||
            cpu->hyperv_features || cpu->hyperv_passthrough);
}

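/*
 * Ask KVM to run the vCPU at env->tsc_khz.  Failure is tolerated as long
 * as the host TSC already runs at that frequency; otherwise the error is
 * propagated to the caller.
 */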
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r;

    if (!env->tsc_khz) {
        return 0;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;
    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                       -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
           || env->user_tsc_khz;
}

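/*
 * Table of Hyper-V enlightenments: for each HYPERV_FEAT_* it lists the
 * CPUID feature words/bits that the kernel must support and the other
 * enlightenments the feature depends on.
 */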
static struct {
    const char *desc;
    struct {
        uint32_t fw;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE},
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE},
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_APIC_ACCESS_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE |
             HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
};

static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    int max = 7; /* 0x40000000..0x40000005, 0x4000000A */

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max)) == NULL) {
        max++;
    }
    return cpuid;
}

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported, assemble the two
 * Hyper-V CPUID leaves from the older per-feature capability and MSR
 * availability checks instead.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_FEATURES */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    /* HV_CPUID_ENLIGHTMENT_INFO */
    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r)
{
    struct kvm_cpuid_entry2 *entry;
    uint32_t func;
    int reg;

    switch (fw) {
    case FEAT_HYPERV_EAX:
        reg = R_EAX;
        func = HV_CPUID_FEATURES;
        break;
    case FEAT_HYPERV_EDX:
        reg = R_EDX;
        func = HV_CPUID_FEATURES;
        break;
    case FEAT_HV_RECOMM_EAX:
        reg = R_EAX;
        func = HV_CPUID_ENLIGHTMENT_INFO;
        break;
    default:
        return -EINVAL;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return -ENOENT;
    }

    switch (reg) {
    case R_EAX:
        *r = entry->eax;
        break;
    case R_EDX:
        *r = entry->edx;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

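/*
 * Check that the requested Hyper-V feature (and its dependencies) is
 * supported by the kernel CPUID data and, if so, set the corresponding
 * bits in the vCPU's feature words.  Returns non-zero on failure.
 */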
static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
                                  int feature)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t r, fw, bits;
    uint64_t deps;
    int i, dep_feat;

    if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
        return 0;
    }

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            fprintf(stderr,
                    "Hyper-V %s requires Hyper-V %s\n",
                    kvm_hyperv_properties[feature].desc,
                    kvm_hyperv_properties[dep_feat].desc);
            return 1;
        }
        deps &= ~(1ull << dep_feat);
    }

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
        fw = kvm_hyperv_properties[feature].flags[i].fw;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!fw) {
            continue;
        }

        if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) {
            if (hyperv_feat_enabled(cpu, feature)) {
                fprintf(stderr,
                        "Hyper-V %s is not supported by kernel\n",
                        kvm_hyperv_properties[feature].desc);
                return 1;
            } else {
                return 0;
            }
        }

        env->features[fw] |= bits;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_features |= BIT(feature);
    }

    return 0;
}

/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent
 * in case of success, errno < 0 in case of failure and 0 when no Hyper-V
 * enlightenments are exposed.
 */
static int hyperv_handle_properties(CPUState *cs,
                                    struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0;
    int r;

    if (!hyperv_enabled(cpu)) {
        return 0;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ||
        cpu->hyperv_passthrough) {
        uint16_t evmcs_version;

        r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                (uintptr_t)&evmcs_version);

        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) {
            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
                    kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return -ENOSYS;
        }

        if (!r) {
            env->features[FEAT_HV_RECOMM_EAX] |=
                HV_ENLIGHTENED_VMCS_RECOMMENDED;
            env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
        }
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
        cpuid = get_supported_hv_cpuid(cs);
    } else {
        cpuid = get_supported_hv_cpuid_legacy(cs);
    }

    if (cpu->hyperv_passthrough) {
        memcpy(cpuid_ent, &cpuid->entries[0],
               cpuid->nent * sizeof(cpuid->entries[0]));

        c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
        if (c) {
            env->features[FEAT_HYPERV_EAX] = c->eax;
            env->features[FEAT_HYPERV_EBX] = c->ebx;
            env->features[FEAT_HYPERV_EDX] = c->edx;
        }
        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
        if (c) {
            env->features[FEAT_HV_RECOMM_EAX] = c->eax;

            /* hv-spinlocks may have been overridden */
            if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) {
                c->ebx = cpu->hyperv_spinlock_attempts;
            }
        }
        c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
        if (c) {
            env->features[FEAT_HV_NESTED_EAX] = c->eax;
        }
    }

    /* Features */
    r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT);

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        fprintf(stderr, "Hyper-V %s requires Hyper-V %s\n",
                kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        r |= 1;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    if (r) {
        r = -ENOSYS;
        goto free;
    }

    if (cpu->hyperv_passthrough) {
        /* We already copied all feature words from KVM as is */
        r = cpuid->nent;
        goto free;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    if (!cpu->hyperv_vendor_id) {
        memcpy(signature, "Microsoft Hv", 12);
    } else {
        size_t len = strlen(cpu->hyperv_vendor_id);

        if (len > 12) {
            error_report("hv-vendor-id truncated to 12 characters");
            len = 12;
        }
        memset(signature, 0, 12);
        memcpy(signature, cpu->hyperv_vendor_id, len);
    }
    c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
        HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
    c->eax = signature[0];
    c->ebx = 0;
    c->ecx = 0;
    c->edx = 0;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = 0x00001bbc;
    c->ebx = 0x00060001;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = env->features[FEAT_HYPERV_EAX];
    c->ebx = env->features[FEAT_HYPERV_EBX];
    c->edx = env->features[FEAT_HYPERV_EDX];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = env->features[FEAT_HV_RECOMM_EAX];
    c->ebx = cpu->hyperv_spinlock_attempts;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = 0x40;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        __u32 function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = env->features[FEAT_HV_NESTED_EAX];
    }
    r = cpuid_i;

free:
    g_free(cpuid);

    return r;
}

static Error *hv_passthrough_mig_blocker;

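/*
 * Per-vCPU Hyper-V setup that has to happen after CPUID is configured:
 * migration blocker for hv-passthrough, VP_INDEX consistency check and
 * SynIC activation.
 */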
static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_passthrough_mig_blocker);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[1];
        } msr_data = {
            .info.nmsrs = 1,
            .entries[0].index = HV_X64_MSR_VP_INDEX,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
        if (ret < 0) {
            return ret;
        }
        assert(ret == 1);

        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES 100

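/*
 * Build the complete CPUID table for the vCPU (Hyper-V and KVM
 * paravirtual leaves first, then basic/extended leaves), configure MCE
 * capabilities and allocate the per-vCPU buffers used by kvm_put_*/get_*.
 */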
int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } cpuid_data;
    /*
     * The kernel defines these structs with padding fields so there
     * should be no extra padding in our cpuid_data struct.
     */
    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
                      sizeof(struct kvm_cpuid2) +
                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int max_nested_state_len;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /* vcpu's TSC frequency is either specified by user, or following
     * the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record in env->tsc_khz,
     * so that vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    /* Paravirtualization CPUIDs */
    r = hyperv_handle_properties(cs, cpuid_data.entries);
    if (r < 0) {
        return r;
    } else if (r > 0) {
        cpuid_i = r;
        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
        c->edx = env->features[FEAT_KVM_HINTS];
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (env->nr_dies < 2) {
                break;
            }
            /* fallthrough */
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                if (i == 0x1f && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        case 0x14: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x14,ecx:0x%x)\n", j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
                        unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }

    if (!env->user_tsc_khz) {
        if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
            invtsc_mig_blocker == NULL) {
            error_setg(&invtsc_mig_blocker,
                       "State blocked by non-migratable CPU device"
                       " (invtsc flag)");
            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
            if (local_err) {
                error_report_err(local_err);
                error_free(invtsc_mig_blocker);
                return r;
            }
        }
    }

    if (cpu->vmware_cpuid_freq
        /* Guests depend on 0x40000000 to detect this feature, so only expose
         * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
        && cpu->expose_kvm
        && kvm_base == KVM_CPUID_SIGNATURE
        /* TSC clock must be stable and known for this feature. */
        && tsc_is_stable_and_known(env)) {

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | 0x10;
        c->eax = env->tsc_khz;
        /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
         * APIC_BUS_CYCLE_NS */
        c->ebx = 1000000;
        c->ecx = c->edx = 0;

        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        goto fail;
    }

    if (has_xsave) {
        env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }

    max_nested_state_len = kvm_max_nested_state_length();
    if (max_nested_state_len > 0) {
        assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));

        if (cpu_has_vmx(env)) {
            struct kvm_vmx_nested_state_hdr *vmx_hdr;

            env->nested_state = g_malloc0(max_nested_state_len);
            env->nested_state->size = max_nested_state_len;
            env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;

            vmx_hdr = &env->nested_state->hdr.vmx;
            vmx_hdr->vmxon_pa = -1ull;
            vmx_hdr->vmcs12_pa = -1ull;
        }
    }

    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    r = hyperv_init_vcpu(cpu);
    if (r) {
        goto fail;
    }

    return 0;

 fail:
    migrate_del_blocker(invtsc_mig_blocker);

    return r;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cpu->kvm_msr_buf) {
        g_free(cpu->kvm_msr_buf);
        cpu->kvm_msr_buf = NULL;
    }

    if (env->nested_state) {
        g_free(env->nested_state);
        env->nested_state = NULL;
    }

    return 0;
}

void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        int i;
        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
        }

        hyperv_x86_synic_reset(cpu);
    }
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs (non-boot CPUs) that received INIT wait for a SIPI */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}

static int kvm_get_supported_feature_msrs(KVMState *s)
{
    int ret = 0;

    if (kvm_feature_msrs != NULL) {
        return 0;
    }

    if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
        return 0;
    }

    struct kvm_msr_list msr_list;

    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        return ret;
    }

    assert(msr_list.nmsrs > 0);
    kvm_feature_msrs = (struct kvm_msr_list *) \
        g_malloc0(sizeof(msr_list) +
                  msr_list.nmsrs * sizeof(msr_list.indices[0]));

    kvm_feature_msrs->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);

    if (ret < 0) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        g_free(kvm_feature_msrs);
        kvm_feature_msrs = NULL;
        return ret;
    }

    return 0;
}

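/*
 * Probe the MSRs reported by KVM_GET_MSR_INDEX_LIST once and record which
 * optional MSRs are available in the has_msr_* flags.
 */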
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }

        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                switch (kvm_msr_list->indices[i]) {
                case MSR_STAR:
                    has_msr_star = true;
                    break;
                case MSR_VM_HSAVE_PA:
                    has_msr_hsave_pa = true;
                    break;
                case MSR_TSC_AUX:
                    has_msr_tsc_aux = true;
                    break;
                case MSR_TSC_ADJUST:
                    has_msr_tsc_adjust = true;
                    break;
                case MSR_IA32_TSCDEADLINE:
                    has_msr_tsc_deadline = true;
                    break;
                case MSR_IA32_SMBASE:
                    has_msr_smbase = true;
                    break;
                case MSR_SMI_COUNT:
                    has_msr_smi_count = true;
                    break;
                case MSR_IA32_MISC_ENABLE:
                    has_msr_misc_enable = true;
                    break;
                case MSR_IA32_BNDCFGS:
                    has_msr_bndcfgs = true;
                    break;
                case MSR_IA32_XSS:
                    has_msr_xss = true;
                    break;
                case HV_X64_MSR_CRASH_CTL:
                    has_msr_hv_crash = true;
                    break;
                case HV_X64_MSR_RESET:
                    has_msr_hv_reset = true;
                    break;
                case HV_X64_MSR_VP_INDEX:
                    has_msr_hv_vpindex = true;
                    break;
                case HV_X64_MSR_VP_RUNTIME:
                    has_msr_hv_runtime = true;
                    break;
                case HV_X64_MSR_SCONTROL:
                    has_msr_hv_synic = true;
                    break;
                case HV_X64_MSR_STIMER0_CONFIG:
                    has_msr_hv_stimer = true;
                    break;
                case HV_X64_MSR_TSC_FREQUENCY:
                    has_msr_hv_frequencies = true;
                    break;
                case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                    has_msr_hv_reenlightenment = true;
                    break;
                case MSR_IA32_SPEC_CTRL:
                    has_msr_spec_ctrl = true;
                    break;
                case MSR_VIRT_SSBD:
                    has_msr_virt_ssbd = true;
                    break;
                case MSR_IA32_ARCH_CAPABILITIES:
                    has_msr_arch_capabs = true;
                    break;
                case MSR_IA32_CORE_CAPABILITY:
                    has_msr_core_capabs = true;
                    break;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}

static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container covering the whole guest physical address space */
    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* Alias system memory at priority 0 so that everything outside SMRAM
     * falls through to normal RAM.
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* ... SMRAM with higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);

    hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);

    has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
    if (has_exception_payload) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
        if (ret < 0) {
            error_report("kvm: Failed to enable exception payload cap: %s",
                         strerror(-ret));
            return ret;
        }
    }

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    kvm_get_supported_feature_msrs(s);

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel address
     * space and by creating a corresponding e820 entry. We need 4 pages
     * before the BIOS, two of which will be used by the TSS and one for the
     * identity map.
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = machine_kvm_shadow_mem(ms);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
        object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
        pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }

    if (enable_cpu_pm) {
        int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
        int ret;

/* Work around for kernel header with a typo. TODO: fix header and drop. */
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
#endif
        if (disable_exits) {
            disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
                              KVM_X86_DISABLE_EXITS_HLT |
                              KVM_X86_DISABLE_EXITS_PAUSE);
        }

        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
                                disable_exits);
        if (ret < 0) {
            error_report("kvm: guest stopping CPU not supported: %s",
                         strerror(-ret));
        }
    }

    return 0;
}

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

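/*
 * Copy one register between KVM and QEMU state; the direction is selected
 * by "set" (non-zero means QEMU -> KVM).
 */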
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416
#define XSAVE_PKRU        672

#define XSAVE_BYTE_OFFSET(word_offset) \
    ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))

#define ASSERT_OFFSET(word_offset, field) \
    QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
                      offsetof(X86XSaveArea, field))

ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
ASSERT_OFFSET(XSAVE_PKRU, pkru_state);

2253static int kvm_put_xsave(X86CPU *cpu)
2254{
2255 CPUX86State *env = &cpu->env;
2256 X86XSaveArea *xsave = env->xsave_buf;
2257
2258 if (!has_xsave) {
2259 return kvm_put_fpu(cpu);
2260 }
2261 x86_cpu_xsave_all_areas(cpu, xsave);
2262
2263 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
2264}
2265
2266static int kvm_put_xcrs(X86CPU *cpu)
2267{
2268 CPUX86State *env = &cpu->env;
2269 struct kvm_xcrs xcrs = {};
2270
2271 if (!has_xcrs) {
2272 return 0;
2273 }
2274
2275 xcrs.nr_xcrs = 1;
2276 xcrs.flags = 0;
2277 xcrs.xcrs[0].xcr = 0;
2278 xcrs.xcrs[0].value = env->xcr0;
2279 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
2280}
2281
2282static int kvm_put_sregs(X86CPU *cpu)
2283{
2284 CPUX86State *env = &cpu->env;
2285 struct kvm_sregs sregs;
2286
2287 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
2288 if (env->interrupt_injected >= 0) {
2289 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
2290 (uint64_t)1 << (env->interrupt_injected % 64);
2291 }
2292
2293 if ((env->eflags & VM_MASK)) {
2294 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2295 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2296 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2297 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2298 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2299 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2300 } else {
2301 set_seg(&sregs.cs, &env->segs[R_CS]);
2302 set_seg(&sregs.ds, &env->segs[R_DS]);
2303 set_seg(&sregs.es, &env->segs[R_ES]);
2304 set_seg(&sregs.fs, &env->segs[R_FS]);
2305 set_seg(&sregs.gs, &env->segs[R_GS]);
2306 set_seg(&sregs.ss, &env->segs[R_SS]);
2307 }
2308
2309 set_seg(&sregs.tr, &env->tr);
2310 set_seg(&sregs.ldt, &env->ldt);
2311
2312 sregs.idt.limit = env->idt.limit;
2313 sregs.idt.base = env->idt.base;
2314 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2315 sregs.gdt.limit = env->gdt.limit;
2316 sregs.gdt.base = env->gdt.base;
2317 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2318
2319 sregs.cr0 = env->cr[0];
2320 sregs.cr2 = env->cr[2];
2321 sregs.cr3 = env->cr[3];
2322 sregs.cr4 = env->cr[4];
2323
2324 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2325 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2326
2327 sregs.efer = env->efer;
2328
2329 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
2330}
2331
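/*
 * MSR accesses are batched: kvm_msr_entry_add() appends entries to the
 * fixed-size cpu->kvm_msr_buf and a single KVM_SET_MSRS or KVM_GET_MSRS
 * ioctl then processes the whole batch.  The ioctl's return value is the
 * number of entries the kernel actually processed, which is why callers
 * below compare it against nmsrs (or assert ret == 1 for single MSRs).
 */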
static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}

static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}

static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
{
    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, index, value);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}

void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
    int ret;

    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
    assert(ret == 1);
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset.  This has to be
 * done before writing any other state, because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
                          cpu->env.msr_ia32_feature_control);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

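/*
 * Write guest MSR state into KVM.  MSRs that have side effects on the
 * guest, or that are too expensive to write on every sync, are only
 * transferred for level >= KVM_PUT_RESET_STATE (reset and incoming
 * migration), not on every runtime synchronization.
 */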
static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    int i;
    int ret;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
    }
#endif

    /* If the host supports the feature MSR, write it down. */
    if (has_msr_arch_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
                          env->features[FEAT_ARCH_CAPABILITIES]);
    }

    if (has_msr_core_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
                          env->features[FEAT_CORE_CAPABILITY]);
    }

    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback.  Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
        }
        if (has_architectural_pmu_version > 0) {
            if (has_architectural_pmu_version > 1) {
                /* Stop the counter.  */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            }

            /* Set the counter values.  */
            for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            if (has_architectural_pmu_version > 1) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
                                  env->msr_global_status);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                                  env->msr_global_ovf_ctrl);

                /* Now start the PMU.  */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
                                  env->msr_fixed_ctr_ctrl);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                                  env->msr_global_ctrl);
            }
        }
        /*
         * Hyper-V partition-wide MSRs: to avoid clearing them on cpu
         * hot-add, only sync them to KVM on the first cpu.
         */
        if (current_cpu == first_cpu) {
            if (has_msr_hv_hypercall) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
                                  env->msr_hv_guest_os_id);
                kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
                                  env->msr_hv_hypercall);
            }
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
                                  env->msr_hv_tsc);
            }
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
                                  env->msr_hv_reenlightenment_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
                                  env->msr_hv_tsc_emulation_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
                                  env->msr_hv_tsc_emulation_status);
            }
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_CRASH_PARAMS; j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);
            }

            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
            && hv_vpindex_settable) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
                              hyperv_vp_index(CPU(cpu)));
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
            int j;

            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);

            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
                              env->msr_hv_synic_control);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
                              env->msr_hv_synic_evt_page);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
                              env->msr_hv_synic_msg_page);

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
                                  env->msr_hv_synic_sint[j]);
            }
        }
        if (has_msr_hv_stimer) {
            int j;

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
                                  env->msr_hv_stimer_config[j]);
            }

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
                                  env->msr_hv_stimer_count[j]);
            }
        }
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);

            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /*
                 * The CPU GPs if we write to a bit above the physical limit
                 * of the host CPU (and KVM emulates that).
                 */
                uint64_t mask = env->mtrr_var[i].mask;
                mask &= phys_mask;

                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
                                  env->mtrr_var[i].base);
                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
            }
        }
        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
            int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
                                                        0x14, 1, R_EAX) & 0x7;

            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
                              env->msr_rtit_ctrl);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
                              env->msr_rtit_status);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
                              env->msr_rtit_output_base);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
                              env->msr_rtit_output_mask);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
                              env->msr_rtit_cr3_match);
            for (i = 0; i < addr_num; i++) {
                kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
                                  env->msr_rtit_addrs[i]);
            }
        }

        /*
         * Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control.
         */
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
                     (uint32_t)e->index, (uint64_t)e->data);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    return 0;
}

static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->xsave_buf;
    int ret;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }
    x86_cpu_xrstor_all_areas(cpu, xsave);

    return 0;
}

static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /*
     * There can only be one pending IRQ set in the bitmap at a time, so
     * try to find it and save its number instead (-1 for none).
     */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
    x86_update_hflags(env);

    return 0;
}

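/*
 * Read MSR state back from KVM.  Every MSR of interest is queued with a
 * placeholder value of 0; KVM_GET_MSRS fills in the data field of each
 * entry, and the switch at the end of the function copies the results
 * back into the corresponding CPUX86State fields.
 */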
static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
    }
    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
    }
#endif
    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (has_architectural_pmu_version > 0) {
        if (has_architectural_pmu_version > 1) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        }
        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);

    /*
     * MTRR masks: each mask consists of 5 parts
     * a  10..0: must be zero
     * b  11   : valid bit
     * c n-1.12: actual mask bits
     * d  51..n: reserved, must be zero
     * e  63.52: reserved, must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * indicated by CPUID[0x80000008].EAX[7:0].
     *
     * If fill_mtrr_mask is set, extend the masks read back from KVM with
     * 1s from bit cpu->phys_bits up to bit 51, so that the guest sees
     * masks consistent with its own physical address width.
     */
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_SMI_COUNT:
            env->msr_smi_count = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG) / 2] =
                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT) / 2] =
                msrs[i].data;
            break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
            env->msr_hv_reenlightenment_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
            env->msr_hv_tsc_emulation_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
            env->msr_hv_tsc_emulation_status = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        case MSR_IA32_SPEC_CTRL:
            env->spec_ctrl = msrs[i].data;
            break;
        case MSR_VIRT_SSBD:
            env->virt_ssbd = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CTL:
            env->msr_rtit_ctrl = msrs[i].data;
            break;
        case MSR_IA32_RTIT_STATUS:
            env->msr_rtit_status = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_BASE:
            env->msr_rtit_output_base = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_MASK:
            env->msr_rtit_output_mask = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CR3_MATCH:
            env->msr_rtit_cr3_match = msrs[i].data;
            break;
        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
            break;
        }
    }

    return 0;
}

static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

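/*
 * Push pending interrupt, exception, NMI and SMI event state into KVM.
 * events.flags tells the kernel which optional blocks are valid; the
 * exception payload fields are only transferred when payload support
 * (has_exception_payload) was detected at initialization.
 */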
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.flags = 0;

    if (has_exception_payload) {
        events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = env->exception_pending;
        events.exception_has_payload = env->exception_has_payload;
        events.exception_payload = env->exception_payload;
    }
    events.exception.nr = env->exception_nr;
    events.exception.injected = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /*
             * As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request.  */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }

        /*
         * Stop SMI delivery on old machine types to avoid a reboot
         * on an inward migration of an old VM.
         */
        if (!cpu->kvm_no_smi_migration) {
            events.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
        env->exception_pending = events.exception.pending;
        env->exception_has_payload = events.exception_has_payload;
        env->exception_payload = events.exception_payload;
    } else {
        env->exception_pending = 0;
        env->exception_has_payload = false;
    }
    env->exception_injected = events.exception.injected;
    env->exception_nr =
        (env->exception_pending || env->exception_injected) ?
        events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_nr == EXCP01_DB) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == EXCP03_INT3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        kvm_reset_exception(env);
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs.  Work around
     * this by updating the debug state once again if single-stepping is
     * on.  Another reason to call kvm_update_guest_debug here is a pending
     * debug trap raised by the guest: on kernels without SET_VCPU_EVENTS
     * we have to reinject it via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

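/*
 * Nested (VMX/SVM) state is transferred as an opaque blob.
 * kvm_max_nested_state_length() reports the largest blob the running
 * kernel can produce; the assert in kvm_put_nested_state and the size
 * reset in kvm_get_nested_state below rely on env->nested_state being
 * sized for that maximum.
 */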
static int kvm_put_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();

    if (!env->nested_state) {
        return 0;
    }

    assert(env->nested_state->size <= max_nested_state_len);
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}

static int kvm_get_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();
    int ret;

    if (!env->nested_state) {
        return 0;
    }

    /*
     * It is possible that migration restored a smaller size into
     * nested_state->size than what the kernel we run on supports.
     * Reset it to the maximum supported size before the ioctl so that
     * KVM_GET_NESTED_STATE can store the complete state.
     */
    env->nested_state->size = max_nested_state_len;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
    if (ret < 0) {
        return ret;
    }

    if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
        env->hflags |= HF_GUEST_MASK;
    } else {
        env->hflags &= ~HF_GUEST_MASK;
    }

    return ret;
}

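/*
 * Ordering note: on reset and incoming migration, nested state and
 * MSR_IA32_FEATURE_CONTROL are pushed first, because forcibly leaving
 * nested mode invalidates vCPU state and would clobber anything written
 * before it.
 */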
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_nested_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }

        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /*
         * We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_nested_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /*
     * Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /*
         * If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will cause a
         * return to userspace as soon as the guest is ready to receive
         * interrupts.
         */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /*
     * We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used.
     */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_nr == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

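/*
 * Decode a KVM_EXIT_DEBUG event.  DR6.BS set means a single-step trap;
 * otherwise bits 0..3 of DR6 name the hardware breakpoint slot that fired,
 * and that slot's R/W field in DR7 (0 = execute, 1 = write, 3 = read/write)
 * distinguishes breakpoints from watchpoints.  A debug exception that QEMU
 * did not request is reinjected into the guest.
 */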
static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == EXCP01_DB) {
        if (arch_info->dr6 & DR6_BS) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n * 4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_nr == -1);

        /* pass to guest */
        kvm_queue_exception(env, arch_info->exception,
                            arch_info->exception == EXCP01_DB,
                            arch_info->dr6);
        env->has_error_code = 0;
    }

    return ret;
}

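/*
 * Build the debug register image for KVM_SET_GUEST_DEBUG.  For hardware
 * slot n, bit 2*n+1 of DR7 (the "2 << (n * 2)" term) is the global enable,
 * the 2-bit type field sits at bit 16 + n*4 and the 2-bit length field at
 * bit 18 + n*4, using the GDB-to-DR7 encodings in type_code[]/len_code[].
 * The initial 0x0600 sets DR7.GE plus the always-one bit 10.
 */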
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n * 4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n * 4));
        }
    }
}

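/* CPUID.1:ECX.VMX[bit 5] indicates whether the host CPU supports VT-x. */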
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be "
                    "running in big real mode\n"
                    "which is not supported on less recent Intel "
                    "processors.\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /*
         * If the kernel can't do irq routing, the interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }

    /*
     * We know at this point that we're using the in-kernel irqchip, so we
     * can use irqfds, and on x86 we know we can use MSI via irqfd and GSI
     * routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /*
         * If the ioapic is in QEMU and the lapics are in KVM, reserve
         * MSI routes for signaling interrupts.
         */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;
    if (machine_kernel_irqchip_split(ms)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}

4222
4223int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
4224 uint32_t flags, uint32_t *dev_id)
4225{
4226 struct kvm_assigned_pci_dev dev_data = {
4227 .segnr = dev_addr->domain,
4228 .busnr = dev_addr->bus,
4229 .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
4230 .flags = flags,
4231 };
4232 int ret;
4233
4234 dev_data.assigned_dev_id =
4235 (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
4236
4237 ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
4238 if (ret < 0) {
4239 return ret;
4240 }
4241
4242 *dev_id = dev_data.assigned_dev_id;
4243
4244 return 0;
4245}
4246
4247int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
4248{
4249 struct kvm_assigned_pci_dev dev_data = {
4250 .assigned_dev_id = dev_id,
4251 };
4252
4253 return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
4254}
4255
4256static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
4257 uint32_t irq_type, uint32_t guest_irq)
4258{
4259 struct kvm_assigned_irq assigned_irq = {
4260 .assigned_dev_id = dev_id,
4261 .guest_irq = guest_irq,
4262 .flags = irq_type,
4263 };
4264
4265 if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
4266 return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
4267 } else {
4268 return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
4269 }
4270}
4271
int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
                                     (use_host_msi ? KVM_DEV_IRQ_HOST_MSI :
                                                     KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by
     * invoking KVM_ASSIGN_SET_MSIX_NR with a NULL parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

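/*
 * MSI-X assignment is a three-step sequence (illustrative sketch; nr and
 * virq[] are placeholders, error handling omitted): declare the number of
 * vectors, bind each vector to a previously allocated virq, then enable:
 *
 *     kvm_device_msix_init_vectors(s, dev_id, nr);
 *     for (i = 0; i < nr; i++) {
 *         kvm_device_msix_set_vector(s, dev_id, i, virq[i]);
 *     }
 *     kvm_device_msix_assign(s, dev_id);
 */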
int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}

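/*
 * Translate an MSI route through the IOMMU's interrupt remapping, if one
 * is present, before the route is handed to KVM, so the kernel sees the
 * already-remapped address/data pair.  Returns non-zero if remapping
 * rejects the message.
 */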
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        if (!class->int_remap) {
            return 0;
        }

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst, dev ?
                               pci_requester_id(dev) :
                               X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}

typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);

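/*
 * IEC notifier: the IOMMU invalidated (part of) its interrupt entry
 * cache, so re-read every tracked device's current MSI/MSI-X message and
 * push the refreshed translation into KVM's routing table.
 */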
static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSI-X is disabled for the device, or the
             * specific vector is masked out.  There is no point in
             * updating the route, so skip it.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* For the first time we do add route, add ourselves into
         * IOMMU's IEC notify list if needed. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}

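/*
 * A virq is being released: drop the matching housekeeping entry (if
 * any) so stale routes are not refreshed by the IEC notifier above.
 */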
int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;
    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    /* x86 never maps MSI data values back to GSIs; reaching this
     * function indicates a programming error. */
    abort();
}
