/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "exec/ioport.h"
#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "migration/migration.h"
#include "exec/memattrs.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

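/*
 * Cached results of probing the kernel at init time: which optional MSRs
 * and capabilities this KVM exposes. Filled in by kvm_arch_init() and
 * kvm_arch_init_vcpu(), and consulted by the get/put state routines below.
 */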
static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_async_pf_en;
static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static bool has_msr_kvm_steal_time;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
static bool has_msr_mtrr;
static bool has_msr_xss;

static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

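/*
 * Read the guest TSC from KVM with a single-entry KVM_GET_MSRS call.
 * While the VM is stopped the value is cached (tsc_valid), so repeated
 * reads do not go back to the kernel.
 */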
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(void *arg)
{
    CPUState *cpu = arg;

    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu);
        }
    }
}

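/*
 * Issue KVM_GET_SUPPORTED_CPUID with room for 'max' entries. The kernel
 * reports E2BIG when the buffer is too small; in that case NULL is
 * returned so that get_supported_cpuid() can retry with a doubled size.
 */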
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    return cpuid;
}

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}


/* Returns the value of a specific register on a cpuid entry */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find a matching entry for function/index in a kvm_cpuid2 struct */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }

    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER and
         * the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}

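/*
 * Pages reported as hardware-poisoned by the host kernel (SIGBUS with
 * BUS_MCEERR_AR/AO). They are kept on a list so that a machine reset can
 * remap them with fresh memory instead of the poisoned frames.
 */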
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

353
354static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
355 int *max_banks)
356{
357 int r;
358
359 r = kvm_check_extension(s, KVM_CAP_MCE);
360 if (r > 0) {
361 *max_banks = r;
362 return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
363 }
364 return -ENOSYS;
365}
366
367static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
368{
369 CPUX86State *env = &cpu->env;
370 uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
371 MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
372 uint64_t mcg_status = MCG_STATUS_MCIP;
373
374 if (code == BUS_MCEERR_AR) {
375 status |= MCI_STATUS_AR | 0x134;
376 mcg_status |= MCG_STATUS_EIPV;
377 } else {
378 status |= 0xc0;
379 mcg_status |= MCG_STATUS_RIPV;
380 }
381 cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
382 (MCM_ADDR_PHYS << 6) | 0xc,
383 cpu_x86_support_mca_broadcast(env) ?
384 MCE_INJECT_BROADCAST : 0);
385}
386
387static void hardware_memory_error(void)
388{
389 fprintf(stderr, "Hardware memory error!\n");
390 exit(1);
391}
392
int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
            !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    X86CPU *cpu = X86_CPU(first_cpu);

    if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        hwaddr paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
                                                addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

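/*
 * Fallback for kernels without KVM_CAP_VCPU_EVENTS: a machine check that
 * is still queued as a pending exception is re-injected through the
 * legacy KVM_X86_SET_MCE interface instead.
 */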
static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_hypercall_available(X86CPU *cpu)
{
    return cpu->hyperv_vapic ||
           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
}

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           (hyperv_hypercall_available(cpu) ||
            cpu->hyperv_time ||
            cpu->hyperv_relaxed_timing ||
            cpu->hyperv_crash ||
            cpu->hyperv_reset ||
            cpu->hyperv_vpindex ||
            cpu->hyperv_runtime);
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100

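/*
 * Build the CPUID table for the guest (Hyper-V leaves first when enabled,
 * then the KVM paravirt leaves, then the standard, extended and Centaur
 * leaves) and install it with KVM_SET_CPUID2, followed by per-vcpu MCE,
 * TSC frequency and xsave buffer setup.
 */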
int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } QEMU_PACKED cpuid_data;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int r;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    if (hyperv_enabled(cpu)) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
        if (!cpu->hyperv_vendor_id) {
            memcpy(signature, "Microsoft Hv", 12);
        } else {
            size_t len = strlen(cpu->hyperv_vendor_id);

            if (len > 12) {
                error_report("hv-vendor-id truncated to 12 characters");
                len = 12;
            }
            memset(signature, 0, 12);
            memcpy(signature, cpu->hyperv_vendor_id, len);
        }
        c->eax = HYPERV_CPUID_MIN;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_INTERFACE;
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_FEATURES;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        }
        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
            has_msr_hv_vapic = true;
        }
        if (cpu->hyperv_time &&
            kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
            c->eax |= 0x200; /* reference TSC page available */
            has_msr_hv_tsc = true;
        }
        if (cpu->hyperv_crash && has_msr_hv_crash) {
            c->edx |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
        }
        c->edx |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        if (cpu->hyperv_reset && has_msr_hv_reset) {
            c->eax |= HV_X64_MSR_RESET_AVAILABLE;
        }
        if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
            c->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
        }
        if (cpu->hyperv_runtime && has_msr_hv_runtime) {
            c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
        }
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (has_msr_hv_vapic) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = cpu->hyperv_spinlock_attempts;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];

        has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

        has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);

        has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t ver;

        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
        if ((ver & 0xff) > 0) {
            has_msr_architectural_pmu = true;
            num_architectural_pmu_counters = (ver & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_counters = MAX_GP_COUNTERS;
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
                         unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
    if (c && (c->edx & 1 << 8) && invtsc_mig_blocker == NULL) {
        /* for migration */
        error_setg(&invtsc_mig_blocker,
                   "State blocked by non-migratable CPU device"
                   " (invtsc flag)");
        migrate_add_blocker(invtsc_mig_blocker);
        /* for savevm */
        vmstate_x86_cpu.unmigratable = 1;
    }

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
    if (r && env->tsc_khz) {
        r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
        if (r < 0) {
            fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
            return r;
        }
    }

    if (has_xsave) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }

    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        has_msr_mtrr = true;
    }

    return 0;
}

void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs are reset while still in KVM_MP_STATE_UNINITIALIZED */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}

static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore.
         */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
         * memory. Allocate at least a safe amount of 1K.
         */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
                    has_msr_tsc_aux = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
                    has_msr_tsc_adjust = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
                    has_msr_smbase = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
                    has_msr_bndcfgs = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
                    has_msr_xss = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
                    has_msr_hv_crash = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
                    has_msr_hv_reset = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
                    has_msr_hv_vpindex = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
                    has_msr_hv_runtime = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}

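/*
 * SMM support: a dedicated address space layers the machine's SMRAM
 * region on top of normal system memory. It is registered with KVM as
 * address space slot 1, which vcpus use while in System Management Mode.
 */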
static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container covering the whole 64-bit address space */
    memory_region_init(&smram_as_root, OBJECT(kvm_state),
                       "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* An alias of regular system memory forms the low-priority
     * background of the SMM address space.
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* SMRAM, if present, overlaps system memory at a higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

#ifdef KVM_CAP_XSAVE
    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code
     * directly. In order to use vm86 mode, an EPT identity map and a TSS
     * are needed. Since these must be part of guest physical memory, we
     * need to allocate them, both by setting their start addresses in the
     * kernel and by creating a corresponding e820 entry. We need 4 pages
     * before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = machine_kvm_shadow_mem(ms);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }
    return 0;
}

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

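/* Copy a register in the direction selected by 'set': QEMU to KVM when
 * set is non-zero, KVM to QEMU otherwise.
 */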
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}


static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].XMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].XMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

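/* Offsets, in 32-bit words, of the state components inside the region[]
 * of struct kvm_xsave; they mirror the hardware XSAVE area layout.
 */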
#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416

static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xsave *xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    uint8_t *xmm, *ymmh, *zmmh;
    int i, r;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
    xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
           sizeof env->fpregs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_BNDREGS], env->bnd_regs,
           sizeof env->bnd_regs);
    memcpy(&xsave->region[XSAVE_BNDCSR], &env->bndcs_regs,
           sizeof(env->bndcs_regs));
    memcpy(&xsave->region[XSAVE_OPMASK], env->opmask_regs,
           sizeof env->opmask_regs);

    xmm = (uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
    ymmh = (uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
    zmmh = (uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
    for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
        stq_p(xmm,     env->xmm_regs[i].XMM_Q(0));
        stq_p(xmm+8,   env->xmm_regs[i].XMM_Q(1));
        stq_p(ymmh,    env->xmm_regs[i].XMM_Q(2));
        stq_p(ymmh+8,  env->xmm_regs[i].XMM_Q(3));
        stq_p(zmmh,    env->xmm_regs[i].XMM_Q(4));
        stq_p(zmmh+8,  env->xmm_regs[i].XMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].XMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].XMM_Q(7));
    }

#ifdef TARGET_X86_64
    memcpy(&xsave->region[XSAVE_Hi16_ZMM], &env->xmm_regs[16],
           16 * sizeof env->xmm_regs[16]);
#endif
    r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
    return r;
}

static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = 1,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state, because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entry;
    } msr_data;

    kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
                      cpu->env.msr_ia32_feature_control);

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = 1,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[150];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0, i;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif

    /*
     * The following MSRs have side effects on the guest or defeat KVM code
     * paths, so they are only written on reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
        if (has_msr_pv_eoi_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
                              env->pv_eoi_en_msr);
        }
        if (has_msr_kvm_steal_time) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_STEAL_TIME,
                              env->steal_time_msr);
        }
        if (has_msr_architectural_pmu) {
            /* Stop the counters.  */
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL, 0);

            /* Set the counter values.  */
            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_counters; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_set(&msrs[n++], MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_STATUS,
                              env->msr_global_status);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                              env->msr_global_ovf_ctrl);

            /* Now start the PMU.  */
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL,
                              env->msr_fixed_ctr_ctrl);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
                              env->msr_global_ctrl);
        }
        if (has_msr_hv_hypercall) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID,
                              env->msr_hv_guest_os_id);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL,
                              env->msr_hv_hypercall);
        }
        if (has_msr_hv_vapic) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (has_msr_hv_tsc) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
                              env->msr_hv_tsc);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);
            }
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
                              HV_X64_MSR_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
                              env->msr_hv_runtime);
        }
        if (has_msr_mtrr) {
            kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                kvm_msr_entry_set(&msrs[n++],
                                  MSR_MTRRphysBase(i), env->mtrr_var[i].base);
                kvm_msr_entry_set(&msrs[n++],
                                  MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
            }
        }

        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control.
         */
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = n,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].XMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].XMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xsave *xsave = env->kvm_xsave_buf;
    int ret, i;
    const uint8_t *xmm, *ymmh, *zmmh;
    uint16_t cwd, swd, twd;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
    swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
    twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
    env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
           sizeof env->fpregs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->bnd_regs, &xsave->region[XSAVE_BNDREGS],
           sizeof env->bnd_regs);
    memcpy(&env->bndcs_regs, &xsave->region[XSAVE_BNDCSR],
           sizeof(env->bndcs_regs));
    memcpy(env->opmask_regs, &xsave->region[XSAVE_OPMASK],
           sizeof env->opmask_regs);

    xmm = (const uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
    ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
    zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
    for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
        env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8);
        env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh);
        env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8);
        env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh);
        env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8);
        env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16);
        env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24);
    }

#ifdef TARGET_X86_64
    memcpy(&env->xmm_regs[16], &xsave->region[XSAVE_Hi16_ZMM],
           16 * sizeof env->xmm_regs[16]);
#endif
    return 0;
}

static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[150];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    msrs[n++].index = MSR_PAT;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }
    if (has_msr_tsc_aux) {
        msrs[n++].index = MSR_TSC_AUX;
    }
    if (has_msr_tsc_adjust) {
        msrs[n++].index = MSR_TSC_ADJUST;
    }
    if (has_msr_tsc_deadline) {
        msrs[n++].index = MSR_IA32_TSCDEADLINE;
    }
    if (has_msr_misc_enable) {
        msrs[n++].index = MSR_IA32_MISC_ENABLE;
    }
    if (has_msr_smbase) {
        msrs[n++].index = MSR_IA32_SMBASE;
    }
    if (has_msr_feature_control) {
        msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
    }
    if (has_msr_bndcfgs) {
        msrs[n++].index = MSR_IA32_BNDCFGS;
    }
    if (has_msr_xss) {
        msrs[n++].index = MSR_IA32_XSS;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }
    if (has_msr_pv_eoi_en) {
        msrs[n++].index = MSR_KVM_PV_EOI_EN;
    }
    if (has_msr_kvm_steal_time) {
        msrs[n++].index = MSR_KVM_STEAL_TIME;
    }
    if (has_msr_architectural_pmu) {
        msrs[n++].index = MSR_CORE_PERF_FIXED_CTR_CTRL;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_CTRL;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_STATUS;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
            msrs[n++].index = MSR_CORE_PERF_FIXED_CTR0 + i;
        }
        for (i = 0; i < num_architectural_pmu_counters; i++) {
            msrs[n++].index = MSR_P6_PERFCTR0 + i;
            msrs[n++].index = MSR_P6_EVNTSEL0 + i;
        }
    }

    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }

    if (has_msr_hv_hypercall) {
        msrs[n++].index = HV_X64_MSR_HYPERCALL;
        msrs[n++].index = HV_X64_MSR_GUEST_OS_ID;
    }
    if (has_msr_hv_vapic) {
        msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
    }
    if (has_msr_hv_tsc) {
        msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
            msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
        }
    }
    if (has_msr_hv_runtime) {
        msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
    }
    if (has_msr_mtrr) {
        msrs[n++].index = MSR_MTRRdefType;
        msrs[n++].index = MSR_MTRRfix64K_00000;
        msrs[n++].index = MSR_MTRRfix16K_80000;
        msrs[n++].index = MSR_MTRRfix16K_A0000;
        msrs[n++].index = MSR_MTRRfix4K_C0000;
        msrs[n++].index = MSR_MTRRfix4K_C8000;
        msrs[n++].index = MSR_MTRRfix4K_D0000;
        msrs[n++].index = MSR_MTRRfix4K_D8000;
        msrs[n++].index = MSR_MTRRfix4K_E0000;
        msrs[n++].index = MSR_MTRRfix4K_E8000;
        msrs[n++].index = MSR_MTRRfix4K_F0000;
        msrs[n++].index = MSR_MTRRfix4K_F8000;
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            msrs[n++].index = MSR_MTRRphysBase(i);
            msrs[n++].index = MSR_MTRRphysMask(i);
        }
    }

1908 msr_data.info = (struct kvm_msrs) {
1909 .nmsrs = n,
1910 };
1911
1912 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
1913 if (ret < 0) {
1914 return ret;
1915 }
1916
1917 for (i = 0; i < ret; i++) {
1918 uint32_t index = msrs[i].index;
1919 switch (index) {
1920 case MSR_IA32_SYSENTER_CS:
1921 env->sysenter_cs = msrs[i].data;
1922 break;
1923 case MSR_IA32_SYSENTER_ESP:
1924 env->sysenter_esp = msrs[i].data;
1925 break;
1926 case MSR_IA32_SYSENTER_EIP:
1927 env->sysenter_eip = msrs[i].data;
1928 break;
1929 case MSR_PAT:
1930 env->pat = msrs[i].data;
1931 break;
1932 case MSR_STAR:
1933 env->star = msrs[i].data;
1934 break;
1935#ifdef TARGET_X86_64
1936 case MSR_CSTAR:
1937 env->cstar = msrs[i].data;
1938 break;
1939 case MSR_KERNELGSBASE:
1940 env->kernelgsbase = msrs[i].data;
1941 break;
1942 case MSR_FMASK:
1943 env->fmask = msrs[i].data;
1944 break;
1945 case MSR_LSTAR:
1946 env->lstar = msrs[i].data;
1947 break;
1948#endif
1949 case MSR_IA32_TSC:
1950 env->tsc = msrs[i].data;
1951 break;
1952 case MSR_TSC_AUX:
1953 env->tsc_aux = msrs[i].data;
1954 break;
1955 case MSR_TSC_ADJUST:
1956 env->tsc_adjust = msrs[i].data;
1957 break;
1958 case MSR_IA32_TSCDEADLINE:
1959 env->tsc_deadline = msrs[i].data;
1960 break;
1961 case MSR_VM_HSAVE_PA:
1962 env->vm_hsave = msrs[i].data;
1963 break;
1964 case MSR_KVM_SYSTEM_TIME:
1965 env->system_time_msr = msrs[i].data;
1966 break;
1967 case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        }
    }

    return 0;
}

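/* Sync the vcpu's multiprocessing state (runnable, halted, waiting for
 * SIPI, ...) between QEMU and the kernel.
 */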
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

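/* With an in-kernel irqchip the local APIC is emulated by KVM; copy its
 * register state into (get) or out of (put) QEMU's APIC device model.
 */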
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

static int kvm_put_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;

    if (apic && kvm_irqchip_in_kernel()) {
        kvm_put_apic_state(apic, &kapic);

        return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
    }
    return 0;
}

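/* Push pending exception, interrupt, NMI and SMI state into the kernel.
 * The NMI-pending and SIPI-vector fields are only marked valid on reset
 * or full-state writes, not during runtime updates.
 */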
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;
    events.exception.pad = 0;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
    events.nmi.pad = 0;

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request.  */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        events.flags |= KVM_VCPUEVENT_VALID_SMM;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

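/* Copy DR0-DR3, DR6 and DR7 between QEMU and the kernel, when the kernel
 * supports the debug-registers interface.
 */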
static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

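/* Write the vcpu state into the kernel.  'level' controls how much is
 * written: runtime-volatile state only, state that changes across a reset,
 * or everything (KVM_PUT_RUNTIME_STATE / KVM_PUT_RESET_STATE /
 * KVM_PUT_FULL_STATE).
 */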
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
        ret = kvm_put_apic(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

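/* Runs immediately before entering the guest via KVM_RUN, without the
 * iothread lock held (hence the explicit lock/unlock below): inject
 * pending NMI/SMI, and with a userspace irqchip also inject external
 * interrupts and request interrupt-window exits as needed.
 */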
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI and SMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

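/* Runs on return from KVM_RUN: fold the exit-time SMM and interrupt-enable
 * flags back into the CPU state and update the APIC TPR and base from the
 * run structure.
 */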
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

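/* Handle events that do not require entering the guest.  Returns the
 * current halted state, i.e. non-zero if the vcpu should stay outside
 * KVM_RUN.
 */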
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

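/* The guest executed HLT: keep the vcpu halted in userspace unless an
 * unmasked external interrupt or an NMI is pending.
 */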
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

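/* Software breakpoints are planted by saving the original byte at the
 * target address and replacing it with an int3 (0xcc) opcode; removal
 * verifies the int3 is still there before restoring the saved byte.
 */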
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

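/* x86 provides four hardware debug address registers (DR0-DR3); mirror the
 * breakpoints programmed into them here.
 */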
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

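    /* Vector 1 is #DB.  DR6 bit 14 (BS) is set for single-step traps,
     * while bits 0-3 report which hardware breakpoint triggered.
     */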
    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
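                    /* The R/Wn field at DR7 bits 16+4n gives the breakpoint
                     * type: 0 = execute, 1 = write, 3 = read/write.
                     */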
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
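        /* DR7: bit 9 (GE, exact breakpoints) plus the reserved,
         * always-one bit 10.
         */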
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
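            /* Set the Gn global-enable bit plus the type and length
             * fields for slot n.
             */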
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be "
                    "running in big real mode\n"
                    "which is not supported on less recent Intel "
                    "processors.\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;
}

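/* Legacy KVM PCI device assignment (the KVM_ASSIGN_* ioctl family,
 * superseded by VFIO) follows.
 */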
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}

int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}

static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid argument: kernels that
     * implement the ioctl reject the NULL pointer with -EFAULT rather
     * than -ENOTTY.
     */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}