1
2
3
4
5
6
7
8
9
10
11#include "qemu/osdep.h"
12#include "cpu.h"
13#include "exec/address-spaces.h"
14#include "exec/exec-all.h"
15#include "exec/ioport.h"
16#include "qemu-common.h"
17#include "strings.h"
18#include "sysemu/accel.h"
19#include "sysemu/whpx.h"
20#include "sysemu/sysemu.h"
21#include "sysemu/cpus.h"
22#include "qemu/main-loop.h"
23#include "hw/boards.h"
24#include "qemu/error-report.h"
25#include "qemu/queue.h"
26#include "qapi/error.h"
27#include "migration/blocker.h"
28
29#include <WinHvPlatform.h>
30#include <WinHvEmulation.h>
31
/* Global WHPX accelerator state: RAM quota taken from the machine's RAM
 * size and the Hyper-V partition handle all vCPUs run in. */
struct whpx_state {
    uint64_t mem_quota;                 /* set from ms->ram_size at init */
    WHV_PARTITION_HANDLE partition;     /* handle to the WHPX partition */
};
36
/*
 * Registers synchronized between QEMU and the hypervisor.  The order of
 * this table is a contract: whpx_register_set.values[] and the idx-based
 * marshalling in whpx_set_registers()/whpx_get_registers() index it
 * directly, and the asserts there verify the slot order.
 */
static const WHV_REGISTER_NAME whpx_register_names[] = {

    /* X64 General purpose registers */
    WHvX64RegisterRax,
    WHvX64RegisterRcx,
    WHvX64RegisterRdx,
    WHvX64RegisterRbx,
    WHvX64RegisterRsp,
    WHvX64RegisterRbp,
    WHvX64RegisterRsi,
    WHvX64RegisterRdi,
    WHvX64RegisterR8,
    WHvX64RegisterR9,
    WHvX64RegisterR10,
    WHvX64RegisterR11,
    WHvX64RegisterR12,
    WHvX64RegisterR13,
    WHvX64RegisterR14,
    WHvX64RegisterR15,
    WHvX64RegisterRip,
    WHvX64RegisterRflags,

    /* X64 Segment registers */
    WHvX64RegisterEs,
    WHvX64RegisterCs,
    WHvX64RegisterSs,
    WHvX64RegisterDs,
    WHvX64RegisterFs,
    WHvX64RegisterGs,
    WHvX64RegisterLdtr,
    WHvX64RegisterTr,

    /* X64 Table registers */
    WHvX64RegisterIdtr,
    WHvX64RegisterGdtr,

    /* X64 Control registers */
    WHvX64RegisterCr0,
    WHvX64RegisterCr2,
    WHvX64RegisterCr3,
    WHvX64RegisterCr4,
    WHvX64RegisterCr8,

    /*
     * X64 Debug registers are not synchronized here.
     */




    /* X64 Floating point and vector registers */




    WHvX64RegisterXmm0,
    WHvX64RegisterXmm1,
    WHvX64RegisterXmm2,
    WHvX64RegisterXmm3,
    WHvX64RegisterXmm4,
    WHvX64RegisterXmm5,
    WHvX64RegisterXmm6,
    WHvX64RegisterXmm7,
    WHvX64RegisterXmm8,
    WHvX64RegisterXmm9,
    WHvX64RegisterXmm10,
    WHvX64RegisterXmm11,
    WHvX64RegisterXmm12,
    WHvX64RegisterXmm13,
    WHvX64RegisterXmm14,
    WHvX64RegisterXmm15,
    WHvX64RegisterFpMmx0,
    WHvX64RegisterFpMmx1,
    WHvX64RegisterFpMmx2,
    WHvX64RegisterFpMmx3,
    WHvX64RegisterFpMmx4,
    WHvX64RegisterFpMmx5,
    WHvX64RegisterFpMmx6,
    WHvX64RegisterFpMmx7,
    WHvX64RegisterFpControlStatus,
    WHvX64RegisterXmmControlStatus,

    /* X64 MSRs */
    WHvX64RegisterTsc,
    WHvX64RegisterEfer,
#ifdef TARGET_X86_64
    WHvX64RegisterKernelGsBase,
#endif
    WHvX64RegisterApicBase,
    /* WHvX64RegisterPat is not synchronized. */
    WHvX64RegisterSysenterCs,
    WHvX64RegisterSysenterEip,
    WHvX64RegisterSysenterEsp,
    WHvX64RegisterStar,
#ifdef TARGET_X86_64
    WHvX64RegisterLstar,
    WHvX64RegisterCstar,
    WHvX64RegisterSfmask,
#endif

    /*
     * Interrupt / event registers are handled separately in
     * whpx_vcpu_pre_run() (e.g. WHvRegisterPendingInterruption,
     * WHvX64RegisterDeliverabilityNotifications).
     */




};
145
/* One value slot per entry of whpx_register_names[], in the same order. */
struct whpx_register_set {
    WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
};
149
/* Per-vCPU WHPX state, stored in CPUState::hax_vcpu (slot reused). */
struct whpx_vcpu {
    WHV_EMULATOR_HANDLE emulator;   /* instruction emulator instance */
    bool window_registered;         /* interrupt-window exit requested */
    bool interruptable;             /* false while in an interrupt shadow */
    uint64_t tpr;                   /* cached CR8/TPR value */
    uint64_t apic_base;             /* cached APIC base MSR value */
    bool interruption_pending;      /* hypervisor still delivering an event */

    /* Exit context filled in by the last WHvRunVirtualProcessor() call. */
    WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
};
161
/* True once the accelerator has been selected/initialized. */
static bool whpx_allowed;

/* Single global instance; WHPX supports one partition per process here. */
struct whpx_state whpx_global;
165
166
167
168
169
170
171static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
172{
173 return (struct whpx_vcpu *)cpu->hax_vcpu;
174}
175
/*
 * Convert a QEMU segment cache entry to the WHPX segment register layout.
 * v86: vm86 mode is active (synthesize flat, DPL3, present data segment).
 * r86: real mode is active (no fixups applied yet, see below).
 */
static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
                                             int r86)
{
    WHV_X64_SEGMENT_REGISTER hs;
    unsigned flags = qs->flags;

    hs.Base = qs->base;
    hs.Limit = qs->limit;
    hs.Selector = qs->selector;

    if (v86) {
        /* vm86 mode: force a present, DPL3, read/write data segment. */
        hs.Attributes = 0;
        hs.SegmentType = 3;
        hs.Present = 1;
        hs.DescriptorPrivilegeLevel = 3;
        hs.NonSystemSegment = 1;

    } else {
        /* QEMU keeps descriptor attributes shifted up by DESC_TYPE_SHIFT. */
        hs.Attributes = (flags >> DESC_TYPE_SHIFT);

        if (r86) {
            /* TODO: real-mode specific fixups (intentionally empty). */
        }
    }

    return hs;
}
203
204static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
205{
206 SegmentCache qs;
207
208 qs.base = hs->Base;
209 qs.limit = hs->Limit;
210 qs.selector = hs->Selector;
211
212 qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;
213
214 return qs;
215}
216
/*
 * Flush the QEMU-side CPU state (GPRs, RIP/RFLAGS, segments, descriptor
 * tables, control registers, SSE/FPU state and MSRs) into the hypervisor
 * for this vCPU.  Values are marshalled into a whpx_register_set whose
 * slots must line up exactly with whpx_register_names[]; the asserts
 * enforce that ordering as idx advances.
 */
static void whpx_set_registers(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt = {0};
    HRESULT hr;
    int idx = 0;
    int i;
    int v86, r86;

    /* Must run on the vCPU's own thread (or while it is stopped). */
    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    v86 = (env->eflags & VM_MASK);
    r86 = !(env->cr[0] & CR0_PE_MASK);

    vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
        vcxt.values[idx].Reg64 = env->regs[idx];
    }

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    vcxt.values[idx++].Reg64 = env->eip;

    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    vcxt.values[idx++].Reg64 = env->eflags;

    /* Translate 6+4 segment registers.  These asserts compare idx against
     * the WHvX64Register* enum values, which coincide with the slot
     * indices for this leading portion of the table. */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
    }

    assert(idx == WHvX64RegisterLdtr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);

    assert(idx == WHvX64RegisterTr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);

    assert(idx == WHvX64RegisterIdtr);
    vcxt.values[idx].Table.Base = env->idt.base;
    vcxt.values[idx].Table.Limit = env->idt.limit;
    idx += 1;

    assert(idx == WHvX64RegisterGdtr);
    vcxt.values[idx].Table.Base = env->gdt.base;
    vcxt.values[idx].Table.Limit = env->gdt.limit;
    idx += 1;

    /* Control registers; CR8 is fed from the cached TPR value. */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    vcxt.values[idx++].Reg64 = env->cr[0];
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    vcxt.values[idx++].Reg64 = env->cr[2];
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    vcxt.values[idx++].Reg64 = env->cr[3];
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    vcxt.values[idx++].Reg64 = env->cr[4];
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    vcxt.values[idx++].Reg64 = vcpu->tpr;

    /* Debug registers are not synchronized. */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    for (i = 0; i < 16; i += 1, idx += 1) {
        vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
        vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
    }

    /* 8 FP/MMX registers; only the 64-bit mantissa part is copied,
     * the exponent/sign part is not synchronized. */
    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);



    }

    /* FP control status register: rebuild TOP and the inverted tag bits. */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
    vcxt.values[idx].FpControlStatus.FpStatus =
        (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    vcxt.values[idx].FpControlStatus.FpTag = 0;
    for (i = 0; i < 8; ++i) {
        vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
    }
    vcxt.values[idx].FpControlStatus.Reserved = 0;
    vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
    vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
    idx += 1;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
    vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
    vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
    idx += 1;

    /* MSRs */
    assert(whpx_register_names[idx] == WHvX64RegisterTsc);
    vcxt.values[idx++].Reg64 = env->tsc;
    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    vcxt.values[idx++].Reg64 = env->efer;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    vcxt.values[idx++].Reg64 = env->kernelgsbase;
#endif

    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    vcxt.values[idx++].Reg64 = vcpu->apic_base;

    /* WHvX64RegisterPat is not synchronized. */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    vcxt.values[idx++].Reg64 = env->sysenter_cs;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    vcxt.values[idx++].Reg64 = env->sysenter_eip;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    vcxt.values[idx++].Reg64 = env->sysenter_esp;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    vcxt.values[idx++].Reg64 = env->star;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    vcxt.values[idx++].Reg64 = env->lstar;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    vcxt.values[idx++].Reg64 = env->cstar;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    vcxt.values[idx++].Reg64 = env->fmask;
#endif

    /* Every slot in whpx_register_names[] must have been filled. */
    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         whpx_register_names,
                                         RTL_NUMBER_OF(whpx_register_names),
                                         &vcxt.values[0]);

    if (FAILED(hr)) {
        error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
                     hr);
    }

    return;
}
370
371static void whpx_get_registers(CPUState *cpu)
372{
373 struct whpx_state *whpx = &whpx_global;
374 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
375 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
376 X86CPU *x86_cpu = X86_CPU(cpu);
377 struct whpx_register_set vcxt;
378 uint64_t tpr, apic_base;
379 HRESULT hr;
380 int idx = 0;
381 int i;
382
383 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
384
385 hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
386 whpx_register_names,
387 RTL_NUMBER_OF(whpx_register_names),
388 &vcxt.values[0]);
389 if (FAILED(hr)) {
390 error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
391 hr);
392 }
393
394
395 for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
396 env->regs[idx] = vcxt.values[idx].Reg64;
397 }
398
399
400 assert(whpx_register_names[idx] == WHvX64RegisterRip);
401 env->eip = vcxt.values[idx++].Reg64;
402 assert(whpx_register_names[idx] == WHvX64RegisterRflags);
403 env->eflags = vcxt.values[idx++].Reg64;
404
405
406 assert(idx == WHvX64RegisterEs);
407 for (i = 0; i < 6; i += 1, idx += 1) {
408 env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
409 }
410
411 assert(idx == WHvX64RegisterLdtr);
412 env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
413 assert(idx == WHvX64RegisterTr);
414 env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
415 assert(idx == WHvX64RegisterIdtr);
416 env->idt.base = vcxt.values[idx].Table.Base;
417 env->idt.limit = vcxt.values[idx].Table.Limit;
418 idx += 1;
419 assert(idx == WHvX64RegisterGdtr);
420 env->gdt.base = vcxt.values[idx].Table.Base;
421 env->gdt.limit = vcxt.values[idx].Table.Limit;
422 idx += 1;
423
424
425 assert(whpx_register_names[idx] == WHvX64RegisterCr0);
426 env->cr[0] = vcxt.values[idx++].Reg64;
427 assert(whpx_register_names[idx] == WHvX64RegisterCr2);
428 env->cr[2] = vcxt.values[idx++].Reg64;
429 assert(whpx_register_names[idx] == WHvX64RegisterCr3);
430 env->cr[3] = vcxt.values[idx++].Reg64;
431 assert(whpx_register_names[idx] == WHvX64RegisterCr4);
432 env->cr[4] = vcxt.values[idx++].Reg64;
433 assert(whpx_register_names[idx] == WHvX64RegisterCr8);
434 tpr = vcxt.values[idx++].Reg64;
435 if (tpr != vcpu->tpr) {
436 vcpu->tpr = tpr;
437 cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
438 }
439
440
441
442
443 assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
444 for (i = 0; i < 16; i += 1, idx += 1) {
445 env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
446 env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
447 }
448
449
450 assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
451 for (i = 0; i < 8; i += 1, idx += 1) {
452 env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
453
454
455
456 }
457
458
459 assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
460 env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
461 env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
462 env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
463 for (i = 0; i < 8; ++i) {
464 env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
465 }
466 env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
467 env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
468 idx += 1;
469
470
471 assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
472 env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
473 idx += 1;
474
475
476 assert(whpx_register_names[idx] == WHvX64RegisterTsc);
477 env->tsc = vcxt.values[idx++].Reg64;
478 assert(whpx_register_names[idx] == WHvX64RegisterEfer);
479 env->efer = vcxt.values[idx++].Reg64;
480#ifdef TARGET_X86_64
481 assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
482 env->kernelgsbase = vcxt.values[idx++].Reg64;
483#endif
484
485 assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
486 apic_base = vcxt.values[idx++].Reg64;
487 if (apic_base != vcpu->apic_base) {
488 vcpu->apic_base = apic_base;
489 cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
490 }
491
492
493
494 assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
495 env->sysenter_cs = vcxt.values[idx++].Reg64;;
496 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
497 env->sysenter_eip = vcxt.values[idx++].Reg64;
498 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
499 env->sysenter_esp = vcxt.values[idx++].Reg64;
500 assert(whpx_register_names[idx] == WHvX64RegisterStar);
501 env->star = vcxt.values[idx++].Reg64;
502#ifdef TARGET_X86_64
503 assert(whpx_register_names[idx] == WHvX64RegisterLstar);
504 env->lstar = vcxt.values[idx++].Reg64;
505 assert(whpx_register_names[idx] == WHvX64RegisterCstar);
506 env->cstar = vcxt.values[idx++].Reg64;
507 assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
508 env->fmask = vcxt.values[idx++].Reg64;
509#endif
510
511
512
513 assert(idx == RTL_NUMBER_OF(whpx_register_names));
514
515 return;
516}
517
518static HRESULT CALLBACK whpx_emu_ioport_callback(
519 void *ctx,
520 WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
521{
522 MemTxAttrs attrs = { 0 };
523 address_space_rw(&address_space_io, IoAccess->Port, attrs,
524 (uint8_t *)&IoAccess->Data, IoAccess->AccessSize,
525 IoAccess->Direction);
526 return S_OK;
527}
528
529static HRESULT CALLBACK whpx_emu_mmio_callback(
530 void *ctx,
531 WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
532{
533 cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
534 ma->Direction);
535 return S_OK;
536}
537
538static HRESULT CALLBACK whpx_emu_getreg_callback(
539 void *ctx,
540 const WHV_REGISTER_NAME *RegisterNames,
541 UINT32 RegisterCount,
542 WHV_REGISTER_VALUE *RegisterValues)
543{
544 HRESULT hr;
545 struct whpx_state *whpx = &whpx_global;
546 CPUState *cpu = (CPUState *)ctx;
547
548 hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
549 RegisterNames, RegisterCount,
550 RegisterValues);
551 if (FAILED(hr)) {
552 error_report("WHPX: Failed to get virtual processor registers,"
553 " hr=%08lx", hr);
554 }
555
556 return hr;
557}
558
559static HRESULT CALLBACK whpx_emu_setreg_callback(
560 void *ctx,
561 const WHV_REGISTER_NAME *RegisterNames,
562 UINT32 RegisterCount,
563 const WHV_REGISTER_VALUE *RegisterValues)
564{
565 HRESULT hr;
566 struct whpx_state *whpx = &whpx_global;
567 CPUState *cpu = (CPUState *)ctx;
568
569 hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
570 RegisterNames, RegisterCount,
571 RegisterValues);
572 if (FAILED(hr)) {
573 error_report("WHPX: Failed to set virtual processor registers,"
574 " hr=%08lx", hr);
575 }
576
577
578
579
580
581 cpu->vcpu_dirty = false;
582
583 return hr;
584}
585
586static HRESULT CALLBACK whpx_emu_translate_callback(
587 void *ctx,
588 WHV_GUEST_VIRTUAL_ADDRESS Gva,
589 WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
590 WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
591 WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
592{
593 HRESULT hr;
594 struct whpx_state *whpx = &whpx_global;
595 CPUState *cpu = (CPUState *)ctx;
596 WHV_TRANSLATE_GVA_RESULT res;
597
598 hr = WHvTranslateGva(whpx->partition, cpu->cpu_index,
599 Gva, TranslateFlags, &res, Gpa);
600 if (FAILED(hr)) {
601 error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
602 } else {
603 *TranslationResult = res.ResultCode;
604 }
605
606 return hr;
607}
608
/* Callback table handed to WHvEmulatorCreateEmulator(): the instruction
 * emulator uses these hooks for port I/O, MMIO, register access and GVA
 * translation.  The per-call ctx is the vCPU's CPUState. */
static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
    .Size = sizeof(WHV_EMULATOR_CALLBACKS),
    .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
    .WHvEmulatorMemoryCallback = whpx_emu_mmio_callback,
    .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
    .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
    .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
};
617
618static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
619{
620 HRESULT hr;
621 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
622 WHV_EMULATOR_STATUS emu_status;
623
624 hr = WHvEmulatorTryMmioEmulation(vcpu->emulator, cpu,
625 &vcpu->exit_ctx.VpContext, ctx,
626 &emu_status);
627 if (FAILED(hr)) {
628 error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
629 return -1;
630 }
631
632 if (!emu_status.EmulationSuccessful) {
633 error_report("WHPX: Failed to emulate MMIO access");
634 return -1;
635 }
636
637 return 0;
638}
639
640static int whpx_handle_portio(CPUState *cpu,
641 WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
642{
643 HRESULT hr;
644 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
645 WHV_EMULATOR_STATUS emu_status;
646
647 hr = WHvEmulatorTryIoEmulation(vcpu->emulator, cpu,
648 &vcpu->exit_ctx.VpContext, ctx,
649 &emu_status);
650 if (FAILED(hr)) {
651 error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
652 return -1;
653 }
654
655 if (!emu_status.EmulationSuccessful) {
656 error_report("WHPX: Failed to emulate PortMMIO access");
657 return -1;
658 }
659
660 return 0;
661}
662
663static int whpx_handle_halt(CPUState *cpu)
664{
665 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
666 int ret = 0;
667
668 qemu_mutex_lock_iothread();
669 if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
670 (env->eflags & IF_MASK)) &&
671 !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
672 cpu->exception_index = EXCP_HLT;
673 cpu->halted = true;
674 ret = 1;
675 }
676 qemu_mutex_unlock_iothread();
677
678 return ret;
679}
680
/*
 * Prepare the vCPU for the next WHvRunVirtualProcessor() call: inject a
 * pending NMI or external interrupt, sync CR8/TPR, and request an
 * interrupt-window exit when an interrupt is pending but cannot be
 * injected yet.  Touches at most 3 registers, set in one batch at the end.
 */
static void whpx_vcpu_pre_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    int irq;
    uint8_t tpr;
    WHV_X64_PENDING_INTERRUPTION_REGISTER new_int = {0};
    UINT32 reg_count = 0;
    WHV_REGISTER_VALUE reg_values[3] = {0};
    WHV_REGISTER_NAME reg_names[3];

    qemu_mutex_lock_iothread();

    /* Inject NMI; SMI requests are acknowledged but not delivered. */
    if (!vcpu->interruption_pending &&
        cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            vcpu->interruptable = false;
            new_int.InterruptionType = WHvX64PendingNmi;
            new_int.InterruptionPending = 1;
            new_int.InterruptionVector = 2;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
        }
    }

    /*
     * Force the vCPU out of its inner loop to process INIT and TPR
     * requests (handled in whpx_vcpu_process_async_events()).
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    /* Get pending hard interruption or replay one that was overwritten.
     * Only possible when no event is already being delivered, we are not
     * in an interrupt shadow, and IF is set. */
    if (!vcpu->interruption_pending &&
        vcpu->interruptable && (env->eflags & IF_MASK)) {
        assert(!new_int.InterruptionPending);
        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                new_int.InterruptionType = WHvX64PendingInterrupt;
                new_int.InterruptionPending = 1;
                new_int.InterruptionVector = irq;
            }
        }
    }

    /* Setup interrupt state if new one was prepared */
    if (new_int.InterruptionPending) {
        reg_values[reg_count].PendingInterruption = new_int;
        reg_names[reg_count] = WHvRegisterPendingInterruption;
        reg_count += 1;
    }

    /* Sync the TPR to the CR8 if was modified during the intercept */
    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        reg_values[reg_count].Reg64 = tpr;
        cpu->exit_request = 1;
        reg_names[reg_count] = WHvX64RegisterCr8;
        reg_count += 1;
    }

    /* Update the state of the interrupt delivery notification */
    if (!vcpu->window_registered &&
        cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
            = 1;
        vcpu->window_registered = 1;
        reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
        reg_count += 1;
    }

    qemu_mutex_unlock_iothread();

    if (reg_count) {
        hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                             reg_names, reg_count, reg_values);
        if (FAILED(hr)) {
            error_report("WHPX: Failed to set interrupt state registers,"
                         " hr=%08lx", hr);
        }
    }

    return;
}
781
782static void whpx_vcpu_post_run(CPUState *cpu)
783{
784 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
785 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
786 X86CPU *x86_cpu = X86_CPU(cpu);
787
788 env->eflags = vcpu->exit_ctx.VpContext.Rflags;
789
790 uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
791 if (vcpu->tpr != tpr) {
792 vcpu->tpr = tpr;
793 qemu_mutex_lock_iothread();
794 cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
795 qemu_mutex_unlock_iothread();
796 }
797
798 vcpu->interruption_pending =
799 vcpu->exit_ctx.VpContext.ExecutionState.InterruptionPending;
800
801 vcpu->interruptable =
802 !vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;
803
804 return;
805}
806
/*
 * Process asynchronous CPU events that must be handled outside the inner
 * run loop: INIT, APIC poll, pending wakeup from halt, SIPI and TPR
 * access reports.  Register state is fetched on demand before events
 * that need it.
 */
static void whpx_vcpu_process_async_events(CPUState *cpu)
{
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    /* INIT outside of SMM resets the CPU; the new state must be pushed
     * to the hypervisor before the next run. */
    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {

        do_cpu_init(x86_cpu);
        cpu->vcpu_dirty = true;
        vcpu->interruptable = true;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }

    /* Wake a halted CPU when an interrupt/NMI becomes deliverable. */
    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->halted = false;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        do_cpu_sipi(x86_cpu);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return;
}
850
851static int whpx_vcpu_run(CPUState *cpu)
852{
853 HRESULT hr;
854 struct whpx_state *whpx = &whpx_global;
855 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
856 int ret;
857
858 whpx_vcpu_process_async_events(cpu);
859 if (cpu->halted) {
860 cpu->exception_index = EXCP_HLT;
861 atomic_set(&cpu->exit_request, false);
862 return 0;
863 }
864
865 qemu_mutex_unlock_iothread();
866 cpu_exec_start(cpu);
867
868 do {
869 if (cpu->vcpu_dirty) {
870 whpx_set_registers(cpu);
871 cpu->vcpu_dirty = false;
872 }
873
874 whpx_vcpu_pre_run(cpu);
875
876 if (atomic_read(&cpu->exit_request)) {
877 whpx_vcpu_kick(cpu);
878 }
879
880 hr = WHvRunVirtualProcessor(whpx->partition, cpu->cpu_index,
881 &vcpu->exit_ctx, sizeof(vcpu->exit_ctx));
882
883 if (FAILED(hr)) {
884 error_report("WHPX: Failed to exec a virtual processor,"
885 " hr=%08lx", hr);
886 ret = -1;
887 break;
888 }
889
890 whpx_vcpu_post_run(cpu);
891
892 switch (vcpu->exit_ctx.ExitReason) {
893 case WHvRunVpExitReasonMemoryAccess:
894 ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
895 break;
896
897 case WHvRunVpExitReasonX64IoPortAccess:
898 ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
899 break;
900
901 case WHvRunVpExitReasonX64InterruptWindow:
902 vcpu->window_registered = 0;
903 break;
904
905 case WHvRunVpExitReasonX64Halt:
906 ret = whpx_handle_halt(cpu);
907 break;
908
909 case WHvRunVpExitReasonCanceled:
910 cpu->exception_index = EXCP_INTERRUPT;
911 ret = 1;
912 break;
913
914 case WHvRunVpExitReasonX64Cpuid: {
915 WHV_REGISTER_VALUE reg_values[5] = {0};
916 WHV_REGISTER_NAME reg_names[5];
917 UINT32 reg_count = 5;
918 UINT64 rip, rax, rcx, rdx, rbx;
919
920 rip = vcpu->exit_ctx.VpContext.Rip +
921 vcpu->exit_ctx.VpContext.InstructionLength;
922 switch (vcpu->exit_ctx.CpuidAccess.Rax) {
923 case 1:
924 rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax;
925
926 rcx =
927 vcpu->exit_ctx.CpuidAccess.DefaultResultRcx |
928 CPUID_EXT_HYPERVISOR;
929
930 rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx;
931 rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx;
932 break;
933 default:
934 rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax;
935 rcx = vcpu->exit_ctx.CpuidAccess.DefaultResultRcx;
936 rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx;
937 rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx;
938 }
939
940 reg_names[0] = WHvX64RegisterRip;
941 reg_names[1] = WHvX64RegisterRax;
942 reg_names[2] = WHvX64RegisterRcx;
943 reg_names[3] = WHvX64RegisterRdx;
944 reg_names[4] = WHvX64RegisterRbx;
945
946 reg_values[0].Reg64 = rip;
947 reg_values[1].Reg64 = rax;
948 reg_values[2].Reg64 = rcx;
949 reg_values[3].Reg64 = rdx;
950 reg_values[4].Reg64 = rbx;
951
952 hr = WHvSetVirtualProcessorRegisters(whpx->partition,
953 cpu->cpu_index,
954 reg_names,
955 reg_count,
956 reg_values);
957
958 if (FAILED(hr)) {
959 error_report("WHPX: Failed to set CpuidAccess state registers,"
960 " hr=%08lx", hr);
961 }
962 ret = 0;
963 break;
964 }
965 case WHvRunVpExitReasonNone:
966 case WHvRunVpExitReasonUnrecoverableException:
967 case WHvRunVpExitReasonInvalidVpRegisterValue:
968 case WHvRunVpExitReasonUnsupportedFeature:
969 case WHvRunVpExitReasonX64MsrAccess:
970 case WHvRunVpExitReasonException:
971 default:
972 error_report("WHPX: Unexpected VP exit code %d",
973 vcpu->exit_ctx.ExitReason);
974 whpx_get_registers(cpu);
975 qemu_mutex_lock_iothread();
976 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
977 qemu_mutex_unlock_iothread();
978 break;
979 }
980
981 } while (!ret);
982
983 cpu_exec_end(cpu);
984 qemu_mutex_lock_iothread();
985 current_cpu = cpu;
986
987 atomic_set(&cpu->exit_request, false);
988
989 return ret < 0;
990}
991
/* vCPU-thread worker: pull registers from WHPX and mark the QEMU copy as
 * the authoritative one until the next run flushes it back. */
static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    whpx_get_registers(cpu);
    cpu->vcpu_dirty = true;
}
997
/* vCPU-thread worker: push the freshly reset QEMU state to WHPX. */
static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
                                               run_on_cpu_data arg)
{
    whpx_set_registers(cpu);
    cpu->vcpu_dirty = false;
}
1004
/* vCPU-thread worker: push the initial QEMU state to WHPX. */
static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    whpx_set_registers(cpu);
    cpu->vcpu_dirty = false;
}
1011
/* vCPU-thread worker: mark state dirty so the loaded VM snapshot is
 * flushed to WHPX before the next run. */
static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                               run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}
1017
1018
1019
1020
1021
1022void whpx_cpu_synchronize_state(CPUState *cpu)
1023{
1024 if (!cpu->vcpu_dirty) {
1025 run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
1026 }
1027}
1028
/* Flush QEMU state to WHPX after a system reset. */
void whpx_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}
1033
/* Flush QEMU state to WHPX after initial machine setup. */
void whpx_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
1038
/* Mark state dirty before loading a VM snapshot. */
void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
1043
1044
1045
1046
1047
/* Installed once on first vCPU creation; WHPX state is not migratable. */
static Error *whpx_migration_blocker;
1049
1050int whpx_init_vcpu(CPUState *cpu)
1051{
1052 HRESULT hr;
1053 struct whpx_state *whpx = &whpx_global;
1054 struct whpx_vcpu *vcpu;
1055 Error *local_error = NULL;
1056
1057
1058
1059
1060 if (whpx_migration_blocker == NULL) {
1061 error_setg(&whpx_migration_blocker,
1062 "State blocked due to non-migratable CPUID feature support,"
1063 "dirty memory tracking support, and XSAVE/XRSTOR support");
1064
1065 (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
1066 if (local_error) {
1067 error_report_err(local_error);
1068 error_free(whpx_migration_blocker);
1069 migrate_del_blocker(whpx_migration_blocker);
1070 return -EINVAL;
1071 }
1072 }
1073
1074 vcpu = g_malloc0(sizeof(struct whpx_vcpu));
1075
1076 if (!vcpu) {
1077 error_report("WHPX: Failed to allocte VCPU context.");
1078 return -ENOMEM;
1079 }
1080
1081 hr = WHvEmulatorCreateEmulator(&whpx_emu_callbacks, &vcpu->emulator);
1082 if (FAILED(hr)) {
1083 error_report("WHPX: Failed to setup instruction completion support,"
1084 " hr=%08lx", hr);
1085 g_free(vcpu);
1086 return -EINVAL;
1087 }
1088
1089 hr = WHvCreateVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
1090 if (FAILED(hr)) {
1091 error_report("WHPX: Failed to create a virtual processor,"
1092 " hr=%08lx", hr);
1093 WHvEmulatorDestroyEmulator(vcpu->emulator);
1094 g_free(vcpu);
1095 return -EINVAL;
1096 }
1097
1098 vcpu->interruptable = true;
1099
1100 cpu->vcpu_dirty = true;
1101 cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
1102
1103 return 0;
1104}
1105
1106int whpx_vcpu_exec(CPUState *cpu)
1107{
1108 int ret;
1109 int fatal;
1110
1111 for (;;) {
1112 if (cpu->exception_index >= EXCP_INTERRUPT) {
1113 ret = cpu->exception_index;
1114 cpu->exception_index = -1;
1115 break;
1116 }
1117
1118 fatal = whpx_vcpu_run(cpu);
1119
1120 if (fatal) {
1121 error_report("WHPX: Failed to exec a virtual processor");
1122 abort();
1123 }
1124 }
1125
1126 return ret;
1127}
1128
1129void whpx_destroy_vcpu(CPUState *cpu)
1130{
1131 struct whpx_state *whpx = &whpx_global;
1132 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
1133
1134 WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
1135 WHvEmulatorDestroyEmulator(vcpu->emulator);
1136 g_free(cpu->hax_vcpu);
1137 return;
1138}
1139
/* Force the vCPU out of WHvRunVirtualProcessor() so the run loop can
 * observe exit_request (produces a WHvRunVpExitReasonCanceled exit). */
void whpx_vcpu_kick(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
}
1145
1146
1147
1148
1149
1150static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
1151 void *host_va, int add, int rom,
1152 const char *name)
1153{
1154 struct whpx_state *whpx = &whpx_global;
1155 HRESULT hr;
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168 if (add) {
1169 hr = WHvMapGpaRange(whpx->partition,
1170 host_va,
1171 start_pa,
1172 size,
1173 (WHvMapGpaRangeFlagRead |
1174 WHvMapGpaRangeFlagExecute |
1175 (rom ? 0 : WHvMapGpaRangeFlagWrite)));
1176 } else {
1177 hr = WHvUnmapGpaRange(whpx->partition,
1178 start_pa,
1179 size);
1180 }
1181
1182 if (FAILED(hr)) {
1183 error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
1184 " Host:%p, hr=%08lx",
1185 (add ? "MAP" : "UNMAP"), name,
1186 (void *)start_pa, (void *)size, host_va, hr);
1187 }
1188}
1189
/*
 * Translate a MemoryRegionSection into a WHPX GPA mapping update.
 * Only RAM-backed regions are mapped; the range is shrunk to host-page
 * alignment and dropped entirely if nothing page-aligned remains.
 */
static void whpx_process_section(MemoryRegionSection *section, int add)
{
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    unsigned int delta;
    uint64_t host_va;

    if (!memory_region_is_ram(mr)) {
        return;
    }

    /* Round the start up to the next host-page boundary... */
    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
    delta &= ~qemu_real_host_page_mask;
    if (delta > size) {
        return;
    }
    start_pa += delta;
    size -= delta;
    /* ...and truncate the size down to a whole number of host pages. */
    size &= qemu_real_host_page_mask;
    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
        return;
    }

    host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
            + section->offset_within_region + delta;

    whpx_update_mapping(start_pa, size, (void *)host_va, add,
                        memory_region_is_rom(mr), mr->name);
}
1220
1221static void whpx_region_add(MemoryListener *listener,
1222 MemoryRegionSection *section)
1223{
1224 memory_region_ref(section->mr);
1225 whpx_process_section(section, 1);
1226}
1227
1228static void whpx_region_del(MemoryListener *listener,
1229 MemoryRegionSection *section)
1230{
1231 whpx_process_section(section, 0);
1232 memory_region_unref(section->mr);
1233}
1234
1235static void whpx_transaction_begin(MemoryListener *listener)
1236{
1237}
1238
1239static void whpx_transaction_commit(MemoryListener *listener)
1240{
1241}
1242
1243static void whpx_log_sync(MemoryListener *listener,
1244 MemoryRegionSection *section)
1245{
1246 MemoryRegion *mr = section->mr;
1247
1248 if (!memory_region_is_ram(mr)) {
1249 return;
1250 }
1251
1252 memory_region_set_dirty(mr, 0, int128_get64(section->size));
1253}
1254
/* Listener keeping the WHPX partition's GPA mappings in sync with QEMU's
 * system address space; registered once from whpx_memory_init(). */
static MemoryListener whpx_memory_listener = {
    .begin = whpx_transaction_begin,
    .commit = whpx_transaction_commit,
    .region_add = whpx_region_add,
    .region_del = whpx_region_del,
    .log_sync = whpx_log_sync,
    .priority = 10,
};
1263
1264static void whpx_memory_init(void)
1265{
1266 memory_listener_register(&whpx_memory_listener, &address_space_memory);
1267}
1268
1269static void whpx_handle_interrupt(CPUState *cpu, int mask)
1270{
1271 cpu->interrupt_request |= mask;
1272
1273 if (!qemu_cpu_is_self(cpu)) {
1274 qemu_cpu_kick(cpu);
1275 }
1276}
1277
1278
1279
1280
1281
1282static int whpx_accel_init(MachineState *ms)
1283{
1284 struct whpx_state *whpx;
1285 int ret;
1286 HRESULT hr;
1287 WHV_CAPABILITY whpx_cap;
1288 UINT32 whpx_cap_size;
1289 WHV_PARTITION_PROPERTY prop;
1290
1291 whpx = &whpx_global;
1292
1293 memset(whpx, 0, sizeof(struct whpx_state));
1294 whpx->mem_quota = ms->ram_size;
1295
1296 hr = WHvGetCapability(WHvCapabilityCodeHypervisorPresent, &whpx_cap,
1297 sizeof(whpx_cap), &whpx_cap_size);
1298 if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
1299 error_report("WHPX: No accelerator found, hr=%08lx", hr);
1300 ret = -ENOSPC;
1301 goto error;
1302 }
1303
1304 hr = WHvCreatePartition(&whpx->partition);
1305 if (FAILED(hr)) {
1306 error_report("WHPX: Failed to create partition, hr=%08lx", hr);
1307 ret = -EINVAL;
1308 goto error;
1309 }
1310
1311 memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1312 prop.ProcessorCount = smp_cpus;
1313 hr = WHvSetPartitionProperty(whpx->partition,
1314 WHvPartitionPropertyCodeProcessorCount,
1315 &prop,
1316 sizeof(WHV_PARTITION_PROPERTY));
1317
1318 if (FAILED(hr)) {
1319 error_report("WHPX: Failed to set partition core count to %d,"
1320 " hr=%08lx", smp_cores, hr);
1321 ret = -EINVAL;
1322 goto error;
1323 }
1324
1325 memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1326 prop.ExtendedVmExits.X64CpuidExit = 1;
1327 hr = WHvSetPartitionProperty(whpx->partition,
1328 WHvPartitionPropertyCodeExtendedVmExits,
1329 &prop,
1330 sizeof(WHV_PARTITION_PROPERTY));
1331
1332 if (FAILED(hr)) {
1333 error_report("WHPX: Failed to enable partition extended X64CpuidExit"
1334 " hr=%08lx", hr);
1335 ret = -EINVAL;
1336 goto error;
1337 }
1338
1339 UINT32 cpuidExitList[] = {1};
1340 hr = WHvSetPartitionProperty(whpx->partition,
1341 WHvPartitionPropertyCodeCpuidExitList,
1342 cpuidExitList,
1343 RTL_NUMBER_OF(cpuidExitList) * sizeof(UINT32));
1344
1345 if (FAILED(hr)) {
1346 error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
1347 hr);
1348 ret = -EINVAL;
1349 goto error;
1350 }
1351
1352 hr = WHvSetupPartition(whpx->partition);
1353 if (FAILED(hr)) {
1354 error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
1355 ret = -EINVAL;
1356 goto error;
1357 }
1358
1359 whpx_memory_init();
1360
1361 cpu_interrupt_handler = whpx_handle_interrupt;
1362
1363 printf("Windows Hypervisor Platform accelerator is operational\n");
1364 return 0;
1365
1366 error:
1367
1368 if (NULL != whpx->partition) {
1369 WHvDeletePartition(whpx->partition);
1370 whpx->partition = NULL;
1371 }
1372
1373
1374 return ret;
1375}
1376
1377int whpx_enabled(void)
1378{
1379 return whpx_allowed;
1380}
1381
1382static void whpx_accel_class_init(ObjectClass *oc, void *data)
1383{
1384 AccelClass *ac = ACCEL_CLASS(oc);
1385 ac->name = "WHPX";
1386 ac->init_machine = whpx_accel_init;
1387 ac->allowed = &whpx_allowed;
1388}
1389
/* QOM type descriptor for the "whpx" accelerator. */
static const TypeInfo whpx_accel_type = {
    .name = ACCEL_CLASS_NAME("whpx"),
    .parent = TYPE_ACCEL,
    .class_init = whpx_accel_class_init,
};
1395
1396static void whpx_type_init(void)
1397{
1398 type_register_static(&whpx_accel_type);
1399}
1400
1401type_init(whpx_type_init);
1402