1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38#include "qemu/osdep.h"
39#include "panic.h"
40#include "qemu-common.h"
41#include "x86_decode.h"
42#include "x86.h"
43#include "x86_emu.h"
44#include "x86_mmu.h"
45#include "x86_flags.h"
46#include "vmcs.h"
47#include "vmx.h"
48
49void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,
50 int direction, int size, uint32_t count);
51
/*
 * EXEC_2OP_FLAGS_CMD: generic two-operand ALU dispatcher.
 *
 * Fetches op[0] and op[1] of the decoded instruction, computes
 * "v1 cmd v2" at the decoded operand size (1, 2 or 4 bytes), optionally
 * writes the result back through op[0].ptr (save_res), and updates the
 * arithmetic flags via FLAGS_FUNC##<bits>.  `cmd` may be a compound
 * token sequence (e.g. "+get_CF(env)+" for ADC).
 *
 * NOTE(review): the final '}' carries a stray '\' line continuation, so
 * the blank line that follows the macro is textually part of it --
 * harmless today, but worth removing.
 */
#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
{ \
    fetch_operands(env, decode, 2, true, true, false); \
    switch (decode->operand_size) { \
    case 1: \
    { \
        uint8_t v1 = (uint8_t)decode->op[0].val; \
        uint8_t v2 = (uint8_t)decode->op[1].val; \
        uint8_t diff = v1 cmd v2; \
        if (save_res) { \
            write_val_ext(env, decode->op[0].ptr, diff, 1); \
        } \
        FLAGS_FUNC##8(env, v1, v2, diff); \
        break; \
    } \
    case 2: \
    { \
        uint16_t v1 = (uint16_t)decode->op[0].val; \
        uint16_t v2 = (uint16_t)decode->op[1].val; \
        uint16_t diff = v1 cmd v2; \
        if (save_res) { \
            write_val_ext(env, decode->op[0].ptr, diff, 2); \
        } \
        FLAGS_FUNC##16(env, v1, v2, diff); \
        break; \
    } \
    case 4: \
    { \
        uint32_t v1 = (uint32_t)decode->op[0].val; \
        uint32_t v2 = (uint32_t)decode->op[1].val; \
        uint32_t diff = v1 cmd v2; \
        if (save_res) { \
            write_val_ext(env, decode->op[0].ptr, diff, 4); \
        } \
        FLAGS_FUNC##32(env, v1, v2, diff); \
        break; \
    } \
    default: \
        VM_PANIC("bad size\n"); \
    } \
} \
93
94target_ulong read_reg(CPUX86State *env, int reg, int size)
95{
96 switch (size) {
97 case 1:
98 return x86_reg(env, reg)->lx;
99 case 2:
100 return x86_reg(env, reg)->rx;
101 case 4:
102 return x86_reg(env, reg)->erx;
103 case 8:
104 return x86_reg(env, reg)->rrx;
105 default:
106 abort();
107 }
108 return 0;
109}
110
111void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
112{
113 switch (size) {
114 case 1:
115 x86_reg(env, reg)->lx = val;
116 break;
117 case 2:
118 x86_reg(env, reg)->rx = val;
119 break;
120 case 4:
121 x86_reg(env, reg)->rrx = (uint32_t)val;
122 break;
123 case 8:
124 x86_reg(env, reg)->rrx = val;
125 break;
126 default:
127 abort();
128 }
129}
130
131target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
132{
133 target_ulong val;
134
135 switch (size) {
136 case 1:
137 val = *(uint8_t *)reg_ptr;
138 break;
139 case 2:
140 val = *(uint16_t *)reg_ptr;
141 break;
142 case 4:
143 val = *(uint32_t *)reg_ptr;
144 break;
145 case 8:
146 val = *(uint64_t *)reg_ptr;
147 break;
148 default:
149 abort();
150 }
151 return val;
152}
153
/*
 * Store `val` through `reg_ptr`, a host pointer (cast to target_ulong)
 * into the guest register file.
 *
 * The size-4 case deliberately stores a full 64-bit word containing the
 * 32-bit-truncated value: 32-bit GPR writes clear the upper half of the
 * 64-bit register -- assumes reg_ptr addresses a 64-bit register slot
 * in that case (true for the register file this is used with).
 */
void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
{
    switch (size) {
    case 1:
        *(uint8_t *)reg_ptr = val;
        break;
    case 2:
        *(uint16_t *)reg_ptr = val;
        break;
    case 4:
        *(uint64_t *)reg_ptr = (uint32_t)val;
        break;
    case 8:
        *(uint64_t *)reg_ptr = val;
        break;
    default:
        abort();
    }
}
173
/*
 * True when `ptr` is a host pointer into env->regs[] (the decoder
 * resolved a register operand), false when it is a guest address.
 * Relies on unsigned wrap-around: a ptr below &env->regs[0] makes the
 * subtraction wrap to a huge value, failing the range check.
 */
static bool is_host_reg(struct CPUX86State *env, target_ulong ptr)
{
    return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs);
}
178
179void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val, int size)
180{
181 if (is_host_reg(env, ptr)) {
182 write_val_to_reg(ptr, val, size);
183 return;
184 }
185 vmx_write_mem(env_cpu(env), ptr, &val, size);
186}
187
/*
 * Read `bytes` of guest memory at `ptr` into the per-vCPU bounce buffer
 * and return a pointer to it.  The buffer is reused by every call, so
 * the result is only valid until the next read_mmio()/string op.
 */
uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes)
{
    vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes);
    return env->hvf_mmio_buf;
}
193
194
195target_ulong read_val_ext(struct CPUX86State *env, target_ulong ptr, int size)
196{
197 target_ulong val;
198 uint8_t *mmio_ptr;
199
200 if (is_host_reg(env, ptr)) {
201 return read_val_from_reg(ptr, size);
202 }
203
204 mmio_ptr = read_mmio(env, ptr, size);
205 switch (size) {
206 case 1:
207 val = *(uint8_t *)mmio_ptr;
208 break;
209 case 2:
210 val = *(uint16_t *)mmio_ptr;
211 break;
212 case 4:
213 val = *(uint32_t *)mmio_ptr;
214 break;
215 case 8:
216 val = *(uint64_t *)mmio_ptr;
217 break;
218 default:
219 VM_PANIC("bad size\n");
220 break;
221 }
222 return val;
223}
224
/*
 * Resolve the first `n` decoded operands and (optionally) load their
 * values.
 *
 * Per-operand behaviour by type:
 *  - IMMEDIATE: value already present from decoding; nothing to do.
 *  - REG:       ptr was set by the decoder; read the register value if
 *               requested.
 *  - RM:        compute the modrm effective address/ptr, then read
 *               through it (register or guest memory) if requested.
 *  - OFFSET:    ptr holds a raw displacement; convert it to a linear
 *               address with the DS default segment, then read.
 *
 * val_op0..val_op2 select which of op[0..2] get their .val populated.
 */
static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode,
                           int n, bool val_op0, bool val_op1, bool val_op2)
{
    int i;
    bool calc_val[3] = {val_op0, val_op1, val_op2};

    for (i = 0; i < n; i++) {
        switch (decode->op[i].type) {
        case X86_VAR_IMMEDIATE:
            break;
        case X86_VAR_REG:
            VM_PANIC_ON(!decode->op[i].ptr);
            if (calc_val[i]) {
                decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
                                                      decode->operand_size);
            }
            break;
        case X86_VAR_RM:
            calc_modrm_operand(env, decode, &decode->op[i]);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        case X86_VAR_OFFSET:
            decode->op[i].ptr = decode_linear_addr(env, decode,
                                                   decode->op[i].ptr,
                                                   R_DS);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        default:
            break;
        }
    }
}
263
/* MOV: copy op[1] into op[0] at the decoded operand size; advance RIP. */
static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, false, true, false);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);

    env->eip += decode->len;
}
272
/* ADD: op[0] += op[1]; updates OSZAPC. */
static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    env->eip += decode->len;
}
278
/* OR: op[0] |= op[1]; logic-op flag update (CF=OF=0). */
static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
    env->eip += decode->len;
}
284
/* ADC: the cmd token expands to "v1 + get_CF(env) + v2". */
static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
    env->eip += decode->len;
}
290
/* SBB: the cmd token expands to "v1 - get_CF(env) - v2". */
static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
    env->eip += decode->len;
}
296
/* AND: op[0] &= op[1]; logic-op flag update (CF=OF=0). */
static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
    env->eip += decode->len;
}
302
/* SUB: op[0] -= op[1]; updates OSZAPC. */
static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
    env->eip += decode->len;
}
308
/* XOR: op[0] ^= op[1]; logic-op flag update (CF=OF=0). */
static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
    env->eip += decode->len;
}
314
/*
 * NEG: two's-complement negate the r/m operand (held in op[1] here).
 * Flags are computed as for "0 - original": since val == 0 - original,
 * the SUB flag helpers are invoked with (0, 0 - val, val), i.e.
 * (minuend 0, subtrahend original, result val).
 */
static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
{

    int32_t val;
    fetch_operands(env, decode, 2, true, true, false);

    val = 0 - sign(decode->op[1].val, decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);

    if (4 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
    } else if (2 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
    } else if (1 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
    } else {
        VM_PANIC("bad op size\n");
    }


    env->eip += decode->len;
}
337
/* CMP: compute op[0] - op[1] for flags only (result discarded). */
static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    env->eip += decode->len;
}
343
/*
 * INC: synthesize a zero immediate so the macro computes "v1 + 1 + 0".
 * Uses SET_FLAGS_OSZAP_* (no C): INC leaves CF unchanged.
 */
static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);

    env->eip += decode->len;
}
353
/*
 * DEC: synthesize a zero immediate so the macro computes "v1 - 1 - 0".
 * Uses SET_FLAGS_OSZAP_* (no C): DEC leaves CF unchanged.
 */
static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
    env->eip += decode->len;
}
362
/* TEST: op[0] & op[1] for flags only (result discarded). */
static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
    env->eip += decode->len;
}
368
/* NOT: bitwise-invert the single operand in place; flags untouched. */
static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 1, true, false, false);

    write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
                  decode->operand_size);
    env->eip += decode->len;
}
377
/*
 * MOVZX: zero-extend an 8-bit (opcode 0f b6) or 16-bit (0f b7) r/m
 * source into the destination register at the original operand size.
 * operand_size is temporarily narrowed so calc_modrm_operand() resolves
 * the source at its true width.
 * NOTE(review): decode->operand_size is left set to src_op_size on
 * return; callers appear not to reuse it after execution -- confirm.
 */
void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 1, false, false, false);

    if (0xb6 == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }
    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    env->eip += decode->len;
}
397
/*
 * OUT: write AL/AX/EAX to an I/O port.  Opcodes e6/e7 take the port
 * from an immediate (already in op[0].val); ee/ef take it from DX.
 * Direction 1 = out, count 1 (string forms are handled by exec_outs).
 */
static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
{
    switch (decode->opcode[0]) {
    case 0xe6:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1);
        break;
    case 0xe7:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    case 0xee:
        hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1);
        break;
    case 0xef:
        hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    default:
        VM_PANIC("Bad out opcode\n");
        break;
    }
    env->eip += decode->len;
}
421
/*
 * IN: read an I/O port into AL/AX/EAX.  Opcodes e4/e5 take the port
 * from an immediate; ec/ed from DX.  Word/dword reads land in a local
 * first: a 16-bit read updates only AX, while a 32-bit read stores the
 * zero-extended value into RAX (clearing the upper half).
 */
static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong val = 0;
    switch (decode->opcode[0]) {
    case 0xe4:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1);
        break;
    case 0xe5:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0,
                      decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }
        break;
    case 0xec:
        hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1);
        break;
    case 0xed:
        hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }

        break;
    default:
        VM_PANIC("Bad in opcode\n");
        break;
    }

    env->eip += decode->len;
}
457
458static inline void string_increment_reg(struct CPUX86State *env, int reg,
459 struct x86_decode *decode)
460{
461 target_ulong val = read_reg(env, reg, decode->addressing_size);
462 if (env->eflags & DF_MASK) {
463 val -= decode->operand_size;
464 } else {
465 val += decode->operand_size;
466 }
467 write_reg(env, reg, val, decode->addressing_size);
468}
469
/*
 * Run a single-iteration string handler under a REP prefix: RCX (at
 * addressing size) counts down and is written back after every
 * iteration so a guest fault mid-string leaves a consistent count.
 * `rep` carries the prefix byte only for CMPS/SCAS (callers pass 0
 * otherwise): REP/REPE stops when ZF clears, REPNE stops when ZF sets.
 */
static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode,
                              void (*func)(struct CPUX86State *env,
                                           struct x86_decode *ins), int rep)
{
    target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
    while (rcx--) {
        func(env, decode);
        write_reg(env, R_ECX, rcx, decode->addressing_size);
        if ((PREFIX_REP == rep) && !get_ZF(env)) {
            break;
        }
        if ((PREFIX_REPN == rep) && get_ZF(env)) {
            break;
        }
    }
}
486
/*
 * One INS iteration: read one operand from port DX into the bounce
 * buffer, store it at ES:[RDI], then step RDI per DF.
 */
static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr = linear_addr_size(env_cpu(env), RDI(env),
                                         decode->addressing_size, R_ES);

    hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 0,
                  decode->operand_size, 1);
    vmx_write_mem(env_cpu(env), addr, env->hvf_mmio_buf,
                  decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}
499
500static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
501{
502 if (decode->rep) {
503 string_rep(env, decode, exec_ins_single, 0);
504 } else {
505 exec_ins_single(env, decode);
506 }
507
508 env->eip += decode->len;
509}
510
/*
 * One OUTS iteration: load one operand from DS:[RSI] (segment override
 * honoured by decode_linear_addr) into the bounce buffer, write it to
 * port DX, then step RSI per DF.
 */
static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);

    vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, addr,
                 decode->operand_size);
    hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 1,
                  decode->operand_size, 1);

    string_increment_reg(env, R_ESI, decode);
}
522
523static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
524{
525 if (decode->rep) {
526 string_rep(env, decode, exec_outs_single, 0);
527 } else {
528 exec_outs_single(env, decode);
529 }
530
531 env->eip += decode->len;
532}
533
/*
 * One MOVS iteration: copy one operand from DS:[RSI] (override-aware)
 * to ES:[RDI], then step both index registers per DF.
 */
static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong src_addr;
    target_ulong dst_addr;
    target_ulong val;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(env_cpu(env), RDI(env),
                                decode->addressing_size, R_ES);

    val = read_val_ext(env, src_addr, decode->operand_size);
    write_val_ext(env, dst_addr, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}
550
551static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
552{
553 if (decode->rep) {
554 string_rep(env, decode, exec_movs_single, 0);
555 } else {
556 exec_movs_single(env, decode);
557 }
558
559 env->eip += decode->len;
560}
561
/*
 * One CMPS iteration: load both memory operands, turn them into
 * immediates so EXEC_2OP_FLAGS_CMD skips re-resolving them, subtract
 * for flags only, then step RSI and RDI per DF.
 */
static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong src_addr;
    target_ulong dst_addr;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(env_cpu(env), RDI(env),
                                decode->addressing_size, R_ES);

    decode->op[0].type = X86_VAR_IMMEDIATE;
    decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);

    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}
581
582static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)
583{
584 if (decode->rep) {
585 string_rep(env, decode, exec_cmps_single, decode->rep);
586 } else {
587 exec_cmps_single(env, decode);
588 }
589 env->eip += decode->len;
590}
591
592
/*
 * One STOS iteration: store AL/AX/EAX (read at operand size) to
 * ES:[RDI], then step RDI per DF.
 */
static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;
    target_ulong val;

    addr = linear_addr_size(env_cpu(env), RDI(env),
                            decode->addressing_size, R_ES);
    val = read_reg(env, R_EAX, decode->operand_size);
    vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}
605
606
607static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)
608{
609 if (decode->rep) {
610 string_rep(env, decode, exec_stos_single, 0);
611 } else {
612 exec_stos_single(env, decode);
613 }
614
615 env->eip += decode->len;
616}
617
/*
 * One SCAS iteration: load ES:[RDI] into op[1] as an immediate and
 * subtract it from op[0] (RAX, set up by exec_scas) for flags only,
 * then step RDI per DF.
 */
static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;

    addr = linear_addr_size(env_cpu(env), RDI(env),
                            decode->addressing_size, R_ES);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size);

    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    string_increment_reg(env, R_EDI, decode);
}
630
/*
 * SCAS: compare the accumulator against ES:[RDI].  op[0] is forced to
 * the RAX register here once, then each iteration only reloads op[1].
 * REP/REPNE honour the ZF stop condition via string_rep().
 */
static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[0].type = X86_VAR_REG;
    decode->op[0].reg = R_EAX;
    if (decode->rep) {
        string_rep(env, decode, exec_scas_single, decode->rep);
    } else {
        exec_scas_single(env, decode);
    }

    env->eip += decode->len;
}
643
/*
 * One LODS iteration: load one operand from DS:[RSI] (override-aware)
 * into AL/AX/EAX, then step RSI per DF.
 */
static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;
    target_ulong val = 0;

    addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size);
    write_reg(env, R_EAX, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
}
655
656static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
657{
658 if (decode->rep) {
659 string_rep(env, decode, exec_lods_single, 0);
660 } else {
661 exec_lods_single(env, decode);
662 }
663
664 env->eip += decode->len;
665}
666
/*
 * Emulate RDMSR for the MSRs this emulator knows about; the 64-bit
 * result is split into EAX (low) / EDX (high).  Unknown MSRs read as 0
 * rather than injecting #GP.  STAR/LSTAR/CSTAR abort: syscall MSRs are
 * not expected to be read through this path.
 */
void simulate_rdmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    CPUState *cs = env_cpu(env);
    uint32_t msr = ECX(env);
    uint64_t val = 0;

    switch (msr) {
    case MSR_IA32_TSC:
        val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    case MSR_EFER:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
        break;
    case MSR_FSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
        break;
    case MSR_GSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
        break;
    case MSR_KERNELGSBASE:
        /* kernel GS base is stashed in the VMCS host-FS-base slot;
         * simulate_wrmsr writes the same slot. */
        val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    /* variable-range MTRRs: base/mask pairs, index = (msr - first) / 2 */
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_CORE_THREAD_COUNT:
        /* low 16 bits: logical CPU count; bits 31..16: core count */
        val = cs->nr_threads * cs->nr_cores;
        val |= ((uint32_t)cs->nr_cores << 16);
        break;
    default:
        /* unknown MSRs read as zero instead of faulting */
        val = 0;
        break;
    }

    RAX(env) = (uint32_t)val;
    RDX(env) = (uint32_t)(val >> 32);
}
762
/* RDMSR: delegate to simulate_rdmsr(), then advance RIP. */
static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_rdmsr(env_cpu(env));
    env->eip += decode->len;
}
768
/*
 * Emulate WRMSR: data comes from EDX:EAX, the MSR index from ECX.
 * Writes to unknown MSRs are silently ignored.  TSC writes are dropped.
 * STAR/LSTAR/CSTAR abort (not expected through this path), and the
 * kernel GS base is stashed in the VMCS host-FS-base slot, matching
 * simulate_rdmsr.
 */
void simulate_wrmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t msr = ECX(env);
    uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);

    switch (msr) {
    case MSR_IA32_TSC:
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
        break;
    case MSR_FSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
        break;
    case MSR_GSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
        break;
    case MSR_KERNELGSBASE:
        wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_EFER:

        wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
        /* enabling NX changes PTE interpretation: flush the TLB */
        if (data & MSR_EFER_NXE) {
            hv_vcpu_invalidate_tlb(cpu->hvf_fd);
        }
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = data;
        break;
    default:
        break;
    }





    /* no registers to update: WRMSR produces no result */
}
857
/* WRMSR: delegate to simulate_wrmsr(), then advance RIP. */
static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_wrmsr(env_cpu(env));
    env->eip += decode->len;
}
863
864
865
866
867
/*
 * Common core of BT/BTC/BTS/BTR.
 *
 * `flag` selects the operation (see the exec_bt* callers below):
 *   0 = BT (test only), 1 = BTC (complement), 2 = BTS (set),
 *   3 = BTR (reset).
 * For register operands the bit index wraps at the operand width; for
 * memory operands the whole-word part of the bit offset displaces the
 * effective address first (x86 bit-string addressing).  CF receives
 * the original bit value.  64-bit forms (REX) are unimplemented.
 */
static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)
{
    int32_t displacement;
    uint8_t index;
    bool cf;
    int mask = (4 == decode->operand_size) ? 0x1f : 0xf;

    VM_PANIC_ON(decode->rex.rex);

    fetch_operands(env, decode, 2, false, true, false);
    index = decode->op[1].val & mask;

    if (decode->op[0].type != X86_VAR_REG) {
        if (4 == decode->operand_size) {
            displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
            decode->op[0].ptr += 4 * displacement;
        } else if (2 == decode->operand_size) {
            displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
            decode->op[0].ptr += 2 * displacement;
        } else {
            VM_PANIC("bt 64bit\n");
        }
    }
    decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
                                     decode->operand_size);
    cf = (decode->op[0].val >> index) & 0x01;

    switch (flag) {
    case 0:
        /* BT: no write-back */
        set_CF(env, cf);
        return;
    case 1:
        decode->op[0].val ^= (1u << index);
        break;
    case 2:
        decode->op[0].val |= (1u << index);
        break;
    case 3:
        decode->op[0].val &= ~(1u << index);
        break;
    }
    write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
                  decode->operand_size);
    set_CF(env, cf);
}
913
/* BT: test bit, set CF (do_bt flag 0). */
static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 0);
    env->eip += decode->len;
}
919
/* BTC: complement bit, set CF to old value (do_bt flag 1). */
static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 1);
    env->eip += decode->len;
}
925
/* BTR: reset (clear) bit, set CF to old value (do_bt flag 3). */
static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 3);
    env->eip += decode->len;
}
931
/* BTS: set bit, set CF to old value (do_bt flag 2). */
static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 2);
    env->eip += decode->len;
}
937
/*
 * SHL: shift op[0] left by op[1] (count masked to 5 bits, per x86).
 * CF gets the last bit shifted out; OF = CF ^ new sign bit.  For 8/16
 * bit operands a count larger than the width yields 0 with CF=OF=0.
 * A zero count leaves the operand and all flags untouched.
 */
void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);

    count = decode->op[1].val;
    count &= 0x1f;
    if (!count) {
        goto exit;
    }

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t res = 0;
        if (count <= 8) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (8 - count)) & 0x1;
            of = cf ^ (res >> 7);
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);
        SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res = 0;


        if (count <= 16) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (16 - count)) & 0x1;
            of = cf ^ (res >> 15);
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);
        SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res = decode->op[0].val << count;

        write_val_ext(env, decode->op[0].ptr, res, 4);
        SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
        cf = (decode->op[0].val >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    default:
        abort();
    }

exit:

    env->eip += decode->len;
}
1001
/*
 * MOVSX: sign-extend an 8-bit (opcode 0f be) or 16-bit (0f bf) r/m
 * source into the destination register at the original operand size.
 * operand_size is temporarily narrowed so calc_modrm_operand() resolves
 * the source at its true width; sign() performs the extension.
 * NOTE(review): decode->operand_size is left set to src_op_size on
 * return, as in exec_movzx -- confirm callers do not reuse it.
 */
void exec_movsx(CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 2, false, false, false);

    if (0xbe == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }

    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
                             src_op_size);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    env->eip += decode->len;
}
1024
/*
 * ROR: rotate op[0] right by op[1].
 * For 8/16-bit operands the count is reduced modulo the width; when the
 * reduced count is zero but the raw count still covers the width
 * (count & 0x18 / 0x10), only CF/OF are refreshed from the unchanged
 * value, matching hardware.  OF = xor of the two top result bits.
 */
void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit6, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
                bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
                SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
            }
        } else {
            count &= 0x7;
            res = ((uint8_t)decode->op[0].val >> count) |
                  ((uint8_t)decode->op[0].val << (8 - count));
            write_val_ext(env, decode->op[0].ptr, res, 1);
            bit6 = (res >> 6) & 1;
            bit7 = (res >> 7) & 1;

            SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
        }
        break;
    }
    case 2:
    {
        uint32_t bit14, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
                bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;

                SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
            }
        } else {
            count &= 0x0f;
            res = ((uint16_t)decode->op[0].val >> count) |
                  ((uint16_t)decode->op[0].val << (16 - count));
            write_val_ext(env, decode->op[0].ptr, res, 2);

            bit14 = (res >> 14) & 1;
            bit15 = (res >> 15) & 1;

            SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
        }
        break;
    }
    case 4:
    {
        uint32_t bit31, bit30;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val >> count) |
                  ((uint32_t)decode->op[0].val << (32 - count));
            write_val_ext(env, decode->op[0].ptr, res, 4);

            bit31 = (res >> 31) & 1;
            bit30 = (res >> 30) & 1;

            SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
        }
        break;
    }
    }
    env->eip += decode->len;
}
1102
/*
 * ROL: rotate op[0] left by op[1].
 * Mirrors exec_ror: counts are reduced modulo the operand width, with
 * the width-multiple case refreshing only CF/OF.  CF = bit rotated into
 * position 0; OF = CF ^ new top bit.
 */
void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit0, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit0 = ((uint8_t)decode->op[0].val & 1);
                bit7 = ((uint8_t)decode->op[0].val >> 7);
                SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
            }
        } else {
            count &= 0x7;
            res = ((uint8_t)decode->op[0].val << count) |
                  ((uint8_t)decode->op[0].val >> (8 - count));

            write_val_ext(env, decode->op[0].ptr, res, 1);



            bit0 = (res & 1);
            bit7 = (res >> 7);
            SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
        }
        break;
    }
    case 2:
    {
        uint32_t bit0, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit0 = ((uint16_t)decode->op[0].val & 0x1);
                bit15 = ((uint16_t)decode->op[0].val >> 15);

                SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
            }
        } else {
            count &= 0x0f;
            res = ((uint16_t)decode->op[0].val << count) |
                  ((uint16_t)decode->op[0].val >> (16 - count));

            write_val_ext(env, decode->op[0].ptr, res, 2);
            bit0 = (res & 0x1);
            bit15 = (res >> 15);

            SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
        }
        break;
    }
    case 4:
    {
        uint32_t bit0, bit31;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val << count) |
                  ((uint32_t)decode->op[0].val >> (32 - count));

            write_val_ext(env, decode->op[0].ptr, res, 4);
            bit0 = (res & 0x1);
            bit31 = (res >> 31);

            SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
        }
        break;
    }
    }
    env->eip += decode->len;
}
1183
1184
/*
 * RCL: rotate left through carry (operand plus CF forms a width+1 bit
 * ring).  Counts are reduced modulo 9/17 for 8/16-bit operands; the
 * count == 1 cases avoid shifts by the full (promoted) width.  CF gets
 * the bit rotated out; OF = CF ^ new top bit.
 */
void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;
        count %= 9;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_8 << 1) | get_CF(env);
        } else {
            res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
                  (op1_8 >> (9 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (8 - count)) & 0x01;
        of = cf ^ (res >> 7);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res;
        uint16_t op1_16 = decode->op[0].val;

        count %= 17;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_16 << 1) | get_CF(env);
        } else if (count == 16) {
            res = (get_CF(env) << 15) | (op1_16 >> 1);
        } else {
            res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
                  (op1_16 >> (17 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (16 - count)) & 0x1;
        of = cf ^ (res >> 15);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 << 1) | get_CF(env);
        } else {
            res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
                  (op1_32 >> (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    env->eip += decode->len;
}
1269
/*
 * RCR: rotate right through carry (operand plus CF forms a width+1 bit
 * ring).  Counts are reduced modulo 9/17 for 8/16-bit operands.  CF
 * gets the last bit rotated out; OF is derived from the xor of the two
 * top bits of the result.
 */
void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;

        count %= 9;
        if (!count) {
            break;
        }
        res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
              (op1_8 << (9 - count));

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (count - 1)) & 0x1;
        of = (((res << 1) ^ res) >> 7) & 0x1;
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t op1_16 = decode->op[0].val;
        uint16_t res;

        count %= 17;
        if (!count) {
            break;
        }
        res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
              (op1_16 << (17 - count));

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (count - 1)) & 0x1;
        of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1;

        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 >> 1) | (get_CF(env) << 31);
        } else {
            res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
                  (op1_32 << (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (count - 1)) & 0x1;
        of = ((res << 1) ^ res) >> 31;
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    env->eip += decode->len;
}
1344
1345static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
1346{
1347 fetch_operands(env, decode, 2, true, true, false);
1348
1349 write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
1350 decode->operand_size);
1351 write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
1352 decode->operand_size);
1353
1354 env->eip += decode->len;
1355}
1356
/*
 * XADD: exchange and add.  dest = dest + src; src = old dest.
 */
static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
{
    /* The macro stores dest + src into op[0] and sets the OSZAPC flags;
     * op[0].val still holds the original destination value afterwards. */
    EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    /* Complete the exchange: original destination goes into the source. */
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    env->eip += decode->len;
}
1365
/*
 * Table of emulation handlers, one per decoded instruction command.
 * It is copied into the command-indexed _cmd_handler[] array by
 * init_cmd_handler() so dispatch is a direct array lookup.
 */
static struct cmd_handler {
    enum x86_decode_cmd cmd;                                    /* decoded opcode class */
    void (*handler)(struct CPUX86State *env, struct x86_decode *ins);
} handlers[] = {
    {X86_DECODE_CMD_INVL, NULL,},   /* invalid: no handler, reported at dispatch */
    {X86_DECODE_CMD_MOV, exec_mov},
    {X86_DECODE_CMD_ADD, exec_add},
    {X86_DECODE_CMD_OR, exec_or},
    {X86_DECODE_CMD_ADC, exec_adc},
    {X86_DECODE_CMD_SBB, exec_sbb},
    {X86_DECODE_CMD_AND, exec_and},
    {X86_DECODE_CMD_SUB, exec_sub},
    {X86_DECODE_CMD_NEG, exec_neg},
    {X86_DECODE_CMD_XOR, exec_xor},
    {X86_DECODE_CMD_CMP, exec_cmp},
    {X86_DECODE_CMD_INC, exec_inc},
    {X86_DECODE_CMD_DEC, exec_dec},
    {X86_DECODE_CMD_TST, exec_tst},
    {X86_DECODE_CMD_NOT, exec_not},
    {X86_DECODE_CMD_MOVZX, exec_movzx},
    {X86_DECODE_CMD_OUT, exec_out},
    {X86_DECODE_CMD_IN, exec_in},
    {X86_DECODE_CMD_INS, exec_ins},
    {X86_DECODE_CMD_OUTS, exec_outs},
    {X86_DECODE_CMD_RDMSR, exec_rdmsr},
    {X86_DECODE_CMD_WRMSR, exec_wrmsr},
    {X86_DECODE_CMD_BT, exec_bt},
    {X86_DECODE_CMD_BTR, exec_btr},
    {X86_DECODE_CMD_BTC, exec_btc},
    {X86_DECODE_CMD_BTS, exec_bts},
    {X86_DECODE_CMD_SHL, exec_shl},
    {X86_DECODE_CMD_ROL, exec_rol},
    {X86_DECODE_CMD_ROR, exec_ror},
    {X86_DECODE_CMD_RCR, exec_rcr},
    {X86_DECODE_CMD_RCL, exec_rcl},

    {X86_DECODE_CMD_MOVS, exec_movs},
    {X86_DECODE_CMD_CMPS, exec_cmps},
    {X86_DECODE_CMD_STOS, exec_stos},
    {X86_DECODE_CMD_SCAS, exec_scas},
    {X86_DECODE_CMD_LODS, exec_lods},
    {X86_DECODE_CMD_MOVSX, exec_movsx},
    {X86_DECODE_CMD_XCHG, exec_xchg},
    {X86_DECODE_CMD_XADD, exec_xadd},
};
1411
1412static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];
1413
1414static void init_cmd_handler()
1415{
1416 int i;
1417 for (i = 0; i < ARRAY_SIZE(handlers); i++) {
1418 _cmd_handler[handlers[i].cmd] = handlers[i];
1419 }
1420}
1421
/*
 * Copy the guest general-purpose register state from the HVF vCPU into
 * QEMU's CPUX86State, then derive the lazy-flags state from RFLAGS and
 * refresh EIP.  Counterpart of store_regs().
 */
void load_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    /* The low eight registers are read individually: QEMU's R_E* register
     * numbering does not follow HVF's HV_X86_* enumeration order. */
    RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
    RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
    RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);
    RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
    RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);
    RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
    RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);
    RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
    /* R8..R15: assumes HV_X86_RAX + 8 .. + 15 name the high registers --
     * NOTE(review): relies on contiguous HVF enum values; confirm. */
    for (i = 8; i < 16; i++) {
        RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
    }

    env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
    rflags_to_lflags(env);  /* rebuild the emulator's lazy-flag state */
    env->eip = rreg(cpu->hvf_fd, HV_X86_RIP);
}
1444
/*
 * Write QEMU's CPUX86State general-purpose registers back to the HVF
 * vCPU, materializing RFLAGS from the lazy-flags state first.
 * Counterpart of load_regs().
 */
void store_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    /* Low eight registers written individually; QEMU's register numbering
     * does not follow HVF's HV_X86_* enumeration order. */
    wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
    wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
    wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
    wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
    wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
    wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
    wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
    wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
    /* R8..R15: assumes HV_X86_RAX + 8 .. + 15 name the high registers --
     * NOTE(review): relies on contiguous HVF enum values; confirm. */
    for (i = 8; i < 16; i++) {
        wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
    }

    lflags_to_rflags(env);  /* fold lazy flags into the RFLAGS image */
    wreg(cpu->hvf_fd, HV_X86_RFLAGS, env->eflags);
    /* RIP goes through macvm_set_rip() rather than a plain wreg() --
     * presumably it performs extra VMCS bookkeeping; see its definition. */
    macvm_set_rip(cpu, env->eip);
}
1467
1468bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)
1469{
1470
1471
1472
1473
1474 if (!_cmd_handler[ins->cmd].handler) {
1475 printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip,
1476 ins->cmd, ins->opcode[0],
1477 ins->opcode_len > 1 ? ins->opcode[1] : 0);
1478 env->eip += ins->len;
1479 return true;
1480 }
1481
1482 _cmd_handler[ins->cmd].handler(env, ins);
1483 return true;
1484}
1485
/*
 * One-time emulator initialization: populate the instruction dispatch
 * table used by exec_instruction().
 */
void init_emu(void)
{
    init_cmd_handler();
}
1490