1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38#include "qemu/osdep.h"
39#include "panic.h"
40#include "qemu-common.h"
41#include "x86_decode.h"
42#include "x86.h"
43#include "x86_emu.h"
44#include "x86_mmu.h"
45#include "x86_flags.h"
46#include "vmcs.h"
47#include "vmx.h"
48
49void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,
50 int direction, int size, uint32_t count);
51
/*
 * Emulate a two-operand ALU operation at 1/2/4-byte operand size:
 * fetch both operands, compute "op0 cmd op1", optionally write the
 * result back through op[0].ptr, and update the arithmetic flags via
 * the size-suffixed FLAGS_FUNC##{8,16,32} helper.  Expanded as a
 * block statement so each case can declare size-specific locals.
 */
#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
{ \
    fetch_operands(env, decode, 2, true, true, false); \
    switch (decode->operand_size) { \
    case 1: \
    { \
        uint8_t v1 = (uint8_t)decode->op[0].val; \
        uint8_t v2 = (uint8_t)decode->op[1].val; \
        uint8_t diff = v1 cmd v2; \
        if (save_res) { \
            write_val_ext(env, decode->op[0].ptr, diff, 1); \
        } \
        FLAGS_FUNC##8(env, v1, v2, diff); \
        break; \
    } \
    case 2: \
    { \
        uint16_t v1 = (uint16_t)decode->op[0].val; \
        uint16_t v2 = (uint16_t)decode->op[1].val; \
        uint16_t diff = v1 cmd v2; \
        if (save_res) { \
            write_val_ext(env, decode->op[0].ptr, diff, 2); \
        } \
        FLAGS_FUNC##16(env, v1, v2, diff); \
        break; \
    } \
    case 4: \
    { \
        uint32_t v1 = (uint32_t)decode->op[0].val; \
        uint32_t v2 = (uint32_t)decode->op[1].val; \
        uint32_t diff = v1 cmd v2; \
        if (save_res) { \
            write_val_ext(env, decode->op[0].ptr, diff, 4); \
        } \
        FLAGS_FUNC##32(env, v1, v2, diff); \
        break; \
    } \
    default: \
        VM_PANIC("bad size\n"); \
    } \
} \
93
94target_ulong read_reg(CPUX86State *env, int reg, int size)
95{
96 switch (size) {
97 case 1:
98 return env->hvf_emul->regs[reg].lx;
99 case 2:
100 return env->hvf_emul->regs[reg].rx;
101 case 4:
102 return env->hvf_emul->regs[reg].erx;
103 case 8:
104 return env->hvf_emul->regs[reg].rrx;
105 default:
106 abort();
107 }
108 return 0;
109}
110
111void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
112{
113 switch (size) {
114 case 1:
115 env->hvf_emul->regs[reg].lx = val;
116 break;
117 case 2:
118 env->hvf_emul->regs[reg].rx = val;
119 break;
120 case 4:
121 env->hvf_emul->regs[reg].rrx = (uint32_t)val;
122 break;
123 case 8:
124 env->hvf_emul->regs[reg].rrx = val;
125 break;
126 default:
127 abort();
128 }
129}
130
131target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
132{
133 target_ulong val;
134
135 switch (size) {
136 case 1:
137 val = *(uint8_t *)reg_ptr;
138 break;
139 case 2:
140 val = *(uint16_t *)reg_ptr;
141 break;
142 case 4:
143 val = *(uint32_t *)reg_ptr;
144 break;
145 case 8:
146 val = *(uint64_t *)reg_ptr;
147 break;
148 default:
149 abort();
150 }
151 return val;
152}
153
/*
 * Store @val into the register slot addressed by @reg_ptr (a host
 * pointer stored in a target_ulong), truncated to @size bytes.
 * Unsupported sizes abort.
 */
void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
{
    switch (size) {
    case 1:
        *(uint8_t *)reg_ptr = val;
        break;
    case 2:
        *(uint16_t *)reg_ptr = val;
        break;
    case 4:
        /* 32-bit writes go through the full 64-bit slot so the upper
         * half is cleared, matching x86-64 zero-extension semantics. */
        *(uint64_t *)reg_ptr = (uint32_t)val;
        break;
    case 8:
        *(uint64_t *)reg_ptr = val;
        break;
    default:
        abort();
    }
}
173
174static bool is_host_reg(struct CPUX86State *env, target_ulong ptr)
175{
176 return (ptr - (target_ulong)&env->hvf_emul->regs[0]) < sizeof(env->hvf_emul->regs);
177}
178
179void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val, int size)
180{
181 if (is_host_reg(env, ptr)) {
182 write_val_to_reg(ptr, val, size);
183 return;
184 }
185 vmx_write_mem(ENV_GET_CPU(env), ptr, &val, size);
186}
187
/*
 * Read @bytes of guest memory at linear address @ptr into the per-vcpu
 * scratch buffer and return a pointer to it.  The buffer is reused by
 * every call, so callers must consume the data before the next read.
 */
uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes)
{
    vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, ptr, bytes);
    return env->hvf_emul->mmio_buf;
}
193
194
195target_ulong read_val_ext(struct CPUX86State *env, target_ulong ptr, int size)
196{
197 target_ulong val;
198 uint8_t *mmio_ptr;
199
200 if (is_host_reg(env, ptr)) {
201 return read_val_from_reg(ptr, size);
202 }
203
204 mmio_ptr = read_mmio(env, ptr, size);
205 switch (size) {
206 case 1:
207 val = *(uint8_t *)mmio_ptr;
208 break;
209 case 2:
210 val = *(uint16_t *)mmio_ptr;
211 break;
212 case 4:
213 val = *(uint32_t *)mmio_ptr;
214 break;
215 case 8:
216 val = *(uint64_t *)mmio_ptr;
217 break;
218 default:
219 VM_PANIC("bad size\n");
220 break;
221 }
222 return val;
223}
224
/*
 * Resolve the first @n decoded operands: compute the backing pointer
 * for register/memory operands and, where the corresponding val_opN
 * flag is set, load the current value into op[i].val at
 * decode->operand_size width.  Immediates were already captured by the
 * decoder and are left untouched.
 */
static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode,
                           int n, bool val_op0, bool val_op1, bool val_op2)
{
    int i;
    bool calc_val[3] = {val_op0, val_op1, val_op2};

    for (i = 0; i < n; i++) {
        switch (decode->op[i].type) {
        case X86_VAR_IMMEDIATE:
            break;
        case X86_VAR_REG:
            VM_PANIC_ON(!decode->op[i].ptr);
            if (calc_val[i]) {
                decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
                                                      decode->operand_size);
            }
            break;
        case X86_VAR_RM:
            /* resolve the ModRM effective address into op[i].ptr first */
            calc_modrm_operand(env, decode, &decode->op[i]);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        case X86_VAR_OFFSET:
            /* moffs-style operand: ptr holds a raw offset, linearize it */
            decode->op[i].ptr = decode_linear_addr(env, decode,
                                                   decode->op[i].ptr,
                                                   R_DS);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        default:
            break;
        }
    }
}
263
/* MOV: copy operand 1 into operand 0; flags are unaffected. */
static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
{
    /* only the source value is needed; the destination is written blind */
    fetch_operands(env, decode, 2, false, true, false);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}
272
/* ADD: op0 += op1, updating OF/SF/ZF/AF/PF/CF. */
static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}

/* OR: op0 |= op1, updating flags with logic semantics (CF = OF = 0). */
static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

/* ADC: op0 += op1 + CF; the cmd token expands to "+ get_CF(env) +". */
static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}

/* SBB: op0 -= op1 + CF; the cmd token expands to "- get_CF(env) -". */
static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}

/* AND: op0 &= op1, updating flags with logic semantics (CF = OF = 0). */
static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

/* SUB: op0 -= op1, updating OF/SF/ZF/AF/PF/CF. */
static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}

/* XOR: op0 ^= op1, updating flags with logic semantics (CF = OF = 0). */
static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}
314
/*
 * NEG: two's-complement negate the r/m operand (decoded into op[1])
 * and set flags as for the subtraction 0 - src.
 */
static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
{

    int32_t val;
    fetch_operands(env, decode, 2, true, true, false);

    val = 0 - sign(decode->op[1].val, decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);

    /* 0 - val reconstructs the original source for the flag helpers */
    if (4 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
    } else if (2 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
    } else if (1 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
    } else {
        VM_PANIC("bad op size\n");
    }


    RIP(env) += decode->len;
}
337
/* CMP: compute op0 - op1 for flags only; the result is discarded. */
static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    RIP(env) += decode->len;
}

/*
 * INC: add 1 via a synthetic zero second operand and the "+1+" cmd
 * token; uses the OSZAP flag helper so CF is preserved, as the ISA
 * requires for INC.
 */
static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);

    RIP(env) += decode->len;
}

/* DEC: subtract 1 the same way; CF is preserved per the ISA. */
static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
    RIP(env) += decode->len;
}

/* TEST: compute op0 & op1 for flags only; the result is discarded. */
static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
    RIP(env) += decode->len;
}

/* NOT: bitwise-invert the single operand; flags are unaffected. */
static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 1, true, false, false);

    write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
                  decode->operand_size);
    RIP(env) += decode->len;
}
377
/*
 * MOVZX: zero-extend an 8-bit (opcode 0f b6) or 16-bit (0f b7) source
 * into the destination at the original operand size.
 * decode->operand_size is temporarily narrowed so the ModRM source is
 * resolved at the source width.
 */
void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 1, false, false, false);

    if (0xb6 == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }
    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
    /* write at the full destination width: upper bytes become zero */
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    RIP(env) += decode->len;
}
397
/*
 * OUT: write AL/eAX to an I/O port (immediate or DX addressed) via
 * hvf_handle_io with direction = 1 (out).
 */
static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
{
    switch (decode->opcode[0]) {
    case 0xe6:
        /* OUT imm8, AL */
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 1, 1, 1);
        break;
    case 0xe7:
        /* OUT imm8, eAX at the decoded operand size */
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    case 0xee:
        /* OUT DX, AL */
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 1, 1, 1);
        break;
    case 0xef:
        /* OUT DX, eAX at the decoded operand size */
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &RAX(env), 1, decode->operand_size, 1);
        break;
    default:
        VM_PANIC("Bad out opcode\n");
        break;
    }
    RIP(env) += decode->len;
}
420
/*
 * IN: read from an I/O port (immediate or DX addressed) into AL/eAX
 * via hvf_handle_io with direction = 0 (in).  Word reads update AX
 * only; dword reads zero-extend into RAX.
 */
static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong val = 0;
    switch (decode->opcode[0]) {
    case 0xe4:
        /* IN AL, imm8 */
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 0, 1, 1);
        break;
    case 0xe5:
        /* IN eAX, imm8 */
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &val, 0, decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            /* 32-bit result zero-extends into the full register */
            RAX(env) = (uint32_t)val;
        }
        break;
    case 0xec:
        /* IN AL, DX */
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 0, 1, 1);
        break;
    case 0xed:
        /* IN eAX, DX */
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &val, 0, decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }

        break;
    default:
        VM_PANIC("Bad in opcode\n");
        break;
    }

    RIP(env) += decode->len;
}
455
456static inline void string_increment_reg(struct CPUX86State *env, int reg,
457 struct x86_decode *decode)
458{
459 target_ulong val = read_reg(env, reg, decode->addressing_size);
460 if (env->hvf_emul->rflags.df) {
461 val -= decode->operand_size;
462 } else {
463 val += decode->operand_size;
464 }
465 write_reg(env, reg, val, decode->addressing_size);
466}
467
/*
 * Run a string-op body under a REP-style prefix: repeat @func until
 * RCX (at the current addressing size) is exhausted, writing the
 * decremented count back after every iteration.  With PREFIX_REP
 * (REPE/REPZ) the loop also stops when ZF clears; with PREFIX_REPN
 * (REPNE/REPNZ) when ZF sets.  Callers pass rep = 0 for plain REP
 * semantics (no ZF check).
 */
static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode,
                              void (*func)(struct CPUX86State *env,
                                           struct x86_decode *ins), int rep)
{
    target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
    while (rcx--) {
        func(env, decode);
        write_reg(env, R_ECX, rcx, decode->addressing_size);
        if ((PREFIX_REP == rep) && !get_ZF(env)) {
            break;
        }
        if ((PREFIX_REPN == rep) && get_ZF(env)) {
            break;
        }
    }
}
484
/*
 * One INS iteration: read one element from port DX into the scratch
 * buffer, store it at ES:DI, then advance RDI.
 */
static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,
                                         R_ES);

    hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 0,
                  decode->operand_size, 1);
    vmx_write_mem(ENV_GET_CPU(env), addr, env->hvf_emul->mmio_buf, decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}

/* INS / REP INS: repeat per RCX when a rep prefix is present. */
static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_ins_single, 0);
    } else {
        exec_ins_single(env, decode);
    }

    RIP(env) += decode->len;
}
507
/*
 * One OUTS iteration: read one element from DS:SI (segment override
 * honored by decode_linear_addr) and write it to port DX, then
 * advance RSI.
 */
static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);

    vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, addr, decode->operand_size);
    hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 1,
                  decode->operand_size, 1);

    string_increment_reg(env, R_ESI, decode);
}

/* OUTS / REP OUTS: repeat per RCX when a rep prefix is present. */
static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_outs_single, 0);
    } else {
        exec_outs_single(env, decode);
    }

    RIP(env) += decode->len;
}
529
/*
 * One MOVS iteration: copy one element from DS:SI (override honored)
 * to ES:DI, then advance both index registers.
 */
static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong src_addr;
    target_ulong dst_addr;
    target_ulong val;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,
                                R_ES);

    val = read_val_ext(env, src_addr, decode->operand_size);
    write_val_ext(env, dst_addr, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}

/* MOVS / REP MOVS: repeat per RCX when a rep prefix is present. */
static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_movs_single, 0);
    } else {
        exec_movs_single(env, decode);
    }

    RIP(env) += decode->len;
}
557
/*
 * One CMPS iteration: load the elements at DS:SI and ES:DI as
 * synthetic immediates and run the SUB flag computation on them
 * (result discarded), then advance both index registers.
 */
static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong src_addr;
    target_ulong dst_addr;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,
                                R_ES);

    decode->op[0].type = X86_VAR_IMMEDIATE;
    decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);

    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}

/*
 * CMPS / REPE|REPNE CMPS: the actual rep prefix is forwarded so
 * string_rep can apply its ZF termination condition.
 */
static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_cmps_single, decode->rep);
    } else {
        exec_cmps_single(env, decode);
    }
    RIP(env) += decode->len;
}
587
588
/*
 * One STOS iteration: store AL/AX/EAX at ES:DI, then advance RDI.
 */
static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;
    target_ulong val;

    addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, R_ES);
    val = read_reg(env, R_EAX, decode->operand_size);
    vmx_write_mem(ENV_GET_CPU(env), addr, &val, decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}


/* STOS / REP STOS: repeat per RCX when a rep prefix is present. */
static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_stos_single, 0);
    } else {
        exec_stos_single(env, decode);
    }

    RIP(env) += decode->len;
}
612
/*
 * One SCAS iteration: compare the accumulator (op[0], set up by
 * exec_scas) against the element at ES:DI, flags only, then
 * advance RDI.
 */
static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;

    addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, R_ES);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    vmx_read_mem(ENV_GET_CPU(env), &decode->op[1].val, addr, decode->operand_size);

    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    string_increment_reg(env, R_EDI, decode);
}

/*
 * SCAS / REPE|REPNE SCAS: operand 0 is fixed to the accumulator; the
 * rep prefix is forwarded so string_rep applies its ZF condition.
 */
static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[0].type = X86_VAR_REG;
    decode->op[0].reg = R_EAX;
    if (decode->rep) {
        string_rep(env, decode, exec_scas_single, decode->rep);
    } else {
        exec_scas_single(env, decode);
    }

    RIP(env) += decode->len;
}
637
/*
 * One LODS iteration: load the element at DS:SI (override honored)
 * into the accumulator, then advance RSI.
 */
static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;
    target_ulong val = 0;

    addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    vmx_read_mem(ENV_GET_CPU(env), &val, addr, decode->operand_size);
    write_reg(env, R_EAX, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
}

/* LODS / REP LODS: repeat per RCX when a rep prefix is present. */
static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_lods_single, 0);
    } else {
        exec_lods_single(env, decode);
    }

    RIP(env) += decode->len;
}
660
661#define MSR_IA32_UCODE_REV 0x00000017
662
663void simulate_rdmsr(struct CPUState *cpu)
664{
665 X86CPU *x86_cpu = X86_CPU(cpu);
666 CPUX86State *env = &x86_cpu->env;
667 uint32_t msr = ECX(env);
668 uint64_t val = 0;
669
670 switch (msr) {
671 case MSR_IA32_TSC:
672 val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
673 break;
674 case MSR_IA32_APICBASE:
675 val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
676 break;
677 case MSR_IA32_UCODE_REV:
678 val = (0x100000000ULL << 32) | 0x100000000ULL;
679 break;
680 case MSR_EFER:
681 val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
682 break;
683 case MSR_FSBASE:
684 val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
685 break;
686 case MSR_GSBASE:
687 val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
688 break;
689 case MSR_KERNELGSBASE:
690 val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
691 break;
692 case MSR_STAR:
693 abort();
694 break;
695 case MSR_LSTAR:
696 abort();
697 break;
698 case MSR_CSTAR:
699 abort();
700 break;
701 case MSR_IA32_MISC_ENABLE:
702 val = env->msr_ia32_misc_enable;
703 break;
704 case MSR_MTRRphysBase(0):
705 case MSR_MTRRphysBase(1):
706 case MSR_MTRRphysBase(2):
707 case MSR_MTRRphysBase(3):
708 case MSR_MTRRphysBase(4):
709 case MSR_MTRRphysBase(5):
710 case MSR_MTRRphysBase(6):
711 case MSR_MTRRphysBase(7):
712 val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
713 break;
714 case MSR_MTRRphysMask(0):
715 case MSR_MTRRphysMask(1):
716 case MSR_MTRRphysMask(2):
717 case MSR_MTRRphysMask(3):
718 case MSR_MTRRphysMask(4):
719 case MSR_MTRRphysMask(5):
720 case MSR_MTRRphysMask(6):
721 case MSR_MTRRphysMask(7):
722 val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
723 break;
724 case MSR_MTRRfix64K_00000:
725 val = env->mtrr_fixed[0];
726 break;
727 case MSR_MTRRfix16K_80000:
728 case MSR_MTRRfix16K_A0000:
729 val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
730 break;
731 case MSR_MTRRfix4K_C0000:
732 case MSR_MTRRfix4K_C8000:
733 case MSR_MTRRfix4K_D0000:
734 case MSR_MTRRfix4K_D8000:
735 case MSR_MTRRfix4K_E0000:
736 case MSR_MTRRfix4K_E8000:
737 case MSR_MTRRfix4K_F0000:
738 case MSR_MTRRfix4K_F8000:
739 val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
740 break;
741 case MSR_MTRRdefType:
742 val = env->mtrr_deftype;
743 break;
744 default:
745
746 val = 0;
747 break;
748 }
749
750 RAX(env) = (uint32_t)val;
751 RDX(env) = (uint32_t)(val >> 32);
752}
753
/* RDMSR instruction handler: delegate to simulate_rdmsr, advance RIP. */
static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_rdmsr(ENV_GET_CPU(env));
    RIP(env) += decode->len;
}
759
/*
 * Emulate WRMSR for the MSR selected by guest ECX, taking the 64-bit
 * payload from EDX:EAX.  Base MSRs are written into the VMCS; MTRR
 * state is kept in env.  Unknown MSRs are silently ignored.
 */
void simulate_wrmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t msr = ECX(env);
    uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);

    switch (msr) {
    case MSR_IA32_TSC:
        /* TSC writes are ignored */



        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
        break;
    case MSR_FSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
        break;
    case MSR_GSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
        break;
    case MSR_KERNELGSBASE:
        /*
         * NOTE(review): kernel GS base is parked in the VMCS *host*
         * FS-base field (simulate_rdmsr reads it back from there) —
         * confirm this scratch-field convention.
         */
        wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_EFER:
        /* propagate EFER to the VMCS; NXE changes require a TLB flush */
        wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
        if (data & MSR_EFER_NXE) {
            hv_vcpu_invalidate_tlb(cpu->hvf_fd);
        }
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        /* base/mask MSRs interleave, hence the divide by 2 */
        env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = data;
        break;
    default:
        /* unknown MSRs are silently ignored */
        break;
    }




    /* no #GP injection for bad MSRs here */
}
851
/* WRMSR instruction handler: delegate to simulate_wrmsr, advance RIP. */
static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_wrmsr(ENV_GET_CPU(env));
    RIP(env) += decode->len;
}
857
858
859
860
861
/*
 * Common BT/BTC/BTS/BTR implementation.
 * @flag selects the operation: 0 = BT (test only), 1 = BTC (complement),
 * 2 = BTS (set), 3 = BTR (reset).  CF always receives the tested bit's
 * original value.  For memory operands the bit offset may address
 * beyond the operand, so op[0].ptr is displaced by whole words first.
 * REX-prefixed (64-bit) forms are not handled.
 */
static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)
{
    int32_t displacement;
    uint8_t index;
    bool cf;
    int mask = (4 == decode->operand_size) ? 0x1f : 0xf;

    VM_PANIC_ON(decode->rex.rex);

    fetch_operands(env, decode, 2, false, true, false);
    /* bit position within the (possibly displaced) word */
    index = decode->op[1].val & mask;

    if (decode->op[0].type != X86_VAR_REG) {
        /* signed word-granular displacement derived from the bit offset */
        if (4 == decode->operand_size) {
            displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
            decode->op[0].ptr += 4 * displacement;
        } else if (2 == decode->operand_size) {
            displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
            decode->op[0].ptr += 2 * displacement;
        } else {
            VM_PANIC("bt 64bit\n");
        }
    }
    decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
                                     decode->operand_size);
    cf = (decode->op[0].val >> index) & 0x01;

    switch (flag) {
    case 0:
        /* BT: no write-back */
        set_CF(env, cf);
        return;
    case 1:
        decode->op[0].val ^= (1u << index);
        break;
    case 2:
        decode->op[0].val |= (1u << index);
        break;
    case 3:
        decode->op[0].val &= ~(1u << index);
        break;
    }
    write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
                  decode->operand_size);
    set_CF(env, cf);
}
907
/* BT: test bit, set CF (do_bt flag 0). */
static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 0);
    RIP(env) += decode->len;
}

/* BTC: complement tested bit (do_bt flag 1). */
static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 1);
    RIP(env) += decode->len;
}

/* BTR: reset tested bit (do_bt flag 3). */
static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 3);
    RIP(env) += decode->len;
}

/* BTS: set tested bit (do_bt flag 2). */
static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 2);
    RIP(env) += decode->len;
}
931
/*
 * SHL: shift operand 0 left by a count masked to 5 bits.  Zero counts
 * leave the operand and flags untouched.  For 8/16-bit operands a
 * count exceeding the operand width shifts in zeros and leaves
 * CF/OF 0; CF gets the last bit shifted out, OF = CF ^ new sign bit
 * (defined for count 1, computed unconditionally here).
 */
void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);

    count = decode->op[1].val;
    count &= 0x1f;
    if (!count) {
        goto exit;
    }

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t res = 0;
        if (count <= 8) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (8 - count)) & 0x1;
            of = cf ^ (res >> 7);
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);
        SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
        /* OxxxxC runs after the logic helper so it owns the final CF/OF */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res = 0;


        if (count <= 16) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (16 - count)) & 0x1;
            of = cf ^ (res >> 15);
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);
        SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        /* count is 1..31 here, so the (32 - count) shift is well-defined */
        uint32_t res = decode->op[0].val << count;

        write_val_ext(env, decode->op[0].ptr, res, 4);
        SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
        cf = (decode->op[0].val >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    default:
        abort();
    }

exit:

    RIP(env) += decode->len;
}
995
/*
 * MOVSX: sign-extend an 8-bit (opcode 0f be) or 16-bit (0f bf) source
 * into the destination at the original operand size.
 * decode->operand_size is temporarily narrowed so the ModRM source is
 * resolved at the source width.
 */
void exec_movsx(CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 2, false, false, false);

    if (0xbe == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }

    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    /* sign() extends the narrow value before the wide write-back */
    decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
                             src_op_size);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    RIP(env) += decode->len;
}
1018
/*
 * ROR: rotate operand 0 right.  The count is reduced modulo the
 * operand width; a rotation that is a multiple of the width leaves the
 * value unchanged but (for 8/16-bit, when the raw count still had
 * higher bits set) may still update CF/OF from the unchanged value.
 * After a real rotation CF = new MSB, OF = XOR of the two top bits.
 */
void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit6, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            /* width-multiple rotation: flags only, value untouched */
            if (count & 0x18) {
                bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
                bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
                SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
            }
        } else {
            count &= 0x7;
            res = ((uint8_t)decode->op[0].val >> count) |
                  ((uint8_t)decode->op[0].val << (8 - count));
            write_val_ext(env, decode->op[0].ptr, res, 1);
            bit6 = (res >> 6) & 1;
            bit7 = (res >> 7) & 1;

            SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
        }
        break;
    }
    case 2:
    {
        uint32_t bit14, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
                bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;

                SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
            }
        } else {
            count &= 0x0f;
            res = ((uint16_t)decode->op[0].val >> count) |
                  ((uint16_t)decode->op[0].val << (16 - count));
            write_val_ext(env, decode->op[0].ptr, res, 2);

            bit14 = (res >> 14) & 1;
            bit15 = (res >> 15) & 1;

            SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
        }
        break;
    }
    case 4:
    {
        uint32_t bit31, bit30;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val >> count) |
                  ((uint32_t)decode->op[0].val << (32 - count));
            write_val_ext(env, decode->op[0].ptr, res, 4);

            bit31 = (res >> 31) & 1;
            bit30 = (res >> 30) & 1;

            SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
        }
        break;
    }
    }
    RIP(env) += decode->len;
}
1096
/*
 * ROL: rotate operand 0 left.  Mirrors exec_ror: count reduced modulo
 * the operand width, width-multiple rotations update flags only.
 * After a real rotation CF = new LSB (the bit rotated around),
 * OF = CF ^ new MSB.
 */
void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit0, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            /* width-multiple rotation: flags only, value untouched */
            if (count & 0x18) {
                bit0 = ((uint8_t)decode->op[0].val & 1);
                bit7 = ((uint8_t)decode->op[0].val >> 7);
                SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
            }
        } else {
            count &= 0x7;
            res = ((uint8_t)decode->op[0].val << count) |
                  ((uint8_t)decode->op[0].val >> (8 - count));

            write_val_ext(env, decode->op[0].ptr, res, 1);



            bit0 = (res & 1);
            bit7 = (res >> 7);
            SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
        }
        break;
    }
    case 2:
    {
        uint32_t bit0, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit0 = ((uint16_t)decode->op[0].val & 0x1);
                bit15 = ((uint16_t)decode->op[0].val >> 15);

                SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
            }
        } else {
            count &= 0x0f;
            res = ((uint16_t)decode->op[0].val << count) |
                  ((uint16_t)decode->op[0].val >> (16 - count));

            write_val_ext(env, decode->op[0].ptr, res, 2);
            bit0 = (res & 0x1);
            bit15 = (res >> 15);

            SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
        }
        break;
    }
    case 4:
    {
        uint32_t bit0, bit31;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val << count) |
                  ((uint32_t)decode->op[0].val >> (32 - count));

            write_val_ext(env, decode->op[0].ptr, res, 4);
            bit0 = (res & 0x1);
            bit31 = (res >> 31);

            SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
        }
        break;
    }
    }
    RIP(env) += decode->len;
}
1177
1178
/*
 * RCL: rotate left through CF, i.e. a (width+1)-bit rotation of the
 * operand plus the carry flag.  Counts are reduced modulo width+1 for
 * 8/16-bit operands (9 and 17) and masked to 5 bits for 32-bit.
 * CF gets the bit rotated into it; OF = CF ^ new MSB.
 */
void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;
        count %= 9;
        if (!count) {
            break;
        }

        if (1 == count) {
            /* single step: old CF becomes the new LSB */
            res = (op1_8 << 1) | get_CF(env);
        } else {
            res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
                  (op1_8 >> (9 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (8 - count)) & 0x01;
        of = cf ^ (res >> 7);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res;
        uint16_t op1_16 = decode->op[0].val;

        count %= 17;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_16 << 1) | get_CF(env);
        } else if (count == 16) {
            /* full rotation: CF lands in the MSB, MSB-to-be falls out */
            res = (get_CF(env) << 15) | (op1_16 >> 1);
        } else {
            res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
                  (op1_16 >> (17 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (16 - count)) & 0x1;
        of = cf ^ (res >> 15);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 << 1) | get_CF(env);
        } else {
            res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
                  (op1_32 >> (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    RIP(env) += decode->len;
}
1263
/*
 * RCR: rotate right through CF, the mirror of exec_rcl.  Counts are
 * reduced modulo width+1 for 8/16-bit operands and masked to 5 bits
 * for 32-bit.  CF gets the last bit rotated out; OF = XOR of the two
 * top bits of the result.
 */
void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;

        count %= 9;
        if (!count) {
            break;
        }
        res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
              (op1_8 << (9 - count));

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (count - 1)) & 0x1;
        of = (((res << 1) ^ res) >> 7) & 0x1;
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t op1_16 = decode->op[0].val;
        uint16_t res;

        count %= 17;
        if (!count) {
            break;
        }
        res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
              (op1_16 << (17 - count));

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (count - 1)) & 0x1;
        of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1;

        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            /* single step: old CF becomes the new MSB */
            res = (op1_32 >> 1) | (get_CF(env) << 31);
        } else {
            res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
                  (op1_32 << (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (count - 1)) & 0x1;
        of = ((res << 1) ^ res) >> 31;
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    RIP(env) += decode->len;
}
1338
1339static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
1340{
1341 fetch_operands(env, decode, 2, true, true, false);
1342
1343 write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
1344 decode->operand_size);
1345 write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
1346 decode->operand_size);
1347
1348 RIP(env) += decode->len;
1349}
1350
/*
 * XADD: exchange-and-add.
 * The macro adds op[0] and op[1], writes the sum into op[0] (save_res
 * is true) and updates OSZAPC; the original op[0] value -- still held
 * in decode->op[0].val, which the macro does not modify -- is then
 * stored to op[1], completing the exchange half of the instruction.
 */
static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}
1359
/*
 * Maps each supported x86_decode_cmd to its emulation callback.
 * This compact list is expanded into the _cmd_handler table (indexed
 * directly by command) at init time; commands absent here keep a NULL
 * handler and are reported as unimplemented at dispatch.
 */
static struct cmd_handler {
    enum x86_decode_cmd cmd;
    void (*handler)(struct CPUX86State *env, struct x86_decode *ins);
} handlers[] = {
    {X86_DECODE_CMD_INVL, NULL,},
    {X86_DECODE_CMD_MOV, exec_mov},
    {X86_DECODE_CMD_ADD, exec_add},
    {X86_DECODE_CMD_OR, exec_or},
    {X86_DECODE_CMD_ADC, exec_adc},
    {X86_DECODE_CMD_SBB, exec_sbb},
    {X86_DECODE_CMD_AND, exec_and},
    {X86_DECODE_CMD_SUB, exec_sub},
    {X86_DECODE_CMD_NEG, exec_neg},
    {X86_DECODE_CMD_XOR, exec_xor},
    {X86_DECODE_CMD_CMP, exec_cmp},
    {X86_DECODE_CMD_INC, exec_inc},
    {X86_DECODE_CMD_DEC, exec_dec},
    {X86_DECODE_CMD_TST, exec_tst},
    {X86_DECODE_CMD_NOT, exec_not},
    {X86_DECODE_CMD_MOVZX, exec_movzx},
    {X86_DECODE_CMD_OUT, exec_out},
    {X86_DECODE_CMD_IN, exec_in},
    {X86_DECODE_CMD_INS, exec_ins},
    {X86_DECODE_CMD_OUTS, exec_outs},
    {X86_DECODE_CMD_RDMSR, exec_rdmsr},
    {X86_DECODE_CMD_WRMSR, exec_wrmsr},
    {X86_DECODE_CMD_BT, exec_bt},
    {X86_DECODE_CMD_BTR, exec_btr},
    {X86_DECODE_CMD_BTC, exec_btc},
    {X86_DECODE_CMD_BTS, exec_bts},
    {X86_DECODE_CMD_SHL, exec_shl},
    {X86_DECODE_CMD_ROL, exec_rol},
    {X86_DECODE_CMD_ROR, exec_ror},
    {X86_DECODE_CMD_RCR, exec_rcr},
    {X86_DECODE_CMD_RCL, exec_rcl},

    {X86_DECODE_CMD_MOVS, exec_movs},
    {X86_DECODE_CMD_CMPS, exec_cmps},
    {X86_DECODE_CMD_STOS, exec_stos},
    {X86_DECODE_CMD_SCAS, exec_scas},
    {X86_DECODE_CMD_LODS, exec_lods},
    {X86_DECODE_CMD_MOVSX, exec_movsx},
    {X86_DECODE_CMD_XCHG, exec_xchg},
    {X86_DECODE_CMD_XADD, exec_xadd},
};
1405
/* Dispatch table indexed directly by x86_decode_cmd; filled by init_cmd_handler(). */
static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];
1407
1408static void init_cmd_handler()
1409{
1410 int i;
1411 for (i = 0; i < ARRAY_SIZE(handlers); i++) {
1412 _cmd_handler[handlers[i].cmd] = handlers[i];
1413 }
1414}
1415
/*
 * Pull the guest's general-purpose registers, RFLAGS and RIP out of the
 * HVF vCPU into the QEMU-side emulator state, so the instruction
 * emulator operates on cached values.
 */
void load_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
    RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
    RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);
    RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
    RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);
    RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
    RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);
    RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
    /*
     * R8..R15: relies on HV_X86_RAX..HV_X86_R15 being contiguous and in
     * x86 register-number order.  NOTE(review): confirm against the
     * Hypervisor.framework hv_x86_reg_t enum.
     */
    for (i = 8; i < 16; i++) {
        RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
    }

    RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
    /* Convert architectural RFLAGS into the lazy-flags representation. */
    rflags_to_lflags(env);
    RIP(env) = rreg(cpu->hvf_fd, HV_X86_RIP);
}
1438
/*
 * Flush the cached emulator register state (GPRs, RFLAGS, RIP) back
 * into the HVF vCPU.  Inverse of load_regs().
 */
void store_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
    wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
    wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
    wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
    wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
    wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
    wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
    wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
    /*
     * R8..R15: same contiguity assumption as load_regs() --
     * NOTE(review): confirm HV_X86_RAX + i maps to HV_X86_R8..R15.
     */
    for (i = 8; i < 16; i++) {
        wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
    }

    /* Materialize the lazy flags into RFLAGS before writing it back. */
    lflags_to_rflags(env);
    wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(env));
    /* RIP goes through macvm_set_rip() rather than a plain wreg(). */
    macvm_set_rip(cpu, RIP(env));
}
1461
1462bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)
1463{
1464
1465
1466
1467
1468 if (!_cmd_handler[ins->cmd].handler) {
1469 printf("Unimplemented handler (%llx) for %d (%x %x) \n", RIP(env),
1470 ins->cmd, ins->opcode[0],
1471 ins->opcode_len > 1 ? ins->opcode[1] : 0);
1472 RIP(env) += ins->len;
1473 return true;
1474 }
1475
1476 _cmd_handler[ins->cmd].handler(env, ins);
1477 return true;
1478}
1479
/*
 * One-time initialization of the instruction emulator: builds the
 * command-dispatch table.
 *
 * Fix: declared as (void) -- in pre-C23 C an empty parameter list
 * "()" means "unspecified arguments", so miscalls would compile
 * without a diagnostic.
 */
void init_emu(void)
{
    init_cmd_handler();
}
1484