1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "qemu/host-utils.h"
22#include "cpu.h"
23#include "disas/disas.h"
24#include "exec/exec-all.h"
25#include "tcg/tcg-op.h"
26#include "tcg/tcg-op-gvec.h"
27#include "exec/cpu_ldst.h"
28#include "exec/translator.h"
29#include "fpu/softfloat.h"
30
31#include "exec/helper-proto.h"
32#include "exec/helper-gen.h"
33#include "helper-tcg.h"
34
35#include "exec/log.h"
36
37#define PREFIX_REPZ 0x01
38#define PREFIX_REPNZ 0x02
39#define PREFIX_LOCK 0x04
40#define PREFIX_DATA 0x08
41#define PREFIX_ADR 0x10
42#define PREFIX_VEX 0x20
43#define PREFIX_REX 0x40
44
45#ifdef TARGET_X86_64
46# define ctztl ctz64
47# define clztl clz64
48#else
49# define ctztl ctz32
50# define clztl clz32
51#endif
52
53
54#define CASE_MODRM_MEM_OP(OP) \
55 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
56 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
57 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
58
59#define CASE_MODRM_OP(OP) \
60 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
61 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
62 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
63 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
64
65
66
67
68static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
69static TCGv cpu_eip;
70static TCGv_i32 cpu_cc_op;
71static TCGv cpu_regs[CPU_NB_REGS];
72static TCGv cpu_seg_base[6];
73static TCGv_i64 cpu_bndl[4];
74static TCGv_i64 cpu_bndu[4];
75
76#include "exec/gen-icount.h"
77
78typedef struct DisasContext {
79 DisasContextBase base;
80
81 target_ulong pc;
82 target_ulong cs_base;
83 target_ulong pc_save;
84
85 MemOp aflag;
86 MemOp dflag;
87
88 int8_t override;
89 uint8_t prefix;
90
91 bool has_modrm;
92 uint8_t modrm;
93
94#ifndef CONFIG_USER_ONLY
95 uint8_t cpl;
96 uint8_t iopl;
97#endif
98 uint8_t vex_l;
99 uint8_t vex_v;
100 uint8_t popl_esp_hack;
101 uint8_t rip_offset;
102
103#ifdef TARGET_X86_64
104 uint8_t rex_r;
105 uint8_t rex_x;
106 uint8_t rex_b;
107#endif
108 bool vex_w;
109 bool jmp_opt;
110 bool repz_opt;
111 bool cc_op_dirty;
112
113 CCOp cc_op;
114 int mem_index;
115 uint32_t flags;
116 int cpuid_features;
117 int cpuid_ext_features;
118 int cpuid_ext2_features;
119 int cpuid_ext3_features;
120 int cpuid_7_0_ebx_features;
121 int cpuid_7_0_ecx_features;
122 int cpuid_xsave_features;
123
124
125 TCGv cc_srcT;
126 TCGv A0;
127 TCGv T0;
128 TCGv T1;
129
130
131 TCGv tmp0;
132 TCGv tmp4;
133 TCGv_i32 tmp2_i32;
134 TCGv_i32 tmp3_i32;
135 TCGv_i64 tmp1_i64;
136
137 sigjmp_buf jmpbuf;
138 TCGOp *prev_insn_end;
139} DisasContext;
140
141#define DISAS_EOB_ONLY DISAS_TARGET_0
142#define DISAS_EOB_NEXT DISAS_TARGET_1
143#define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_2
144#define DISAS_JUMP DISAS_TARGET_3
145
146
147#ifdef CONFIG_USER_ONLY
148#define PE(S) true
149#define CPL(S) 3
150#define IOPL(S) 0
151#define SVME(S) false
152#define GUEST(S) false
153#else
154#define PE(S) (((S)->flags & HF_PE_MASK) != 0)
155#define CPL(S) ((S)->cpl)
156#define IOPL(S) ((S)->iopl)
157#define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
158#define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
159#endif
160#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
161#define VM86(S) false
162#define CODE32(S) true
163#define SS32(S) true
164#define ADDSEG(S) false
165#else
166#define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
167#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
168#define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
169#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
170#endif
171#if !defined(TARGET_X86_64)
172#define CODE64(S) false
173#define LMA(S) false
174#elif defined(CONFIG_USER_ONLY)
175#define CODE64(S) true
176#define LMA(S) true
177#else
178#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
179#define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
180#endif
181
182#ifdef TARGET_X86_64
183#define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
184#define REX_W(S) ((S)->vex_w)
185#define REX_R(S) ((S)->rex_r + 0)
186#define REX_X(S) ((S)->rex_x + 0)
187#define REX_B(S) ((S)->rex_b + 0)
188#else
189#define REX_PREFIX(S) false
190#define REX_W(S) false
191#define REX_R(S) 0
192#define REX_X(S) 0
193#define REX_B(S) 0
194#endif
195
196
197
198
199
200
201#define STUB_HELPER(NAME, ...) \
202 static inline void gen_helper_##NAME(__VA_ARGS__) \
203 { qemu_build_not_reached(); }
204
205#ifdef CONFIG_USER_ONLY
206STUB_HELPER(clgi, TCGv_env env)
207STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
208STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
209STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
210STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
211STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
212STUB_HELPER(monitor, TCGv_env env, TCGv addr)
213STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
214STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
215STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
216STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
217STUB_HELPER(rdmsr, TCGv_env env)
218STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
219STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
220STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
221STUB_HELPER(stgi, TCGv_env env)
222STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
223STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
224STUB_HELPER(vmmcall, TCGv_env env)
225STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
226STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
227STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
228STUB_HELPER(wrmsr, TCGv_env env)
229#endif
230
231static void gen_eob(DisasContext *s);
232static void gen_jr(DisasContext *s);
233static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
234static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
235static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
236static void gen_exception_gpf(DisasContext *s);
237
238
239enum {
240 OP_ADDL,
241 OP_ORL,
242 OP_ADCL,
243 OP_SBBL,
244 OP_ANDL,
245 OP_SUBL,
246 OP_XORL,
247 OP_CMPL,
248};
249
250
251enum {
252 OP_ROL,
253 OP_ROR,
254 OP_RCL,
255 OP_RCR,
256 OP_SHL,
257 OP_SHR,
258 OP_SHL1,
259 OP_SAR = 7,
260};
261
262enum {
263 JCC_O,
264 JCC_B,
265 JCC_Z,
266 JCC_BE,
267 JCC_S,
268 JCC_P,
269 JCC_L,
270 JCC_LE,
271};
272
273enum {
274
275 OR_EAX,
276 OR_ECX,
277 OR_EDX,
278 OR_EBX,
279 OR_ESP,
280 OR_EBP,
281 OR_ESI,
282 OR_EDI,
283
284 OR_TMP0 = 16,
285 OR_TMP1,
286 OR_A0,
287};
288
289enum {
290 USES_CC_DST = 1,
291 USES_CC_SRC = 2,
292 USES_CC_SRC2 = 4,
293 USES_CC_SRCT = 8,
294};
295
296
297static const uint8_t cc_op_live[CC_OP_NB] = {
298 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
299 [CC_OP_EFLAGS] = USES_CC_SRC,
300 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
301 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
302 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
303 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
304 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
305 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
306 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
307 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
308 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
309 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
310 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
311 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
312 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
313 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
314 [CC_OP_CLR] = 0,
315 [CC_OP_POPCNT] = USES_CC_SRC,
316};
317
318static void set_cc_op(DisasContext *s, CCOp op)
319{
320 int dead;
321
322 if (s->cc_op == op) {
323 return;
324 }
325
326
327 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
328 if (dead & USES_CC_DST) {
329 tcg_gen_discard_tl(cpu_cc_dst);
330 }
331 if (dead & USES_CC_SRC) {
332 tcg_gen_discard_tl(cpu_cc_src);
333 }
334 if (dead & USES_CC_SRC2) {
335 tcg_gen_discard_tl(cpu_cc_src2);
336 }
337 if (dead & USES_CC_SRCT) {
338 tcg_gen_discard_tl(s->cc_srcT);
339 }
340
341 if (op == CC_OP_DYNAMIC) {
342
343
344 s->cc_op_dirty = false;
345 } else {
346
347 if (s->cc_op == CC_OP_DYNAMIC) {
348 tcg_gen_discard_i32(cpu_cc_op);
349 }
350 s->cc_op_dirty = true;
351 }
352 s->cc_op = op;
353}
354
355static void gen_update_cc_op(DisasContext *s)
356{
357 if (s->cc_op_dirty) {
358 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
359 s->cc_op_dirty = false;
360 }
361}
362
363#ifdef TARGET_X86_64
364
365#define NB_OP_SIZES 4
366
367#else
368
369#define NB_OP_SIZES 3
370
371#endif
372
373#if HOST_BIG_ENDIAN
374#define REG_B_OFFSET (sizeof(target_ulong) - 1)
375#define REG_H_OFFSET (sizeof(target_ulong) - 2)
376#define REG_W_OFFSET (sizeof(target_ulong) - 2)
377#define REG_L_OFFSET (sizeof(target_ulong) - 4)
378#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
379#else
380#define REG_B_OFFSET 0
381#define REG_H_OFFSET 1
382#define REG_W_OFFSET 0
383#define REG_L_OFFSET 0
384#define REG_LH_OFFSET 4
385#endif
386
387
388
389
390
391
392
393static inline bool byte_reg_is_xH(DisasContext *s, int reg)
394{
395
396 if (reg < 4 || REX_PREFIX(s)) {
397 return false;
398 }
399 return true;
400}
401
402
403static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
404{
405 if (CODE64(s)) {
406 return ot == MO_16 ? MO_16 : MO_64;
407 } else {
408 return ot;
409 }
410}
411
412
413static inline MemOp mo_stacksize(DisasContext *s)
414{
415 return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
416}
417
418
419static inline MemOp mo_64_32(MemOp ot)
420{
421#ifdef TARGET_X86_64
422 return ot == MO_64 ? MO_64 : MO_32;
423#else
424 return MO_32;
425#endif
426}
427
428
429
430static inline MemOp mo_b_d(int b, MemOp ot)
431{
432 return b & 1 ? ot : MO_8;
433}
434
435
436
437static inline MemOp mo_b_d32(int b, MemOp ot)
438{
439 return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
440}
441
442
443
444
445
446
447
448
449
450static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
451{
452 switch(ot) {
453 case MO_8:
454 if (byte_reg_is_xH(s, reg)) {
455 dest = dest ? dest : cpu_regs[reg - 4];
456 tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
457 return cpu_regs[reg - 4];
458 }
459 dest = dest ? dest : cpu_regs[reg];
460 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
461 break;
462 case MO_16:
463 dest = dest ? dest : cpu_regs[reg];
464 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
465 break;
466 case MO_32:
467
468
469 dest = dest ? dest : cpu_regs[reg];
470 tcg_gen_ext32u_tl(dest, t0);
471 break;
472#ifdef TARGET_X86_64
473 case MO_64:
474 dest = dest ? dest : cpu_regs[reg];
475 tcg_gen_mov_tl(dest, t0);
476 break;
477#endif
478 default:
479 tcg_abort();
480 }
481 return cpu_regs[reg];
482}
483
484static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
485{
486 gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
487}
488
489static inline
490void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
491{
492 if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
493 tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
494 } else {
495 tcg_gen_mov_tl(t0, cpu_regs[reg]);
496 }
497}
498
499static void gen_add_A0_im(DisasContext *s, int val)
500{
501 tcg_gen_addi_tl(s->A0, s->A0, val);
502 if (!CODE64(s)) {
503 tcg_gen_ext32u_tl(s->A0, s->A0);
504 }
505}
506
507static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
508{
509 tcg_gen_mov_tl(cpu_eip, dest);
510 s->pc_save = -1;
511}
512
513static inline
514void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
515{
516 tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
517 gen_op_mov_reg_v(s, size, reg, s->tmp0);
518}
519
520static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
521{
522 tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
523 gen_op_mov_reg_v(s, size, reg, s->tmp0);
524}
525
526static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
527{
528 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
529}
530
531static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
532{
533 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
534}
535
536static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
537{
538 if (d == OR_TMP0) {
539 gen_op_st_v(s, idx, s->T0, s->A0);
540 } else {
541 gen_op_mov_reg_v(s, idx, d, s->T0);
542 }
543}
544
545static void gen_update_eip_cur(DisasContext *s)
546{
547 assert(s->pc_save != -1);
548 if (tb_cflags(s->base.tb) & CF_PCREL) {
549 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
550 } else {
551 tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
552 }
553 s->pc_save = s->base.pc_next;
554}
555
556static void gen_update_eip_next(DisasContext *s)
557{
558 assert(s->pc_save != -1);
559 if (tb_cflags(s->base.tb) & CF_PCREL) {
560 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
561 } else {
562 tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
563 }
564 s->pc_save = s->pc;
565}
566
567static int cur_insn_len(DisasContext *s)
568{
569 return s->pc - s->base.pc_next;
570}
571
572static TCGv_i32 cur_insn_len_i32(DisasContext *s)
573{
574 return tcg_constant_i32(cur_insn_len(s));
575}
576
577static TCGv_i32 eip_next_i32(DisasContext *s)
578{
579 assert(s->pc_save != -1);
580
581
582
583
584
585
586
587
588 if (CODE64(s)) {
589 return tcg_constant_i32(-1);
590 }
591 if (tb_cflags(s->base.tb) & CF_PCREL) {
592 TCGv_i32 ret = tcg_temp_new_i32();
593 tcg_gen_trunc_tl_i32(ret, cpu_eip);
594 tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
595 return ret;
596 } else {
597 return tcg_constant_i32(s->pc - s->cs_base);
598 }
599}
600
601static TCGv eip_next_tl(DisasContext *s)
602{
603 assert(s->pc_save != -1);
604 if (tb_cflags(s->base.tb) & CF_PCREL) {
605 TCGv ret = tcg_temp_new();
606 tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
607 return ret;
608 } else {
609 return tcg_constant_tl(s->pc - s->cs_base);
610 }
611}
612
613static TCGv eip_cur_tl(DisasContext *s)
614{
615 assert(s->pc_save != -1);
616 if (tb_cflags(s->base.tb) & CF_PCREL) {
617 TCGv ret = tcg_temp_new();
618 tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
619 return ret;
620 } else {
621 return tcg_constant_tl(s->base.pc_next - s->cs_base);
622 }
623}
624
625
626
627
628static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
629 int def_seg, int ovr_seg)
630{
631 switch (aflag) {
632#ifdef TARGET_X86_64
633 case MO_64:
634 if (ovr_seg < 0) {
635 tcg_gen_mov_tl(s->A0, a0);
636 return;
637 }
638 break;
639#endif
640 case MO_32:
641
642 if (ovr_seg < 0 && ADDSEG(s)) {
643 ovr_seg = def_seg;
644 }
645 if (ovr_seg < 0) {
646 tcg_gen_ext32u_tl(s->A0, a0);
647 return;
648 }
649 break;
650 case MO_16:
651
652 tcg_gen_ext16u_tl(s->A0, a0);
653 a0 = s->A0;
654 if (ovr_seg < 0) {
655 if (ADDSEG(s)) {
656 ovr_seg = def_seg;
657 } else {
658 return;
659 }
660 }
661 break;
662 default:
663 tcg_abort();
664 }
665
666 if (ovr_seg >= 0) {
667 TCGv seg = cpu_seg_base[ovr_seg];
668
669 if (aflag == MO_64) {
670 tcg_gen_add_tl(s->A0, a0, seg);
671 } else if (CODE64(s)) {
672 tcg_gen_ext32u_tl(s->A0, a0);
673 tcg_gen_add_tl(s->A0, s->A0, seg);
674 } else {
675 tcg_gen_add_tl(s->A0, a0, seg);
676 tcg_gen_ext32u_tl(s->A0, s->A0);
677 }
678 }
679}
680
681static inline void gen_string_movl_A0_ESI(DisasContext *s)
682{
683 gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
684}
685
686static inline void gen_string_movl_A0_EDI(DisasContext *s)
687{
688 gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
689}
690
691static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
692{
693 tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
694 tcg_gen_shli_tl(s->T0, s->T0, ot);
695};
696
697static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
698{
699 switch (size) {
700 case MO_8:
701 if (sign) {
702 tcg_gen_ext8s_tl(dst, src);
703 } else {
704 tcg_gen_ext8u_tl(dst, src);
705 }
706 return dst;
707 case MO_16:
708 if (sign) {
709 tcg_gen_ext16s_tl(dst, src);
710 } else {
711 tcg_gen_ext16u_tl(dst, src);
712 }
713 return dst;
714#ifdef TARGET_X86_64
715 case MO_32:
716 if (sign) {
717 tcg_gen_ext32s_tl(dst, src);
718 } else {
719 tcg_gen_ext32u_tl(dst, src);
720 }
721 return dst;
722#endif
723 default:
724 return src;
725 }
726}
727
728static void gen_extu(MemOp ot, TCGv reg)
729{
730 gen_ext_tl(reg, reg, ot, false);
731}
732
733static void gen_exts(MemOp ot, TCGv reg)
734{
735 gen_ext_tl(reg, reg, ot, true);
736}
737
738static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
739{
740 tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
741 gen_extu(s->aflag, s->tmp0);
742 tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
743}
744
745static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
746{
747 gen_op_j_ecx(s, TCG_COND_EQ, label1);
748}
749
750static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
751{
752 gen_op_j_ecx(s, TCG_COND_NE, label1);
753}
754
755static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
756{
757 switch (ot) {
758 case MO_8:
759 gen_helper_inb(v, cpu_env, n);
760 break;
761 case MO_16:
762 gen_helper_inw(v, cpu_env, n);
763 break;
764 case MO_32:
765 gen_helper_inl(v, cpu_env, n);
766 break;
767 default:
768 tcg_abort();
769 }
770}
771
772static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
773{
774 switch (ot) {
775 case MO_8:
776 gen_helper_outb(cpu_env, v, n);
777 break;
778 case MO_16:
779 gen_helper_outw(cpu_env, v, n);
780 break;
781 case MO_32:
782 gen_helper_outl(cpu_env, v, n);
783 break;
784 default:
785 tcg_abort();
786 }
787}
788
789
790
791
792
793static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
794 uint32_t svm_flags)
795{
796#ifdef CONFIG_USER_ONLY
797
798
799
800
801 gen_exception_gpf(s);
802 return false;
803#else
804 if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
805 gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
806 }
807 if (GUEST(s)) {
808 gen_update_cc_op(s);
809 gen_update_eip_cur(s);
810 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
811 svm_flags |= SVM_IOIO_REP_MASK;
812 }
813 svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
814 gen_helper_svm_check_io(cpu_env, port,
815 tcg_constant_i32(svm_flags),
816 cur_insn_len_i32(s));
817 }
818 return true;
819#endif
820}
821
822static void gen_movs(DisasContext *s, MemOp ot)
823{
824 gen_string_movl_A0_ESI(s);
825 gen_op_ld_v(s, ot, s->T0, s->A0);
826 gen_string_movl_A0_EDI(s);
827 gen_op_st_v(s, ot, s->T0, s->A0);
828 gen_op_movl_T0_Dshift(s, ot);
829 gen_op_add_reg_T0(s, s->aflag, R_ESI);
830 gen_op_add_reg_T0(s, s->aflag, R_EDI);
831}
832
833static void gen_op_update1_cc(DisasContext *s)
834{
835 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
836}
837
838static void gen_op_update2_cc(DisasContext *s)
839{
840 tcg_gen_mov_tl(cpu_cc_src, s->T1);
841 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
842}
843
844static void gen_op_update3_cc(DisasContext *s, TCGv reg)
845{
846 tcg_gen_mov_tl(cpu_cc_src2, reg);
847 tcg_gen_mov_tl(cpu_cc_src, s->T1);
848 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
849}
850
851static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
852{
853 tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
854}
855
856static void gen_op_update_neg_cc(DisasContext *s)
857{
858 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
859 tcg_gen_neg_tl(cpu_cc_src, s->T0);
860 tcg_gen_movi_tl(s->cc_srcT, 0);
861}
862
863
864static void gen_compute_eflags(DisasContext *s)
865{
866 TCGv zero, dst, src1, src2;
867 int live, dead;
868
869 if (s->cc_op == CC_OP_EFLAGS) {
870 return;
871 }
872 if (s->cc_op == CC_OP_CLR) {
873 tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
874 set_cc_op(s, CC_OP_EFLAGS);
875 return;
876 }
877
878 zero = NULL;
879 dst = cpu_cc_dst;
880 src1 = cpu_cc_src;
881 src2 = cpu_cc_src2;
882
883
884 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
885 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
886 if (dead) {
887 zero = tcg_constant_tl(0);
888 if (dead & USES_CC_DST) {
889 dst = zero;
890 }
891 if (dead & USES_CC_SRC) {
892 src1 = zero;
893 }
894 if (dead & USES_CC_SRC2) {
895 src2 = zero;
896 }
897 }
898
899 gen_update_cc_op(s);
900 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
901 set_cc_op(s, CC_OP_EFLAGS);
902}
903
904typedef struct CCPrepare {
905 TCGCond cond;
906 TCGv reg;
907 TCGv reg2;
908 target_ulong imm;
909 target_ulong mask;
910 bool use_reg2;
911 bool no_setcond;
912} CCPrepare;
913
914
915static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
916{
917 TCGv t0, t1;
918 int size, shift;
919
920 switch (s->cc_op) {
921 case CC_OP_SUBB ... CC_OP_SUBQ:
922
923 size = s->cc_op - CC_OP_SUBB;
924 t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
925
926 t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
927 tcg_gen_mov_tl(t0, s->cc_srcT);
928 gen_extu(size, t0);
929 goto add_sub;
930
931 case CC_OP_ADDB ... CC_OP_ADDQ:
932
933 size = s->cc_op - CC_OP_ADDB;
934 t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
935 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
936 add_sub:
937 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
938 .reg2 = t1, .mask = -1, .use_reg2 = true };
939
940 case CC_OP_LOGICB ... CC_OP_LOGICQ:
941 case CC_OP_CLR:
942 case CC_OP_POPCNT:
943 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
944
945 case CC_OP_INCB ... CC_OP_INCQ:
946 case CC_OP_DECB ... CC_OP_DECQ:
947 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
948 .mask = -1, .no_setcond = true };
949
950 case CC_OP_SHLB ... CC_OP_SHLQ:
951
952 size = s->cc_op - CC_OP_SHLB;
953 shift = (8 << size) - 1;
954 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
955 .mask = (target_ulong)1 << shift };
956
957 case CC_OP_MULB ... CC_OP_MULQ:
958 return (CCPrepare) { .cond = TCG_COND_NE,
959 .reg = cpu_cc_src, .mask = -1 };
960
961 case CC_OP_BMILGB ... CC_OP_BMILGQ:
962 size = s->cc_op - CC_OP_BMILGB;
963 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
964 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
965
966 case CC_OP_ADCX:
967 case CC_OP_ADCOX:
968 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
969 .mask = -1, .no_setcond = true };
970
971 case CC_OP_EFLAGS:
972 case CC_OP_SARB ... CC_OP_SARQ:
973
974 return (CCPrepare) { .cond = TCG_COND_NE,
975 .reg = cpu_cc_src, .mask = CC_C };
976
977 default:
978
979
980 gen_update_cc_op(s);
981 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
982 cpu_cc_src2, cpu_cc_op);
983 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
984 .mask = -1, .no_setcond = true };
985 }
986}
987
988
989static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
990{
991 gen_compute_eflags(s);
992 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
993 .mask = CC_P };
994}
995
996
997static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
998{
999 switch (s->cc_op) {
1000 case CC_OP_DYNAMIC:
1001 gen_compute_eflags(s);
1002
1003 case CC_OP_EFLAGS:
1004 case CC_OP_ADCX:
1005 case CC_OP_ADOX:
1006 case CC_OP_ADCOX:
1007 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1008 .mask = CC_S };
1009 case CC_OP_CLR:
1010 case CC_OP_POPCNT:
1011 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1012 default:
1013 {
1014 MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1015 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
1016 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
1017 }
1018 }
1019}
1020
1021
1022static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1023{
1024 switch (s->cc_op) {
1025 case CC_OP_ADOX:
1026 case CC_OP_ADCOX:
1027 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1028 .mask = -1, .no_setcond = true };
1029 case CC_OP_CLR:
1030 case CC_OP_POPCNT:
1031 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1032 default:
1033 gen_compute_eflags(s);
1034 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1035 .mask = CC_O };
1036 }
1037}
1038
1039
1040static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1041{
1042 switch (s->cc_op) {
1043 case CC_OP_DYNAMIC:
1044 gen_compute_eflags(s);
1045
1046 case CC_OP_EFLAGS:
1047 case CC_OP_ADCX:
1048 case CC_OP_ADOX:
1049 case CC_OP_ADCOX:
1050 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1051 .mask = CC_Z };
1052 case CC_OP_CLR:
1053 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
1054 case CC_OP_POPCNT:
1055 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
1056 .mask = -1 };
1057 default:
1058 {
1059 MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1060 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
1061 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1062 }
1063 }
1064}
1065
1066
1067
1068static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1069{
1070 int inv, jcc_op, cond;
1071 MemOp size;
1072 CCPrepare cc;
1073 TCGv t0;
1074
1075 inv = b & 1;
1076 jcc_op = (b >> 1) & 7;
1077
1078 switch (s->cc_op) {
1079 case CC_OP_SUBB ... CC_OP_SUBQ:
1080
1081 size = s->cc_op - CC_OP_SUBB;
1082 switch (jcc_op) {
1083 case JCC_BE:
1084 tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1085 gen_extu(size, s->tmp4);
1086 t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
1087 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
1088 .reg2 = t0, .mask = -1, .use_reg2 = true };
1089 break;
1090
1091 case JCC_L:
1092 cond = TCG_COND_LT;
1093 goto fast_jcc_l;
1094 case JCC_LE:
1095 cond = TCG_COND_LE;
1096 fast_jcc_l:
1097 tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1098 gen_exts(size, s->tmp4);
1099 t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
1100 cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
1101 .reg2 = t0, .mask = -1, .use_reg2 = true };
1102 break;
1103
1104 default:
1105 goto slow_jcc;
1106 }
1107 break;
1108
1109 default:
1110 slow_jcc:
1111
1112 switch (jcc_op) {
1113 case JCC_O:
1114 cc = gen_prepare_eflags_o(s, reg);
1115 break;
1116 case JCC_B:
1117 cc = gen_prepare_eflags_c(s, reg);
1118 break;
1119 case JCC_Z:
1120 cc = gen_prepare_eflags_z(s, reg);
1121 break;
1122 case JCC_BE:
1123 gen_compute_eflags(s);
1124 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1125 .mask = CC_Z | CC_C };
1126 break;
1127 case JCC_S:
1128 cc = gen_prepare_eflags_s(s, reg);
1129 break;
1130 case JCC_P:
1131 cc = gen_prepare_eflags_p(s, reg);
1132 break;
1133 case JCC_L:
1134 gen_compute_eflags(s);
1135 if (reg == cpu_cc_src) {
1136 reg = s->tmp0;
1137 }
1138 tcg_gen_shri_tl(reg, cpu_cc_src, 4);
1139 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1140 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1141 .mask = CC_S };
1142 break;
1143 default:
1144 case JCC_LE:
1145 gen_compute_eflags(s);
1146 if (reg == cpu_cc_src) {
1147 reg = s->tmp0;
1148 }
1149 tcg_gen_shri_tl(reg, cpu_cc_src, 4);
1150 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1151 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1152 .mask = CC_S | CC_Z };
1153 break;
1154 }
1155 break;
1156 }
1157
1158 if (inv) {
1159 cc.cond = tcg_invert_cond(cc.cond);
1160 }
1161 return cc;
1162}
1163
1164static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1165{
1166 CCPrepare cc = gen_prepare_cc(s, b, reg);
1167
1168 if (cc.no_setcond) {
1169 if (cc.cond == TCG_COND_EQ) {
1170 tcg_gen_xori_tl(reg, cc.reg, 1);
1171 } else {
1172 tcg_gen_mov_tl(reg, cc.reg);
1173 }
1174 return;
1175 }
1176
1177 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1178 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1179 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1180 tcg_gen_andi_tl(reg, reg, 1);
1181 return;
1182 }
1183 if (cc.mask != -1) {
1184 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1185 cc.reg = reg;
1186 }
1187 if (cc.use_reg2) {
1188 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1189 } else {
1190 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1191 }
1192}
1193
1194static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1195{
1196 gen_setcc1(s, JCC_B << 1, reg);
1197}
1198
1199
1200
1201static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1202{
1203 CCPrepare cc = gen_prepare_cc(s, b, s->T0);
1204
1205 if (cc.mask != -1) {
1206 tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1207 cc.reg = s->T0;
1208 }
1209 if (cc.use_reg2) {
1210 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1211 } else {
1212 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1213 }
1214}
1215
1216
1217
1218
1219static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1220{
1221 CCPrepare cc = gen_prepare_cc(s, b, s->T0);
1222
1223 gen_update_cc_op(s);
1224 if (cc.mask != -1) {
1225 tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1226 cc.reg = s->T0;
1227 }
1228 set_cc_op(s, CC_OP_DYNAMIC);
1229 if (cc.use_reg2) {
1230 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1231 } else {
1232 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1233 }
1234}
1235
1236
1237
1238static TCGLabel *gen_jz_ecx_string(DisasContext *s)
1239{
1240 TCGLabel *l1 = gen_new_label();
1241 TCGLabel *l2 = gen_new_label();
1242 gen_op_jnz_ecx(s, l1);
1243 gen_set_label(l2);
1244 gen_jmp_rel_csize(s, 0, 1);
1245 gen_set_label(l1);
1246 return l2;
1247}
1248
1249static void gen_stos(DisasContext *s, MemOp ot)
1250{
1251 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
1252 gen_string_movl_A0_EDI(s);
1253 gen_op_st_v(s, ot, s->T0, s->A0);
1254 gen_op_movl_T0_Dshift(s, ot);
1255 gen_op_add_reg_T0(s, s->aflag, R_EDI);
1256}
1257
1258static void gen_lods(DisasContext *s, MemOp ot)
1259{
1260 gen_string_movl_A0_ESI(s);
1261 gen_op_ld_v(s, ot, s->T0, s->A0);
1262 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1263 gen_op_movl_T0_Dshift(s, ot);
1264 gen_op_add_reg_T0(s, s->aflag, R_ESI);
1265}
1266
1267static void gen_scas(DisasContext *s, MemOp ot)
1268{
1269 gen_string_movl_A0_EDI(s);
1270 gen_op_ld_v(s, ot, s->T1, s->A0);
1271 gen_op(s, OP_CMPL, ot, R_EAX);
1272 gen_op_movl_T0_Dshift(s, ot);
1273 gen_op_add_reg_T0(s, s->aflag, R_EDI);
1274}
1275
1276static void gen_cmps(DisasContext *s, MemOp ot)
1277{
1278 gen_string_movl_A0_EDI(s);
1279 gen_op_ld_v(s, ot, s->T1, s->A0);
1280 gen_string_movl_A0_ESI(s);
1281 gen_op(s, OP_CMPL, ot, OR_TMP0);
1282 gen_op_movl_T0_Dshift(s, ot);
1283 gen_op_add_reg_T0(s, s->aflag, R_ESI);
1284 gen_op_add_reg_T0(s, s->aflag, R_EDI);
1285}
1286
1287static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1288{
1289 if (s->flags & HF_IOBPT_MASK) {
1290#ifdef CONFIG_USER_ONLY
1291
1292 g_assert_not_reached();
1293#else
1294 TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1295 TCGv t_next = eip_next_tl(s);
1296 gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
1297#endif
1298 }
1299}
1300
1301static void gen_ins(DisasContext *s, MemOp ot)
1302{
1303 gen_string_movl_A0_EDI(s);
1304
1305
1306 tcg_gen_movi_tl(s->T0, 0);
1307 gen_op_st_v(s, ot, s->T0, s->A0);
1308 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1309 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1310 gen_helper_in_func(ot, s->T0, s->tmp2_i32);
1311 gen_op_st_v(s, ot, s->T0, s->A0);
1312 gen_op_movl_T0_Dshift(s, ot);
1313 gen_op_add_reg_T0(s, s->aflag, R_EDI);
1314 gen_bpt_io(s, s->tmp2_i32, ot);
1315}
1316
1317static void gen_outs(DisasContext *s, MemOp ot)
1318{
1319 gen_string_movl_A0_ESI(s);
1320 gen_op_ld_v(s, ot, s->T0, s->A0);
1321
1322 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1323 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1324 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1325 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
1326 gen_op_movl_T0_Dshift(s, ot);
1327 gen_op_add_reg_T0(s, s->aflag, R_ESI);
1328 gen_bpt_io(s, s->tmp2_i32, ot);
1329}
1330
1331
1332static void gen_repz(DisasContext *s, MemOp ot,
1333 void (*fn)(DisasContext *s, MemOp ot))
1334{
1335 TCGLabel *l2;
1336 gen_update_cc_op(s);
1337 l2 = gen_jz_ecx_string(s);
1338 fn(s, ot);
1339 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1340
1341
1342
1343
1344 if (s->repz_opt) {
1345 gen_op_jz_ecx(s, l2);
1346 }
1347 gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1348}
1349
1350#define GEN_REPZ(op) \
1351 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
1352 { gen_repz(s, ot, gen_##op); }
1353
1354static void gen_repz2(DisasContext *s, MemOp ot, int nz,
1355 void (*fn)(DisasContext *s, MemOp ot))
1356{
1357 TCGLabel *l2;
1358 gen_update_cc_op(s);
1359 l2 = gen_jz_ecx_string(s);
1360 fn(s, ot);
1361 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1362 gen_update_cc_op(s);
1363 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
1364 if (s->repz_opt) {
1365 gen_op_jz_ecx(s, l2);
1366 }
1367 gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1368}
1369
1370#define GEN_REPZ2(op) \
1371 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
1372 { gen_repz2(s, ot, nz, gen_##op); }
1373
1374GEN_REPZ(movs)
1375GEN_REPZ(stos)
1376GEN_REPZ(lods)
1377GEN_REPZ(ins)
1378GEN_REPZ(outs)
1379GEN_REPZ2(scas)
1380GEN_REPZ2(cmps)
1381
1382static void gen_helper_fp_arith_ST0_FT0(int op)
1383{
1384 switch (op) {
1385 case 0:
1386 gen_helper_fadd_ST0_FT0(cpu_env);
1387 break;
1388 case 1:
1389 gen_helper_fmul_ST0_FT0(cpu_env);
1390 break;
1391 case 2:
1392 gen_helper_fcom_ST0_FT0(cpu_env);
1393 break;
1394 case 3:
1395 gen_helper_fcom_ST0_FT0(cpu_env);
1396 break;
1397 case 4:
1398 gen_helper_fsub_ST0_FT0(cpu_env);
1399 break;
1400 case 5:
1401 gen_helper_fsubr_ST0_FT0(cpu_env);
1402 break;
1403 case 6:
1404 gen_helper_fdiv_ST0_FT0(cpu_env);
1405 break;
1406 case 7:
1407 gen_helper_fdivr_ST0_FT0(cpu_env);
1408 break;
1409 }
1410}
1411
1412
1413static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1414{
1415 TCGv_i32 tmp = tcg_constant_i32(opreg);
1416 switch (op) {
1417 case 0:
1418 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1419 break;
1420 case 1:
1421 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1422 break;
1423 case 4:
1424 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1425 break;
1426 case 5:
1427 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1428 break;
1429 case 6:
1430 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1431 break;
1432 case 7:
1433 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1434 break;
1435 }
1436}
1437
1438static void gen_exception(DisasContext *s, int trapno)
1439{
1440 gen_update_cc_op(s);
1441 gen_update_eip_cur(s);
1442 gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
1443 s->base.is_jmp = DISAS_NORETURN;
1444}
1445
1446
1447
1448static void gen_illegal_opcode(DisasContext *s)
1449{
1450 gen_exception(s, EXCP06_ILLOP);
1451}
1452
1453
1454static void gen_exception_gpf(DisasContext *s)
1455{
1456 gen_exception(s, EXCP0D_GPF);
1457}
1458
1459
1460static bool check_cpl0(DisasContext *s)
1461{
1462 if (CPL(s) == 0) {
1463 return true;
1464 }
1465 gen_exception_gpf(s);
1466 return false;
1467}
1468
1469
1470static bool check_vm86_iopl(DisasContext *s)
1471{
1472 if (!VM86(s) || IOPL(s) == 3) {
1473 return true;
1474 }
1475 gen_exception_gpf(s);
1476 return false;
1477}
1478
1479
1480static bool check_iopl(DisasContext *s)
1481{
1482 if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
1483 return true;
1484 }
1485 gen_exception_gpf(s);
1486 return false;
1487}
1488
1489
1490static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
1491{
1492 if (d != OR_TMP0) {
1493 if (s1->prefix & PREFIX_LOCK) {
1494
1495 gen_illegal_opcode(s1);
1496 return;
1497 }
1498 gen_op_mov_v_reg(s1, ot, s1->T0, d);
1499 } else if (!(s1->prefix & PREFIX_LOCK)) {
1500 gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1501 }
1502 switch(op) {
1503 case OP_ADCL:
1504 gen_compute_eflags_c(s1, s1->tmp4);
1505 if (s1->prefix & PREFIX_LOCK) {
1506 tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
1507 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1508 s1->mem_index, ot | MO_LE);
1509 } else {
1510 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
1511 tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
1512 gen_op_st_rm_T0_A0(s1, ot, d);
1513 }
1514 gen_op_update3_cc(s1, s1->tmp4);
1515 set_cc_op(s1, CC_OP_ADCB + ot);
1516 break;
1517 case OP_SBBL:
1518 gen_compute_eflags_c(s1, s1->tmp4);
1519 if (s1->prefix & PREFIX_LOCK) {
1520 tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
1521 tcg_gen_neg_tl(s1->T0, s1->T0);
1522 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1523 s1->mem_index, ot | MO_LE);
1524 } else {
1525 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1526 tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
1527 gen_op_st_rm_T0_A0(s1, ot, d);
1528 }
1529 gen_op_update3_cc(s1, s1->tmp4);
1530 set_cc_op(s1, CC_OP_SBBB + ot);
1531 break;
1532 case OP_ADDL:
1533 if (s1->prefix & PREFIX_LOCK) {
1534 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
1535 s1->mem_index, ot | MO_LE);
1536 } else {
1537 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
1538 gen_op_st_rm_T0_A0(s1, ot, d);
1539 }
1540 gen_op_update2_cc(s1);
1541 set_cc_op(s1, CC_OP_ADDB + ot);
1542 break;
1543 case OP_SUBL:
1544 if (s1->prefix & PREFIX_LOCK) {
1545 tcg_gen_neg_tl(s1->T0, s1->T1);
1546 tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
1547 s1->mem_index, ot | MO_LE);
1548 tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
1549 } else {
1550 tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1551 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1552 gen_op_st_rm_T0_A0(s1, ot, d);
1553 }
1554 gen_op_update2_cc(s1);
1555 set_cc_op(s1, CC_OP_SUBB + ot);
1556 break;
1557 default:
1558 case OP_ANDL:
1559 if (s1->prefix & PREFIX_LOCK) {
1560 tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
1561 s1->mem_index, ot | MO_LE);
1562 } else {
1563 tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
1564 gen_op_st_rm_T0_A0(s1, ot, d);
1565 }
1566 gen_op_update1_cc(s1);
1567 set_cc_op(s1, CC_OP_LOGICB + ot);
1568 break;
1569 case OP_ORL:
1570 if (s1->prefix & PREFIX_LOCK) {
1571 tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
1572 s1->mem_index, ot | MO_LE);
1573 } else {
1574 tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
1575 gen_op_st_rm_T0_A0(s1, ot, d);
1576 }
1577 gen_op_update1_cc(s1);
1578 set_cc_op(s1, CC_OP_LOGICB + ot);
1579 break;
1580 case OP_XORL:
1581 if (s1->prefix & PREFIX_LOCK) {
1582 tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
1583 s1->mem_index, ot | MO_LE);
1584 } else {
1585 tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
1586 gen_op_st_rm_T0_A0(s1, ot, d);
1587 }
1588 gen_op_update1_cc(s1);
1589 set_cc_op(s1, CC_OP_LOGICB + ot);
1590 break;
1591 case OP_CMPL:
1592 tcg_gen_mov_tl(cpu_cc_src, s1->T1);
1593 tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1594 tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
1595 set_cc_op(s1, CC_OP_SUBB + ot);
1596 break;
1597 }
1598}
1599
1600
1601static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
1602{
1603 if (s1->prefix & PREFIX_LOCK) {
1604 if (d != OR_TMP0) {
1605
1606 gen_illegal_opcode(s1);
1607 return;
1608 }
1609 tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
1610 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1611 s1->mem_index, ot | MO_LE);
1612 } else {
1613 if (d != OR_TMP0) {
1614 gen_op_mov_v_reg(s1, ot, s1->T0, d);
1615 } else {
1616 gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1617 }
1618 tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
1619 gen_op_st_rm_T0_A0(s1, ot, d);
1620 }
1621
1622 gen_compute_eflags_c(s1, cpu_cc_src);
1623 tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
1624 set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
1625}
1626
1627static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
1628 TCGv shm1, TCGv count, bool is_right)
1629{
1630 TCGv_i32 z32, s32, oldop;
1631 TCGv z_tl;
1632
1633
1634
1635
1636 z_tl = tcg_constant_tl(0);
1637 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1638 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1639 result, cpu_cc_dst);
1640 } else {
1641 tcg_gen_mov_tl(cpu_cc_dst, result);
1642 }
1643 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1644 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1645 shm1, cpu_cc_src);
1646 } else {
1647 tcg_gen_mov_tl(cpu_cc_src, shm1);
1648 }
1649
1650
1651 tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1652 if (s->cc_op == CC_OP_DYNAMIC) {
1653 oldop = cpu_cc_op;
1654 } else {
1655 tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
1656 oldop = s->tmp3_i32;
1657 }
1658
1659
1660 z32 = tcg_constant_i32(0);
1661 s32 = tcg_temp_new_i32();
1662 tcg_gen_trunc_tl_i32(s32, count);
1663 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
1664
1665
1666 set_cc_op(s, CC_OP_DYNAMIC);
1667}
1668
1669static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
1670 int is_right, int is_arith)
1671{
1672 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1673
1674
1675 if (op1 == OR_TMP0) {
1676 gen_op_ld_v(s, ot, s->T0, s->A0);
1677 } else {
1678 gen_op_mov_v_reg(s, ot, s->T0, op1);
1679 }
1680
1681 tcg_gen_andi_tl(s->T1, s->T1, mask);
1682 tcg_gen_subi_tl(s->tmp0, s->T1, 1);
1683
1684 if (is_right) {
1685 if (is_arith) {
1686 gen_exts(ot, s->T0);
1687 tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
1688 tcg_gen_sar_tl(s->T0, s->T0, s->T1);
1689 } else {
1690 gen_extu(ot, s->T0);
1691 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1692 tcg_gen_shr_tl(s->T0, s->T0, s->T1);
1693 }
1694 } else {
1695 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1696 tcg_gen_shl_tl(s->T0, s->T0, s->T1);
1697 }
1698
1699
1700 gen_op_st_rm_T0_A0(s, ot, op1);
1701
1702 gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
1703}
1704
1705static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1706 int is_right, int is_arith)
1707{
1708 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1709
1710
1711 if (op1 == OR_TMP0)
1712 gen_op_ld_v(s, ot, s->T0, s->A0);
1713 else
1714 gen_op_mov_v_reg(s, ot, s->T0, op1);
1715
1716 op2 &= mask;
1717 if (op2 != 0) {
1718 if (is_right) {
1719 if (is_arith) {
1720 gen_exts(ot, s->T0);
1721 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
1722 tcg_gen_sari_tl(s->T0, s->T0, op2);
1723 } else {
1724 gen_extu(ot, s->T0);
1725 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
1726 tcg_gen_shri_tl(s->T0, s->T0, op2);
1727 }
1728 } else {
1729 tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
1730 tcg_gen_shli_tl(s->T0, s->T0, op2);
1731 }
1732 }
1733
1734
1735 gen_op_st_rm_T0_A0(s, ot, op1);
1736
1737
1738 if (op2 != 0) {
1739 tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1740 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1741 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1742 }
1743}
1744
1745static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
1746{
1747 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1748 TCGv_i32 t0, t1;
1749
1750
1751 if (op1 == OR_TMP0) {
1752 gen_op_ld_v(s, ot, s->T0, s->A0);
1753 } else {
1754 gen_op_mov_v_reg(s, ot, s->T0, op1);
1755 }
1756
1757 tcg_gen_andi_tl(s->T1, s->T1, mask);
1758
1759 switch (ot) {
1760 case MO_8:
1761
1762 tcg_gen_ext8u_tl(s->T0, s->T0);
1763 tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
1764 goto do_long;
1765 case MO_16:
1766
1767 tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
1768 goto do_long;
1769 do_long:
1770#ifdef TARGET_X86_64
1771 case MO_32:
1772 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1773 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
1774 if (is_right) {
1775 tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1776 } else {
1777 tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1778 }
1779 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1780 break;
1781#endif
1782 default:
1783 if (is_right) {
1784 tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
1785 } else {
1786 tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
1787 }
1788 break;
1789 }
1790
1791
1792 gen_op_st_rm_T0_A0(s, ot, op1);
1793
1794
1795 gen_compute_eflags(s);
1796
1797
1798
1799
1800
1801 if (is_right) {
1802 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1803 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1804 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1805 } else {
1806 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1807 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1808 }
1809 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1810 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1811
1812
1813
1814
1815
1816 t0 = tcg_constant_i32(0);
1817 t1 = tcg_temp_new_i32();
1818 tcg_gen_trunc_tl_i32(t1, s->T1);
1819 tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
1820 tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
1821 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1822 s->tmp2_i32, s->tmp3_i32);
1823
1824
1825 set_cc_op(s, CC_OP_DYNAMIC);
1826}
1827
1828static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1829 int is_right)
1830{
1831 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1832 int shift;
1833
1834
1835 if (op1 == OR_TMP0) {
1836 gen_op_ld_v(s, ot, s->T0, s->A0);
1837 } else {
1838 gen_op_mov_v_reg(s, ot, s->T0, op1);
1839 }
1840
1841 op2 &= mask;
1842 if (op2 != 0) {
1843 switch (ot) {
1844#ifdef TARGET_X86_64
1845 case MO_32:
1846 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1847 if (is_right) {
1848 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
1849 } else {
1850 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
1851 }
1852 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1853 break;
1854#endif
1855 default:
1856 if (is_right) {
1857 tcg_gen_rotri_tl(s->T0, s->T0, op2);
1858 } else {
1859 tcg_gen_rotli_tl(s->T0, s->T0, op2);
1860 }
1861 break;
1862 case MO_8:
1863 mask = 7;
1864 goto do_shifts;
1865 case MO_16:
1866 mask = 15;
1867 do_shifts:
1868 shift = op2 & mask;
1869 if (is_right) {
1870 shift = mask + 1 - shift;
1871 }
1872 gen_extu(ot, s->T0);
1873 tcg_gen_shli_tl(s->tmp0, s->T0, shift);
1874 tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
1875 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
1876 break;
1877 }
1878 }
1879
1880
1881 gen_op_st_rm_T0_A0(s, ot, op1);
1882
1883 if (op2 != 0) {
1884
1885 gen_compute_eflags(s);
1886
1887
1888
1889
1890
1891 if (is_right) {
1892 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1893 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1894 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1895 } else {
1896 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1897 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1898 }
1899 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1900 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1901 set_cc_op(s, CC_OP_ADCOX);
1902 }
1903}
1904
1905
1906static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1907 int is_right)
1908{
1909 gen_compute_eflags(s);
1910 assert(s->cc_op == CC_OP_EFLAGS);
1911
1912
1913 if (op1 == OR_TMP0)
1914 gen_op_ld_v(s, ot, s->T0, s->A0);
1915 else
1916 gen_op_mov_v_reg(s, ot, s->T0, op1);
1917
1918 if (is_right) {
1919 switch (ot) {
1920 case MO_8:
1921 gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
1922 break;
1923 case MO_16:
1924 gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
1925 break;
1926 case MO_32:
1927 gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
1928 break;
1929#ifdef TARGET_X86_64
1930 case MO_64:
1931 gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
1932 break;
1933#endif
1934 default:
1935 tcg_abort();
1936 }
1937 } else {
1938 switch (ot) {
1939 case MO_8:
1940 gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
1941 break;
1942 case MO_16:
1943 gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
1944 break;
1945 case MO_32:
1946 gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
1947 break;
1948#ifdef TARGET_X86_64
1949 case MO_64:
1950 gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
1951 break;
1952#endif
1953 default:
1954 tcg_abort();
1955 }
1956 }
1957
1958 gen_op_st_rm_T0_A0(s, ot, op1);
1959}
1960
1961
1962static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
1963 bool is_right, TCGv count_in)
1964{
1965 target_ulong mask = (ot == MO_64 ? 63 : 31);
1966 TCGv count;
1967
1968
1969 if (op1 == OR_TMP0) {
1970 gen_op_ld_v(s, ot, s->T0, s->A0);
1971 } else {
1972 gen_op_mov_v_reg(s, ot, s->T0, op1);
1973 }
1974
1975 count = tcg_temp_new();
1976 tcg_gen_andi_tl(count, count_in, mask);
1977
1978 switch (ot) {
1979 case MO_16:
1980
1981
1982
1983 if (is_right) {
1984 tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1985 tcg_gen_mov_tl(s->T1, s->T0);
1986 tcg_gen_mov_tl(s->T0, s->tmp0);
1987 } else {
1988 tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1989 }
1990
1991
1992
1993
1994 case MO_32:
1995#ifdef TARGET_X86_64
1996
1997 tcg_gen_subi_tl(s->tmp0, count, 1);
1998 if (is_right) {
1999 tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
2000 tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
2001 tcg_gen_shr_i64(s->T0, s->T0, count);
2002 } else {
2003 tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
2004 tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
2005 tcg_gen_shl_i64(s->T0, s->T0, count);
2006 tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
2007 tcg_gen_shri_i64(s->T0, s->T0, 32);
2008 }
2009 break;
2010#endif
2011 default:
2012 tcg_gen_subi_tl(s->tmp0, count, 1);
2013 if (is_right) {
2014 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
2015
2016 tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2017 tcg_gen_shr_tl(s->T0, s->T0, count);
2018 tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
2019 } else {
2020 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
2021 if (ot == MO_16) {
2022
2023 tcg_gen_subfi_tl(s->tmp4, 33, count);
2024 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2025 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
2026 }
2027
2028 tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2029 tcg_gen_shl_tl(s->T0, s->T0, count);
2030 tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
2031 }
2032 tcg_gen_movi_tl(s->tmp4, 0);
2033 tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2034 s->tmp4, s->T1);
2035 tcg_gen_or_tl(s->T0, s->T0, s->T1);
2036 break;
2037 }
2038
2039
2040 gen_op_st_rm_T0_A0(s, ot, op1);
2041
2042 gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
2043}
2044
2045static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2046{
2047 if (s != OR_TMP1)
2048 gen_op_mov_v_reg(s1, ot, s1->T1, s);
2049 switch(op) {
2050 case OP_ROL:
2051 gen_rot_rm_T1(s1, ot, d, 0);
2052 break;
2053 case OP_ROR:
2054 gen_rot_rm_T1(s1, ot, d, 1);
2055 break;
2056 case OP_SHL:
2057 case OP_SHL1:
2058 gen_shift_rm_T1(s1, ot, d, 0, 0);
2059 break;
2060 case OP_SHR:
2061 gen_shift_rm_T1(s1, ot, d, 1, 0);
2062 break;
2063 case OP_SAR:
2064 gen_shift_rm_T1(s1, ot, d, 1, 1);
2065 break;
2066 case OP_RCL:
2067 gen_rotc_rm_T1(s1, ot, d, 0);
2068 break;
2069 case OP_RCR:
2070 gen_rotc_rm_T1(s1, ot, d, 1);
2071 break;
2072 }
2073}
2074
2075static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2076{
2077 switch(op) {
2078 case OP_ROL:
2079 gen_rot_rm_im(s1, ot, d, c, 0);
2080 break;
2081 case OP_ROR:
2082 gen_rot_rm_im(s1, ot, d, c, 1);
2083 break;
2084 case OP_SHL:
2085 case OP_SHL1:
2086 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2087 break;
2088 case OP_SHR:
2089 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2090 break;
2091 case OP_SAR:
2092 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2093 break;
2094 default:
2095
2096 tcg_gen_movi_tl(s1->T1, c);
2097 gen_shift(s1, op, ot, d, OR_TMP1);
2098 break;
2099 }
2100}
2101
2102#define X86_MAX_INSN_LENGTH 15
2103
2104static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2105{
2106 uint64_t pc = s->pc;
2107
2108
2109 if (s->base.num_insns > 1 &&
2110 !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2111 siglongjmp(s->jmpbuf, 2);
2112 }
2113
2114 s->pc += num_bytes;
2115 if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
2116
2117
2118
2119
2120
2121 if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2122 volatile uint8_t unused =
2123 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2124 (void) unused;
2125 }
2126 siglongjmp(s->jmpbuf, 1);
2127 }
2128
2129 return pc;
2130}
2131
2132static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2133{
2134 return translator_ldub(env, &s->base, advance_pc(env, s, 1));
2135}
2136
2137static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2138{
2139 return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2140}
2141
2142static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2143{
2144 return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2145}
2146
2147static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2148{
2149 return translator_ldl(env, &s->base, advance_pc(env, s, 4));
2150}
2151
2152#ifdef TARGET_X86_64
2153static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2154{
2155 return translator_ldq(env, &s->base, advance_pc(env, s, 8));
2156}
2157#endif
2158
2159
2160
2161typedef struct AddressParts {
2162 int def_seg;
2163 int base;
2164 int index;
2165 int scale;
2166 target_long disp;
2167} AddressParts;
2168
2169static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2170 int modrm)
2171{
2172 int def_seg, base, index, scale, mod, rm;
2173 target_long disp;
2174 bool havesib;
2175
2176 def_seg = R_DS;
2177 index = -1;
2178 scale = 0;
2179 disp = 0;
2180
2181 mod = (modrm >> 6) & 3;
2182 rm = modrm & 7;
2183 base = rm | REX_B(s);
2184
2185 if (mod == 3) {
2186
2187
2188 goto done;
2189 }
2190
2191 switch (s->aflag) {
2192 case MO_64:
2193 case MO_32:
2194 havesib = 0;
2195 if (rm == 4) {
2196 int code = x86_ldub_code(env, s);
2197 scale = (code >> 6) & 3;
2198 index = ((code >> 3) & 7) | REX_X(s);
2199 if (index == 4) {
2200 index = -1;
2201 }
2202 base = (code & 7) | REX_B(s);
2203 havesib = 1;
2204 }
2205
2206 switch (mod) {
2207 case 0:
2208 if ((base & 7) == 5) {
2209 base = -1;
2210 disp = (int32_t)x86_ldl_code(env, s);
2211 if (CODE64(s) && !havesib) {
2212 base = -2;
2213 disp += s->pc + s->rip_offset;
2214 }
2215 }
2216 break;
2217 case 1:
2218 disp = (int8_t)x86_ldub_code(env, s);
2219 break;
2220 default:
2221 case 2:
2222 disp = (int32_t)x86_ldl_code(env, s);
2223 break;
2224 }
2225
2226
2227 if (base == R_ESP && s->popl_esp_hack) {
2228 disp += s->popl_esp_hack;
2229 }
2230 if (base == R_EBP || base == R_ESP) {
2231 def_seg = R_SS;
2232 }
2233 break;
2234
2235 case MO_16:
2236 if (mod == 0) {
2237 if (rm == 6) {
2238 base = -1;
2239 disp = x86_lduw_code(env, s);
2240 break;
2241 }
2242 } else if (mod == 1) {
2243 disp = (int8_t)x86_ldub_code(env, s);
2244 } else {
2245 disp = (int16_t)x86_lduw_code(env, s);
2246 }
2247
2248 switch (rm) {
2249 case 0:
2250 base = R_EBX;
2251 index = R_ESI;
2252 break;
2253 case 1:
2254 base = R_EBX;
2255 index = R_EDI;
2256 break;
2257 case 2:
2258 base = R_EBP;
2259 index = R_ESI;
2260 def_seg = R_SS;
2261 break;
2262 case 3:
2263 base = R_EBP;
2264 index = R_EDI;
2265 def_seg = R_SS;
2266 break;
2267 case 4:
2268 base = R_ESI;
2269 break;
2270 case 5:
2271 base = R_EDI;
2272 break;
2273 case 6:
2274 base = R_EBP;
2275 def_seg = R_SS;
2276 break;
2277 default:
2278 case 7:
2279 base = R_EBX;
2280 break;
2281 }
2282 break;
2283
2284 default:
2285 tcg_abort();
2286 }
2287
2288 done:
2289 return (AddressParts){ def_seg, base, index, scale, disp };
2290}
2291
2292
2293static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
2294{
2295 TCGv ea = NULL;
2296
2297 if (a.index >= 0 && !is_vsib) {
2298 if (a.scale == 0) {
2299 ea = cpu_regs[a.index];
2300 } else {
2301 tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2302 ea = s->A0;
2303 }
2304 if (a.base >= 0) {
2305 tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2306 ea = s->A0;
2307 }
2308 } else if (a.base >= 0) {
2309 ea = cpu_regs[a.base];
2310 }
2311 if (!ea) {
2312 if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
2313
2314 tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2315 } else {
2316 tcg_gen_movi_tl(s->A0, a.disp);
2317 }
2318 ea = s->A0;
2319 } else if (a.disp != 0) {
2320 tcg_gen_addi_tl(s->A0, ea, a.disp);
2321 ea = s->A0;
2322 }
2323
2324 return ea;
2325}
2326
2327static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2328{
2329 AddressParts a = gen_lea_modrm_0(env, s, modrm);
2330 TCGv ea = gen_lea_modrm_1(s, a, false);
2331 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2332}
2333
2334static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2335{
2336 (void)gen_lea_modrm_0(env, s, modrm);
2337}
2338
2339
2340static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2341 TCGCond cond, TCGv_i64 bndv)
2342{
2343 AddressParts a = gen_lea_modrm_0(env, s, modrm);
2344 TCGv ea = gen_lea_modrm_1(s, a, false);
2345
2346 tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
2347 if (!CODE64(s)) {
2348 tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
2349 }
2350 tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2351 tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
2352 gen_helper_bndck(cpu_env, s->tmp2_i32);
2353}
2354
2355
2356static void gen_add_A0_ds_seg(DisasContext *s)
2357{
2358 gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
2359}
2360
2361
2362
2363static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2364 MemOp ot, int reg, int is_store)
2365{
2366 int mod, rm;
2367
2368 mod = (modrm >> 6) & 3;
2369 rm = (modrm & 7) | REX_B(s);
2370 if (mod == 3) {
2371 if (is_store) {
2372 if (reg != OR_TMP0)
2373 gen_op_mov_v_reg(s, ot, s->T0, reg);
2374 gen_op_mov_reg_v(s, ot, rm, s->T0);
2375 } else {
2376 gen_op_mov_v_reg(s, ot, s->T0, rm);
2377 if (reg != OR_TMP0)
2378 gen_op_mov_reg_v(s, ot, reg, s->T0);
2379 }
2380 } else {
2381 gen_lea_modrm(env, s, modrm);
2382 if (is_store) {
2383 if (reg != OR_TMP0)
2384 gen_op_mov_v_reg(s, ot, s->T0, reg);
2385 gen_op_st_v(s, ot, s->T0, s->A0);
2386 } else {
2387 gen_op_ld_v(s, ot, s->T0, s->A0);
2388 if (reg != OR_TMP0)
2389 gen_op_mov_reg_v(s, ot, reg, s->T0);
2390 }
2391 }
2392}
2393
2394static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2395{
2396 target_ulong ret;
2397
2398 switch (ot) {
2399 case MO_8:
2400 ret = x86_ldub_code(env, s);
2401 break;
2402 case MO_16:
2403 ret = x86_lduw_code(env, s);
2404 break;
2405 case MO_32:
2406 ret = x86_ldl_code(env, s);
2407 break;
2408#ifdef TARGET_X86_64
2409 case MO_64:
2410 ret = x86_ldq_code(env, s);
2411 break;
2412#endif
2413 default:
2414 g_assert_not_reached();
2415 }
2416 return ret;
2417}
2418
2419static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2420{
2421 uint32_t ret;
2422
2423 switch (ot) {
2424 case MO_8:
2425 ret = x86_ldub_code(env, s);
2426 break;
2427 case MO_16:
2428 ret = x86_lduw_code(env, s);
2429 break;
2430 case MO_32:
2431#ifdef TARGET_X86_64
2432 case MO_64:
2433#endif
2434 ret = x86_ldl_code(env, s);
2435 break;
2436 default:
2437 tcg_abort();
2438 }
2439 return ret;
2440}
2441
2442static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2443{
2444 target_long ret;
2445
2446 switch (ot) {
2447 case MO_8:
2448 ret = (int8_t) x86_ldub_code(env, s);
2449 break;
2450 case MO_16:
2451 ret = (int16_t) x86_lduw_code(env, s);
2452 break;
2453 case MO_32:
2454 ret = (int32_t) x86_ldl_code(env, s);
2455 break;
2456#ifdef TARGET_X86_64
2457 case MO_64:
2458 ret = x86_ldq_code(env, s);
2459 break;
2460#endif
2461 default:
2462 g_assert_not_reached();
2463 }
2464 return ret;
2465}
2466
2467static inline int insn_const_size(MemOp ot)
2468{
2469 if (ot <= MO_32) {
2470 return 1 << ot;
2471 } else {
2472 return 4;
2473 }
2474}
2475
2476static void gen_jcc(DisasContext *s, int b, int diff)
2477{
2478 TCGLabel *l1 = gen_new_label();
2479
2480 gen_jcc1(s, b, l1);
2481 gen_jmp_rel_csize(s, 0, 1);
2482 gen_set_label(l1);
2483 gen_jmp_rel(s, s->dflag, diff, 0);
2484}
2485
2486static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
2487 int modrm, int reg)
2488{
2489 CCPrepare cc;
2490
2491 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2492
2493 cc = gen_prepare_cc(s, b, s->T1);
2494 if (cc.mask != -1) {
2495 TCGv t0 = tcg_temp_new();
2496 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2497 cc.reg = t0;
2498 }
2499 if (!cc.use_reg2) {
2500 cc.reg2 = tcg_constant_tl(cc.imm);
2501 }
2502
2503 tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
2504 s->T0, cpu_regs[reg]);
2505 gen_op_mov_reg_v(s, ot, reg, s->T0);
2506}
2507
2508static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
2509{
2510 tcg_gen_ld32u_tl(s->T0, cpu_env,
2511 offsetof(CPUX86State,segs[seg_reg].selector));
2512}
2513
2514static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
2515{
2516 tcg_gen_ext16u_tl(s->T0, s->T0);
2517 tcg_gen_st32_tl(s->T0, cpu_env,
2518 offsetof(CPUX86State,segs[seg_reg].selector));
2519 tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
2520}
2521
2522
2523
2524static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2525{
2526 if (PE(s) && !VM86(s)) {
2527 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2528 gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
2529
2530
2531
2532
2533 if (seg_reg == R_SS) {
2534 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2535 } else if (CODE32(s) && seg_reg < R_FS) {
2536 s->base.is_jmp = DISAS_EOB_NEXT;
2537 }
2538 } else {
2539 gen_op_movl_seg_T0_vm(s, seg_reg);
2540 if (seg_reg == R_SS) {
2541 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2542 }
2543 }
2544}
2545
2546static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2547{
2548
2549 if (likely(!GUEST(s))) {
2550 return;
2551 }
2552 gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
2553}
2554
2555static inline void gen_stack_update(DisasContext *s, int addend)
2556{
2557 gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2558}
2559
2560
2561static void gen_push_v(DisasContext *s, TCGv val)
2562{
2563 MemOp d_ot = mo_pushpop(s, s->dflag);
2564 MemOp a_ot = mo_stacksize(s);
2565 int size = 1 << d_ot;
2566 TCGv new_esp = s->A0;
2567
2568 tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2569
2570 if (!CODE64(s)) {
2571 if (ADDSEG(s)) {
2572 new_esp = s->tmp4;
2573 tcg_gen_mov_tl(new_esp, s->A0);
2574 }
2575 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2576 }
2577
2578 gen_op_st_v(s, d_ot, val, s->A0);
2579 gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2580}
2581
2582
2583static MemOp gen_pop_T0(DisasContext *s)
2584{
2585 MemOp d_ot = mo_pushpop(s, s->dflag);
2586
2587 gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2588 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2589
2590 return d_ot;
2591}
2592
2593static inline void gen_pop_update(DisasContext *s, MemOp ot)
2594{
2595 gen_stack_update(s, 1 << ot);
2596}
2597
2598static inline void gen_stack_A0(DisasContext *s)
2599{
2600 gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2601}
2602
2603static void gen_pusha(DisasContext *s)
2604{
2605 MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2606 MemOp d_ot = s->dflag;
2607 int size = 1 << d_ot;
2608 int i;
2609
2610 for (i = 0; i < 8; i++) {
2611 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2612 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2613 gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2614 }
2615
2616 gen_stack_update(s, -8 * size);
2617}
2618
2619static void gen_popa(DisasContext *s)
2620{
2621 MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2622 MemOp d_ot = s->dflag;
2623 int size = 1 << d_ot;
2624 int i;
2625
2626 for (i = 0; i < 8; i++) {
2627
2628 if (7 - i == R_ESP) {
2629 continue;
2630 }
2631 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2632 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2633 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2634 gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2635 }
2636
2637 gen_stack_update(s, 8 * size);
2638}
2639
2640static void gen_enter(DisasContext *s, int esp_addend, int level)
2641{
2642 MemOp d_ot = mo_pushpop(s, s->dflag);
2643 MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2644 int size = 1 << d_ot;
2645
2646
2647 tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2648 gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2649 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2650
2651 level &= 31;
2652 if (level != 0) {
2653 int i;
2654
2655
2656 for (i = 1; i < level; ++i) {
2657 tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2658 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2659 gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2660
2661 tcg_gen_subi_tl(s->A0, s->T1, size * i);
2662 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2663 gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2664 }
2665
2666
2667 tcg_gen_subi_tl(s->A0, s->T1, size * level);
2668 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2669 gen_op_st_v(s, d_ot, s->T1, s->A0);
2670 }
2671
2672
2673 gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2674
2675
2676 tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2677 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2678}
2679
2680static void gen_leave(DisasContext *s)
2681{
2682 MemOp d_ot = mo_pushpop(s, s->dflag);
2683 MemOp a_ot = mo_stacksize(s);
2684
2685 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2686 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2687
2688 tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2689
2690 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2691 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2692}
2693
2694
2695
2696
2697static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2698{
2699 gen_illegal_opcode(s);
2700
2701 if (qemu_loglevel_mask(LOG_UNIMP)) {
2702 FILE *logfile = qemu_log_trylock();
2703 if (logfile) {
2704 target_ulong pc = s->base.pc_next, end = s->pc;
2705
2706 fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2707 for (; pc < end; ++pc) {
2708 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2709 }
2710 fprintf(logfile, "\n");
2711 qemu_log_unlock(logfile);
2712 }
2713 }
2714}
2715
2716
2717
2718static void gen_interrupt(DisasContext *s, int intno)
2719{
2720 gen_update_cc_op(s);
2721 gen_update_eip_cur(s);
2722 gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno),
2723 cur_insn_len_i32(s));
2724 s->base.is_jmp = DISAS_NORETURN;
2725}
2726
2727static void gen_set_hflag(DisasContext *s, uint32_t mask)
2728{
2729 if ((s->flags & mask) == 0) {
2730 TCGv_i32 t = tcg_temp_new_i32();
2731 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2732 tcg_gen_ori_i32(t, t, mask);
2733 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2734 s->flags |= mask;
2735 }
2736}
2737
2738static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2739{
2740 if (s->flags & mask) {
2741 TCGv_i32 t = tcg_temp_new_i32();
2742 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2743 tcg_gen_andi_i32(t, t, ~mask);
2744 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2745 s->flags &= ~mask;
2746 }
2747}
2748
2749static void gen_set_eflags(DisasContext *s, target_ulong mask)
2750{
2751 TCGv t = tcg_temp_new();
2752
2753 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2754 tcg_gen_ori_tl(t, t, mask);
2755 tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2756}
2757
2758static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2759{
2760 TCGv t = tcg_temp_new();
2761
2762 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2763 tcg_gen_andi_tl(t, t, ~mask);
2764 tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2765}
2766
2767
2768static void gen_bnd_jmp(DisasContext *s)
2769{
2770
2771
2772
2773 if ((s->prefix & PREFIX_REPNZ) == 0
2774 && (s->flags & HF_MPX_EN_MASK) != 0
2775 && (s->flags & HF_MPX_IU_MASK) != 0) {
2776 gen_helper_bnd_jmp(cpu_env);
2777 }
2778}
2779
2780
2781
2782
2783
2784static void
2785do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2786{
2787 gen_update_cc_op(s);
2788
2789
2790 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2791 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2792 } else {
2793 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2794 }
2795
2796 if (s->base.tb->flags & HF_RF_MASK) {
2797 gen_reset_eflags(s, RF_MASK);
2798 }
2799 if (recheck_tf) {
2800 gen_helper_rechecking_single_step(cpu_env);
2801 tcg_gen_exit_tb(NULL, 0);
2802 } else if (s->flags & HF_TF_MASK) {
2803 gen_helper_single_step(cpu_env);
2804 } else if (jr) {
2805 tcg_gen_lookup_and_goto_ptr();
2806 } else {
2807 tcg_gen_exit_tb(NULL, 0);
2808 }
2809 s->base.is_jmp = DISAS_NORETURN;
2810}
2811
2812static inline void
2813gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2814{
2815 do_gen_eob_worker(s, inhibit, recheck_tf, false);
2816}
2817
2818
2819
2820static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2821{
2822 gen_eob_worker(s, inhibit, false);
2823}
2824
2825
2826static void gen_eob(DisasContext *s)
2827{
2828 gen_eob_worker(s, false, false);
2829}
2830
2831
2832static void gen_jr(DisasContext *s)
2833{
2834 do_gen_eob_worker(s, false, false, true);
2835}
2836
2837
2838static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2839{
2840 bool use_goto_tb = s->jmp_opt;
2841 target_ulong mask = -1;
2842 target_ulong new_pc = s->pc + diff;
2843 target_ulong new_eip = new_pc - s->cs_base;
2844
2845
2846 if (!CODE64(s)) {
2847 if (ot == MO_16) {
2848 mask = 0xffff;
2849 if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2850 use_goto_tb = false;
2851 }
2852 } else {
2853 mask = 0xffffffff;
2854 }
2855 }
2856 new_eip &= mask;
2857
2858 gen_update_cc_op(s);
2859 set_cc_op(s, CC_OP_DYNAMIC);
2860
2861 if (tb_cflags(s->base.tb) & CF_PCREL) {
2862 tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2863
2864
2865
2866
2867
2868 if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2869 tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2870 use_goto_tb = false;
2871 }
2872 }
2873
2874 if (use_goto_tb &&
2875 translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
2876
2877 tcg_gen_goto_tb(tb_num);
2878 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2879 tcg_gen_movi_tl(cpu_eip, new_eip);
2880 }
2881 tcg_gen_exit_tb(s->base.tb, tb_num);
2882 s->base.is_jmp = DISAS_NORETURN;
2883 } else {
2884 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2885 tcg_gen_movi_tl(cpu_eip, new_eip);
2886 }
2887 if (s->jmp_opt) {
2888 gen_jr(s);
2889 } else {
2890 gen_eob(s);
2891 }
2892 }
2893}
2894
2895
2896static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2897{
2898
2899 gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2900}
2901
2902static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2903{
2904 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2905 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
2906}
2907
2908static inline void gen_stq_env_A0(DisasContext *s, int offset)
2909{
2910 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
2911 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2912}
2913
2914static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2915{
2916 int mem_index = s->mem_index;
2917 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2918 MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2919 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2920 tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2921 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2922 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2923}
2924
2925static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2926{
2927 int mem_index = s->mem_index;
2928 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2929 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2930 MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2931 tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2932 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2933 tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2934}
2935
2936static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2937{
2938 int mem_index = s->mem_index;
2939 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2940 MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2941 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
2942 tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2943 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2944 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
2945
2946 tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2947 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2948 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
2949 tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2950 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2951 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
2952}
2953
2954static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2955{
2956 int mem_index = s->mem_index;
2957 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
2958 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2959 MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2960 tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2961 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
2962 tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2963 tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2964 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
2965 tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2966 tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2967 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
2968 tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2969}
2970
2971#include "decode-new.h"
2972#include "emit.c.inc"
2973#include "decode-new.c.inc"
2974
2975static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2976{
2977 TCGv_i64 cmp, val, old;
2978 TCGv Z;
2979
2980 gen_lea_modrm(env, s, modrm);
2981
2982 cmp = tcg_temp_new_i64();
2983 val = tcg_temp_new_i64();
2984 old = tcg_temp_new_i64();
2985
2986
2987 tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2988 tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2989
2990
2991 if (s->prefix & PREFIX_LOCK) {
2992 tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2993 } else {
2994 tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
2995 s->mem_index, MO_TEUQ);
2996 }
2997
2998
2999 tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3000 Z = tcg_temp_new();
3001 tcg_gen_trunc_i64_tl(Z, cmp);
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011 if (TARGET_LONG_BITS == 32) {
3012 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3013 } else {
3014 TCGv zero = tcg_constant_tl(0);
3015
3016 tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3017 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3018 s->T0, cpu_regs[R_EAX]);
3019 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3020 s->T1, cpu_regs[R_EDX]);
3021 }
3022
3023
3024 gen_compute_eflags(s);
3025 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
3026}
3027
3028#ifdef TARGET_X86_64
3029static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3030{
3031 MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3032 TCGv_i64 t0, t1;
3033 TCGv_i128 cmp, val;
3034
3035 gen_lea_modrm(env, s, modrm);
3036
3037 cmp = tcg_temp_new_i128();
3038 val = tcg_temp_new_i128();
3039 tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3040 tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3041
3042
3043 if (s->prefix & PREFIX_LOCK) {
3044 tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3045 } else {
3046 tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3047 }
3048
3049 tcg_gen_extr_i128_i64(s->T0, s->T1, val);
3050
3051
3052 t0 = tcg_temp_new_i64();
3053 t1 = tcg_temp_new_i64();
3054 tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3055 tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3056 tcg_gen_or_i64(t0, t0, t1);
3057
3058
3059 gen_compute_eflags(s);
3060 tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3061 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
3062
3063
3064
3065
3066
3067
3068 tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3069 tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
3070}
3071#endif
3072
3073
3074
3075static bool disas_insn(DisasContext *s, CPUState *cpu)
3076{
3077 CPUX86State *env = cpu->env_ptr;
3078 int b, prefixes;
3079 int shift;
3080 MemOp ot, aflag, dflag;
3081 int modrm, reg, rm, mod, op, opreg, val;
3082 bool orig_cc_op_dirty = s->cc_op_dirty;
3083 CCOp orig_cc_op = s->cc_op;
3084 target_ulong orig_pc_save = s->pc_save;
3085
3086 s->pc = s->base.pc_next;
3087 s->override = -1;
3088#ifdef TARGET_X86_64
3089 s->rex_r = 0;
3090 s->rex_x = 0;
3091 s->rex_b = 0;
3092#endif
3093 s->rip_offset = 0;
3094 s->vex_l = 0;
3095 s->vex_v = 0;
3096 s->vex_w = false;
3097 switch (sigsetjmp(s->jmpbuf, 0)) {
3098 case 0:
3099 break;
3100 case 1:
3101 gen_exception_gpf(s);
3102 return true;
3103 case 2:
3104
3105 s->pc = s->base.pc_next;
3106
3107
3108
3109
3110
3111 s->cc_op_dirty = orig_cc_op_dirty;
3112 s->cc_op = orig_cc_op;
3113 s->pc_save = orig_pc_save;
3114
3115 s->base.num_insns--;
3116 tcg_remove_ops_after(s->prev_insn_end);
3117 s->base.is_jmp = DISAS_TOO_MANY;
3118 return false;
3119 default:
3120 g_assert_not_reached();
3121 }
3122
3123 prefixes = 0;
3124
3125 next_byte:
3126 s->prefix = prefixes;
3127 b = x86_ldub_code(env, s);
3128
3129 switch (b) {
3130 default:
3131 break;
3132 case 0x0f:
3133 b = x86_ldub_code(env, s) + 0x100;
3134 break;
3135 case 0xf3:
3136 prefixes |= PREFIX_REPZ;
3137 prefixes &= ~PREFIX_REPNZ;
3138 goto next_byte;
3139 case 0xf2:
3140 prefixes |= PREFIX_REPNZ;
3141 prefixes &= ~PREFIX_REPZ;
3142 goto next_byte;
3143 case 0xf0:
3144 prefixes |= PREFIX_LOCK;
3145 goto next_byte;
3146 case 0x2e:
3147 s->override = R_CS;
3148 goto next_byte;
3149 case 0x36:
3150 s->override = R_SS;
3151 goto next_byte;
3152 case 0x3e:
3153 s->override = R_DS;
3154 goto next_byte;
3155 case 0x26:
3156 s->override = R_ES;
3157 goto next_byte;
3158 case 0x64:
3159 s->override = R_FS;
3160 goto next_byte;
3161 case 0x65:
3162 s->override = R_GS;
3163 goto next_byte;
3164 case 0x66:
3165 prefixes |= PREFIX_DATA;
3166 goto next_byte;
3167 case 0x67:
3168 prefixes |= PREFIX_ADR;
3169 goto next_byte;
3170#ifdef TARGET_X86_64
3171 case 0x40 ... 0x4f:
3172 if (CODE64(s)) {
3173
3174 prefixes |= PREFIX_REX;
3175 s->vex_w = (b >> 3) & 1;
3176 s->rex_r = (b & 0x4) << 1;
3177 s->rex_x = (b & 0x2) << 2;
3178 s->rex_b = (b & 0x1) << 3;
3179 goto next_byte;
3180 }
3181 break;
3182#endif
3183 case 0xc5:
3184 case 0xc4:
3185 if (CODE32(s) && !VM86(s)) {
3186 int vex2 = x86_ldub_code(env, s);
3187 s->pc--;
3188
3189 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3190
3191
3192 break;
3193 }
3194 disas_insn_new(s, cpu, b);
3195 return s->pc;
3196 }
3197 break;
3198 }
3199
3200
3201 if (CODE64(s)) {
3202
3203
3204
3205 dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3206
3207 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3208 } else {
3209
3210 if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3211 dflag = MO_32;
3212 } else {
3213 dflag = MO_16;
3214 }
3215
3216 if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3217 aflag = MO_32;
3218 } else {
3219 aflag = MO_16;
3220 }
3221 }
3222
3223 s->prefix = prefixes;
3224 s->aflag = aflag;
3225 s->dflag = dflag;
3226
3227
3228 switch (b) {
3229
3230
3231 case 0x00 ... 0x05:
3232 case 0x08 ... 0x0d:
3233 case 0x10 ... 0x15:
3234 case 0x18 ... 0x1d:
3235 case 0x20 ... 0x25:
3236 case 0x28 ... 0x2d:
3237 case 0x30 ... 0x35:
3238 case 0x38 ... 0x3d:
3239 {
3240 int op, f, val;
3241 op = (b >> 3) & 7;
3242 f = (b >> 1) & 3;
3243
3244 ot = mo_b_d(b, dflag);
3245
3246 switch(f) {
3247 case 0:
3248 modrm = x86_ldub_code(env, s);
3249 reg = ((modrm >> 3) & 7) | REX_R(s);
3250 mod = (modrm >> 6) & 3;
3251 rm = (modrm & 7) | REX_B(s);
3252 if (mod != 3) {
3253 gen_lea_modrm(env, s, modrm);
3254 opreg = OR_TMP0;
3255 } else if (op == OP_XORL && rm == reg) {
3256 xor_zero:
3257
3258 set_cc_op(s, CC_OP_CLR);
3259 tcg_gen_movi_tl(s->T0, 0);
3260 gen_op_mov_reg_v(s, ot, reg, s->T0);
3261 break;
3262 } else {
3263 opreg = rm;
3264 }
3265 gen_op_mov_v_reg(s, ot, s->T1, reg);
3266 gen_op(s, op, ot, opreg);
3267 break;
3268 case 1:
3269 modrm = x86_ldub_code(env, s);
3270 mod = (modrm >> 6) & 3;
3271 reg = ((modrm >> 3) & 7) | REX_R(s);
3272 rm = (modrm & 7) | REX_B(s);
3273 if (mod != 3) {
3274 gen_lea_modrm(env, s, modrm);
3275 gen_op_ld_v(s, ot, s->T1, s->A0);
3276 } else if (op == OP_XORL && rm == reg) {
3277 goto xor_zero;
3278 } else {
3279 gen_op_mov_v_reg(s, ot, s->T1, rm);
3280 }
3281 gen_op(s, op, ot, reg);
3282 break;
3283 case 2:
3284 val = insn_get(env, s, ot);
3285 tcg_gen_movi_tl(s->T1, val);
3286 gen_op(s, op, ot, OR_EAX);
3287 break;
3288 }
3289 }
3290 break;
3291
3292 case 0x82:
3293 if (CODE64(s))
3294 goto illegal_op;
3295
3296 case 0x80:
3297 case 0x81:
3298 case 0x83:
3299 {
3300 int val;
3301
3302 ot = mo_b_d(b, dflag);
3303
3304 modrm = x86_ldub_code(env, s);
3305 mod = (modrm >> 6) & 3;
3306 rm = (modrm & 7) | REX_B(s);
3307 op = (modrm >> 3) & 7;
3308
3309 if (mod != 3) {
3310 if (b == 0x83)
3311 s->rip_offset = 1;
3312 else
3313 s->rip_offset = insn_const_size(ot);
3314 gen_lea_modrm(env, s, modrm);
3315 opreg = OR_TMP0;
3316 } else {
3317 opreg = rm;
3318 }
3319
3320 switch(b) {
3321 default:
3322 case 0x80:
3323 case 0x81:
3324 case 0x82:
3325 val = insn_get(env, s, ot);
3326 break;
3327 case 0x83:
3328 val = (int8_t)insn_get(env, s, MO_8);
3329 break;
3330 }
3331 tcg_gen_movi_tl(s->T1, val);
3332 gen_op(s, op, ot, opreg);
3333 }
3334 break;
3335
3336
3337
3338 case 0x40 ... 0x47:
3339 ot = dflag;
3340 gen_inc(s, ot, OR_EAX + (b & 7), 1);
3341 break;
3342 case 0x48 ... 0x4f:
3343 ot = dflag;
3344 gen_inc(s, ot, OR_EAX + (b & 7), -1);
3345 break;
3346 case 0xf6:
3347 case 0xf7:
3348 ot = mo_b_d(b, dflag);
3349
3350 modrm = x86_ldub_code(env, s);
3351 mod = (modrm >> 6) & 3;
3352 rm = (modrm & 7) | REX_B(s);
3353 op = (modrm >> 3) & 7;
3354 if (mod != 3) {
3355 if (op == 0) {
3356 s->rip_offset = insn_const_size(ot);
3357 }
3358 gen_lea_modrm(env, s, modrm);
3359
3360 if (!(s->prefix & PREFIX_LOCK)
3361 || op != 2) {
3362 gen_op_ld_v(s, ot, s->T0, s->A0);
3363 }
3364 } else {
3365 gen_op_mov_v_reg(s, ot, s->T0, rm);
3366 }
3367
3368 switch(op) {
3369 case 0:
3370 val = insn_get(env, s, ot);
3371 tcg_gen_movi_tl(s->T1, val);
3372 gen_op_testl_T0_T1_cc(s);
3373 set_cc_op(s, CC_OP_LOGICB + ot);
3374 break;
3375 case 2:
3376 if (s->prefix & PREFIX_LOCK) {
3377 if (mod == 3) {
3378 goto illegal_op;
3379 }
3380 tcg_gen_movi_tl(s->T0, ~0);
3381 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3382 s->mem_index, ot | MO_LE);
3383 } else {
3384 tcg_gen_not_tl(s->T0, s->T0);
3385 if (mod != 3) {
3386 gen_op_st_v(s, ot, s->T0, s->A0);
3387 } else {
3388 gen_op_mov_reg_v(s, ot, rm, s->T0);
3389 }
3390 }
3391 break;
3392 case 3:
3393 if (s->prefix & PREFIX_LOCK) {
3394 TCGLabel *label1;
3395 TCGv a0, t0, t1, t2;
3396
3397 if (mod == 3) {
3398 goto illegal_op;
3399 }
3400 a0 = s->A0;
3401 t0 = s->T0;
3402 label1 = gen_new_label();
3403
3404 gen_set_label(label1);
3405 t1 = tcg_temp_new();
3406 t2 = tcg_temp_new();
3407 tcg_gen_mov_tl(t2, t0);
3408 tcg_gen_neg_tl(t1, t0);
3409 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3410 s->mem_index, ot | MO_LE);
3411 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3412
3413 tcg_gen_neg_tl(s->T0, t0);
3414 } else {
3415 tcg_gen_neg_tl(s->T0, s->T0);
3416 if (mod != 3) {
3417 gen_op_st_v(s, ot, s->T0, s->A0);
3418 } else {
3419 gen_op_mov_reg_v(s, ot, rm, s->T0);
3420 }
3421 }
3422 gen_op_update_neg_cc(s);
3423 set_cc_op(s, CC_OP_SUBB + ot);
3424 break;
3425 case 4:
3426 switch(ot) {
3427 case MO_8:
3428 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3429 tcg_gen_ext8u_tl(s->T0, s->T0);
3430 tcg_gen_ext8u_tl(s->T1, s->T1);
3431
3432 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3433 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3434 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3435 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3436 set_cc_op(s, CC_OP_MULB);
3437 break;
3438 case MO_16:
3439 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3440 tcg_gen_ext16u_tl(s->T0, s->T0);
3441 tcg_gen_ext16u_tl(s->T1, s->T1);
3442
3443 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3444 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3445 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3446 tcg_gen_shri_tl(s->T0, s->T0, 16);
3447 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3448 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3449 set_cc_op(s, CC_OP_MULW);
3450 break;
3451 default:
3452 case MO_32:
3453 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3454 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3455 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3456 s->tmp2_i32, s->tmp3_i32);
3457 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3458 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3459 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3460 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3461 set_cc_op(s, CC_OP_MULL);
3462 break;
3463#ifdef TARGET_X86_64
3464 case MO_64:
3465 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3466 s->T0, cpu_regs[R_EAX]);
3467 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3468 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3469 set_cc_op(s, CC_OP_MULQ);
3470 break;
3471#endif
3472 }
3473 break;
3474 case 5:
3475 switch(ot) {
3476 case MO_8:
3477 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3478 tcg_gen_ext8s_tl(s->T0, s->T0);
3479 tcg_gen_ext8s_tl(s->T1, s->T1);
3480
3481 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3482 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3483 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3484 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3485 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3486 set_cc_op(s, CC_OP_MULB);
3487 break;
3488 case MO_16:
3489 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3490 tcg_gen_ext16s_tl(s->T0, s->T0);
3491 tcg_gen_ext16s_tl(s->T1, s->T1);
3492
3493 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3494 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3495 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3496 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3497 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3498 tcg_gen_shri_tl(s->T0, s->T0, 16);
3499 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3500 set_cc_op(s, CC_OP_MULW);
3501 break;
3502 default:
3503 case MO_32:
3504 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3505 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3506 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3507 s->tmp2_i32, s->tmp3_i32);
3508 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3509 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3510 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3511 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3512 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3513 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3514 set_cc_op(s, CC_OP_MULL);
3515 break;
3516#ifdef TARGET_X86_64
3517 case MO_64:
3518 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3519 s->T0, cpu_regs[R_EAX]);
3520 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3521 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3522 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3523 set_cc_op(s, CC_OP_MULQ);
3524 break;
3525#endif
3526 }
3527 break;
3528 case 6:
3529 switch(ot) {
3530 case MO_8:
3531 gen_helper_divb_AL(cpu_env, s->T0);
3532 break;
3533 case MO_16:
3534 gen_helper_divw_AX(cpu_env, s->T0);
3535 break;
3536 default:
3537 case MO_32:
3538 gen_helper_divl_EAX(cpu_env, s->T0);
3539 break;
3540#ifdef TARGET_X86_64
3541 case MO_64:
3542 gen_helper_divq_EAX(cpu_env, s->T0);
3543 break;
3544#endif
3545 }
3546 break;
3547 case 7:
3548 switch(ot) {
3549 case MO_8:
3550 gen_helper_idivb_AL(cpu_env, s->T0);
3551 break;
3552 case MO_16:
3553 gen_helper_idivw_AX(cpu_env, s->T0);
3554 break;
3555 default:
3556 case MO_32:
3557 gen_helper_idivl_EAX(cpu_env, s->T0);
3558 break;
3559#ifdef TARGET_X86_64
3560 case MO_64:
3561 gen_helper_idivq_EAX(cpu_env, s->T0);
3562 break;
3563#endif
3564 }
3565 break;
3566 default:
3567 goto unknown_op;
3568 }
3569 break;
3570
3571 case 0xfe:
3572 case 0xff:
3573 ot = mo_b_d(b, dflag);
3574
3575 modrm = x86_ldub_code(env, s);
3576 mod = (modrm >> 6) & 3;
3577 rm = (modrm & 7) | REX_B(s);
3578 op = (modrm >> 3) & 7;
3579 if (op >= 2 && b == 0xfe) {
3580 goto unknown_op;
3581 }
3582 if (CODE64(s)) {
3583 if (op == 2 || op == 4) {
3584
3585 ot = MO_64;
3586 } else if (op == 3 || op == 5) {
3587 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3588 } else if (op == 6) {
3589
3590 ot = mo_pushpop(s, dflag);
3591 }
3592 }
3593 if (mod != 3) {
3594 gen_lea_modrm(env, s, modrm);
3595 if (op >= 2 && op != 3 && op != 5)
3596 gen_op_ld_v(s, ot, s->T0, s->A0);
3597 } else {
3598 gen_op_mov_v_reg(s, ot, s->T0, rm);
3599 }
3600
3601 switch(op) {
3602 case 0:
3603 if (mod != 3)
3604 opreg = OR_TMP0;
3605 else
3606 opreg = rm;
3607 gen_inc(s, ot, opreg, 1);
3608 break;
3609 case 1:
3610 if (mod != 3)
3611 opreg = OR_TMP0;
3612 else
3613 opreg = rm;
3614 gen_inc(s, ot, opreg, -1);
3615 break;
3616 case 2:
3617
3618 if (dflag == MO_16) {
3619 tcg_gen_ext16u_tl(s->T0, s->T0);
3620 }
3621 gen_push_v(s, eip_next_tl(s));
3622 gen_op_jmp_v(s, s->T0);
3623 gen_bnd_jmp(s);
3624 s->base.is_jmp = DISAS_JUMP;
3625 break;
3626 case 3:
3627 if (mod == 3) {
3628 goto illegal_op;
3629 }
3630 gen_op_ld_v(s, ot, s->T1, s->A0);
3631 gen_add_A0_im(s, 1 << ot);
3632 gen_op_ld_v(s, MO_16, s->T0, s->A0);
3633 do_lcall:
3634 if (PE(s) && !VM86(s)) {
3635 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3636 gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
3637 tcg_constant_i32(dflag - 1),
3638 eip_next_tl(s));
3639 } else {
3640 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3641 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3642 gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32,
3643 tcg_constant_i32(dflag - 1),
3644 eip_next_i32(s));
3645 }
3646 s->base.is_jmp = DISAS_JUMP;
3647 break;
3648 case 4:
3649 if (dflag == MO_16) {
3650 tcg_gen_ext16u_tl(s->T0, s->T0);
3651 }
3652 gen_op_jmp_v(s, s->T0);
3653 gen_bnd_jmp(s);
3654 s->base.is_jmp = DISAS_JUMP;
3655 break;
3656 case 5:
3657 if (mod == 3) {
3658 goto illegal_op;
3659 }
3660 gen_op_ld_v(s, ot, s->T1, s->A0);
3661 gen_add_A0_im(s, 1 << ot);
3662 gen_op_ld_v(s, MO_16, s->T0, s->A0);
3663 do_ljmp:
3664 if (PE(s) && !VM86(s)) {
3665 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3666 gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
3667 eip_next_tl(s));
3668 } else {
3669 gen_op_movl_seg_T0_vm(s, R_CS);
3670 gen_op_jmp_v(s, s->T1);
3671 }
3672 s->base.is_jmp = DISAS_JUMP;
3673 break;
3674 case 6:
3675 gen_push_v(s, s->T0);
3676 break;
3677 default:
3678 goto unknown_op;
3679 }
3680 break;
3681
3682 case 0x84:
3683 case 0x85:
3684 ot = mo_b_d(b, dflag);
3685
3686 modrm = x86_ldub_code(env, s);
3687 reg = ((modrm >> 3) & 7) | REX_R(s);
3688
3689 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3690 gen_op_mov_v_reg(s, ot, s->T1, reg);
3691 gen_op_testl_T0_T1_cc(s);
3692 set_cc_op(s, CC_OP_LOGICB + ot);
3693 break;
3694
3695 case 0xa8:
3696 case 0xa9:
3697 ot = mo_b_d(b, dflag);
3698 val = insn_get(env, s, ot);
3699
3700 gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3701 tcg_gen_movi_tl(s->T1, val);
3702 gen_op_testl_T0_T1_cc(s);
3703 set_cc_op(s, CC_OP_LOGICB + ot);
3704 break;
3705
3706 case 0x98:
3707 switch (dflag) {
3708#ifdef TARGET_X86_64
3709 case MO_64:
3710 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3711 tcg_gen_ext32s_tl(s->T0, s->T0);
3712 gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3713 break;
3714#endif
3715 case MO_32:
3716 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3717 tcg_gen_ext16s_tl(s->T0, s->T0);
3718 gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3719 break;
3720 case MO_16:
3721 gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3722 tcg_gen_ext8s_tl(s->T0, s->T0);
3723 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3724 break;
3725 default:
3726 tcg_abort();
3727 }
3728 break;
3729 case 0x99:
3730 switch (dflag) {
3731#ifdef TARGET_X86_64
3732 case MO_64:
3733 gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3734 tcg_gen_sari_tl(s->T0, s->T0, 63);
3735 gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3736 break;
3737#endif
3738 case MO_32:
3739 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3740 tcg_gen_ext32s_tl(s->T0, s->T0);
3741 tcg_gen_sari_tl(s->T0, s->T0, 31);
3742 gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3743 break;
3744 case MO_16:
3745 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3746 tcg_gen_ext16s_tl(s->T0, s->T0);
3747 tcg_gen_sari_tl(s->T0, s->T0, 15);
3748 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3749 break;
3750 default:
3751 tcg_abort();
3752 }
3753 break;
3754 case 0x1af:
3755 case 0x69:
3756 case 0x6b:
3757 ot = dflag;
3758 modrm = x86_ldub_code(env, s);
3759 reg = ((modrm >> 3) & 7) | REX_R(s);
3760 if (b == 0x69)
3761 s->rip_offset = insn_const_size(ot);
3762 else if (b == 0x6b)
3763 s->rip_offset = 1;
3764 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3765 if (b == 0x69) {
3766 val = insn_get(env, s, ot);
3767 tcg_gen_movi_tl(s->T1, val);
3768 } else if (b == 0x6b) {
3769 val = (int8_t)insn_get(env, s, MO_8);
3770 tcg_gen_movi_tl(s->T1, val);
3771 } else {
3772 gen_op_mov_v_reg(s, ot, s->T1, reg);
3773 }
3774 switch (ot) {
3775#ifdef TARGET_X86_64
3776 case MO_64:
3777 tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3778 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3779 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3780 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3781 break;
3782#endif
3783 case MO_32:
3784 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3785 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3786 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3787 s->tmp2_i32, s->tmp3_i32);
3788 tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3789 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3790 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3791 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3792 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3793 break;
3794 default:
3795 tcg_gen_ext16s_tl(s->T0, s->T0);
3796 tcg_gen_ext16s_tl(s->T1, s->T1);
3797
3798 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3799 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3800 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3801 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3802 gen_op_mov_reg_v(s, ot, reg, s->T0);
3803 break;
3804 }
3805 set_cc_op(s, CC_OP_MULB + ot);
3806 break;
3807 case 0x1c0:
3808 case 0x1c1:
3809 ot = mo_b_d(b, dflag);
3810 modrm = x86_ldub_code(env, s);
3811 reg = ((modrm >> 3) & 7) | REX_R(s);
3812 mod = (modrm >> 6) & 3;
3813 gen_op_mov_v_reg(s, ot, s->T0, reg);
3814 if (mod == 3) {
3815 rm = (modrm & 7) | REX_B(s);
3816 gen_op_mov_v_reg(s, ot, s->T1, rm);
3817 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3818 gen_op_mov_reg_v(s, ot, reg, s->T1);
3819 gen_op_mov_reg_v(s, ot, rm, s->T0);
3820 } else {
3821 gen_lea_modrm(env, s, modrm);
3822 if (s->prefix & PREFIX_LOCK) {
3823 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3824 s->mem_index, ot | MO_LE);
3825 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3826 } else {
3827 gen_op_ld_v(s, ot, s->T1, s->A0);
3828 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3829 gen_op_st_v(s, ot, s->T0, s->A0);
3830 }
3831 gen_op_mov_reg_v(s, ot, reg, s->T1);
3832 }
3833 gen_op_update2_cc(s);
3834 set_cc_op(s, CC_OP_ADDB + ot);
3835 break;
3836 case 0x1b0:
3837 case 0x1b1:
3838 {
3839 TCGv oldv, newv, cmpv, dest;
3840
3841 ot = mo_b_d(b, dflag);
3842 modrm = x86_ldub_code(env, s);
3843 reg = ((modrm >> 3) & 7) | REX_R(s);
3844 mod = (modrm >> 6) & 3;
3845 oldv = tcg_temp_new();
3846 newv = tcg_temp_new();
3847 cmpv = tcg_temp_new();
3848 gen_op_mov_v_reg(s, ot, newv, reg);
3849 tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3850 gen_extu(ot, cmpv);
3851 if (s->prefix & PREFIX_LOCK) {
3852 if (mod == 3) {
3853 goto illegal_op;
3854 }
3855 gen_lea_modrm(env, s, modrm);
3856 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3857 s->mem_index, ot | MO_LE);
3858 } else {
3859 if (mod == 3) {
3860 rm = (modrm & 7) | REX_B(s);
3861 gen_op_mov_v_reg(s, ot, oldv, rm);
3862 gen_extu(ot, oldv);
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873 dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3874 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3875 } else {
3876 gen_lea_modrm(env, s, modrm);
3877 gen_op_ld_v(s, ot, oldv, s->A0);
3878
3879
3880
3881
3882
3883
3884
3885 tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3886 gen_op_st_v(s, ot, newv, s->A0);
3887 }
3888 }
3889
3890
3891
3892
3893 dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3894 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3895 tcg_gen_mov_tl(cpu_cc_src, oldv);
3896 tcg_gen_mov_tl(s->cc_srcT, cmpv);
3897 tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3898 set_cc_op(s, CC_OP_SUBB + ot);
3899 }
3900 break;
3901 case 0x1c7:
3902 modrm = x86_ldub_code(env, s);
3903 mod = (modrm >> 6) & 3;
3904 switch ((modrm >> 3) & 7) {
3905 case 1:
3906 if (mod == 3) {
3907 goto illegal_op;
3908 }
3909#ifdef TARGET_X86_64
3910 if (dflag == MO_64) {
3911 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3912 goto illegal_op;
3913 }
3914 gen_cmpxchg16b(s, env, modrm);
3915 break;
3916 }
3917#endif
3918 if (!(s->cpuid_features & CPUID_CX8)) {
3919 goto illegal_op;
3920 }
3921 gen_cmpxchg8b(s, env, modrm);
3922 break;
3923
3924 case 7:
3925 case 6:
3926 if (mod != 3 ||
3927 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3928 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3929 goto illegal_op;
3930 }
3931 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3932 gen_io_start();
3933 s->base.is_jmp = DISAS_TOO_MANY;
3934 }
3935 gen_helper_rdrand(s->T0, cpu_env);
3936 rm = (modrm & 7) | REX_B(s);
3937 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3938 set_cc_op(s, CC_OP_EFLAGS);
3939 break;
3940
3941 default:
3942 goto illegal_op;
3943 }
3944 break;
3945
3946
3947
3948 case 0x50 ... 0x57:
3949 gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3950 gen_push_v(s, s->T0);
3951 break;
3952 case 0x58 ... 0x5f:
3953 ot = gen_pop_T0(s);
3954
3955 gen_pop_update(s, ot);
3956 gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3957 break;
3958 case 0x60:
3959 if (CODE64(s))
3960 goto illegal_op;
3961 gen_pusha(s);
3962 break;
3963 case 0x61:
3964 if (CODE64(s))
3965 goto illegal_op;
3966 gen_popa(s);
3967 break;
3968 case 0x68:
3969 case 0x6a:
3970 ot = mo_pushpop(s, dflag);
3971 if (b == 0x68)
3972 val = insn_get(env, s, ot);
3973 else
3974 val = (int8_t)insn_get(env, s, MO_8);
3975 tcg_gen_movi_tl(s->T0, val);
3976 gen_push_v(s, s->T0);
3977 break;
3978 case 0x8f:
3979 modrm = x86_ldub_code(env, s);
3980 mod = (modrm >> 6) & 3;
3981 ot = gen_pop_T0(s);
3982 if (mod == 3) {
3983
3984 gen_pop_update(s, ot);
3985 rm = (modrm & 7) | REX_B(s);
3986 gen_op_mov_reg_v(s, ot, rm, s->T0);
3987 } else {
3988
3989 s->popl_esp_hack = 1 << ot;
3990 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
3991 s->popl_esp_hack = 0;
3992 gen_pop_update(s, ot);
3993 }
3994 break;
3995 case 0xc8:
3996 {
3997 int level;
3998 val = x86_lduw_code(env, s);
3999 level = x86_ldub_code(env, s);
4000 gen_enter(s, val, level);
4001 }
4002 break;
4003 case 0xc9:
4004 gen_leave(s);
4005 break;
4006 case 0x06:
4007 case 0x0e:
4008 case 0x16:
4009 case 0x1e:
4010 if (CODE64(s))
4011 goto illegal_op;
4012 gen_op_movl_T0_seg(s, b >> 3);
4013 gen_push_v(s, s->T0);
4014 break;
4015 case 0x1a0:
4016 case 0x1a8:
4017 gen_op_movl_T0_seg(s, (b >> 3) & 7);
4018 gen_push_v(s, s->T0);
4019 break;
4020 case 0x07:
4021 case 0x17:
4022 case 0x1f:
4023 if (CODE64(s))
4024 goto illegal_op;
4025 reg = b >> 3;
4026 ot = gen_pop_T0(s);
4027 gen_movl_seg_T0(s, reg);
4028 gen_pop_update(s, ot);
4029 break;
4030 case 0x1a1:
4031 case 0x1a9:
4032 ot = gen_pop_T0(s);
4033 gen_movl_seg_T0(s, (b >> 3) & 7);
4034 gen_pop_update(s, ot);
4035 break;
4036
4037
4038
4039 case 0x88:
4040 case 0x89:
4041 ot = mo_b_d(b, dflag);
4042 modrm = x86_ldub_code(env, s);
4043 reg = ((modrm >> 3) & 7) | REX_R(s);
4044
4045
4046 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4047 break;
4048 case 0xc6:
4049 case 0xc7:
4050 ot = mo_b_d(b, dflag);
4051 modrm = x86_ldub_code(env, s);
4052 mod = (modrm >> 6) & 3;
4053 if (mod != 3) {
4054 s->rip_offset = insn_const_size(ot);
4055 gen_lea_modrm(env, s, modrm);
4056 }
4057 val = insn_get(env, s, ot);
4058 tcg_gen_movi_tl(s->T0, val);
4059 if (mod != 3) {
4060 gen_op_st_v(s, ot, s->T0, s->A0);
4061 } else {
4062 gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4063 }
4064 break;
4065 case 0x8a:
4066 case 0x8b:
4067 ot = mo_b_d(b, dflag);
4068 modrm = x86_ldub_code(env, s);
4069 reg = ((modrm >> 3) & 7) | REX_R(s);
4070
4071 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4072 gen_op_mov_reg_v(s, ot, reg, s->T0);
4073 break;
4074 case 0x8e:
4075 modrm = x86_ldub_code(env, s);
4076 reg = (modrm >> 3) & 7;
4077 if (reg >= 6 || reg == R_CS)
4078 goto illegal_op;
4079 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4080 gen_movl_seg_T0(s, reg);
4081 break;
4082 case 0x8c:
4083 modrm = x86_ldub_code(env, s);
4084 reg = (modrm >> 3) & 7;
4085 mod = (modrm >> 6) & 3;
4086 if (reg >= 6)
4087 goto illegal_op;
4088 gen_op_movl_T0_seg(s, reg);
4089 ot = mod == 3 ? dflag : MO_16;
4090 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4091 break;
4092
4093 case 0x1b6:
4094 case 0x1b7:
4095 case 0x1be:
4096 case 0x1bf:
4097 {
4098 MemOp d_ot;
4099 MemOp s_ot;
4100
4101
4102 d_ot = dflag;
4103
4104 ot = (b & 1) + MO_8;
4105
4106 s_ot = b & 8 ? MO_SIGN | ot : ot;
4107
4108 modrm = x86_ldub_code(env, s);
4109 reg = ((modrm >> 3) & 7) | REX_R(s);
4110 mod = (modrm >> 6) & 3;
4111 rm = (modrm & 7) | REX_B(s);
4112
4113 if (mod == 3) {
4114 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4115 tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4116 } else {
4117 gen_op_mov_v_reg(s, ot, s->T0, rm);
4118 switch (s_ot) {
4119 case MO_UB:
4120 tcg_gen_ext8u_tl(s->T0, s->T0);
4121 break;
4122 case MO_SB:
4123 tcg_gen_ext8s_tl(s->T0, s->T0);
4124 break;
4125 case MO_UW:
4126 tcg_gen_ext16u_tl(s->T0, s->T0);
4127 break;
4128 default:
4129 case MO_SW:
4130 tcg_gen_ext16s_tl(s->T0, s->T0);
4131 break;
4132 }
4133 }
4134 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4135 } else {
4136 gen_lea_modrm(env, s, modrm);
4137 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4138 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4139 }
4140 }
4141 break;
4142
4143 case 0x8d:
4144 modrm = x86_ldub_code(env, s);
4145 mod = (modrm >> 6) & 3;
4146 if (mod == 3)
4147 goto illegal_op;
4148 reg = ((modrm >> 3) & 7) | REX_R(s);
4149 {
4150 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4151 TCGv ea = gen_lea_modrm_1(s, a, false);
4152 gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4153 gen_op_mov_reg_v(s, dflag, reg, s->A0);
4154 }
4155 break;
4156
4157 case 0xa0:
4158 case 0xa1:
4159 case 0xa2:
4160 case 0xa3:
4161 {
4162 target_ulong offset_addr;
4163
4164 ot = mo_b_d(b, dflag);
4165 offset_addr = insn_get_addr(env, s, s->aflag);
4166 tcg_gen_movi_tl(s->A0, offset_addr);
4167 gen_add_A0_ds_seg(s);
4168 if ((b & 2) == 0) {
4169 gen_op_ld_v(s, ot, s->T0, s->A0);
4170 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4171 } else {
4172 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4173 gen_op_st_v(s, ot, s->T0, s->A0);
4174 }
4175 }
4176 break;
4177 case 0xd7:
4178 tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4179 tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4180 tcg_gen_add_tl(s->A0, s->A0, s->T0);
4181 gen_extu(s->aflag, s->A0);
4182 gen_add_A0_ds_seg(s);
4183 gen_op_ld_v(s, MO_8, s->T0, s->A0);
4184 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4185 break;
4186 case 0xb0 ... 0xb7:
4187 val = insn_get(env, s, MO_8);
4188 tcg_gen_movi_tl(s->T0, val);
4189 gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4190 break;
4191 case 0xb8 ... 0xbf:
4192#ifdef TARGET_X86_64
4193 if (dflag == MO_64) {
4194 uint64_t tmp;
4195
4196 tmp = x86_ldq_code(env, s);
4197 reg = (b & 7) | REX_B(s);
4198 tcg_gen_movi_tl(s->T0, tmp);
4199 gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4200 } else
4201#endif
4202 {
4203 ot = dflag;
4204 val = insn_get(env, s, ot);
4205 reg = (b & 7) | REX_B(s);
4206 tcg_gen_movi_tl(s->T0, val);
4207 gen_op_mov_reg_v(s, ot, reg, s->T0);
4208 }
4209 break;
4210
4211 case 0x91 ... 0x97:
4212 do_xchg_reg_eax:
4213 ot = dflag;
4214 reg = (b & 7) | REX_B(s);
4215 rm = R_EAX;
4216 goto do_xchg_reg;
4217 case 0x86:
4218 case 0x87:
4219 ot = mo_b_d(b, dflag);
4220 modrm = x86_ldub_code(env, s);
4221 reg = ((modrm >> 3) & 7) | REX_R(s);
4222 mod = (modrm >> 6) & 3;
4223 if (mod == 3) {
4224 rm = (modrm & 7) | REX_B(s);
4225 do_xchg_reg:
4226 gen_op_mov_v_reg(s, ot, s->T0, reg);
4227 gen_op_mov_v_reg(s, ot, s->T1, rm);
4228 gen_op_mov_reg_v(s, ot, rm, s->T0);
4229 gen_op_mov_reg_v(s, ot, reg, s->T1);
4230 } else {
4231 gen_lea_modrm(env, s, modrm);
4232 gen_op_mov_v_reg(s, ot, s->T0, reg);
4233
4234 tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4235 s->mem_index, ot | MO_LE);
4236 gen_op_mov_reg_v(s, ot, reg, s->T1);
4237 }
4238 break;
4239 case 0xc4:
4240
4241 op = R_ES;
4242 goto do_lxx;
4243 case 0xc5:
4244
4245 op = R_DS;
4246 goto do_lxx;
4247 case 0x1b2:
4248 op = R_SS;
4249 goto do_lxx;
4250 case 0x1b4:
4251 op = R_FS;
4252 goto do_lxx;
4253 case 0x1b5:
4254 op = R_GS;
4255 do_lxx:
4256 ot = dflag != MO_16 ? MO_32 : MO_16;
4257 modrm = x86_ldub_code(env, s);
4258 reg = ((modrm >> 3) & 7) | REX_R(s);
4259 mod = (modrm >> 6) & 3;
4260 if (mod == 3)
4261 goto illegal_op;
4262 gen_lea_modrm(env, s, modrm);
4263 gen_op_ld_v(s, ot, s->T1, s->A0);
4264 gen_add_A0_im(s, 1 << ot);
4265
4266 gen_op_ld_v(s, MO_16, s->T0, s->A0);
4267 gen_movl_seg_T0(s, op);
4268
4269 gen_op_mov_reg_v(s, ot, reg, s->T1);
4270 break;
4271
4272
4273
4274 case 0xc0:
4275 case 0xc1:
4276
4277 shift = 2;
4278 grp2:
4279 {
4280 ot = mo_b_d(b, dflag);
4281 modrm = x86_ldub_code(env, s);
4282 mod = (modrm >> 6) & 3;
4283 op = (modrm >> 3) & 7;
4284
4285 if (mod != 3) {
4286 if (shift == 2) {
4287 s->rip_offset = 1;
4288 }
4289 gen_lea_modrm(env, s, modrm);
4290 opreg = OR_TMP0;
4291 } else {
4292 opreg = (modrm & 7) | REX_B(s);
4293 }
4294
4295
4296 if (shift == 0) {
4297 gen_shift(s, op, ot, opreg, OR_ECX);
4298 } else {
4299 if (shift == 2) {
4300 shift = x86_ldub_code(env, s);
4301 }
4302 gen_shifti(s, op, ot, opreg, shift);
4303 }
4304 }
4305 break;
4306 case 0xd0:
4307 case 0xd1:
4308
4309 shift = 1;
4310 goto grp2;
4311 case 0xd2:
4312 case 0xd3:
4313
4314 shift = 0;
4315 goto grp2;
4316
4317 case 0x1a4:
4318 op = 0;
4319 shift = 1;
4320 goto do_shiftd;
4321 case 0x1a5:
4322 op = 0;
4323 shift = 0;
4324 goto do_shiftd;
4325 case 0x1ac:
4326 op = 1;
4327 shift = 1;
4328 goto do_shiftd;
4329 case 0x1ad:
4330 op = 1;
4331 shift = 0;
4332 do_shiftd:
4333 ot = dflag;
4334 modrm = x86_ldub_code(env, s);
4335 mod = (modrm >> 6) & 3;
4336 rm = (modrm & 7) | REX_B(s);
4337 reg = ((modrm >> 3) & 7) | REX_R(s);
4338 if (mod != 3) {
4339 gen_lea_modrm(env, s, modrm);
4340 opreg = OR_TMP0;
4341 } else {
4342 opreg = rm;
4343 }
4344 gen_op_mov_v_reg(s, ot, s->T1, reg);
4345
4346 if (shift) {
4347 TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4348 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4349 } else {
4350 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4351 }
4352 break;
4353
4354
4355
4356 case 0xd8 ... 0xdf:
4357 {
4358 bool update_fip = true;
4359
4360 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4361
4362
4363 gen_exception(s, EXCP07_PREX);
4364 break;
4365 }
4366 modrm = x86_ldub_code(env, s);
4367 mod = (modrm >> 6) & 3;
4368 rm = modrm & 7;
4369 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4370 if (mod != 3) {
4371
4372 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4373 TCGv ea = gen_lea_modrm_1(s, a, false);
4374 TCGv last_addr = tcg_temp_new();
4375 bool update_fdp = true;
4376
4377 tcg_gen_mov_tl(last_addr, ea);
4378 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4379
4380 switch (op) {
4381 case 0x00 ... 0x07:
4382 case 0x10 ... 0x17:
4383 case 0x20 ... 0x27:
4384 case 0x30 ... 0x37:
4385 {
4386 int op1;
4387 op1 = op & 7;
4388
4389 switch (op >> 4) {
4390 case 0:
4391 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4392 s->mem_index, MO_LEUL);
4393 gen_helper_flds_FT0(cpu_env, s->tmp2_i32);
4394 break;
4395 case 1:
4396 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4397 s->mem_index, MO_LEUL);
4398 gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
4399 break;
4400 case 2:
4401 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4402 s->mem_index, MO_LEUQ);
4403 gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
4404 break;
4405 case 3:
4406 default:
4407 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4408 s->mem_index, MO_LESW);
4409 gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
4410 break;
4411 }
4412
4413 gen_helper_fp_arith_ST0_FT0(op1);
4414 if (op1 == 3) {
4415
4416 gen_helper_fpop(cpu_env);
4417 }
4418 }
4419 break;
4420 case 0x08:
4421 case 0x0a:
4422 case 0x0b:
4423 case 0x18 ... 0x1b:
4424 case 0x28 ... 0x2b:
4425 case 0x38 ... 0x3b:
4426 switch (op & 7) {
4427 case 0:
4428 switch (op >> 4) {
4429 case 0:
4430 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4431 s->mem_index, MO_LEUL);
4432 gen_helper_flds_ST0(cpu_env, s->tmp2_i32);
4433 break;
4434 case 1:
4435 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4436 s->mem_index, MO_LEUL);
4437 gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
4438 break;
4439 case 2:
4440 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4441 s->mem_index, MO_LEUQ);
4442 gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
4443 break;
4444 case 3:
4445 default:
4446 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4447 s->mem_index, MO_LESW);
4448 gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
4449 break;
4450 }
4451 break;
4452 case 1:
4453
4454 switch (op >> 4) {
4455 case 1:
4456 gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env);
4457 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4458 s->mem_index, MO_LEUL);
4459 break;
4460 case 2:
4461 gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
4462 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4463 s->mem_index, MO_LEUQ);
4464 break;
4465 case 3:
4466 default:
4467 gen_helper_fistt_ST0(s->tmp2_i32, cpu_env);
4468 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4469 s->mem_index, MO_LEUW);
4470 break;
4471 }
4472 gen_helper_fpop(cpu_env);
4473 break;
4474 default:
4475 switch (op >> 4) {
4476 case 0:
4477 gen_helper_fsts_ST0(s->tmp2_i32, cpu_env);
4478 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4479 s->mem_index, MO_LEUL);
4480 break;
4481 case 1:
4482 gen_helper_fistl_ST0(s->tmp2_i32, cpu_env);
4483 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4484 s->mem_index, MO_LEUL);
4485 break;
4486 case 2:
4487 gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
4488 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4489 s->mem_index, MO_LEUQ);
4490 break;
4491 case 3:
4492 default:
4493 gen_helper_fist_ST0(s->tmp2_i32, cpu_env);
4494 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4495 s->mem_index, MO_LEUW);
4496 break;
4497 }
4498 if ((op & 7) == 3) {
4499 gen_helper_fpop(cpu_env);
4500 }
4501 break;
4502 }
4503 break;
4504 case 0x0c:
4505 gen_helper_fldenv(cpu_env, s->A0,
4506 tcg_constant_i32(dflag - 1));
4507 update_fip = update_fdp = false;
4508 break;
4509 case 0x0d:
4510 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4511 s->mem_index, MO_LEUW);
4512 gen_helper_fldcw(cpu_env, s->tmp2_i32);
4513 update_fip = update_fdp = false;
4514 break;
4515 case 0x0e:
4516 gen_helper_fstenv(cpu_env, s->A0,
4517 tcg_constant_i32(dflag - 1));
4518 update_fip = update_fdp = false;
4519 break;
4520 case 0x0f:
4521 gen_helper_fnstcw(s->tmp2_i32, cpu_env);
4522 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4523 s->mem_index, MO_LEUW);
4524 update_fip = update_fdp = false;
4525 break;
4526 case 0x1d:
4527 gen_helper_fldt_ST0(cpu_env, s->A0);
4528 break;
4529 case 0x1f:
4530 gen_helper_fstt_ST0(cpu_env, s->A0);
4531 gen_helper_fpop(cpu_env);
4532 break;
4533 case 0x2c:
4534 gen_helper_frstor(cpu_env, s->A0,
4535 tcg_constant_i32(dflag - 1));
4536 update_fip = update_fdp = false;
4537 break;
4538 case 0x2e:
4539 gen_helper_fsave(cpu_env, s->A0,
4540 tcg_constant_i32(dflag - 1));
4541 update_fip = update_fdp = false;
4542 break;
4543 case 0x2f:
4544 gen_helper_fnstsw(s->tmp2_i32, cpu_env);
4545 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4546 s->mem_index, MO_LEUW);
4547 update_fip = update_fdp = false;
4548 break;
4549 case 0x3c:
4550 gen_helper_fbld_ST0(cpu_env, s->A0);
4551 break;
4552 case 0x3e:
4553 gen_helper_fbst_ST0(cpu_env, s->A0);
4554 gen_helper_fpop(cpu_env);
4555 break;
4556 case 0x3d:
4557 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4558 s->mem_index, MO_LEUQ);
4559 gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
4560 break;
4561 case 0x3f:
4562 gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
4563 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4564 s->mem_index, MO_LEUQ);
4565 gen_helper_fpop(cpu_env);
4566 break;
4567 default:
4568 goto unknown_op;
4569 }
4570
4571 if (update_fdp) {
4572 int last_seg = s->override >= 0 ? s->override : a.def_seg;
4573
4574 tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
4575 offsetof(CPUX86State,
4576 segs[last_seg].selector));
4577 tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
4578 offsetof(CPUX86State, fpds));
4579 tcg_gen_st_tl(last_addr, cpu_env,
4580 offsetof(CPUX86State, fpdp));
4581 }
4582 } else {
4583
4584 opreg = rm;
4585
4586 switch (op) {
4587 case 0x08:
4588 gen_helper_fpush(cpu_env);
4589 gen_helper_fmov_ST0_STN(cpu_env,
4590 tcg_constant_i32((opreg + 1) & 7));
4591 break;
4592 case 0x09:
4593 case 0x29:
4594 case 0x39:
4595 gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
4596 break;
4597 case 0x0a:
4598 switch (rm) {
4599 case 0:
4600
4601 gen_helper_fwait(cpu_env);
4602 update_fip = false;
4603 break;
4604 default:
4605 goto unknown_op;
4606 }
4607 break;
4608 case 0x0c:
4609 switch (rm) {
4610 case 0:
4611 gen_helper_fchs_ST0(cpu_env);
4612 break;
4613 case 1:
4614 gen_helper_fabs_ST0(cpu_env);
4615 break;
4616 case 4:
4617 gen_helper_fldz_FT0(cpu_env);
4618 gen_helper_fcom_ST0_FT0(cpu_env);
4619 break;
4620 case 5:
4621 gen_helper_fxam_ST0(cpu_env);
4622 break;
4623 default:
4624 goto unknown_op;
4625 }
4626 break;
4627 case 0x0d:
4628 {
4629 switch (rm) {
4630 case 0:
4631 gen_helper_fpush(cpu_env);
4632 gen_helper_fld1_ST0(cpu_env);
4633 break;
4634 case 1:
4635 gen_helper_fpush(cpu_env);
4636 gen_helper_fldl2t_ST0(cpu_env);
4637 break;
4638 case 2:
4639 gen_helper_fpush(cpu_env);
4640 gen_helper_fldl2e_ST0(cpu_env);
4641 break;
4642 case 3:
4643 gen_helper_fpush(cpu_env);
4644 gen_helper_fldpi_ST0(cpu_env);
4645 break;
4646 case 4:
4647 gen_helper_fpush(cpu_env);
4648 gen_helper_fldlg2_ST0(cpu_env);
4649 break;
4650 case 5:
4651 gen_helper_fpush(cpu_env);
4652 gen_helper_fldln2_ST0(cpu_env);
4653 break;
4654 case 6:
4655 gen_helper_fpush(cpu_env);
4656 gen_helper_fldz_ST0(cpu_env);
4657 break;
4658 default:
4659 goto unknown_op;
4660 }
4661 }
4662 break;
4663 case 0x0e:
4664 switch (rm) {
4665 case 0:
4666 gen_helper_f2xm1(cpu_env);
4667 break;
4668 case 1:
4669 gen_helper_fyl2x(cpu_env);
4670 break;
4671 case 2:
4672 gen_helper_fptan(cpu_env);
4673 break;
4674 case 3:
4675 gen_helper_fpatan(cpu_env);
4676 break;
4677 case 4:
4678 gen_helper_fxtract(cpu_env);
4679 break;
4680 case 5:
4681 gen_helper_fprem1(cpu_env);
4682 break;
4683 case 6:
4684 gen_helper_fdecstp(cpu_env);
4685 break;
4686 default:
4687 case 7:
4688 gen_helper_fincstp(cpu_env);
4689 break;
4690 }
4691 break;
4692 case 0x0f:
4693 switch (rm) {
4694 case 0:
4695 gen_helper_fprem(cpu_env);
4696 break;
4697 case 1:
4698 gen_helper_fyl2xp1(cpu_env);
4699 break;
4700 case 2:
4701 gen_helper_fsqrt(cpu_env);
4702 break;
4703 case 3:
4704 gen_helper_fsincos(cpu_env);
4705 break;
4706 case 5:
4707 gen_helper_fscale(cpu_env);
4708 break;
4709 case 4:
4710 gen_helper_frndint(cpu_env);
4711 break;
4712 case 6:
4713 gen_helper_fsin(cpu_env);
4714 break;
4715 default:
4716 case 7:
4717 gen_helper_fcos(cpu_env);
4718 break;
4719 }
4720 break;
4721 case 0x00: case 0x01: case 0x04 ... 0x07:
4722 case 0x20: case 0x21: case 0x24 ... 0x27:
4723 case 0x30: case 0x31: case 0x34 ... 0x37:
4724 {
4725 int op1;
4726
4727 op1 = op & 7;
4728 if (op >= 0x20) {
4729 gen_helper_fp_arith_STN_ST0(op1, opreg);
4730 if (op >= 0x30) {
4731 gen_helper_fpop(cpu_env);
4732 }
4733 } else {
4734 gen_helper_fmov_FT0_STN(cpu_env,
4735 tcg_constant_i32(opreg));
4736 gen_helper_fp_arith_ST0_FT0(op1);
4737 }
4738 }
4739 break;
4740 case 0x02:
4741 case 0x22:
4742 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4743 gen_helper_fcom_ST0_FT0(cpu_env);
4744 break;
4745 case 0x03:
4746 case 0x23:
4747 case 0x32:
4748 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4749 gen_helper_fcom_ST0_FT0(cpu_env);
4750 gen_helper_fpop(cpu_env);
4751 break;
4752 case 0x15:
4753 switch (rm) {
4754 case 1:
4755 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
4756 gen_helper_fucom_ST0_FT0(cpu_env);
4757 gen_helper_fpop(cpu_env);
4758 gen_helper_fpop(cpu_env);
4759 break;
4760 default:
4761 goto unknown_op;
4762 }
4763 break;
4764 case 0x1c:
4765 switch (rm) {
4766 case 0:
4767 break;
4768 case 1:
4769 break;
4770 case 2:
4771 gen_helper_fclex(cpu_env);
4772 update_fip = false;
4773 break;
4774 case 3:
4775 gen_helper_fninit(cpu_env);
4776 update_fip = false;
4777 break;
4778 case 4:
4779 break;
4780 default:
4781 goto unknown_op;
4782 }
4783 break;
4784 case 0x1d:
4785 if (!(s->cpuid_features & CPUID_CMOV)) {
4786 goto illegal_op;
4787 }
4788 gen_update_cc_op(s);
4789 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4790 gen_helper_fucomi_ST0_FT0(cpu_env);
4791 set_cc_op(s, CC_OP_EFLAGS);
4792 break;
4793 case 0x1e:
4794 if (!(s->cpuid_features & CPUID_CMOV)) {
4795 goto illegal_op;
4796 }
4797 gen_update_cc_op(s);
4798 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4799 gen_helper_fcomi_ST0_FT0(cpu_env);
4800 set_cc_op(s, CC_OP_EFLAGS);
4801 break;
4802 case 0x28:
4803 gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
4804 break;
4805 case 0x2a:
4806 gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
4807 break;
4808 case 0x2b:
4809 case 0x0b:
4810 case 0x3a:
4811 case 0x3b:
4812 gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
4813 gen_helper_fpop(cpu_env);
4814 break;
4815 case 0x2c:
4816 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4817 gen_helper_fucom_ST0_FT0(cpu_env);
4818 break;
4819 case 0x2d:
4820 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4821 gen_helper_fucom_ST0_FT0(cpu_env);
4822 gen_helper_fpop(cpu_env);
4823 break;
4824 case 0x33:
4825 switch (rm) {
4826 case 1:
4827 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
4828 gen_helper_fcom_ST0_FT0(cpu_env);
4829 gen_helper_fpop(cpu_env);
4830 gen_helper_fpop(cpu_env);
4831 break;
4832 default:
4833 goto unknown_op;
4834 }
4835 break;
4836 case 0x38:
4837 gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
4838 gen_helper_fpop(cpu_env);
4839 break;
4840 case 0x3c:
4841 switch (rm) {
4842 case 0:
4843 gen_helper_fnstsw(s->tmp2_i32, cpu_env);
4844 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4845 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4846 break;
4847 default:
4848 goto unknown_op;
4849 }
4850 break;
4851 case 0x3d:
4852 if (!(s->cpuid_features & CPUID_CMOV)) {
4853 goto illegal_op;
4854 }
4855 gen_update_cc_op(s);
4856 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4857 gen_helper_fucomi_ST0_FT0(cpu_env);
4858 gen_helper_fpop(cpu_env);
4859 set_cc_op(s, CC_OP_EFLAGS);
4860 break;
4861 case 0x3e:
4862 if (!(s->cpuid_features & CPUID_CMOV)) {
4863 goto illegal_op;
4864 }
4865 gen_update_cc_op(s);
4866 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4867 gen_helper_fcomi_ST0_FT0(cpu_env);
4868 gen_helper_fpop(cpu_env);
4869 set_cc_op(s, CC_OP_EFLAGS);
4870 break;
4871 case 0x10 ... 0x13:
4872 case 0x18 ... 0x1b:
4873 {
4874 int op1;
4875 TCGLabel *l1;
4876 static const uint8_t fcmov_cc[8] = {
4877 (JCC_B << 1),
4878 (JCC_Z << 1),
4879 (JCC_BE << 1),
4880 (JCC_P << 1),
4881 };
4882
4883 if (!(s->cpuid_features & CPUID_CMOV)) {
4884 goto illegal_op;
4885 }
4886 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4887 l1 = gen_new_label();
4888 gen_jcc1_noeob(s, op1, l1);
4889 gen_helper_fmov_ST0_STN(cpu_env,
4890 tcg_constant_i32(opreg));
4891 gen_set_label(l1);
4892 }
4893 break;
4894 default:
4895 goto unknown_op;
4896 }
4897 }
4898
4899 if (update_fip) {
4900 tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
4901 offsetof(CPUX86State, segs[R_CS].selector));
4902 tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
4903 offsetof(CPUX86State, fpcs));
4904 tcg_gen_st_tl(eip_cur_tl(s),
4905 cpu_env, offsetof(CPUX86State, fpip));
4906 }
4907 }
4908 break;
4909
4910
4911
4912 case 0xa4:
4913 case 0xa5:
4914 ot = mo_b_d(b, dflag);
4915 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4916 gen_repz_movs(s, ot);
4917 } else {
4918 gen_movs(s, ot);
4919 }
4920 break;
4921
4922 case 0xaa:
4923 case 0xab:
4924 ot = mo_b_d(b, dflag);
4925 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4926 gen_repz_stos(s, ot);
4927 } else {
4928 gen_stos(s, ot);
4929 }
4930 break;
4931 case 0xac:
4932 case 0xad:
4933 ot = mo_b_d(b, dflag);
4934 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4935 gen_repz_lods(s, ot);
4936 } else {
4937 gen_lods(s, ot);
4938 }
4939 break;
4940 case 0xae:
4941 case 0xaf:
4942 ot = mo_b_d(b, dflag);
4943 if (prefixes & PREFIX_REPNZ) {
4944 gen_repz_scas(s, ot, 1);
4945 } else if (prefixes & PREFIX_REPZ) {
4946 gen_repz_scas(s, ot, 0);
4947 } else {
4948 gen_scas(s, ot);
4949 }
4950 break;
4951
4952 case 0xa6:
4953 case 0xa7:
4954 ot = mo_b_d(b, dflag);
4955 if (prefixes & PREFIX_REPNZ) {
4956 gen_repz_cmps(s, ot, 1);
4957 } else if (prefixes & PREFIX_REPZ) {
4958 gen_repz_cmps(s, ot, 0);
4959 } else {
4960 gen_cmps(s, ot);
4961 }
4962 break;
4963 case 0x6c:
4964 case 0x6d:
4965 ot = mo_b_d32(b, dflag);
4966 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4967 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4968 if (!gen_check_io(s, ot, s->tmp2_i32,
4969 SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
4970 break;
4971 }
4972 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4973 gen_io_start();
4974 s->base.is_jmp = DISAS_TOO_MANY;
4975 }
4976 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4977 gen_repz_ins(s, ot);
4978 } else {
4979 gen_ins(s, ot);
4980 }
4981 break;
4982 case 0x6e:
4983 case 0x6f:
4984 ot = mo_b_d32(b, dflag);
4985 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4986 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4987 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
4988 break;
4989 }
4990 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4991 gen_io_start();
4992 s->base.is_jmp = DISAS_TOO_MANY;
4993 }
4994 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4995 gen_repz_outs(s, ot);
4996 } else {
4997 gen_outs(s, ot);
4998 }
4999 break;
5000
5001
5002
5003
5004 case 0xe4:
5005 case 0xe5:
5006 ot = mo_b_d32(b, dflag);
5007 val = x86_ldub_code(env, s);
5008 tcg_gen_movi_i32(s->tmp2_i32, val);
5009 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5010 break;
5011 }
5012 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
5013 gen_io_start();
5014 s->base.is_jmp = DISAS_TOO_MANY;
5015 }
5016 gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5017 gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5018 gen_bpt_io(s, s->tmp2_i32, ot);
5019 break;
5020 case 0xe6:
5021 case 0xe7:
5022 ot = mo_b_d32(b, dflag);
5023 val = x86_ldub_code(env, s);
5024 tcg_gen_movi_i32(s->tmp2_i32, val);
5025 if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5026 break;
5027 }
5028 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
5029 gen_io_start();
5030 s->base.is_jmp = DISAS_TOO_MANY;
5031 }
5032 gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5033 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5034 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5035 gen_bpt_io(s, s->tmp2_i32, ot);
5036 break;
5037 case 0xec:
5038 case 0xed:
5039 ot = mo_b_d32(b, dflag);
5040 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5041 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5042 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5043 break;
5044 }
5045 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
5046 gen_io_start();
5047 s->base.is_jmp = DISAS_TOO_MANY;
5048 }
5049 gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5050 gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5051 gen_bpt_io(s, s->tmp2_i32, ot);
5052 break;
5053 case 0xee:
5054 case 0xef:
5055 ot = mo_b_d32(b, dflag);
5056 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5057 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5058 if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5059 break;
5060 }
5061 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
5062 gen_io_start();
5063 s->base.is_jmp = DISAS_TOO_MANY;
5064 }
5065 gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5066 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5067 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5068 gen_bpt_io(s, s->tmp2_i32, ot);
5069 break;
5070
5071
5072
5073 case 0xc2:
5074 val = x86_ldsw_code(env, s);
5075 ot = gen_pop_T0(s);
5076 gen_stack_update(s, val + (1 << ot));
5077
5078 gen_op_jmp_v(s, s->T0);
5079 gen_bnd_jmp(s);
5080 s->base.is_jmp = DISAS_JUMP;
5081 break;
5082 case 0xc3:
5083 ot = gen_pop_T0(s);
5084 gen_pop_update(s, ot);
5085
5086 gen_op_jmp_v(s, s->T0);
5087 gen_bnd_jmp(s);
5088 s->base.is_jmp = DISAS_JUMP;
5089 break;
5090 case 0xca:
5091 val = x86_ldsw_code(env, s);
5092 do_lret:
5093 if (PE(s) && !VM86(s)) {
5094 gen_update_cc_op(s);
5095 gen_update_eip_cur(s);
5096 gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
5097 tcg_constant_i32(val));
5098 } else {
5099 gen_stack_A0(s);
5100
5101 gen_op_ld_v(s, dflag, s->T0, s->A0);
5102
5103
5104 gen_op_jmp_v(s, s->T0);
5105
5106 gen_add_A0_im(s, 1 << dflag);
5107 gen_op_ld_v(s, dflag, s->T0, s->A0);
5108 gen_op_movl_seg_T0_vm(s, R_CS);
5109
5110 gen_stack_update(s, val + (2 << dflag));
5111 }
5112 s->base.is_jmp = DISAS_EOB_ONLY;
5113 break;
5114 case 0xcb:
5115 val = 0;
5116 goto do_lret;
5117 case 0xcf:
5118 gen_svm_check_intercept(s, SVM_EXIT_IRET);
5119 if (!PE(s) || VM86(s)) {
5120
5121 if (!check_vm86_iopl(s)) {
5122 break;
5123 }
5124 gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
5125 } else {
5126 gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
5127 eip_next_i32(s));
5128 }
5129 set_cc_op(s, CC_OP_EFLAGS);
5130 s->base.is_jmp = DISAS_EOB_ONLY;
5131 break;
5132 case 0xe8:
5133 {
5134 int diff = (dflag != MO_16
5135 ? (int32_t)insn_get(env, s, MO_32)
5136 : (int16_t)insn_get(env, s, MO_16));
5137 gen_push_v(s, eip_next_tl(s));
5138 gen_bnd_jmp(s);
5139 gen_jmp_rel(s, dflag, diff, 0);
5140 }
5141 break;
5142 case 0x9a:
5143 {
5144 unsigned int selector, offset;
5145
5146 if (CODE64(s))
5147 goto illegal_op;
5148 ot = dflag;
5149 offset = insn_get(env, s, ot);
5150 selector = insn_get(env, s, MO_16);
5151
5152 tcg_gen_movi_tl(s->T0, selector);
5153 tcg_gen_movi_tl(s->T1, offset);
5154 }
5155 goto do_lcall;
5156 case 0xe9:
5157 {
5158 int diff = (dflag != MO_16
5159 ? (int32_t)insn_get(env, s, MO_32)
5160 : (int16_t)insn_get(env, s, MO_16));
5161 gen_bnd_jmp(s);
5162 gen_jmp_rel(s, dflag, diff, 0);
5163 }
5164 break;
5165 case 0xea:
5166 {
5167 unsigned int selector, offset;
5168
5169 if (CODE64(s))
5170 goto illegal_op;
5171 ot = dflag;
5172 offset = insn_get(env, s, ot);
5173 selector = insn_get(env, s, MO_16);
5174
5175 tcg_gen_movi_tl(s->T0, selector);
5176 tcg_gen_movi_tl(s->T1, offset);
5177 }
5178 goto do_ljmp;
5179 case 0xeb:
5180 {
5181 int diff = (int8_t)insn_get(env, s, MO_8);
5182 gen_jmp_rel(s, dflag, diff, 0);
5183 }
5184 break;
5185 case 0x70 ... 0x7f:
5186 {
5187 int diff = (int8_t)insn_get(env, s, MO_8);
5188 gen_bnd_jmp(s);
5189 gen_jcc(s, b, diff);
5190 }
5191 break;
5192 case 0x180 ... 0x18f:
5193 {
5194 int diff = (dflag != MO_16
5195 ? (int32_t)insn_get(env, s, MO_32)
5196 : (int16_t)insn_get(env, s, MO_16));
5197 gen_bnd_jmp(s);
5198 gen_jcc(s, b, diff);
5199 }
5200 break;
5201
5202 case 0x190 ... 0x19f:
5203 modrm = x86_ldub_code(env, s);
5204 gen_setcc1(s, b, s->T0);
5205 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
5206 break;
5207 case 0x140 ... 0x14f:
5208 if (!(s->cpuid_features & CPUID_CMOV)) {
5209 goto illegal_op;
5210 }
5211 ot = dflag;
5212 modrm = x86_ldub_code(env, s);
5213 reg = ((modrm >> 3) & 7) | REX_R(s);
5214 gen_cmovcc1(env, s, ot, b, modrm, reg);
5215 break;
5216
5217
5218
5219 case 0x9c:
5220 gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
5221 if (check_vm86_iopl(s)) {
5222 gen_update_cc_op(s);
5223 gen_helper_read_eflags(s->T0, cpu_env);
5224 gen_push_v(s, s->T0);
5225 }
5226 break;
5227 case 0x9d:
5228 gen_svm_check_intercept(s, SVM_EXIT_POPF);
5229 if (check_vm86_iopl(s)) {
5230 int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5231
5232 if (CPL(s) == 0) {
5233 mask |= IF_MASK | IOPL_MASK;
5234 } else if (CPL(s) <= IOPL(s)) {
5235 mask |= IF_MASK;
5236 }
5237 if (dflag == MO_16) {
5238 mask &= 0xffff;
5239 }
5240
5241 ot = gen_pop_T0(s);
5242 gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask));
5243 gen_pop_update(s, ot);
5244 set_cc_op(s, CC_OP_EFLAGS);
5245
5246 s->base.is_jmp = DISAS_EOB_NEXT;
5247 }
5248 break;
5249 case 0x9e:
5250 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5251 goto illegal_op;
5252 tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
5253 gen_compute_eflags(s);
5254 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5255 tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5256 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
5257 break;
5258 case 0x9f:
5259 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5260 goto illegal_op;
5261 gen_compute_eflags(s);
5262
5263 tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
5264 tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
5265 break;
5266 case 0xf5:
5267 gen_compute_eflags(s);
5268 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5269 break;
5270 case 0xf8:
5271 gen_compute_eflags(s);
5272 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5273 break;
5274 case 0xf9:
5275 gen_compute_eflags(s);
5276 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5277 break;
5278 case 0xfc:
5279 tcg_gen_movi_i32(s->tmp2_i32, 1);
5280 tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
5281 break;
5282 case 0xfd:
5283 tcg_gen_movi_i32(s->tmp2_i32, -1);
5284 tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
5285 break;
5286
5287
5288
5289 case 0x1ba:
5290 ot = dflag;
5291 modrm = x86_ldub_code(env, s);
5292 op = (modrm >> 3) & 7;
5293 mod = (modrm >> 6) & 3;
5294 rm = (modrm & 7) | REX_B(s);
5295 if (mod != 3) {
5296 s->rip_offset = 1;
5297 gen_lea_modrm(env, s, modrm);
5298 if (!(s->prefix & PREFIX_LOCK)) {
5299 gen_op_ld_v(s, ot, s->T0, s->A0);
5300 }
5301 } else {
5302 gen_op_mov_v_reg(s, ot, s->T0, rm);
5303 }
5304
5305 val = x86_ldub_code(env, s);
5306 tcg_gen_movi_tl(s->T1, val);
5307 if (op < 4)
5308 goto unknown_op;
5309 op -= 4;
5310 goto bt_op;
5311 case 0x1a3:
5312 op = 0;
5313 goto do_btx;
5314 case 0x1ab:
5315 op = 1;
5316 goto do_btx;
5317 case 0x1b3:
5318 op = 2;
5319 goto do_btx;
5320 case 0x1bb:
5321 op = 3;
5322 do_btx:
5323 ot = dflag;
5324 modrm = x86_ldub_code(env, s);
5325 reg = ((modrm >> 3) & 7) | REX_R(s);
5326 mod = (modrm >> 6) & 3;
5327 rm = (modrm & 7) | REX_B(s);
5328 gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5329 if (mod != 3) {
5330 AddressParts a = gen_lea_modrm_0(env, s, modrm);
5331
5332 gen_exts(ot, s->T1);
5333 tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5334 tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5335 tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5336 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5337 if (!(s->prefix & PREFIX_LOCK)) {
5338 gen_op_ld_v(s, ot, s->T0, s->A0);
5339 }
5340 } else {
5341 gen_op_mov_v_reg(s, ot, s->T0, rm);
5342 }
5343 bt_op:
5344 tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5345 tcg_gen_movi_tl(s->tmp0, 1);
5346 tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5347 if (s->prefix & PREFIX_LOCK) {
5348 switch (op) {
5349 case 0:
5350
5351
5352 gen_op_ld_v(s, ot, s->T0, s->A0);
5353 break;
5354 case 1:
5355 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5356 s->mem_index, ot | MO_LE);
5357 break;
5358 case 2:
5359 tcg_gen_not_tl(s->tmp0, s->tmp0);
5360 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5361 s->mem_index, ot | MO_LE);
5362 break;
5363 default:
5364 case 3:
5365 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5366 s->mem_index, ot | MO_LE);
5367 break;
5368 }
5369 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5370 } else {
5371 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5372 switch (op) {
5373 case 0:
5374
5375 break;
5376 case 1:
5377 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5378 break;
5379 case 2:
5380 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5381 break;
5382 default:
5383 case 3:
5384 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5385 break;
5386 }
5387 if (op != 0) {
5388 if (mod != 3) {
5389 gen_op_st_v(s, ot, s->T0, s->A0);
5390 } else {
5391 gen_op_mov_reg_v(s, ot, rm, s->T0);
5392 }
5393 }
5394 }
5395
5396
5397
5398
5399 switch (s->cc_op) {
5400 case CC_OP_MULB ... CC_OP_MULQ:
5401 case CC_OP_ADDB ... CC_OP_ADDQ:
5402 case CC_OP_ADCB ... CC_OP_ADCQ:
5403 case CC_OP_SUBB ... CC_OP_SUBQ:
5404 case CC_OP_SBBB ... CC_OP_SBBQ:
5405 case CC_OP_LOGICB ... CC_OP_LOGICQ:
5406 case CC_OP_INCB ... CC_OP_INCQ:
5407 case CC_OP_DECB ... CC_OP_DECQ:
5408 case CC_OP_SHLB ... CC_OP_SHLQ:
5409 case CC_OP_SARB ... CC_OP_SARQ:
5410 case CC_OP_BMILGB ... CC_OP_BMILGQ:
5411
5412
5413
5414
5415 tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5416 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5417 break;
5418 default:
5419
5420 gen_compute_eflags(s);
5421 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5422 ctz32(CC_C), 1);
5423 break;
5424 }
5425 break;
5426 case 0x1bc:
5427 case 0x1bd:
5428 ot = dflag;
5429 modrm = x86_ldub_code(env, s);
5430 reg = ((modrm >> 3) & 7) | REX_R(s);
5431 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5432 gen_extu(ot, s->T0);
5433
5434
5435 if ((prefixes & PREFIX_REPZ)
5436 && (b & 1
5437 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5438 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5439 int size = 8 << ot;
5440
5441 tcg_gen_mov_tl(cpu_cc_src, s->T0);
5442 if (b & 1) {
5443
5444
5445 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5446 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
5447 } else {
5448
5449 tcg_gen_ctzi_tl(s->T0, s->T0, size);
5450 }
5451
5452 gen_op_update1_cc(s);
5453 set_cc_op(s, CC_OP_BMILGB + ot);
5454 } else {
5455
5456
5457 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
5458 set_cc_op(s, CC_OP_LOGICB + ot);
5459
5460
5461
5462
5463
5464 if (b & 1) {
5465
5466
5467 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5468 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
5469 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
5470 } else {
5471 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
5472 }
5473 }
5474 gen_op_mov_reg_v(s, ot, reg, s->T0);
5475 break;
5476
5477
5478 case 0x27:
5479 if (CODE64(s))
5480 goto illegal_op;
5481 gen_update_cc_op(s);
5482 gen_helper_daa(cpu_env);
5483 set_cc_op(s, CC_OP_EFLAGS);
5484 break;
5485 case 0x2f:
5486 if (CODE64(s))
5487 goto illegal_op;
5488 gen_update_cc_op(s);
5489 gen_helper_das(cpu_env);
5490 set_cc_op(s, CC_OP_EFLAGS);
5491 break;
5492 case 0x37:
5493 if (CODE64(s))
5494 goto illegal_op;
5495 gen_update_cc_op(s);
5496 gen_helper_aaa(cpu_env);
5497 set_cc_op(s, CC_OP_EFLAGS);
5498 break;
5499 case 0x3f:
5500 if (CODE64(s))
5501 goto illegal_op;
5502 gen_update_cc_op(s);
5503 gen_helper_aas(cpu_env);
5504 set_cc_op(s, CC_OP_EFLAGS);
5505 break;
5506 case 0xd4:
5507 if (CODE64(s))
5508 goto illegal_op;
5509 val = x86_ldub_code(env, s);
5510 if (val == 0) {
5511 gen_exception(s, EXCP00_DIVZ);
5512 } else {
5513 gen_helper_aam(cpu_env, tcg_constant_i32(val));
5514 set_cc_op(s, CC_OP_LOGICB);
5515 }
5516 break;
5517 case 0xd5:
5518 if (CODE64(s))
5519 goto illegal_op;
5520 val = x86_ldub_code(env, s);
5521 gen_helper_aad(cpu_env, tcg_constant_i32(val));
5522 set_cc_op(s, CC_OP_LOGICB);
5523 break;
5524
5525
5526 case 0x90:
5527
5528 if (prefixes & PREFIX_LOCK) {
5529 goto illegal_op;
5530 }
5531
5532 if (REX_B(s)) {
5533 goto do_xchg_reg_eax;
5534 }
5535 if (prefixes & PREFIX_REPZ) {
5536 gen_update_cc_op(s);
5537 gen_update_eip_cur(s);
5538 gen_helper_pause(cpu_env, cur_insn_len_i32(s));
5539 s->base.is_jmp = DISAS_NORETURN;
5540 }
5541 break;
5542 case 0x9b:
5543 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5544 (HF_MP_MASK | HF_TS_MASK)) {
5545 gen_exception(s, EXCP07_PREX);
5546 } else {
5547 gen_helper_fwait(cpu_env);
5548 }
5549 break;
5550 case 0xcc:
5551 gen_interrupt(s, EXCP03_INT3);
5552 break;
5553 case 0xcd:
5554 val = x86_ldub_code(env, s);
5555 if (check_vm86_iopl(s)) {
5556 gen_interrupt(s, val);
5557 }
5558 break;
5559 case 0xce:
5560 if (CODE64(s))
5561 goto illegal_op;
5562 gen_update_cc_op(s);
5563 gen_update_eip_cur(s);
5564 gen_helper_into(cpu_env, cur_insn_len_i32(s));
5565 break;
5566#ifdef WANT_ICEBP
5567 case 0xf1:
5568 gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5569 gen_debug(s);
5570 break;
5571#endif
5572 case 0xfa:
5573 if (check_iopl(s)) {
5574 gen_reset_eflags(s, IF_MASK);
5575 }
5576 break;
5577 case 0xfb:
5578 if (check_iopl(s)) {
5579 gen_set_eflags(s, IF_MASK);
5580
5581 gen_update_eip_next(s);
5582 gen_eob_inhibit_irq(s, true);
5583 }
5584 break;
5585 case 0x62:
5586 if (CODE64(s))
5587 goto illegal_op;
5588 ot = dflag;
5589 modrm = x86_ldub_code(env, s);
5590 reg = (modrm >> 3) & 7;
5591 mod = (modrm >> 6) & 3;
5592 if (mod == 3)
5593 goto illegal_op;
5594 gen_op_mov_v_reg(s, ot, s->T0, reg);
5595 gen_lea_modrm(env, s, modrm);
5596 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5597 if (ot == MO_16) {
5598 gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32);
5599 } else {
5600 gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32);
5601 }
5602 break;
5603 case 0x1c8 ... 0x1cf:
5604 reg = (b & 7) | REX_B(s);
5605#ifdef TARGET_X86_64
5606 if (dflag == MO_64) {
5607 tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5608 break;
5609 }
5610#endif
5611 tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
5612 break;
5613 case 0xd6:
5614 if (CODE64(s))
5615 goto illegal_op;
5616 gen_compute_eflags_c(s, s->T0);
5617 tcg_gen_neg_tl(s->T0, s->T0);
5618 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
5619 break;
5620 case 0xe0:
5621 case 0xe1:
5622 case 0xe2:
5623 case 0xe3:
5624 {
5625 TCGLabel *l1, *l2;
5626 int diff = (int8_t)insn_get(env, s, MO_8);
5627
5628 l1 = gen_new_label();
5629 l2 = gen_new_label();
5630 gen_update_cc_op(s);
5631 b &= 3;
5632 switch(b) {
5633 case 0:
5634 case 1:
5635 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5636 gen_op_jz_ecx(s, l2);
5637 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
5638 break;
5639 case 2:
5640 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5641 gen_op_jnz_ecx(s, l1);
5642 break;
5643 default:
5644 case 3:
5645 gen_op_jz_ecx(s, l1);
5646 break;
5647 }
5648
5649 gen_set_label(l2);
5650 gen_jmp_rel_csize(s, 0, 1);
5651
5652 gen_set_label(l1);
5653 gen_jmp_rel(s, dflag, diff, 0);
5654 }
5655 break;
5656 case 0x130:
5657 case 0x132:
5658 if (check_cpl0(s)) {
5659 gen_update_cc_op(s);
5660 gen_update_eip_cur(s);
5661 if (b & 2) {
5662 gen_helper_rdmsr(cpu_env);
5663 } else {
5664 gen_helper_wrmsr(cpu_env);
5665 s->base.is_jmp = DISAS_EOB_NEXT;
5666 }
5667 }
5668 break;
5669 case 0x131:
5670 gen_update_cc_op(s);
5671 gen_update_eip_cur(s);
5672 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
5673 gen_io_start();
5674 s->base.is_jmp = DISAS_TOO_MANY;
5675 }
5676 gen_helper_rdtsc(cpu_env);
5677 break;
5678 case 0x133:
5679 gen_update_cc_op(s);
5680 gen_update_eip_cur(s);
5681 gen_helper_rdpmc(cpu_env);
5682 s->base.is_jmp = DISAS_NORETURN;
5683 break;
5684 case 0x134:
5685
5686 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
5687 goto illegal_op;
5688 if (!PE(s)) {
5689 gen_exception_gpf(s);
5690 } else {
5691 gen_helper_sysenter(cpu_env);
5692 s->base.is_jmp = DISAS_EOB_ONLY;
5693 }
5694 break;
5695 case 0x135:
5696
5697 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
5698 goto illegal_op;
5699 if (!PE(s)) {
5700 gen_exception_gpf(s);
5701 } else {
5702 gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
5703 s->base.is_jmp = DISAS_EOB_ONLY;
5704 }
5705 break;
5706#ifdef TARGET_X86_64
5707 case 0x105:
5708
5709 gen_update_cc_op(s);
5710 gen_update_eip_cur(s);
5711 gen_helper_syscall(cpu_env, cur_insn_len_i32(s));
5712
5713
5714
5715 gen_eob_worker(s, false, true);
5716 break;
5717 case 0x107:
5718 if (!PE(s)) {
5719 gen_exception_gpf(s);
5720 } else {
5721 gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
5722
5723 if (LMA(s)) {
5724 set_cc_op(s, CC_OP_EFLAGS);
5725 }
5726
5727
5728
5729
5730 gen_eob_worker(s, false, true);
5731 }
5732 break;
5733#endif
5734 case 0x1a2:
5735 gen_update_cc_op(s);
5736 gen_update_eip_cur(s);
5737 gen_helper_cpuid(cpu_env);
5738 break;
5739 case 0xf4:
5740 if (check_cpl0(s)) {
5741 gen_update_cc_op(s);
5742 gen_update_eip_cur(s);
5743 gen_helper_hlt(cpu_env, cur_insn_len_i32(s));
5744 s->base.is_jmp = DISAS_NORETURN;
5745 }
5746 break;
5747 case 0x100:
5748 modrm = x86_ldub_code(env, s);
5749 mod = (modrm >> 6) & 3;
5750 op = (modrm >> 3) & 7;
5751 switch(op) {
5752 case 0:
5753 if (!PE(s) || VM86(s))
5754 goto illegal_op;
5755 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5756 break;
5757 }
5758 gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
5759 tcg_gen_ld32u_tl(s->T0, cpu_env,
5760 offsetof(CPUX86State, ldt.selector));
5761 ot = mod == 3 ? dflag : MO_16;
5762 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5763 break;
5764 case 2:
5765 if (!PE(s) || VM86(s))
5766 goto illegal_op;
5767 if (check_cpl0(s)) {
5768 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
5769 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5770 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5771 gen_helper_lldt(cpu_env, s->tmp2_i32);
5772 }
5773 break;
5774 case 1:
5775 if (!PE(s) || VM86(s))
5776 goto illegal_op;
5777 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5778 break;
5779 }
5780 gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
5781 tcg_gen_ld32u_tl(s->T0, cpu_env,
5782 offsetof(CPUX86State, tr.selector));
5783 ot = mod == 3 ? dflag : MO_16;
5784 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5785 break;
5786 case 3:
5787 if (!PE(s) || VM86(s))
5788 goto illegal_op;
5789 if (check_cpl0(s)) {
5790 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
5791 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5792 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5793 gen_helper_ltr(cpu_env, s->tmp2_i32);
5794 }
5795 break;
5796 case 4:
5797 case 5:
5798 if (!PE(s) || VM86(s))
5799 goto illegal_op;
5800 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5801 gen_update_cc_op(s);
5802 if (op == 4) {
5803 gen_helper_verr(cpu_env, s->T0);
5804 } else {
5805 gen_helper_verw(cpu_env, s->T0);
5806 }
5807 set_cc_op(s, CC_OP_EFLAGS);
5808 break;
5809 default:
5810 goto unknown_op;
5811 }
5812 break;
5813
5814 case 0x101:
5815 modrm = x86_ldub_code(env, s);
5816 switch (modrm) {
5817 CASE_MODRM_MEM_OP(0):
5818 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5819 break;
5820 }
5821 gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5822 gen_lea_modrm(env, s, modrm);
5823 tcg_gen_ld32u_tl(s->T0,
5824 cpu_env, offsetof(CPUX86State, gdt.limit));
5825 gen_op_st_v(s, MO_16, s->T0, s->A0);
5826 gen_add_A0_im(s, 2);
5827 tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
5828 if (dflag == MO_16) {
5829 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5830 }
5831 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5832 break;
5833
5834 case 0xc8:
5835 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5836 goto illegal_op;
5837 }
5838 gen_update_cc_op(s);
5839 gen_update_eip_cur(s);
5840 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5841 gen_extu(s->aflag, s->A0);
5842 gen_add_A0_ds_seg(s);
5843 gen_helper_monitor(cpu_env, s->A0);
5844 break;
5845
5846 case 0xc9:
5847 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5848 goto illegal_op;
5849 }
5850 gen_update_cc_op(s);
5851 gen_update_eip_cur(s);
5852 gen_helper_mwait(cpu_env, cur_insn_len_i32(s));
5853 s->base.is_jmp = DISAS_NORETURN;
5854 break;
5855
5856 case 0xca:
5857 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5858 || CPL(s) != 0) {
5859 goto illegal_op;
5860 }
5861 gen_reset_eflags(s, AC_MASK);
5862 s->base.is_jmp = DISAS_EOB_NEXT;
5863 break;
5864
5865 case 0xcb:
5866 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5867 || CPL(s) != 0) {
5868 goto illegal_op;
5869 }
5870 gen_set_eflags(s, AC_MASK);
5871 s->base.is_jmp = DISAS_EOB_NEXT;
5872 break;
5873
5874 CASE_MODRM_MEM_OP(1):
5875 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5876 break;
5877 }
5878 gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5879 gen_lea_modrm(env, s, modrm);
5880 tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
5881 gen_op_st_v(s, MO_16, s->T0, s->A0);
5882 gen_add_A0_im(s, 2);
5883 tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
5884 if (dflag == MO_16) {
5885 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5886 }
5887 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5888 break;
5889
5890 case 0xd0:
5891 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5892 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5893 | PREFIX_REPZ | PREFIX_REPNZ))) {
5894 goto illegal_op;
5895 }
5896 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5897 gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
5898 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5899 break;
5900
5901 case 0xd1:
5902 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5903 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5904 | PREFIX_REPZ | PREFIX_REPNZ))) {
5905 goto illegal_op;
5906 }
5907 if (!check_cpl0(s)) {
5908 break;
5909 }
5910 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5911 cpu_regs[R_EDX]);
5912 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5913 gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
5914
5915 s->base.is_jmp = DISAS_EOB_NEXT;
5916 break;
5917
5918 case 0xd8:
5919 if (!SVME(s) || !PE(s)) {
5920 goto illegal_op;
5921 }
5922 if (!check_cpl0(s)) {
5923 break;
5924 }
5925 gen_update_cc_op(s);
5926 gen_update_eip_cur(s);
5927 gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
5928 cur_insn_len_i32(s));
5929 tcg_gen_exit_tb(NULL, 0);
5930 s->base.is_jmp = DISAS_NORETURN;
5931 break;
5932
5933 case 0xd9:
5934 if (!SVME(s)) {
5935 goto illegal_op;
5936 }
5937 gen_update_cc_op(s);
5938 gen_update_eip_cur(s);
5939 gen_helper_vmmcall(cpu_env);
5940 break;
5941
5942 case 0xda:
5943 if (!SVME(s) || !PE(s)) {
5944 goto illegal_op;
5945 }
5946 if (!check_cpl0(s)) {
5947 break;
5948 }
5949 gen_update_cc_op(s);
5950 gen_update_eip_cur(s);
5951 gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
5952 break;
5953
5954 case 0xdb:
5955 if (!SVME(s) || !PE(s)) {
5956 goto illegal_op;
5957 }
5958 if (!check_cpl0(s)) {
5959 break;
5960 }
5961 gen_update_cc_op(s);
5962 gen_update_eip_cur(s);
5963 gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
5964 break;
5965
5966 case 0xdc:
5967 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5968 || !PE(s)) {
5969 goto illegal_op;
5970 }
5971 if (!check_cpl0(s)) {
5972 break;
5973 }
5974 gen_update_cc_op(s);
5975 gen_helper_stgi(cpu_env);
5976 s->base.is_jmp = DISAS_EOB_NEXT;
5977 break;
5978
5979 case 0xdd:
5980 if (!SVME(s) || !PE(s)) {
5981 goto illegal_op;
5982 }
5983 if (!check_cpl0(s)) {
5984 break;
5985 }
5986 gen_update_cc_op(s);
5987 gen_update_eip_cur(s);
5988 gen_helper_clgi(cpu_env);
5989 break;
5990
5991 case 0xde:
5992 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5993 || !PE(s)) {
5994 goto illegal_op;
5995 }
5996 gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
5997
5998 goto illegal_op;
5999
6000 case 0xdf:
6001 if (!SVME(s) || !PE(s)) {
6002 goto illegal_op;
6003 }
6004 if (!check_cpl0(s)) {
6005 break;
6006 }
6007 gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6008 if (s->aflag == MO_64) {
6009 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6010 } else {
6011 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6012 }
6013 gen_helper_flush_page(cpu_env, s->A0);
6014 s->base.is_jmp = DISAS_EOB_NEXT;
6015 break;
6016
6017 CASE_MODRM_MEM_OP(2):
6018 if (!check_cpl0(s)) {
6019 break;
6020 }
6021 gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
6022 gen_lea_modrm(env, s, modrm);
6023 gen_op_ld_v(s, MO_16, s->T1, s->A0);
6024 gen_add_A0_im(s, 2);
6025 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6026 if (dflag == MO_16) {
6027 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6028 }
6029 tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
6030 tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
6031 break;
6032
6033 CASE_MODRM_MEM_OP(3):
6034 if (!check_cpl0(s)) {
6035 break;
6036 }
6037 gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
6038 gen_lea_modrm(env, s, modrm);
6039 gen_op_ld_v(s, MO_16, s->T1, s->A0);
6040 gen_add_A0_im(s, 2);
6041 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6042 if (dflag == MO_16) {
6043 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6044 }
6045 tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
6046 tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
6047 break;
6048
6049 CASE_MODRM_OP(4):
6050 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6051 break;
6052 }
6053 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
6054 tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
6055
6056
6057
6058
6059
6060 mod = (modrm >> 6) & 3;
6061 ot = (mod != 3 ? MO_16 : s->dflag);
6062 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6063 break;
6064 case 0xee:
6065 if (prefixes & PREFIX_LOCK) {
6066 goto illegal_op;
6067 }
6068 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6069 gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
6070 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
6071 break;
6072 case 0xef:
6073 if (prefixes & PREFIX_LOCK) {
6074 goto illegal_op;
6075 }
6076 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6077 cpu_regs[R_EDX]);
6078 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6079 gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
6080 break;
6081
6082 CASE_MODRM_OP(6):
6083 if (!check_cpl0(s)) {
6084 break;
6085 }
6086 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6087 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6088
6089
6090
6091
6092 tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
6093 tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6094 tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6095 tcg_gen_or_tl(s->T0, s->T0, s->T1);
6096 gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
6097 s->base.is_jmp = DISAS_EOB_NEXT;
6098 break;
6099
6100 CASE_MODRM_MEM_OP(7):
6101 if (!check_cpl0(s)) {
6102 break;
6103 }
6104 gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
6105 gen_lea_modrm(env, s, modrm);
6106 gen_helper_flush_page(cpu_env, s->A0);
6107 s->base.is_jmp = DISAS_EOB_NEXT;
6108 break;
6109
6110 case 0xf8:
6111#ifdef TARGET_X86_64
6112 if (CODE64(s)) {
6113 if (check_cpl0(s)) {
6114 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
6115 tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
6116 offsetof(CPUX86State, kernelgsbase));
6117 tcg_gen_st_tl(s->T0, cpu_env,
6118 offsetof(CPUX86State, kernelgsbase));
6119 }
6120 break;
6121 }
6122#endif
6123 goto illegal_op;
6124
6125 case 0xf9:
6126 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6127 goto illegal_op;
6128 }
6129 gen_update_cc_op(s);
6130 gen_update_eip_cur(s);
6131 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
6132 gen_io_start();
6133 s->base.is_jmp = DISAS_TOO_MANY;
6134 }
6135 gen_helper_rdtscp(cpu_env);
6136 break;
6137
6138 default:
6139 goto unknown_op;
6140 }
6141 break;
6142
6143 case 0x108:
6144 case 0x109:
6145 if (check_cpl0(s)) {
6146 gen_svm_check_intercept(s, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
6147
6148 }
6149 break;
6150 case 0x63:
6151#ifdef TARGET_X86_64
6152 if (CODE64(s)) {
6153 int d_ot;
6154
6155 d_ot = dflag;
6156
6157 modrm = x86_ldub_code(env, s);
6158 reg = ((modrm >> 3) & 7) | REX_R(s);
6159 mod = (modrm >> 6) & 3;
6160 rm = (modrm & 7) | REX_B(s);
6161
6162 if (mod == 3) {
6163 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
6164
6165 if (d_ot == MO_64) {
6166 tcg_gen_ext32s_tl(s->T0, s->T0);
6167 }
6168 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6169 } else {
6170 gen_lea_modrm(env, s, modrm);
6171 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
6172 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6173 }
6174 } else
6175#endif
6176 {
6177 TCGLabel *label1;
6178 TCGv t0, t1, t2;
6179
6180 if (!PE(s) || VM86(s))
6181 goto illegal_op;
6182 t0 = tcg_temp_new();
6183 t1 = tcg_temp_new();
6184 t2 = tcg_temp_new();
6185 ot = MO_16;
6186 modrm = x86_ldub_code(env, s);
6187 reg = (modrm >> 3) & 7;
6188 mod = (modrm >> 6) & 3;
6189 rm = modrm & 7;
6190 if (mod != 3) {
6191 gen_lea_modrm(env, s, modrm);
6192 gen_op_ld_v(s, ot, t0, s->A0);
6193 } else {
6194 gen_op_mov_v_reg(s, ot, t0, rm);
6195 }
6196 gen_op_mov_v_reg(s, ot, t1, reg);
6197 tcg_gen_andi_tl(s->tmp0, t0, 3);
6198 tcg_gen_andi_tl(t1, t1, 3);
6199 tcg_gen_movi_tl(t2, 0);
6200 label1 = gen_new_label();
6201 tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
6202 tcg_gen_andi_tl(t0, t0, ~3);
6203 tcg_gen_or_tl(t0, t0, t1);
6204 tcg_gen_movi_tl(t2, CC_Z);
6205 gen_set_label(label1);
6206 if (mod != 3) {
6207 gen_op_st_v(s, ot, t0, s->A0);
6208 } else {
6209 gen_op_mov_reg_v(s, ot, rm, t0);
6210 }
6211 gen_compute_eflags(s);
6212 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6213 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6214 }
6215 break;
6216 case 0x102:
6217 case 0x103:
6218 {
6219 TCGLabel *label1;
6220 TCGv t0;
6221 if (!PE(s) || VM86(s))
6222 goto illegal_op;
6223 ot = dflag != MO_16 ? MO_32 : MO_16;
6224 modrm = x86_ldub_code(env, s);
6225 reg = ((modrm >> 3) & 7) | REX_R(s);
6226 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6227 t0 = tcg_temp_new();
6228 gen_update_cc_op(s);
6229 if (b == 0x102) {
6230 gen_helper_lar(t0, cpu_env, s->T0);
6231 } else {
6232 gen_helper_lsl(t0, cpu_env, s->T0);
6233 }
6234 tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
6235 label1 = gen_new_label();
6236 tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
6237 gen_op_mov_reg_v(s, ot, reg, t0);
6238 gen_set_label(label1);
6239 set_cc_op(s, CC_OP_EFLAGS);
6240 }
6241 break;
6242 case 0x118:
6243 modrm = x86_ldub_code(env, s);
6244 mod = (modrm >> 6) & 3;
6245 op = (modrm >> 3) & 7;
6246 switch(op) {
6247 case 0:
6248 case 1:
6249 case 2:
6250 case 3:
6251 if (mod == 3)
6252 goto illegal_op;
6253 gen_nop_modrm(env, s, modrm);
6254
6255 break;
6256 default:
6257 gen_nop_modrm(env, s, modrm);
6258 break;
6259 }
6260 break;
6261 case 0x11a:
6262 modrm = x86_ldub_code(env, s);
6263 if (s->flags & HF_MPX_EN_MASK) {
6264 mod = (modrm >> 6) & 3;
6265 reg = ((modrm >> 3) & 7) | REX_R(s);
6266 if (prefixes & PREFIX_REPZ) {
6267
6268 if (reg >= 4
6269 || (prefixes & PREFIX_LOCK)
6270 || s->aflag == MO_16) {
6271 goto illegal_op;
6272 }
6273 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6274 } else if (prefixes & PREFIX_REPNZ) {
6275
6276 if (reg >= 4
6277 || (prefixes & PREFIX_LOCK)
6278 || s->aflag == MO_16) {
6279 goto illegal_op;
6280 }
6281 TCGv_i64 notu = tcg_temp_new_i64();
6282 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6283 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
6284 } else if (prefixes & PREFIX_DATA) {
6285
6286 if (reg >= 4 || s->aflag == MO_16) {
6287 goto illegal_op;
6288 }
6289 if (mod == 3) {
6290 int reg2 = (modrm & 7) | REX_B(s);
6291 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6292 goto illegal_op;
6293 }
6294 if (s->flags & HF_MPX_IU_MASK) {
6295 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6296 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6297 }
6298 } else {
6299 gen_lea_modrm(env, s, modrm);
6300 if (CODE64(s)) {
6301 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6302 s->mem_index, MO_LEUQ);
6303 tcg_gen_addi_tl(s->A0, s->A0, 8);
6304 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6305 s->mem_index, MO_LEUQ);
6306 } else {
6307 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6308 s->mem_index, MO_LEUL);
6309 tcg_gen_addi_tl(s->A0, s->A0, 4);
6310 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6311 s->mem_index, MO_LEUL);
6312 }
6313
6314 gen_set_hflag(s, HF_MPX_IU_MASK);
6315 }
6316 } else if (mod != 3) {
6317
6318 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6319 if (reg >= 4
6320 || (prefixes & PREFIX_LOCK)
6321 || s->aflag == MO_16
6322 || a.base < -1) {
6323 goto illegal_op;
6324 }
6325 if (a.base >= 0) {
6326 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6327 } else {
6328 tcg_gen_movi_tl(s->A0, 0);
6329 }
6330 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6331 if (a.index >= 0) {
6332 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6333 } else {
6334 tcg_gen_movi_tl(s->T0, 0);
6335 }
6336 if (CODE64(s)) {
6337 gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
6338 tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
6339 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6340 } else {
6341 gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
6342 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6343 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6344 }
6345 gen_set_hflag(s, HF_MPX_IU_MASK);
6346 }
6347 }
6348 gen_nop_modrm(env, s, modrm);
6349 break;
6350 case 0x11b:
6351 modrm = x86_ldub_code(env, s);
6352 if (s->flags & HF_MPX_EN_MASK) {
6353 mod = (modrm >> 6) & 3;
6354 reg = ((modrm >> 3) & 7) | REX_R(s);
6355 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6356
6357 if (reg >= 4
6358 || (prefixes & PREFIX_LOCK)
6359 || s->aflag == MO_16) {
6360 goto illegal_op;
6361 }
6362 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6363 if (a.base >= 0) {
6364 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6365 if (!CODE64(s)) {
6366 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6367 }
6368 } else if (a.base == -1) {
6369
6370 tcg_gen_movi_i64(cpu_bndl[reg], 0);
6371 } else {
6372
6373 goto illegal_op;
6374 }
6375 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
6376 if (!CODE64(s)) {
6377 tcg_gen_ext32u_tl(s->A0, s->A0);
6378 }
6379 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
6380
6381 gen_set_hflag(s, HF_MPX_IU_MASK);
6382 break;
6383 } else if (prefixes & PREFIX_REPNZ) {
6384
6385 if (reg >= 4
6386 || (prefixes & PREFIX_LOCK)
6387 || s->aflag == MO_16) {
6388 goto illegal_op;
6389 }
6390 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
6391 } else if (prefixes & PREFIX_DATA) {
6392
6393 if (reg >= 4 || s->aflag == MO_16) {
6394 goto illegal_op;
6395 }
6396 if (mod == 3) {
6397 int reg2 = (modrm & 7) | REX_B(s);
6398 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6399 goto illegal_op;
6400 }
6401 if (s->flags & HF_MPX_IU_MASK) {
6402 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6403 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6404 }
6405 } else {
6406 gen_lea_modrm(env, s, modrm);
6407 if (CODE64(s)) {
6408 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6409 s->mem_index, MO_LEUQ);
6410 tcg_gen_addi_tl(s->A0, s->A0, 8);
6411 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6412 s->mem_index, MO_LEUQ);
6413 } else {
6414 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6415 s->mem_index, MO_LEUL);
6416 tcg_gen_addi_tl(s->A0, s->A0, 4);
6417 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6418 s->mem_index, MO_LEUL);
6419 }
6420 }
6421 } else if (mod != 3) {
6422
6423 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6424 if (reg >= 4
6425 || (prefixes & PREFIX_LOCK)
6426 || s->aflag == MO_16
6427 || a.base < -1) {
6428 goto illegal_op;
6429 }
6430 if (a.base >= 0) {
6431 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6432 } else {
6433 tcg_gen_movi_tl(s->A0, 0);
6434 }
6435 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6436 if (a.index >= 0) {
6437 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6438 } else {
6439 tcg_gen_movi_tl(s->T0, 0);
6440 }
6441 if (CODE64(s)) {
6442 gen_helper_bndstx64(cpu_env, s->A0, s->T0,
6443 cpu_bndl[reg], cpu_bndu[reg]);
6444 } else {
6445 gen_helper_bndstx32(cpu_env, s->A0, s->T0,
6446 cpu_bndl[reg], cpu_bndu[reg]);
6447 }
6448 }
6449 }
6450 gen_nop_modrm(env, s, modrm);
6451 break;
6452 case 0x119: case 0x11c ... 0x11f:
6453 modrm = x86_ldub_code(env, s);
6454 gen_nop_modrm(env, s, modrm);
6455 break;
6456
6457 case 0x120:
6458 case 0x122:
6459 if (!check_cpl0(s)) {
6460 break;
6461 }
6462 modrm = x86_ldub_code(env, s);
6463
6464
6465
6466
6467
6468
6469 rm = (modrm & 7) | REX_B(s);
6470 reg = ((modrm >> 3) & 7) | REX_R(s);
6471 switch (reg) {
6472 case 0:
6473 if ((prefixes & PREFIX_LOCK) &&
6474 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6475 reg = 8;
6476 }
6477 break;
6478 case 2:
6479 case 3:
6480 case 4:
6481 case 8:
6482 break;
6483 default:
6484 goto unknown_op;
6485 }
6486 ot = (CODE64(s) ? MO_64 : MO_32);
6487
6488 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
6489 gen_io_start();
6490 s->base.is_jmp = DISAS_TOO_MANY;
6491 }
6492 if (b & 2) {
6493 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6494 gen_op_mov_v_reg(s, ot, s->T0, rm);
6495 gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
6496 s->base.is_jmp = DISAS_EOB_NEXT;
6497 } else {
6498 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
6499 gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
6500 gen_op_mov_reg_v(s, ot, rm, s->T0);
6501 }
6502 break;
6503
6504 case 0x121:
6505 case 0x123:
6506 if (check_cpl0(s)) {
6507 modrm = x86_ldub_code(env, s);
6508
6509
6510
6511
6512
6513 rm = (modrm & 7) | REX_B(s);
6514 reg = ((modrm >> 3) & 7) | REX_R(s);
6515 if (CODE64(s))
6516 ot = MO_64;
6517 else
6518 ot = MO_32;
6519 if (reg >= 8) {
6520 goto illegal_op;
6521 }
6522 if (b & 2) {
6523 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
6524 gen_op_mov_v_reg(s, ot, s->T0, rm);
6525 tcg_gen_movi_i32(s->tmp2_i32, reg);
6526 gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
6527 s->base.is_jmp = DISAS_EOB_NEXT;
6528 } else {
6529 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6530 tcg_gen_movi_i32(s->tmp2_i32, reg);
6531 gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
6532 gen_op_mov_reg_v(s, ot, rm, s->T0);
6533 }
6534 }
6535 break;
6536 case 0x106:
6537 if (check_cpl0(s)) {
6538 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6539 gen_helper_clts(cpu_env);
6540
6541 s->base.is_jmp = DISAS_EOB_NEXT;
6542 }
6543 break;
6544
6545 case 0x1c3:
6546 if (!(s->cpuid_features & CPUID_SSE2))
6547 goto illegal_op;
6548 ot = mo_64_32(dflag);
6549 modrm = x86_ldub_code(env, s);
6550 mod = (modrm >> 6) & 3;
6551 if (mod == 3)
6552 goto illegal_op;
6553 reg = ((modrm >> 3) & 7) | REX_R(s);
6554
6555 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
6556 break;
6557 case 0x1ae:
6558 modrm = x86_ldub_code(env, s);
6559 switch (modrm) {
6560 CASE_MODRM_MEM_OP(0):
6561 if (!(s->cpuid_features & CPUID_FXSR)
6562 || (prefixes & PREFIX_LOCK)) {
6563 goto illegal_op;
6564 }
6565 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6566 gen_exception(s, EXCP07_PREX);
6567 break;
6568 }
6569 gen_lea_modrm(env, s, modrm);
6570 gen_helper_fxsave(cpu_env, s->A0);
6571 break;
6572
6573 CASE_MODRM_MEM_OP(1):
6574 if (!(s->cpuid_features & CPUID_FXSR)
6575 || (prefixes & PREFIX_LOCK)) {
6576 goto illegal_op;
6577 }
6578 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6579 gen_exception(s, EXCP07_PREX);
6580 break;
6581 }
6582 gen_lea_modrm(env, s, modrm);
6583 gen_helper_fxrstor(cpu_env, s->A0);
6584 break;
6585
6586 CASE_MODRM_MEM_OP(2):
6587 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6588 goto illegal_op;
6589 }
6590 if (s->flags & HF_TS_MASK) {
6591 gen_exception(s, EXCP07_PREX);
6592 break;
6593 }
6594 gen_lea_modrm(env, s, modrm);
6595 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
6596 gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
6597 break;
6598
6599 CASE_MODRM_MEM_OP(3):
6600 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6601 goto illegal_op;
6602 }
6603 if (s->flags & HF_TS_MASK) {
6604 gen_exception(s, EXCP07_PREX);
6605 break;
6606 }
6607 gen_helper_update_mxcsr(cpu_env);
6608 gen_lea_modrm(env, s, modrm);
6609 tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
6610 gen_op_st_v(s, MO_32, s->T0, s->A0);
6611 break;
6612
6613 CASE_MODRM_MEM_OP(4):
6614 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6615 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6616 | PREFIX_REPZ | PREFIX_REPNZ))) {
6617 goto illegal_op;
6618 }
6619 gen_lea_modrm(env, s, modrm);
6620 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6621 cpu_regs[R_EDX]);
6622 gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
6623 break;
6624
6625 CASE_MODRM_MEM_OP(5):
6626 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6627 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6628 | PREFIX_REPZ | PREFIX_REPNZ))) {
6629 goto illegal_op;
6630 }
6631 gen_lea_modrm(env, s, modrm);
6632 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6633 cpu_regs[R_EDX]);
6634 gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
6635
6636
6637 s->base.is_jmp = DISAS_EOB_NEXT;
6638 break;
6639
6640 CASE_MODRM_MEM_OP(6):
6641 if (prefixes & PREFIX_LOCK) {
6642 goto illegal_op;
6643 }
6644 if (prefixes & PREFIX_DATA) {
6645
6646 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
6647 goto illegal_op;
6648 }
6649 gen_nop_modrm(env, s, modrm);
6650 } else {
6651
6652 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6653 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6654 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6655 goto illegal_op;
6656 }
6657 gen_lea_modrm(env, s, modrm);
6658 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6659 cpu_regs[R_EDX]);
6660 gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
6661 }
6662 break;
6663
6664 CASE_MODRM_MEM_OP(7):
6665 if (prefixes & PREFIX_LOCK) {
6666 goto illegal_op;
6667 }
6668 if (prefixes & PREFIX_DATA) {
6669
6670 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6671 goto illegal_op;
6672 }
6673 } else {
6674
6675 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6676 || !(s->cpuid_features & CPUID_CLFLUSH)) {
6677 goto illegal_op;
6678 }
6679 }
6680 gen_nop_modrm(env, s, modrm);
6681 break;
6682
6683 case 0xc0 ... 0xc7:
6684 case 0xc8 ... 0xcf:
6685 case 0xd0 ... 0xd7:
6686 case 0xd8 ... 0xdf:
6687 if (CODE64(s)
6688 && (prefixes & PREFIX_REPZ)
6689 && !(prefixes & PREFIX_LOCK)
6690 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6691 TCGv base, treg, src, dst;
6692
6693
6694 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
6695 gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);
6696
6697 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6698 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6699
6700 if (modrm & 0x10) {
6701
6702 dst = base, src = treg;
6703 } else {
6704
6705 dst = treg, src = base;
6706 }
6707
6708 if (s->dflag == MO_32) {
6709 tcg_gen_ext32u_tl(dst, src);
6710 } else {
6711 tcg_gen_mov_tl(dst, src);
6712 }
6713 break;
6714 }
6715 goto unknown_op;
6716
6717 case 0xf8:
6718 if (prefixes & PREFIX_DATA) {
6719
6720 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6721 || (prefixes & PREFIX_LOCK)) {
6722 goto illegal_op;
6723 }
6724 break;
6725 }
6726
6727 case 0xf9 ... 0xff:
6728 if (!(s->cpuid_features & CPUID_SSE)
6729 || (prefixes & PREFIX_LOCK)) {
6730 goto illegal_op;
6731 }
6732 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
6733 break;
6734 case 0xe8 ... 0xef:
6735 if (!(s->cpuid_features & CPUID_SSE)
6736 || (prefixes & PREFIX_LOCK)) {
6737 goto illegal_op;
6738 }
6739 tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6740 break;
6741 case 0xf0 ... 0xf7:
6742 if (!(s->cpuid_features & CPUID_SSE2)
6743 || (prefixes & PREFIX_LOCK)) {
6744 goto illegal_op;
6745 }
6746 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
6747 break;
6748
6749 default:
6750 goto unknown_op;
6751 }
6752 break;
6753
6754 case 0x10d:
6755 modrm = x86_ldub_code(env, s);
6756 mod = (modrm >> 6) & 3;
6757 if (mod == 3)
6758 goto illegal_op;
6759 gen_nop_modrm(env, s, modrm);
6760 break;
6761 case 0x1aa:
6762 gen_svm_check_intercept(s, SVM_EXIT_RSM);
6763 if (!(s->flags & HF_SMM_MASK))
6764 goto illegal_op;
6765#ifdef CONFIG_USER_ONLY
6766
6767 g_assert_not_reached();
6768#else
6769 gen_update_cc_op(s);
6770 gen_update_eip_next(s);
6771 gen_helper_rsm(cpu_env);
6772#endif
6773 s->base.is_jmp = DISAS_EOB_ONLY;
6774 break;
6775 case 0x1b8:
6776 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
6777 PREFIX_REPZ)
6778 goto illegal_op;
6779 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
6780 goto illegal_op;
6781
6782 modrm = x86_ldub_code(env, s);
6783 reg = ((modrm >> 3) & 7) | REX_R(s);
6784
6785 if (s->prefix & PREFIX_DATA) {
6786 ot = MO_16;
6787 } else {
6788 ot = mo_64_32(dflag);
6789 }
6790
6791 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6792 gen_extu(ot, s->T0);
6793 tcg_gen_mov_tl(cpu_cc_src, s->T0);
6794 tcg_gen_ctpop_tl(s->T0, s->T0);
6795 gen_op_mov_reg_v(s, ot, reg, s->T0);
6796
6797 set_cc_op(s, CC_OP_POPCNT);
6798 break;
6799 case 0x10e ... 0x117:
6800 case 0x128 ... 0x12f:
6801 case 0x138 ... 0x13a:
6802 case 0x150 ... 0x179:
6803 case 0x17c ... 0x17f:
6804 case 0x1c2:
6805 case 0x1c4 ... 0x1c6:
6806 case 0x1d0 ... 0x1fe:
6807 disas_insn_new(s, cpu, b);
6808 break;
6809 default:
6810 goto unknown_op;
6811 }
6812 return true;
6813 illegal_op:
6814 gen_illegal_opcode(s);
6815 return true;
6816 unknown_op:
6817 gen_unknown_opcode(env, s);
6818 return true;
6819}
6820
6821void tcg_x86_init(void)
6822{
6823 static const char reg_names[CPU_NB_REGS][4] = {
6824#ifdef TARGET_X86_64
6825 [R_EAX] = "rax",
6826 [R_EBX] = "rbx",
6827 [R_ECX] = "rcx",
6828 [R_EDX] = "rdx",
6829 [R_ESI] = "rsi",
6830 [R_EDI] = "rdi",
6831 [R_EBP] = "rbp",
6832 [R_ESP] = "rsp",
6833 [8] = "r8",
6834 [9] = "r9",
6835 [10] = "r10",
6836 [11] = "r11",
6837 [12] = "r12",
6838 [13] = "r13",
6839 [14] = "r14",
6840 [15] = "r15",
6841#else
6842 [R_EAX] = "eax",
6843 [R_EBX] = "ebx",
6844 [R_ECX] = "ecx",
6845 [R_EDX] = "edx",
6846 [R_ESI] = "esi",
6847 [R_EDI] = "edi",
6848 [R_EBP] = "ebp",
6849 [R_ESP] = "esp",
6850#endif
6851 };
6852 static const char eip_name[] = {
6853#ifdef TARGET_X86_64
6854 "rip"
6855#else
6856 "eip"
6857#endif
6858 };
6859 static const char seg_base_names[6][8] = {
6860 [R_CS] = "cs_base",
6861 [R_DS] = "ds_base",
6862 [R_ES] = "es_base",
6863 [R_FS] = "fs_base",
6864 [R_GS] = "gs_base",
6865 [R_SS] = "ss_base",
6866 };
6867 static const char bnd_regl_names[4][8] = {
6868 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6869 };
6870 static const char bnd_regu_names[4][8] = {
6871 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6872 };
6873 int i;
6874
6875 cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
6876 offsetof(CPUX86State, cc_op), "cc_op");
6877 cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
6878 "cc_dst");
6879 cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
6880 "cc_src");
6881 cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
6882 "cc_src2");
6883 cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name);
6884
6885 for (i = 0; i < CPU_NB_REGS; ++i) {
6886 cpu_regs[i] = tcg_global_mem_new(cpu_env,
6887 offsetof(CPUX86State, regs[i]),
6888 reg_names[i]);
6889 }
6890
6891 for (i = 0; i < 6; ++i) {
6892 cpu_seg_base[i]
6893 = tcg_global_mem_new(cpu_env,
6894 offsetof(CPUX86State, segs[i].base),
6895 seg_base_names[i]);
6896 }
6897
6898 for (i = 0; i < 4; ++i) {
6899 cpu_bndl[i]
6900 = tcg_global_mem_new_i64(cpu_env,
6901 offsetof(CPUX86State, bnd_regs[i].lb),
6902 bnd_regl_names[i]);
6903 cpu_bndu[i]
6904 = tcg_global_mem_new_i64(cpu_env,
6905 offsetof(CPUX86State, bnd_regs[i].ub),
6906 bnd_regu_names[i]);
6907 }
6908}
6909
6910static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6911{
6912 DisasContext *dc = container_of(dcbase, DisasContext, base);
6913 CPUX86State *env = cpu->env_ptr;
6914 uint32_t flags = dc->base.tb->flags;
6915 uint32_t cflags = tb_cflags(dc->base.tb);
6916 int cpl = (flags >> HF_CPL_SHIFT) & 3;
6917 int iopl = (flags >> IOPL_SHIFT) & 3;
6918
6919 dc->cs_base = dc->base.tb->cs_base;
6920 dc->pc_save = dc->base.pc_next;
6921 dc->flags = flags;
6922#ifndef CONFIG_USER_ONLY
6923 dc->cpl = cpl;
6924 dc->iopl = iopl;
6925#endif
6926
6927
6928 g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
6929 g_assert(CPL(dc) == cpl);
6930 g_assert(IOPL(dc) == iopl);
6931 g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
6932 g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
6933 g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
6934 g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
6935 g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
6936 g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
6937 g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
6938 g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
6939
6940 dc->cc_op = CC_OP_DYNAMIC;
6941 dc->cc_op_dirty = false;
6942 dc->popl_esp_hack = 0;
6943
6944 dc->mem_index = 0;
6945#ifdef CONFIG_SOFTMMU
6946 dc->mem_index = cpu_mmu_index(env, false);
6947#endif
6948 dc->cpuid_features = env->features[FEAT_1_EDX];
6949 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6950 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6951 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6952 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
6953 dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
6954 dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
6955 dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
6956 (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6957
6958
6959
6960
6961
6962 dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
6963
6964 dc->T0 = tcg_temp_new();
6965 dc->T1 = tcg_temp_new();
6966 dc->A0 = tcg_temp_new();
6967
6968 dc->tmp0 = tcg_temp_new();
6969 dc->tmp1_i64 = tcg_temp_new_i64();
6970 dc->tmp2_i32 = tcg_temp_new_i32();
6971 dc->tmp3_i32 = tcg_temp_new_i32();
6972 dc->tmp4 = tcg_temp_new();
6973 dc->cc_srcT = tcg_temp_new();
6974}
6975
6976static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6977{
6978}
6979
6980static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6981{
6982 DisasContext *dc = container_of(dcbase, DisasContext, base);
6983 target_ulong pc_arg = dc->base.pc_next;
6984
6985 dc->prev_insn_end = tcg_last_op();
6986 if (tb_cflags(dcbase->tb) & CF_PCREL) {
6987 pc_arg -= dc->cs_base;
6988 pc_arg &= ~TARGET_PAGE_MASK;
6989 }
6990 tcg_gen_insn_start(pc_arg, dc->cc_op);
6991}
6992
6993static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6994{
6995 DisasContext *dc = container_of(dcbase, DisasContext, base);
6996
6997#ifdef TARGET_VSYSCALL_PAGE
6998
6999
7000
7001 if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
7002 gen_exception(dc, EXCP_VSYSCALL);
7003 dc->base.pc_next = dc->pc + 1;
7004 return;
7005 }
7006#endif
7007
7008 if (disas_insn(dc, cpu)) {
7009 target_ulong pc_next = dc->pc;
7010 dc->base.pc_next = pc_next;
7011
7012 if (dc->base.is_jmp == DISAS_NEXT) {
7013 if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
7014
7015
7016
7017
7018
7019
7020
7021 dc->base.is_jmp = DISAS_EOB_NEXT;
7022 } else if (!is_same_page(&dc->base, pc_next)) {
7023 dc->base.is_jmp = DISAS_TOO_MANY;
7024 }
7025 }
7026 }
7027}
7028
7029static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7030{
7031 DisasContext *dc = container_of(dcbase, DisasContext, base);
7032
7033 switch (dc->base.is_jmp) {
7034 case DISAS_NORETURN:
7035 break;
7036 case DISAS_TOO_MANY:
7037 gen_update_cc_op(dc);
7038 gen_jmp_rel_csize(dc, 0, 0);
7039 break;
7040 case DISAS_EOB_NEXT:
7041 gen_update_cc_op(dc);
7042 gen_update_eip_cur(dc);
7043
7044 case DISAS_EOB_ONLY:
7045 gen_eob(dc);
7046 break;
7047 case DISAS_EOB_INHIBIT_IRQ:
7048 gen_update_cc_op(dc);
7049 gen_update_eip_cur(dc);
7050 gen_eob_inhibit_irq(dc, true);
7051 break;
7052 case DISAS_JUMP:
7053 gen_jr(dc);
7054 break;
7055 default:
7056 g_assert_not_reached();
7057 }
7058}
7059
7060static void i386_tr_disas_log(const DisasContextBase *dcbase,
7061 CPUState *cpu, FILE *logfile)
7062{
7063 DisasContext *dc = container_of(dcbase, DisasContext, base);
7064
7065 fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7066 target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7067}
7068
7069static const TranslatorOps i386_tr_ops = {
7070 .init_disas_context = i386_tr_init_disas_context,
7071 .tb_start = i386_tr_tb_start,
7072 .insn_start = i386_tr_insn_start,
7073 .translate_insn = i386_tr_translate_insn,
7074 .tb_stop = i386_tr_tb_stop,
7075 .disas_log = i386_tr_disas_log,
7076};
7077
7078
7079void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7080 target_ulong pc, void *host_pc)
7081{
7082 DisasContext dc;
7083
7084 translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7085}
7086