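/*
 * Emulation of x86 instructions for KVM: opcode and operand flag encodings,
 * the "fastop" assembly helpers that run ALU operations on host flags,
 * ModRM/SIB and operand decoding, segment descriptor loading, and the
 * emulation handlers for individual instructions.
 */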
23#include <linux/kvm_host.h>
24#include "kvm_cache_regs.h"
25#include <asm/kvm_emulate.h>
26#include <linux/stringify.h>
27#include <asm/debugreg.h>
28#include <asm/nospec-branch.h>
29
30#include "x86.h"
31#include "tss.h"
32#include "mmu.h"
33#include "pmu.h"
34
35
36
37
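/*
 * Operand types.  Each operand slot of an opcode's flags (destination,
 * source, second source) holds one of these values in an OpBits-wide field.
 */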
38#define OpNone 0ull
39#define OpImplicit 1ull
40#define OpReg 2ull
41#define OpMem 3ull
42#define OpAcc 4ull
43#define OpDI 5ull
44#define OpMem64 6ull
45#define OpImmUByte 7ull
46#define OpDX 8ull
47#define OpCL 9ull
48#define OpImmByte 10ull
49#define OpOne 11ull
50#define OpImm 12ull
51#define OpMem16 13ull
52#define OpMem32 14ull
53#define OpImmU 15ull
54#define OpSI 16ull
55#define OpImmFAddr 17ull
56#define OpMemFAddr 18ull
57#define OpImmU16 19ull
58#define OpES 20ull
59#define OpCS 21ull
60#define OpSS 22ull
61#define OpDS 23ull
62#define OpFS 24ull
63#define OpGS 25ull
64#define OpMem8 26ull
65#define OpImm64 27ull
66#define OpXLat 28ull
67#define OpAccLo 29ull
68#define OpAccHi 30ull
69
70#define OpBits 5
71#define OpMask ((1ull << OpBits) - 1)
72
73
74
75
76
77
78
79
80
81
82
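/*
 * Opcode flags.  Bit 0 selects byte-sized operands; the fields at DstShift,
 * SrcShift and Src2Shift hold the operand types defined above; the remaining
 * bits describe decode and execution properties (ModRM byte present,
 * privileged, lockable, alignment requirements, and so on).
 */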
83#define ByteOp (1<<0)
84
85#define DstShift 1
86#define ImplicitOps (OpImplicit << DstShift)
87#define DstReg (OpReg << DstShift)
88#define DstMem (OpMem << DstShift)
89#define DstAcc (OpAcc << DstShift)
90#define DstDI (OpDI << DstShift)
91#define DstMem64 (OpMem64 << DstShift)
92#define DstMem16 (OpMem16 << DstShift)
93#define DstImmUByte (OpImmUByte << DstShift)
94#define DstDX (OpDX << DstShift)
95#define DstAccLo (OpAccLo << DstShift)
96#define DstMask (OpMask << DstShift)
97
98#define SrcShift 6
99#define SrcNone (OpNone << SrcShift)
100#define SrcReg (OpReg << SrcShift)
101#define SrcMem (OpMem << SrcShift)
102#define SrcMem16 (OpMem16 << SrcShift)
103#define SrcMem32 (OpMem32 << SrcShift)
104#define SrcImm (OpImm << SrcShift)
105#define SrcImmByte (OpImmByte << SrcShift)
106#define SrcOne (OpOne << SrcShift)
107#define SrcImmUByte (OpImmUByte << SrcShift)
108#define SrcImmU (OpImmU << SrcShift)
109#define SrcSI (OpSI << SrcShift)
110#define SrcXLat (OpXLat << SrcShift)
111#define SrcImmFAddr (OpImmFAddr << SrcShift)
112#define SrcMemFAddr (OpMemFAddr << SrcShift)
113#define SrcAcc (OpAcc << SrcShift)
114#define SrcImmU16 (OpImmU16 << SrcShift)
115#define SrcImm64 (OpImm64 << SrcShift)
116#define SrcDX (OpDX << SrcShift)
117#define SrcMem8 (OpMem8 << SrcShift)
118#define SrcAccHi (OpAccHi << SrcShift)
119#define SrcMask (OpMask << SrcShift)
120#define BitOp (1<<11)
121#define MemAbs (1<<12)
122#define String (1<<13)
123#define Stack (1<<14)
124#define GroupMask (7<<15)
125#define Group (1<<15)
126#define GroupDual (2<<15)
127#define Prefix (3<<15)
128#define RMExt (4<<15)
129#define Escape (5<<15)
130#define InstrDual (6<<15)
131#define ModeDual (7<<15)
132#define Sse (1<<18)
133
134#define ModRM (1<<19)
135
136#define Mov (1<<20)
137
138#define Prot (1<<21)
139#define EmulateOnUD (1<<22)
140#define NoAccess (1<<23)
141#define Op3264 (1<<24)
142#define Undefined (1<<25)
143#define Lock (1<<26)
144#define Priv (1<<27)
145#define No64 (1<<28)
146#define PageTable (1 << 29)
147#define NotImpl (1 << 30)
148
149#define Src2Shift (31)
150#define Src2None (OpNone << Src2Shift)
151#define Src2Mem (OpMem << Src2Shift)
152#define Src2CL (OpCL << Src2Shift)
153#define Src2ImmByte (OpImmByte << Src2Shift)
154#define Src2One (OpOne << Src2Shift)
155#define Src2Imm (OpImm << Src2Shift)
156#define Src2ES (OpES << Src2Shift)
157#define Src2CS (OpCS << Src2Shift)
158#define Src2SS (OpSS << Src2Shift)
159#define Src2DS (OpDS << Src2Shift)
160#define Src2FS (OpFS << Src2Shift)
161#define Src2GS (OpGS << Src2Shift)
162#define Src2Mask (OpMask << Src2Shift)
163#define Mmx ((u64)1 << 40)
164#define AlignMask ((u64)7 << 41)
165#define Aligned ((u64)1 << 41)
166#define Unaligned ((u64)2 << 41)
167#define Avx ((u64)3 << 41)
168#define Aligned16 ((u64)4 << 41)
169#define Fastop ((u64)1 << 44)
170#define NoWrite ((u64)1 << 45)
171#define SrcWrite ((u64)1 << 46)
172#define NoMod ((u64)1 << 47)
173#define Intercept ((u64)1 << 48)
174#define CheckPerm ((u64)1 << 49)
175#define PrivUD ((u64)1 << 51)
176#define NearBranch ((u64)1 << 52)
177#define No16 ((u64)1 << 53)
178#define IncSP ((u64)1 << 54)
179#define TwoMemOp ((u64)1 << 55)
180
181#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
182
183#define X2(x...) x, x
184#define X3(x...) X2(x), x
185#define X4(x...) X2(x), X2(x)
186#define X5(x...) X4(x), x
187#define X6(x...) X4(x), X2(x)
188#define X7(x...) X4(x), X3(x)
189#define X8(x...) X4(x), X4(x)
190#define X16(x...) X8(x), X8(x)
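/*
 * Repeat an initializer: X2(x) expands to "x, x", X16(x) to sixteen copies.
 * The decode tables later in this file use these to fill runs of identical
 * entries.
 */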
191
192#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
193#define FASTOP_SIZE 8
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
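/*
 * fastop functions are tiny assembly stubs that perform a single ALU
 * operation directly on host registers and flags.  The FOP* macros below
 * emit one stub per operand size, each aligned to FASTOP_SIZE bytes, so the
 * right variant can be reached by adding a size-derived offset instead of
 * going through a jump table.  The convention fixed by those macros is:
 * destination in rax, source in rdx, second source or shift count in
 * rcx/cl, flags in rflags.  'struct fastop' is never defined, so the stubs
 * cannot be called from C other than through fastop().
 */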
212struct fastop;
213
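/*
 * One decode-table entry.  'flags' is built from the operand and flag
 * definitions above; 'u' is either the handler to execute or a pointer to a
 * sub-table, selected by the GroupMask bits (Group, GroupDual, Prefix,
 * RMExt, Escape, InstrDual, ModeDual).  'intercept' and 'check_perm'
 * support nested-virtualization intercept and permission checks.
 */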
214struct opcode {
215 u64 flags : 56;
216 u64 intercept : 8;
217 union {
218 int (*execute)(struct x86_emulate_ctxt *ctxt);
219 const struct opcode *group;
220 const struct group_dual *gdual;
221 const struct gprefix *gprefix;
222 const struct escape *esc;
223 const struct instr_dual *idual;
224 const struct mode_dual *mdual;
225 void (*fastop)(struct fastop *fake);
226 } u;
227 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
228};
229
230struct group_dual {
231 struct opcode mod012[8];
232 struct opcode mod3[8];
233};
234
235struct gprefix {
236 struct opcode pfx_no;
237 struct opcode pfx_66;
238 struct opcode pfx_f2;
239 struct opcode pfx_f3;
240};
241
242struct escape {
243 struct opcode op[8];
244 struct opcode high[64];
245};
246
247struct instr_dual {
248 struct opcode mod012;
249 struct opcode mod3;
250};
251
252struct mode_dual {
253 struct opcode mode32;
254 struct opcode mode64;
255};
256
257#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
258
259enum x86_transfer_type {
260 X86_TRANSFER_NONE,
261 X86_TRANSFER_CALL_JMP,
262 X86_TRANSFER_RET,
263 X86_TRANSFER_TASK_SWITCH,
264};
265
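/*
 * Guest GPRs are cached in ctxt->_regs: a register is read from the vcpu on
 * first use (tracked in regs_valid) and written back by
 * writeback_registers() only if it was modified (tracked in regs_dirty).
 */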
266static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
267{
268 if (!(ctxt->regs_valid & (1 << nr))) {
269 ctxt->regs_valid |= 1 << nr;
270 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
271 }
272 return ctxt->_regs[nr];
273}
274
275static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
276{
277 ctxt->regs_valid |= 1 << nr;
278 ctxt->regs_dirty |= 1 << nr;
279 return &ctxt->_regs[nr];
280}
281
282static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
283{
284 reg_read(ctxt, nr);
285 return reg_write(ctxt, nr);
286}
287
288static void writeback_registers(struct x86_emulate_ctxt *ctxt)
289{
290 unsigned reg;
291
292 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
293 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
294}
295
296static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
297{
298 ctxt->regs_dirty = 0;
299 ctxt->regs_valid = 0;
300}
301
302
303
304
305
306#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
307 X86_EFLAGS_PF|X86_EFLAGS_CF)
308
309#ifdef CONFIG_X86_64
310#define ON64(x) x
311#else
312#define ON64(x)
313#endif
314
315static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
316
317#define FOP_FUNC(name) \
318 ".align " __stringify(FASTOP_SIZE) " \n\t" \
319 ".type " name ", @function \n\t" \
320 name ":\n\t"
321
322#define FOP_RET "ret \n\t"
323
324#define FOP_START(op) \
325 extern void em_##op(struct fastop *fake); \
326 asm(".pushsection .text, \"ax\" \n\t" \
327 ".global em_" #op " \n\t" \
328 FOP_FUNC("em_" #op)
329
330#define FOP_END \
331 ".popsection")
332
333#define FOPNOP() \
334 FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
335 FOP_RET
336
337#define FOP1E(op, dst) \
338 FOP_FUNC(#op "_" #dst) \
339 "10: " #op " %" #dst " \n\t" FOP_RET
340
341#define FOP1EEX(op, dst) \
342 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
343
344#define FASTOP1(op) \
345 FOP_START(op) \
346 FOP1E(op##b, al) \
347 FOP1E(op##w, ax) \
348 FOP1E(op##l, eax) \
349 ON64(FOP1E(op##q, rax)) \
350 FOP_END
351
352
353#define FASTOP1SRC2(op, name) \
354 FOP_START(name) \
355 FOP1E(op, cl) \
356 FOP1E(op, cx) \
357 FOP1E(op, ecx) \
358 ON64(FOP1E(op, rcx)) \
359 FOP_END
360
361
362#define FASTOP1SRC2EX(op, name) \
363 FOP_START(name) \
364 FOP1EEX(op, cl) \
365 FOP1EEX(op, cx) \
366 FOP1EEX(op, ecx) \
367 ON64(FOP1EEX(op, rcx)) \
368 FOP_END
369
370#define FOP2E(op, dst, src) \
371 FOP_FUNC(#op "_" #dst "_" #src) \
372 #op " %" #src ", %" #dst " \n\t" FOP_RET
373
374#define FASTOP2(op) \
375 FOP_START(op) \
376 FOP2E(op##b, al, dl) \
377 FOP2E(op##w, ax, dx) \
378 FOP2E(op##l, eax, edx) \
379 ON64(FOP2E(op##q, rax, rdx)) \
380 FOP_END
381
382
383#define FASTOP2W(op) \
384 FOP_START(op) \
385 FOPNOP() \
386 FOP2E(op##w, ax, dx) \
387 FOP2E(op##l, eax, edx) \
388 ON64(FOP2E(op##q, rax, rdx)) \
389 FOP_END
390
391
392#define FASTOP2CL(op) \
393 FOP_START(op) \
394 FOP2E(op##b, al, cl) \
395 FOP2E(op##w, ax, cl) \
396 FOP2E(op##l, eax, cl) \
397 ON64(FOP2E(op##q, rax, cl)) \
398 FOP_END
399
400
401#define FASTOP2R(op, name) \
402 FOP_START(name) \
403 FOP2E(op##b, dl, al) \
404 FOP2E(op##w, dx, ax) \
405 FOP2E(op##l, edx, eax) \
406 ON64(FOP2E(op##q, rdx, rax)) \
407 FOP_END
408
409#define FOP3E(op, dst, src, src2) \
410 FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
411 #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
412
413
414#define FASTOP3WCL(op) \
415 FOP_START(op) \
416 FOPNOP() \
417 FOP3E(op##w, ax, dx, cl) \
418 FOP3E(op##l, eax, edx, cl) \
419 ON64(FOP3E(op##q, rax, rdx, cl)) \
420 FOP_END
421
422
423#define FOP_SETCC(op) \
424 ".align 4 \n\t" \
425 ".type " #op ", @function \n\t" \
426 #op ": \n\t" \
427 #op " %al \n\t" \
428 FOP_RET
429
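/*
 * Exception fixup for fastops that can fault (the div/idiv variants): the
 * handler clears %esi so that the caller can see the operation faulted and
 * raise #DE.
 */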
430asm(".pushsection .fixup, \"ax\"\n"
431 ".global kvm_fastop_exception \n"
432 "kvm_fastop_exception: xor %esi, %esi; ret\n"
433 ".popsection");
434
435FOP_START(setcc)
436FOP_SETCC(seto)
437FOP_SETCC(setno)
438FOP_SETCC(setc)
439FOP_SETCC(setnc)
440FOP_SETCC(setz)
441FOP_SETCC(setnz)
442FOP_SETCC(setbe)
443FOP_SETCC(setnbe)
444FOP_SETCC(sets)
445FOP_SETCC(setns)
446FOP_SETCC(setp)
447FOP_SETCC(setnp)
448FOP_SETCC(setl)
449FOP_SETCC(setnl)
450FOP_SETCC(setle)
451FOP_SETCC(setnle)
452FOP_END;
453
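/*
 * SALC (undocumented): "sbb %al, %al" sets AL to 0xff if CF is set and to 0
 * otherwise; pushf/popf keep the guest flags unchanged.
 */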
454FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
455FOP_END;
456
457
458
459
460
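/*
 * Run a single instruction with an exception-table fixup.  Evaluates to
 * X86EMUL_CONTINUE on success and to X86EMUL_UNHANDLEABLE if the
 * instruction faulted.
 */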
461#define asm_safe(insn, inoutclob...) \
462({ \
463 int _fault = 0; \
464 \
465 asm volatile("1:" insn "\n" \
466 "2:\n" \
467 ".pushsection .fixup, \"ax\"\n" \
468 "3: movl $1, %[_fault]\n" \
469 " jmp 2b\n" \
470 ".popsection\n" \
471 _ASM_EXTABLE(1b, 3b) \
472 : [_fault] "+qm"(_fault) inoutclob ); \
473 \
474 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
475})
476
477static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
478 enum x86_intercept intercept,
479 enum x86_intercept_stage stage)
480{
481 struct x86_instruction_info info = {
482 .intercept = intercept,
483 .rep_prefix = ctxt->rep_prefix,
484 .modrm_mod = ctxt->modrm_mod,
485 .modrm_reg = ctxt->modrm_reg,
486 .modrm_rm = ctxt->modrm_rm,
487 .src_val = ctxt->src.val64,
488 .dst_val = ctxt->dst.val64,
489 .src_bytes = ctxt->src.bytes,
490 .dst_bytes = ctxt->dst.bytes,
491 .ad_bytes = ctxt->ad_bytes,
492 .next_rip = ctxt->eip,
493 };
494
495 return ctxt->ops->intercept(ctxt, &info, stage);
496}
497
498static void assign_masked(ulong *dest, ulong src, ulong mask)
499{
500 *dest = (*dest & ~mask) | (src & mask);
501}
502
503static void assign_register(unsigned long *reg, u64 val, int bytes)
504{
505
506 switch (bytes) {
507 case 1:
508 *(u8 *)reg = (u8)val;
509 break;
510 case 2:
511 *(u16 *)reg = (u16)val;
512 break;
513 case 4:
514 *reg = (u32)val;
515 break;
516 case 8:
517 *reg = val;
518 break;
519 }
520}
521
522static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
523{
524 return (1UL << (ctxt->ad_bytes << 3)) - 1;
525}
526
527static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
528{
529 u16 sel;
530 struct desc_struct ss;
531
532 if (ctxt->mode == X86EMUL_MODE_PROT64)
533 return ~0UL;
534 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
535 return ~0U >> ((ss.d ^ 1) * 16);
536}
537
538static int stack_size(struct x86_emulate_ctxt *ctxt)
539{
540 return (__fls(stack_mask(ctxt)) + 1) >> 3;
541}
542
543
544static inline unsigned long
545address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
546{
547 if (ctxt->ad_bytes == sizeof(unsigned long))
548 return reg;
549 else
550 return reg & ad_mask(ctxt);
551}
552
553static inline unsigned long
554register_address(struct x86_emulate_ctxt *ctxt, int reg)
555{
556 return address_mask(ctxt, reg_read(ctxt, reg));
557}
558
559static void masked_increment(ulong *reg, ulong mask, int inc)
560{
561 assign_masked(reg, *reg + inc, mask);
562}
563
564static inline void
565register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
566{
567 ulong *preg = reg_rmw(ctxt, reg);
568
569 assign_register(preg, *preg + inc, ctxt->ad_bytes);
570}
571
572static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
573{
574 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
575}
576
577static u32 desc_limit_scaled(struct desc_struct *desc)
578{
579 u32 limit = get_desc_limit(desc);
580
581 return desc->g ? (limit << 12) | 0xfff : limit;
582}
583
584static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
585{
586 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
587 return 0;
588
589 return ctxt->ops->get_cached_segment_base(ctxt, seg);
590}
591
592static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
593 u32 error, bool valid)
594{
595 WARN_ON(vec > 0x1f);
596 ctxt->exception.vector = vec;
597 ctxt->exception.error_code = error;
598 ctxt->exception.error_code_valid = valid;
599 return X86EMUL_PROPAGATE_FAULT;
600}
601
602static int emulate_db(struct x86_emulate_ctxt *ctxt)
603{
604 return emulate_exception(ctxt, DB_VECTOR, 0, false);
605}
606
607static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
608{
609 return emulate_exception(ctxt, GP_VECTOR, err, true);
610}
611
612static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
613{
614 return emulate_exception(ctxt, SS_VECTOR, err, true);
615}
616
617static int emulate_ud(struct x86_emulate_ctxt *ctxt)
618{
619 return emulate_exception(ctxt, UD_VECTOR, 0, false);
620}
621
622static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
623{
624 return emulate_exception(ctxt, TS_VECTOR, err, true);
625}
626
627static int emulate_de(struct x86_emulate_ctxt *ctxt)
628{
629 return emulate_exception(ctxt, DE_VECTOR, 0, false);
630}
631
632static int emulate_nm(struct x86_emulate_ctxt *ctxt)
633{
634 return emulate_exception(ctxt, NM_VECTOR, 0, false);
635}
636
637static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
638{
639 u16 selector;
640 struct desc_struct desc;
641
642 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
643 return selector;
644}
645
646static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
647 unsigned seg)
648{
649 u16 dummy;
650 u32 base3;
651 struct desc_struct desc;
652
653 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
654 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
655}
656
657
658
659
660
661
662
663
664
665
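/*
 * Alignment required for a memory access of 'size' bytes, derived from the
 * AlignMask bits of the opcode: Unaligned and Avx accesses are never
 * checked, Aligned16 needs 16-byte alignment, and the default (Aligned)
 * needs natural alignment.  Accesses smaller than 16 bytes are never
 * checked.
 */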
666static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
667{
668 u64 alignment = ctxt->d & AlignMask;
669
670 if (likely(size < 16))
671 return 1;
672
673 switch (alignment) {
674 case Unaligned:
675 case Avx:
676 return 1;
677 case Aligned16:
678 return 16;
679 case Aligned:
680 default:
681 return size;
682 }
683}
684
685static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
686 struct segmented_address addr,
687 unsigned *max_size, unsigned size,
688 bool write, bool fetch,
689 enum x86emul_mode mode, ulong *linear)
690{
691 struct desc_struct desc;
692 bool usable;
693 ulong la;
694 u32 lim;
695 u16 sel;
696 u8 va_bits;
697
698 la = seg_base(ctxt, addr.seg) + addr.ea;
699 *max_size = 0;
700 switch (mode) {
701 case X86EMUL_MODE_PROT64:
702 *linear = la;
703 va_bits = ctxt_virt_addr_bits(ctxt);
704 if (get_canonical(la, va_bits) != la)
705 goto bad;
706
707 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
708 if (size > *max_size)
709 goto bad;
710 break;
711 default:
712 *linear = la = (u32)la;
713 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
714 addr.seg);
715 if (!usable)
716 goto bad;
717
718 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
719 || !(desc.type & 2)) && write)
720 goto bad;
721
722 if (!fetch && (desc.type & 8) && !(desc.type & 2))
723 goto bad;
724 lim = desc_limit_scaled(&desc);
725 if (!(desc.type & 8) && (desc.type & 4)) {
726
727 if (addr.ea <= lim)
728 goto bad;
729 lim = desc.d ? 0xffffffff : 0xffff;
730 }
731 if (addr.ea > lim)
732 goto bad;
733 if (lim == 0xffffffff)
734 *max_size = ~0u;
735 else {
736 *max_size = (u64)lim + 1 - addr.ea;
737 if (size > *max_size)
738 goto bad;
739 }
740 break;
741 }
742 if (la & (insn_alignment(ctxt, size) - 1))
743 return emulate_gp(ctxt, 0);
744 return X86EMUL_CONTINUE;
745bad:
746 if (addr.seg == VCPU_SREG_SS)
747 return emulate_ss(ctxt, 0);
748 else
749 return emulate_gp(ctxt, 0);
750}
751
752static int linearize(struct x86_emulate_ctxt *ctxt,
753 struct segmented_address addr,
754 unsigned size, bool write,
755 ulong *linear)
756{
757 unsigned max_size;
758 return __linearize(ctxt, addr, &max_size, size, write, false,
759 ctxt->mode, linear);
760}
761
762static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
763 enum x86emul_mode mode)
764{
765 ulong linear;
766 int rc;
767 unsigned max_size;
768 struct segmented_address addr = { .seg = VCPU_SREG_CS,
769 .ea = dst };
770
771 if (ctxt->op_bytes != sizeof(unsigned long))
772 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
773 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
774 if (rc == X86EMUL_CONTINUE)
775 ctxt->_eip = addr.ea;
776 return rc;
777}
778
779static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
780{
781 return assign_eip(ctxt, dst, ctxt->mode);
782}
783
784static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
785 const struct desc_struct *cs_desc)
786{
787 enum x86emul_mode mode = ctxt->mode;
788 int rc;
789
790#ifdef CONFIG_X86_64
791 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
792 if (cs_desc->l) {
793 u64 efer = 0;
794
795 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
796 if (efer & EFER_LMA)
797 mode = X86EMUL_MODE_PROT64;
798 } else
799 mode = X86EMUL_MODE_PROT32;
800 }
801#endif
802 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
803 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
804 rc = assign_eip(ctxt, dst, mode);
805 if (rc == X86EMUL_CONTINUE)
806 ctxt->mode = mode;
807 return rc;
808}
809
810static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
811{
812 return assign_eip_near(ctxt, ctxt->_eip + rel);
813}
814
815static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
816 void *data, unsigned size)
817{
818 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
819}
820
821static int linear_write_system(struct x86_emulate_ctxt *ctxt,
822 ulong linear, void *data,
823 unsigned int size)
824{
825 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
826}
827
828static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
829 struct segmented_address addr,
830 void *data,
831 unsigned size)
832{
833 int rc;
834 ulong linear;
835
836 rc = linearize(ctxt, addr, size, false, &linear);
837 if (rc != X86EMUL_CONTINUE)
838 return rc;
839 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
840}
841
842static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
843 struct segmented_address addr,
844 void *data,
845 unsigned int size)
846{
847 int rc;
848 ulong linear;
849
850 rc = linearize(ctxt, addr, size, true, &linear);
851 if (rc != X86EMUL_CONTINUE)
852 return rc;
853 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
854}
855
856
857
858
859
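/*
 * Prefetch instruction bytes into ctxt->fetch so that at least 'op_size'
 * more bytes are available, without crossing a page boundary.
 */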
860static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
861{
862 int rc;
863 unsigned size, max_size;
864 unsigned long linear;
865 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
866 struct segmented_address addr = { .seg = VCPU_SREG_CS,
867 .ea = ctxt->eip + cur_size };
868
869
870
871
872
873
874
875
876
877
878
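 /*
  * We do not know how many bytes the caller will eventually need and
  * __linearize() is expensive, so fetch as much as possible in one go.
  * __linearize() is called with size 0 so that it only reports the distance
  * to the segment limit in max_size instead of checking a specific access;
  * the fetch below is then clamped to the 15-byte instruction limit
  * (15UL ^ cur_size equals 15 - cur_size because cur_size never exceeds 15),
  * the segment limit, and the end of the page.
  */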
879 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
880 &linear);
881 if (unlikely(rc != X86EMUL_CONTINUE))
882 return rc;
883
884 size = min_t(unsigned, 15UL ^ cur_size, max_size);
885 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
886
887
888
889
890
891
892
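 /*
  * An instruction can span at most two pages, and the first chunk was
  * fetched when decoding began, so coming up short here means the
  * instruction runs past the 15-byte or segment limit: fault.
  */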
893 if (unlikely(size < op_size))
894 return emulate_gp(ctxt, 0);
895
896 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
897 size, &ctxt->exception);
898 if (unlikely(rc != X86EMUL_CONTINUE))
899 return rc;
900 ctxt->fetch.end += size;
901 return X86EMUL_CONTINUE;
902}
903
904static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
905 unsigned size)
906{
907 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
908
909 if (unlikely(done_size < size))
910 return __do_insn_fetch_bytes(ctxt, size - done_size);
911 else
912 return X86EMUL_CONTINUE;
913}
914
915
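/*
 * Fetch sizeof(_type) bytes from the instruction stream and advance _eip.
 * Relies on an 'rc' variable and a 'done' label in the enclosing function.
 */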
916#define insn_fetch(_type, _ctxt) \
917({ _type _x; \
918 \
919 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
920 if (rc != X86EMUL_CONTINUE) \
921 goto done; \
922 ctxt->_eip += sizeof(_type); \
923 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
924 ctxt->fetch.ptr += sizeof(_type); \
925 _x; \
926})
927
928#define insn_fetch_arr(_arr, _size, _ctxt) \
929({ \
930 rc = do_insn_fetch_bytes(_ctxt, _size); \
931 if (rc != X86EMUL_CONTINUE) \
932 goto done; \
933 ctxt->_eip += (_size); \
934 memcpy(_arr, ctxt->fetch.ptr, _size); \
935 ctxt->fetch.ptr += (_size); \
936})
937
938
939
940
941
942
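/*
 * Return a pointer to the register selected by a ModRM reg/rm field.  For
 * byte operands without a REX prefix, encodings 4-7 address AH/CH/DH/BH,
 * i.e. the high byte of the first four GPRs.
 */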
943static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
944 int byteop)
945{
946 void *p;
947 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
948
949 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
950 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
951 else
952 p = reg_rmw(ctxt, modrm_reg);
953 return p;
954}
955
956static int read_descriptor(struct x86_emulate_ctxt *ctxt,
957 struct segmented_address addr,
958 u16 *size, unsigned long *address, int op_bytes)
959{
960 int rc;
961
962 if (op_bytes == 2)
963 op_bytes = 3;
964 *address = 0;
965 rc = segmented_read_std(ctxt, addr, size, 2);
966 if (rc != X86EMUL_CONTINUE)
967 return rc;
968 addr.ea += 2;
969 rc = segmented_read_std(ctxt, addr, address, op_bytes);
970 return rc;
971}
972
973FASTOP2(add);
974FASTOP2(or);
975FASTOP2(adc);
976FASTOP2(sbb);
977FASTOP2(and);
978FASTOP2(sub);
979FASTOP2(xor);
980FASTOP2(cmp);
981FASTOP2(test);
982
983FASTOP1SRC2(mul, mul_ex);
984FASTOP1SRC2(imul, imul_ex);
985FASTOP1SRC2EX(div, div_ex);
986FASTOP1SRC2EX(idiv, idiv_ex);
987
988FASTOP3WCL(shld);
989FASTOP3WCL(shrd);
990
991FASTOP2W(imul);
992
993FASTOP1(not);
994FASTOP1(neg);
995FASTOP1(inc);
996FASTOP1(dec);
997
998FASTOP2CL(rol);
999FASTOP2CL(ror);
1000FASTOP2CL(rcl);
1001FASTOP2CL(rcr);
1002FASTOP2CL(shl);
1003FASTOP2CL(shr);
1004FASTOP2CL(sar);
1005
1006FASTOP2W(bsf);
1007FASTOP2W(bsr);
1008FASTOP2W(bt);
1009FASTOP2W(bts);
1010FASTOP2W(btr);
1011FASTOP2W(btc);
1012
1013FASTOP2(xadd);
1014
1015FASTOP2R(cmp, cmp_r);
1016
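/*
 * BSF/BSR leave the destination unchanged when the source is zero, so
 * suppress writeback in that case; the flags are still updated by the
 * fastop.
 */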
1017static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1018{
1019
1020 if (ctxt->src.val == 0)
1021 ctxt->dst.type = OP_NONE;
1022 return fastop(ctxt, em_bsf);
1023}
1024
1025static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1026{
1027
1028 if (ctxt->src.val == 0)
1029 ctxt->dst.type = OP_NONE;
1030 return fastop(ctxt, em_bsr);
1031}
1032
1033static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1034{
1035 u8 rc;
1036 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1037
1038 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1039 asm("push %[flags]; popf; " CALL_NOSPEC
1040 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1041 return rc;
1042}
1043
1044static void fetch_register_operand(struct operand *op)
1045{
1046 switch (op->bytes) {
1047 case 1:
1048 op->val = *(u8 *)op->addr.reg;
1049 break;
1050 case 2:
1051 op->val = *(u16 *)op->addr.reg;
1052 break;
1053 case 4:
1054 op->val = *(u32 *)op->addr.reg;
1055 break;
1056 case 8:
1057 op->val = *(u64 *)op->addr.reg;
1058 break;
1059 }
1060}
1061
1062static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1063{
1064 switch (reg) {
1065 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1066 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1067 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1068 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1069 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1070 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1071 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1072 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1073#ifdef CONFIG_X86_64
1074 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1075 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1076 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1077 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1078 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1079 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1080 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1081 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1082#endif
1083 default: BUG();
1084 }
1085}
1086
1087static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1088 int reg)
1089{
1090 switch (reg) {
1091 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1092 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1093 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1094 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1095 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1096 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1097 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1098 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1099#ifdef CONFIG_X86_64
1100 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1101 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1102 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1103 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1104 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1105 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1106 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1107 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1108#endif
1109 default: BUG();
1110 }
1111}
1112
1113static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1114{
1115 switch (reg) {
1116 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1117 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1118 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1119 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1120 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1121 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1122 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1123 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1124 default: BUG();
1125 }
1126}
1127
1128static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1129{
1130 switch (reg) {
1131 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1132 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1133 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1134 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1135 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1136 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1137 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1138 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1139 default: BUG();
1140 }
1141}
1142
1143static int em_fninit(struct x86_emulate_ctxt *ctxt)
1144{
1145 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1146 return emulate_nm(ctxt);
1147
1148 asm volatile("fninit");
1149 return X86EMUL_CONTINUE;
1150}
1151
1152static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1153{
1154 u16 fcw;
1155
1156 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1157 return emulate_nm(ctxt);
1158
1159 asm volatile("fnstcw %0": "+m"(fcw));
1160
1161 ctxt->dst.val = fcw;
1162
1163 return X86EMUL_CONTINUE;
1164}
1165
1166static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1167{
1168 u16 fsw;
1169
1170 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1171 return emulate_nm(ctxt);
1172
1173 asm volatile("fnstsw %0": "+m"(fsw));
1174
1175 ctxt->dst.val = fsw;
1176
1177 return X86EMUL_CONTINUE;
1178}
1179
1180static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1181 struct operand *op)
1182{
1183 unsigned reg = ctxt->modrm_reg;
1184
1185 if (!(ctxt->d & ModRM))
1186 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1187
1188 if (ctxt->d & Sse) {
1189 op->type = OP_XMM;
1190 op->bytes = 16;
1191 op->addr.xmm = reg;
1192 read_sse_reg(ctxt, &op->vec_val, reg);
1193 return;
1194 }
1195 if (ctxt->d & Mmx) {
1196 reg &= 7;
1197 op->type = OP_MM;
1198 op->bytes = 8;
1199 op->addr.mm = reg;
1200 return;
1201 }
1202
1203 op->type = OP_REG;
1204 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1205 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1206
1207 fetch_register_operand(op);
1208 op->orig_val = op->val;
1209}
1210
1211static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1212{
1213 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1214 ctxt->modrm_seg = VCPU_SREG_SS;
1215}
1216
1217static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1218 struct operand *op)
1219{
1220 u8 sib;
1221 int index_reg, base_reg, scale;
1222 int rc = X86EMUL_CONTINUE;
1223 ulong modrm_ea = 0;
1224
1225 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1226 index_reg = (ctxt->rex_prefix << 2) & 8;
1227 base_reg = (ctxt->rex_prefix << 3) & 8;
1228
1229 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1230 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1231 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1232 ctxt->modrm_seg = VCPU_SREG_DS;
1233
1234 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1235 op->type = OP_REG;
1236 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1237 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1238 ctxt->d & ByteOp);
1239 if (ctxt->d & Sse) {
1240 op->type = OP_XMM;
1241 op->bytes = 16;
1242 op->addr.xmm = ctxt->modrm_rm;
1243 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1244 return rc;
1245 }
1246 if (ctxt->d & Mmx) {
1247 op->type = OP_MM;
1248 op->bytes = 8;
1249 op->addr.mm = ctxt->modrm_rm & 7;
1250 return rc;
1251 }
1252 fetch_register_operand(op);
1253 return rc;
1254 }
1255
1256 op->type = OP_MEM;
1257
1258 if (ctxt->ad_bytes == 2) {
1259 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1260 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1261 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1262 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1263
1264
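 /* 16-bit ModR/M effective-address decode. */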
1265 switch (ctxt->modrm_mod) {
1266 case 0:
1267 if (ctxt->modrm_rm == 6)
1268 modrm_ea += insn_fetch(u16, ctxt);
1269 break;
1270 case 1:
1271 modrm_ea += insn_fetch(s8, ctxt);
1272 break;
1273 case 2:
1274 modrm_ea += insn_fetch(u16, ctxt);
1275 break;
1276 }
1277 switch (ctxt->modrm_rm) {
1278 case 0:
1279 modrm_ea += bx + si;
1280 break;
1281 case 1:
1282 modrm_ea += bx + di;
1283 break;
1284 case 2:
1285 modrm_ea += bp + si;
1286 break;
1287 case 3:
1288 modrm_ea += bp + di;
1289 break;
1290 case 4:
1291 modrm_ea += si;
1292 break;
1293 case 5:
1294 modrm_ea += di;
1295 break;
1296 case 6:
1297 if (ctxt->modrm_mod != 0)
1298 modrm_ea += bp;
1299 break;
1300 case 7:
1301 modrm_ea += bx;
1302 break;
1303 }
1304 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1305 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1306 ctxt->modrm_seg = VCPU_SREG_SS;
1307 modrm_ea = (u16)modrm_ea;
1308 } else {
1309
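 /* 32/64-bit ModR/M decode, with an optional SIB byte. */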
1310 if ((ctxt->modrm_rm & 7) == 4) {
1311 sib = insn_fetch(u8, ctxt);
1312 index_reg |= (sib >> 3) & 7;
1313 base_reg |= sib & 7;
1314 scale = sib >> 6;
1315
1316 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1317 modrm_ea += insn_fetch(s32, ctxt);
1318 else {
1319 modrm_ea += reg_read(ctxt, base_reg);
1320 adjust_modrm_seg(ctxt, base_reg);
1321
1322 if ((ctxt->d & IncSP) &&
1323 base_reg == VCPU_REGS_RSP)
1324 modrm_ea += ctxt->op_bytes;
1325 }
1326 if (index_reg != 4)
1327 modrm_ea += reg_read(ctxt, index_reg) << scale;
1328 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1329 modrm_ea += insn_fetch(s32, ctxt);
1330 if (ctxt->mode == X86EMUL_MODE_PROT64)
1331 ctxt->rip_relative = 1;
1332 } else {
1333 base_reg = ctxt->modrm_rm;
1334 modrm_ea += reg_read(ctxt, base_reg);
1335 adjust_modrm_seg(ctxt, base_reg);
1336 }
1337 switch (ctxt->modrm_mod) {
1338 case 1:
1339 modrm_ea += insn_fetch(s8, ctxt);
1340 break;
1341 case 2:
1342 modrm_ea += insn_fetch(s32, ctxt);
1343 break;
1344 }
1345 }
1346 op->addr.mem.ea = modrm_ea;
1347 if (ctxt->ad_bytes != 8)
1348 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1349
1350done:
1351 return rc;
1352}
1353
1354static int decode_abs(struct x86_emulate_ctxt *ctxt,
1355 struct operand *op)
1356{
1357 int rc = X86EMUL_CONTINUE;
1358
1359 op->type = OP_MEM;
1360 switch (ctxt->ad_bytes) {
1361 case 2:
1362 op->addr.mem.ea = insn_fetch(u16, ctxt);
1363 break;
1364 case 4:
1365 op->addr.mem.ea = insn_fetch(u32, ctxt);
1366 break;
1367 case 8:
1368 op->addr.mem.ea = insn_fetch(u64, ctxt);
1369 break;
1370 }
1371done:
1372 return rc;
1373}
1374
1375static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1376{
1377 long sv = 0, mask;
1378
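 /*
  * For bit operations with a register bit index and a memory destination,
  * the index may reach beyond the addressed word: fold the sign-extended
  * byte offset into the effective address, then keep only the bit offset
  * within the word in the source operand.
  */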
1379 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1380 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1381
1382 if (ctxt->src.bytes == 2)
1383 sv = (s16)ctxt->src.val & (s16)mask;
1384 else if (ctxt->src.bytes == 4)
1385 sv = (s32)ctxt->src.val & (s32)mask;
1386 else
1387 sv = (s64)ctxt->src.val & (s64)mask;
1388
1389 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1390 ctxt->dst.addr.mem.ea + (sv >> 3));
1391 }
1392
1393
1394 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1395}
1396
1397static int read_emulated(struct x86_emulate_ctxt *ctxt,
1398 unsigned long addr, void *dest, unsigned size)
1399{
1400 int rc;
1401 struct read_cache *mc = &ctxt->mem_read;
1402
1403 if (mc->pos < mc->end)
1404 goto read_cached;
1405
1406 WARN_ON((mc->end + size) >= sizeof(mc->data));
1407
1408 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1409 &ctxt->exception);
1410 if (rc != X86EMUL_CONTINUE)
1411 return rc;
1412
1413 mc->end += size;
1414
1415read_cached:
1416 memcpy(dest, mc->data + mc->pos, size);
1417 mc->pos += size;
1418 return X86EMUL_CONTINUE;
1419}
1420
1421static int segmented_read(struct x86_emulate_ctxt *ctxt,
1422 struct segmented_address addr,
1423 void *data,
1424 unsigned size)
1425{
1426 int rc;
1427 ulong linear;
1428
1429 rc = linearize(ctxt, addr, size, false, &linear);
1430 if (rc != X86EMUL_CONTINUE)
1431 return rc;
1432 return read_emulated(ctxt, linear, data, size);
1433}
1434
1435static int segmented_write(struct x86_emulate_ctxt *ctxt,
1436 struct segmented_address addr,
1437 const void *data,
1438 unsigned size)
1439{
1440 int rc;
1441 ulong linear;
1442
1443 rc = linearize(ctxt, addr, size, true, &linear);
1444 if (rc != X86EMUL_CONTINUE)
1445 return rc;
1446 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1447 &ctxt->exception);
1448}
1449
1450static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1451 struct segmented_address addr,
1452 const void *orig_data, const void *data,
1453 unsigned size)
1454{
1455 int rc;
1456 ulong linear;
1457
1458 rc = linearize(ctxt, addr, size, true, &linear);
1459 if (rc != X86EMUL_CONTINUE)
1460 return rc;
1461 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1462 size, &ctxt->exception);
1463}
1464
1465static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1466 unsigned int size, unsigned short port,
1467 void *dest)
1468{
1469 struct read_cache *rc = &ctxt->io_read;
1470
1471 if (rc->pos == rc->end) {
1472 unsigned int in_page, n;
1473 unsigned int count = ctxt->rep_prefix ?
1474 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1475 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1476 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1477 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1478 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1479 if (n == 0)
1480 n = 1;
1481 rc->pos = rc->end = 0;
1482 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1483 return 0;
1484 rc->end = n * size;
1485 }
1486
1487 if (ctxt->rep_prefix && (ctxt->d & String) &&
1488 !(ctxt->eflags & X86_EFLAGS_DF)) {
1489 ctxt->dst.data = rc->data + rc->pos;
1490 ctxt->dst.type = OP_MEM_STR;
1491 ctxt->dst.count = (rc->end - rc->pos) / size;
1492 rc->pos = rc->end;
1493 } else {
1494 memcpy(dest, rc->data + rc->pos, size);
1495 rc->pos += size;
1496 }
1497 return 1;
1498}
1499
1500static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1501 u16 index, struct desc_struct *desc)
1502{
1503 struct desc_ptr dt;
1504 ulong addr;
1505
1506 ctxt->ops->get_idt(ctxt, &dt);
1507
1508 if (dt.size < index * 8 + 7)
1509 return emulate_gp(ctxt, index << 3 | 0x2);
1510
1511 addr = dt.address + index * 8;
1512 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1513}
1514
1515static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1516 u16 selector, struct desc_ptr *dt)
1517{
1518 const struct x86_emulate_ops *ops = ctxt->ops;
1519 u32 base3 = 0;
1520
1521 if (selector & 1 << 2) {
1522 struct desc_struct desc;
1523 u16 sel;
1524
1525 memset(dt, 0, sizeof(*dt));
1526 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1527 VCPU_SREG_LDTR))
1528 return;
1529
1530 dt->size = desc_limit_scaled(&desc);
1531 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1532 } else
1533 ops->get_gdt(ctxt, dt);
1534}
1535
1536static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1537 u16 selector, ulong *desc_addr_p)
1538{
1539 struct desc_ptr dt;
1540 u16 index = selector >> 3;
1541 ulong addr;
1542
1543 get_descriptor_table_ptr(ctxt, selector, &dt);
1544
1545 if (dt.size < index * 8 + 7)
1546 return emulate_gp(ctxt, selector & 0xfffc);
1547
1548 addr = dt.address + index * 8;
1549
1550#ifdef CONFIG_X86_64
1551 if (addr >> 32 != 0) {
1552 u64 efer = 0;
1553
1554 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1555 if (!(efer & EFER_LMA))
1556 addr &= (u32)-1;
1557 }
1558#endif
1559
1560 *desc_addr_p = addr;
1561 return X86EMUL_CONTINUE;
1562}
1563
1564
1565static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1566 u16 selector, struct desc_struct *desc,
1567 ulong *desc_addr_p)
1568{
1569 int rc;
1570
1571 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1572 if (rc != X86EMUL_CONTINUE)
1573 return rc;
1574
1575 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1576}
1577
1578
1579static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1580 u16 selector, struct desc_struct *desc)
1581{
1582 int rc;
1583 ulong addr;
1584
1585 rc = get_descriptor_ptr(ctxt, selector, &addr);
1586 if (rc != X86EMUL_CONTINUE)
1587 return rc;
1588
1589 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1590}
1591
1592static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1593 u16 selector, int seg, u8 cpl,
1594 enum x86_transfer_type transfer,
1595 struct desc_struct *desc)
1596{
1597 struct desc_struct seg_desc, old_desc;
1598 u8 dpl, rpl;
1599 unsigned err_vec = GP_VECTOR;
1600 u32 err_code = 0;
1601 bool null_selector = !(selector & ~0x3);
1602 ulong desc_addr;
1603 int ret;
1604 u16 dummy;
1605 u32 base3 = 0;
1606
1607 memset(&seg_desc, 0, sizeof(seg_desc));
1608
1609 if (ctxt->mode == X86EMUL_MODE_REAL) {
1610
1611
1612 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1613 set_desc_base(&seg_desc, selector << 4);
1614 goto load;
1615 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1616
1617 set_desc_base(&seg_desc, selector << 4);
1618 set_desc_limit(&seg_desc, 0xffff);
1619 seg_desc.type = 3;
1620 seg_desc.p = 1;
1621 seg_desc.s = 1;
1622 seg_desc.dpl = 3;
1623 goto load;
1624 }
1625
1626 rpl = selector & 3;
1627
1628
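 /* TR must be located in the GDT, never in the LDT. */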
1629 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1630 goto exception;
1631
1632
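 /*
  * A NULL selector is never valid for CS or TR, and for SS it is only
  * allowed in 64-bit mode with RPL equal to CPL.
  */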
1633 if (null_selector) {
1634 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1635 goto exception;
1636
1637 if (seg == VCPU_SREG_SS) {
1638 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1639 goto exception;
1640
1641
1642
1643
1644
1645 seg_desc.type = 3;
1646 seg_desc.p = 1;
1647 seg_desc.s = 1;
1648 seg_desc.dpl = cpl;
1649 seg_desc.d = 1;
1650 seg_desc.g = 1;
1651 }
1652
1653
1654 goto load;
1655 }
1656
1657 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1658 if (ret != X86EMUL_CONTINUE)
1659 return ret;
1660
1661 err_code = selector & 0xfffc;
1662 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1663 GP_VECTOR;
1664
1665
1666 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1667 if (transfer == X86_TRANSFER_CALL_JMP)
1668 return X86EMUL_UNHANDLEABLE;
1669 goto exception;
1670 }
1671
1672 if (!seg_desc.p) {
1673 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1674 goto exception;
1675 }
1676
1677 dpl = seg_desc.dpl;
1678
1679 switch (seg) {
1680 case VCPU_SREG_SS:
1681
1682
1683
1684
1685 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1686 goto exception;
1687 break;
1688 case VCPU_SREG_CS:
1689 if (!(seg_desc.type & 8))
1690 goto exception;
1691
1692 if (seg_desc.type & 4) {
1693
1694 if (dpl > cpl)
1695 goto exception;
1696 } else {
1697
1698 if (rpl > cpl || dpl != cpl)
1699 goto exception;
1700 }
1701
1702 if (seg_desc.d && seg_desc.l) {
1703 u64 efer = 0;
1704
1705 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1706 if (efer & EFER_LMA)
1707 goto exception;
1708 }
1709
1710
1711 selector = (selector & 0xfffc) | cpl;
1712 break;
1713 case VCPU_SREG_TR:
1714 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1715 goto exception;
1716 old_desc = seg_desc;
1717 seg_desc.type |= 2;
1718 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1719 sizeof(seg_desc), &ctxt->exception);
1720 if (ret != X86EMUL_CONTINUE)
1721 return ret;
1722 break;
1723 case VCPU_SREG_LDTR:
1724 if (seg_desc.s || seg_desc.type != 2)
1725 goto exception;
1726 break;
1727 default:
1728
1729
1730
1731
1732
1733 if ((seg_desc.type & 0xa) == 0x8 ||
1734 (((seg_desc.type & 0xc) != 0xc) &&
1735 (rpl > dpl && cpl > dpl)))
1736 goto exception;
1737 break;
1738 }
1739
1740 if (seg_desc.s) {
1741
1742 if (!(seg_desc.type & 1)) {
1743 seg_desc.type |= 1;
1744 ret = write_segment_descriptor(ctxt, selector,
1745 &seg_desc);
1746 if (ret != X86EMUL_CONTINUE)
1747 return ret;
1748 }
1749 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1750 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1751 if (ret != X86EMUL_CONTINUE)
1752 return ret;
1753 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1754 ((u64)base3 << 32), ctxt))
1755 return emulate_gp(ctxt, 0);
1756 }
1757load:
1758 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1759 if (desc)
1760 *desc = seg_desc;
1761 return X86EMUL_CONTINUE;
1762exception:
1763 return emulate_exception(ctxt, err_vec, err_code, true);
1764}
1765
1766static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1767 u16 selector, int seg)
1768{
1769 u8 cpl = ctxt->ops->cpl(ctxt);
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
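 /*
  * Loading SS with a NULL selector whose RPL is 3 is rejected here, even
  * though the generic NULL-selector handling in __load_segment_descriptor()
  * would otherwise permit it when CPL is also 3.
  */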
1781 if (seg == VCPU_SREG_SS && selector == 3 &&
1782 ctxt->mode == X86EMUL_MODE_PROT64)
1783 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1784
1785 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1786 X86_TRANSFER_NONE, NULL);
1787}
1788
1789static void write_register_operand(struct operand *op)
1790{
1791 return assign_register(op->addr.reg, op->val, op->bytes);
1792}
1793
1794static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1795{
1796 switch (op->type) {
1797 case OP_REG:
1798 write_register_operand(op);
1799 break;
1800 case OP_MEM:
1801 if (ctxt->lock_prefix)
1802 return segmented_cmpxchg(ctxt,
1803 op->addr.mem,
1804 &op->orig_val,
1805 &op->val,
1806 op->bytes);
1807 else
1808 return segmented_write(ctxt,
1809 op->addr.mem,
1810 &op->val,
1811 op->bytes);
1812 break;
1813 case OP_MEM_STR:
1814 return segmented_write(ctxt,
1815 op->addr.mem,
1816 op->data,
1817 op->bytes * op->count);
1818 break;
1819 case OP_XMM:
1820 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1821 break;
1822 case OP_MM:
1823 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1824 break;
1825 case OP_NONE:
1826
1827 break;
1828 default:
1829 break;
1830 }
1831 return X86EMUL_CONTINUE;
1832}
1833
1834static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1835{
1836 struct segmented_address addr;
1837
1838 rsp_increment(ctxt, -bytes);
1839 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1840 addr.seg = VCPU_SREG_SS;
1841
1842 return segmented_write(ctxt, addr, data, bytes);
1843}
1844
1845static int em_push(struct x86_emulate_ctxt *ctxt)
1846{
1847
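 /* Disable writeback; push() updates RSP and writes the value itself. */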
1848 ctxt->dst.type = OP_NONE;
1849 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1850}
1851
1852static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1853 void *dest, int len)
1854{
1855 int rc;
1856 struct segmented_address addr;
1857
1858 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1859 addr.seg = VCPU_SREG_SS;
1860 rc = segmented_read(ctxt, addr, dest, len);
1861 if (rc != X86EMUL_CONTINUE)
1862 return rc;
1863
1864 rsp_increment(ctxt, len);
1865 return rc;
1866}
1867
1868static int em_pop(struct x86_emulate_ctxt *ctxt)
1869{
1870 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1871}
1872
1873static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1874 void *dest, int len)
1875{
1876 int rc;
1877 unsigned long val, change_mask;
1878 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1879 int cpl = ctxt->ops->cpl(ctxt);
1880
1881 rc = emulate_pop(ctxt, &val, len);
1882 if (rc != X86EMUL_CONTINUE)
1883 return rc;
1884
1885 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1886 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1887 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1888 X86_EFLAGS_AC | X86_EFLAGS_ID;
1889
1890 switch(ctxt->mode) {
1891 case X86EMUL_MODE_PROT64:
1892 case X86EMUL_MODE_PROT32:
1893 case X86EMUL_MODE_PROT16:
1894 if (cpl == 0)
1895 change_mask |= X86_EFLAGS_IOPL;
1896 if (cpl <= iopl)
1897 change_mask |= X86_EFLAGS_IF;
1898 break;
1899 case X86EMUL_MODE_VM86:
1900 if (iopl < 3)
1901 return emulate_gp(ctxt, 0);
1902 change_mask |= X86_EFLAGS_IF;
1903 break;
1904 default:
1905 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1906 break;
1907 }
1908
1909 *(unsigned long *)dest =
1910 (ctxt->eflags & ~change_mask) | (val & change_mask);
1911
1912 return rc;
1913}
1914
1915static int em_popf(struct x86_emulate_ctxt *ctxt)
1916{
1917 ctxt->dst.type = OP_REG;
1918 ctxt->dst.addr.reg = &ctxt->eflags;
1919 ctxt->dst.bytes = ctxt->op_bytes;
1920 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1921}
1922
1923static int em_enter(struct x86_emulate_ctxt *ctxt)
1924{
1925 int rc;
1926 unsigned frame_size = ctxt->src.val;
1927 unsigned nesting_level = ctxt->src2.val & 31;
1928 ulong rbp;
1929
1930 if (nesting_level)
1931 return X86EMUL_UNHANDLEABLE;
1932
1933 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1934 rc = push(ctxt, &rbp, stack_size(ctxt));
1935 if (rc != X86EMUL_CONTINUE)
1936 return rc;
1937 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1938 stack_mask(ctxt));
1939 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1940 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1941 stack_mask(ctxt));
1942 return X86EMUL_CONTINUE;
1943}
1944
1945static int em_leave(struct x86_emulate_ctxt *ctxt)
1946{
1947 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1948 stack_mask(ctxt));
1949 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1950}
1951
1952static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1953{
1954 int seg = ctxt->src2.val;
1955
1956 ctxt->src.val = get_segment_selector(ctxt, seg);
1957 if (ctxt->op_bytes == 4) {
1958 rsp_increment(ctxt, -2);
1959 ctxt->op_bytes = 2;
1960 }
1961
1962 return em_push(ctxt);
1963}
1964
1965static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1966{
1967 int seg = ctxt->src2.val;
1968 unsigned long selector;
1969 int rc;
1970
1971 rc = emulate_pop(ctxt, &selector, 2);
1972 if (rc != X86EMUL_CONTINUE)
1973 return rc;
1974
1975 if (ctxt->modrm_reg == VCPU_SREG_SS)
1976 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1977 if (ctxt->op_bytes > 2)
1978 rsp_increment(ctxt, ctxt->op_bytes - 2);
1979
1980 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1981 return rc;
1982}
1983
1984static int em_pusha(struct x86_emulate_ctxt *ctxt)
1985{
1986 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1987 int rc = X86EMUL_CONTINUE;
1988 int reg = VCPU_REGS_RAX;
1989
1990 while (reg <= VCPU_REGS_RDI) {
1991 (reg == VCPU_REGS_RSP) ?
1992 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1993
1994 rc = em_push(ctxt);
1995 if (rc != X86EMUL_CONTINUE)
1996 return rc;
1997
1998 ++reg;
1999 }
2000
2001 return rc;
2002}
2003
2004static int em_pushf(struct x86_emulate_ctxt *ctxt)
2005{
2006 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2007 return em_push(ctxt);
2008}
2009
2010static int em_popa(struct x86_emulate_ctxt *ctxt)
2011{
2012 int rc = X86EMUL_CONTINUE;
2013 int reg = VCPU_REGS_RDI;
2014 u32 val;
2015
2016 while (reg >= VCPU_REGS_RAX) {
2017 if (reg == VCPU_REGS_RSP) {
2018 rsp_increment(ctxt, ctxt->op_bytes);
2019 --reg;
2020 }
2021
2022 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2023 if (rc != X86EMUL_CONTINUE)
2024 break;
2025 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2026 --reg;
2027 }
2028 return rc;
2029}
2030
2031static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2032{
2033 const struct x86_emulate_ops *ops = ctxt->ops;
2034 int rc;
2035 struct desc_ptr dt;
2036 gva_t cs_addr;
2037 gva_t eip_addr;
2038 u16 cs, eip;
2039
2040
2041 ctxt->src.val = ctxt->eflags;
2042 rc = em_push(ctxt);
2043 if (rc != X86EMUL_CONTINUE)
2044 return rc;
2045
2046 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2047
2048 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2049 rc = em_push(ctxt);
2050 if (rc != X86EMUL_CONTINUE)
2051 return rc;
2052
2053 ctxt->src.val = ctxt->_eip;
2054 rc = em_push(ctxt);
2055 if (rc != X86EMUL_CONTINUE)
2056 return rc;
2057
2058 ops->get_idt(ctxt, &dt);
2059
2060 eip_addr = dt.address + (irq << 2);
2061 cs_addr = dt.address + (irq << 2) + 2;
2062
2063 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2064 if (rc != X86EMUL_CONTINUE)
2065 return rc;
2066
2067 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2068 if (rc != X86EMUL_CONTINUE)
2069 return rc;
2070
2071 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2072 if (rc != X86EMUL_CONTINUE)
2073 return rc;
2074
2075 ctxt->_eip = eip;
2076
2077 return rc;
2078}
2079
2080int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2081{
2082 int rc;
2083
2084 invalidate_registers(ctxt);
2085 rc = __emulate_int_real(ctxt, irq);
2086 if (rc == X86EMUL_CONTINUE)
2087 writeback_registers(ctxt);
2088 return rc;
2089}
2090
2091static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2092{
2093 switch(ctxt->mode) {
2094 case X86EMUL_MODE_REAL:
2095 return __emulate_int_real(ctxt, irq);
2096 case X86EMUL_MODE_VM86:
2097 case X86EMUL_MODE_PROT16:
2098 case X86EMUL_MODE_PROT32:
2099 case X86EMUL_MODE_PROT64:
2100 default:
2101
2102 return X86EMUL_UNHANDLEABLE;
2103 }
2104}
2105
2106static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2107{
2108 int rc = X86EMUL_CONTINUE;
2109 unsigned long temp_eip = 0;
2110 unsigned long temp_eflags = 0;
2111 unsigned long cs = 0;
2112 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2113 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2114 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2115 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2116 X86_EFLAGS_AC | X86_EFLAGS_ID |
2117 X86_EFLAGS_FIXED;
2118 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2119 X86_EFLAGS_VIP;
2120
2121
2122
2123 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2124
2125 if (rc != X86EMUL_CONTINUE)
2126 return rc;
2127
2128 if (temp_eip & ~0xffff)
2129 return emulate_gp(ctxt, 0);
2130
2131 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2132
2133 if (rc != X86EMUL_CONTINUE)
2134 return rc;
2135
2136 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2137
2138 if (rc != X86EMUL_CONTINUE)
2139 return rc;
2140
2141 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2142
2143 if (rc != X86EMUL_CONTINUE)
2144 return rc;
2145
2146 ctxt->_eip = temp_eip;
2147
2148 if (ctxt->op_bytes == 4)
2149 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2150 else if (ctxt->op_bytes == 2) {
2151 ctxt->eflags &= ~0xffff;
2152 ctxt->eflags |= temp_eflags;
2153 }
2154
2155 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2156 ctxt->eflags |= X86_EFLAGS_FIXED;
2157 ctxt->ops->set_nmi_mask(ctxt, false);
2158
2159 return rc;
2160}
2161
2162static int em_iret(struct x86_emulate_ctxt *ctxt)
2163{
2164 switch(ctxt->mode) {
2165 case X86EMUL_MODE_REAL:
2166 return emulate_iret_real(ctxt);
2167 case X86EMUL_MODE_VM86:
2168 case X86EMUL_MODE_PROT16:
2169 case X86EMUL_MODE_PROT32:
2170 case X86EMUL_MODE_PROT64:
2171 default:
2172
2173 return X86EMUL_UNHANDLEABLE;
2174 }
2175}
2176
2177static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2178{
2179 int rc;
2180 unsigned short sel;
2181 struct desc_struct new_desc;
2182 u8 cpl = ctxt->ops->cpl(ctxt);
2183
2184 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2185
2186 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2187 X86_TRANSFER_CALL_JMP,
2188 &new_desc);
2189 if (rc != X86EMUL_CONTINUE)
2190 return rc;
2191
2192 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2193
2194 if (rc != X86EMUL_CONTINUE)
2195 return X86EMUL_UNHANDLEABLE;
2196
2197 return rc;
2198}
2199
2200static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2201{
2202 return assign_eip_near(ctxt, ctxt->src.val);
2203}
2204
2205static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2206{
2207 int rc;
2208 long int old_eip;
2209
2210 old_eip = ctxt->_eip;
2211 rc = assign_eip_near(ctxt, ctxt->src.val);
2212 if (rc != X86EMUL_CONTINUE)
2213 return rc;
2214 ctxt->src.val = old_eip;
2215 rc = em_push(ctxt);
2216 return rc;
2217}
2218
2219static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2220{
2221 u64 old = ctxt->dst.orig_val64;
2222
2223 if (ctxt->dst.bytes == 16)
2224 return X86EMUL_UNHANDLEABLE;
2225
2226 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2227 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2228 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2229 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2230 ctxt->eflags &= ~X86_EFLAGS_ZF;
2231 } else {
2232 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2233 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2234
2235 ctxt->eflags |= X86_EFLAGS_ZF;
2236 }
2237 return X86EMUL_CONTINUE;
2238}
2239
2240static int em_ret(struct x86_emulate_ctxt *ctxt)
2241{
2242 int rc;
2243 unsigned long eip;
2244
2245 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2246 if (rc != X86EMUL_CONTINUE)
2247 return rc;
2248
2249 return assign_eip_near(ctxt, eip);
2250}
2251
2252static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2253{
2254 int rc;
2255 unsigned long eip, cs;
2256 int cpl = ctxt->ops->cpl(ctxt);
2257 struct desc_struct new_desc;
2258
2259 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2260 if (rc != X86EMUL_CONTINUE)
2261 return rc;
2262 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2263 if (rc != X86EMUL_CONTINUE)
2264 return rc;
2265
2266 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2267 return X86EMUL_UNHANDLEABLE;
2268 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2269 X86_TRANSFER_RET,
2270 &new_desc);
2271 if (rc != X86EMUL_CONTINUE)
2272 return rc;
2273 rc = assign_eip_far(ctxt, eip, &new_desc);
2274
2275 if (rc != X86EMUL_CONTINUE)
2276 return X86EMUL_UNHANDLEABLE;
2277
2278 return rc;
2279}
2280
2281static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2282{
2283 int rc;
2284
2285 rc = em_ret_far(ctxt);
2286 if (rc != X86EMUL_CONTINUE)
2287 return rc;
2288 rsp_increment(ctxt, ctxt->src.val);
2289 return X86EMUL_CONTINUE;
2290}
2291
2292static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2293{
2294
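	/*
	 * Compare the accumulator against the destination (the CMP below sets
	 * the flags), then either write the source to the destination (ZF set)
	 * or write the old destination value back to the accumulator.
	 */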
2295 ctxt->dst.orig_val = ctxt->dst.val;
2296 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2297 ctxt->src.orig_val = ctxt->src.val;
2298 ctxt->src.val = ctxt->dst.orig_val;
2299 fastop(ctxt, em_cmp);
2300
2301 if (ctxt->eflags & X86_EFLAGS_ZF) {
2302
2303 ctxt->src.type = OP_NONE;
2304 ctxt->dst.val = ctxt->src.orig_val;
2305 } else {
2306
2307 ctxt->src.type = OP_REG;
2308 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2309 ctxt->src.val = ctxt->dst.orig_val;
2310
2311 ctxt->dst.val = ctxt->dst.orig_val;
2312 }
2313 return X86EMUL_CONTINUE;
2314}
2315
2316static int em_lseg(struct x86_emulate_ctxt *ctxt)
2317{
2318 int seg = ctxt->src2.val;
2319 unsigned short sel;
2320 int rc;
2321
2322 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2323
2324 rc = load_segment_descriptor(ctxt, sel, seg);
2325 if (rc != X86EMUL_CONTINUE)
2326 return rc;
2327
2328 ctxt->dst.val = ctxt->src.val;
2329 return rc;
2330}
2331
2332static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2333{
2334#ifdef CONFIG_X86_64
2335 u32 eax, ebx, ecx, edx;
2336
2337 eax = 0x80000001;
2338 ecx = 0;
2339 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2340 return edx & bit(X86_FEATURE_LM);
2341#else
2342 return false;
2343#endif
2344}
2345
2346static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2347{
2348 desc->g = (flags >> 23) & 1;
2349 desc->d = (flags >> 22) & 1;
2350 desc->l = (flags >> 21) & 1;
2351 desc->avl = (flags >> 20) & 1;
2352 desc->p = (flags >> 15) & 1;
2353 desc->dpl = (flags >> 13) & 3;
2354 desc->s = (flags >> 12) & 1;
2355 desc->type = (flags >> 8) & 15;
2356}
2357
2358static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2359 int n)
2360{
2361 struct desc_struct desc;
2362 int offset;
2363 u16 selector;
2364
2365 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2366
2367 if (n < 3)
2368 offset = 0x7f84 + n * 12;
2369 else
2370 offset = 0x7f2c + (n - 3) * 12;
2371
2372 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2373 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2374 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2375 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2376 return X86EMUL_CONTINUE;
2377}
2378
2379#ifdef CONFIG_X86_64
2380static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2381 int n)
2382{
2383 struct desc_struct desc;
2384 int offset;
2385 u16 selector;
2386 u32 base3;
2387
2388 offset = 0x7e00 + n * 16;
2389
2390 selector = GET_SMSTATE(u16, smstate, offset);
2391 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2392 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2393 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2394 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2395
2396 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2397 return X86EMUL_CONTINUE;
2398}
2399#endif
2400
2401static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2402 u64 cr0, u64 cr3, u64 cr4)
2403{
2404 int bad;
2405 u64 pcid;
2406
2407
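	/*
	 * In order to later set CR4.PCIDE, CR3[11:0] must be zero; stash the
	 * PCID here and restore it once PCIDE is enabled again.
	 */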
2408 pcid = 0;
2409 if (cr4 & X86_CR4_PCIDE) {
2410 pcid = cr3 & 0xfff;
2411 cr3 &= ~0xfff;
2412 }
2413
2414 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2415 if (bad)
2416 return X86EMUL_UNHANDLEABLE;
2422
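	/*
	 * Enable PAE first (long mode needs it before CR0.PG is set), then
	 * protected mode via CR0.  CR4.PCIDE cannot be set while EFER.LMA=0,
	 * so it is re-enabled separately, only after CR0 is in place.
	 */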
2423 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2424 if (bad)
2425 return X86EMUL_UNHANDLEABLE;
2426
2427 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2428 if (bad)
2429 return X86EMUL_UNHANDLEABLE;
2430
2431 if (cr4 & X86_CR4_PCIDE) {
2432 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2433 if (bad)
2434 return X86EMUL_UNHANDLEABLE;
2435 if (pcid) {
2436 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2437 if (bad)
2438 return X86EMUL_UNHANDLEABLE;
2439 }
2440
2441 }
2442
2443 return X86EMUL_CONTINUE;
2444}
2445
2446static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2447 const char *smstate)
2448{
2449 struct desc_struct desc;
2450 struct desc_ptr dt;
2451 u16 selector;
2452 u32 val, cr0, cr3, cr4;
2453 int i;
2454
2455 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2456 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2457 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2458 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2459
2460 for (i = 0; i < 8; i++)
2461 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2462
2463 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2464 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2465 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2466 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2467
2468 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2469 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2470 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2471 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2472 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2473
2474 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2475 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2476 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2477 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2478 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2479
2480 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2481 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2482 ctxt->ops->set_gdt(ctxt, &dt);
2483
2484 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2485 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2486 ctxt->ops->set_idt(ctxt, &dt);
2487
2488 for (i = 0; i < 6; i++) {
2489 int r = rsm_load_seg_32(ctxt, smstate, i);
2490 if (r != X86EMUL_CONTINUE)
2491 return r;
2492 }
2493
2494 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2495
2496 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2497
2498 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2499}
2500
2501#ifdef CONFIG_X86_64
2502static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2503 const char *smstate)
2504{
2505 struct desc_struct desc;
2506 struct desc_ptr dt;
2507 u64 val, cr0, cr3, cr4;
2508 u32 base3;
2509 u16 selector;
2510 int i, r;
2511
2512 for (i = 0; i < 16; i++)
2513 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2514
2515 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2516 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2517
2518 val = GET_SMSTATE(u32, smstate, 0x7f68);
2519 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2520 val = GET_SMSTATE(u32, smstate, 0x7f60);
2521 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2522
2523 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2524 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2525 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2526 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2527 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2528 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2529
2530 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2531 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2532 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2533 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2534 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2535 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2536
2537 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2538 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2539 ctxt->ops->set_idt(ctxt, &dt);
2540
2541 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2542 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2543 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2544 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2545 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2546 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2547
2548 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2549 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2550 ctxt->ops->set_gdt(ctxt, &dt);
2551
2552 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2553 if (r != X86EMUL_CONTINUE)
2554 return r;
2555
2556 for (i = 0; i < 6; i++) {
2557 r = rsm_load_seg_64(ctxt, smstate, i);
2558 if (r != X86EMUL_CONTINUE)
2559 return r;
2560 }
2561
2562 return X86EMUL_CONTINUE;
2563}
2564#endif
2565
2566static int em_rsm(struct x86_emulate_ctxt *ctxt)
2567{
2568 unsigned long cr0, cr4, efer;
2569 char buf[512];
2570 u64 smbase;
2571 int ret;
2572
2573 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2574 return emulate_ud(ctxt);
2575
2576 smbase = ctxt->ops->get_smbase(ctxt);
2577
2578 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2579 if (ret != X86EMUL_CONTINUE)
2580 return X86EMUL_UNHANDLEABLE;
2581
2582 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2583 ctxt->ops->set_nmi_mask(ctxt, false);
2584
2585 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2586 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2592
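	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */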
2593 if (emulator_has_longmode(ctxt)) {
2594 struct desc_struct cs_desc;
2595
2596
2597 cr4 = ctxt->ops->get_cr(ctxt, 4);
2598 if (cr4 & X86_CR4_PCIDE)
2599 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2600
2601
2602 memset(&cs_desc, 0, sizeof(cs_desc));
2603 cs_desc.type = 0xb;
2604 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2605 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2606 }
2607
2608
2609 cr0 = ctxt->ops->get_cr(ctxt, 0);
2610 if (cr0 & X86_CR0_PE)
2611 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2612
2613 if (emulator_has_longmode(ctxt)) {
2614
2615 cr4 = ctxt->ops->get_cr(ctxt, 4);
2616 if (cr4 & X86_CR4_PAE)
2617 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2618
2619
2620 efer = 0;
2621 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2622 }
2628
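	/*
	 * Give the backend's pre_leave_smm() hook a chance to adjust or veto
	 * state before the SMM state-save area is loaded; a non-zero return
	 * aborts the RSM.
	 */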
2629 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2630 return X86EMUL_UNHANDLEABLE;
2631
2632#ifdef CONFIG_X86_64
2633 if (emulator_has_longmode(ctxt))
2634 ret = rsm_load_state_64(ctxt, buf);
2635 else
2636#endif
2637 ret = rsm_load_state_32(ctxt, buf);
2638
2639 if (ret != X86EMUL_CONTINUE) {
2640
2641 return X86EMUL_UNHANDLEABLE;
2642 }
2643
2644 ctxt->ops->post_leave_smm(ctxt);
2645
2646 return X86EMUL_CONTINUE;
2647}
2648
2649static void
2650setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2651 struct desc_struct *cs, struct desc_struct *ss)
2652{
2653 cs->l = 0;
2654 set_desc_base(cs, 0);
2655 cs->g = 1;
2656 set_desc_limit(cs, 0xfffff);
2657 cs->type = 0x0b;
2658 cs->s = 1;
2659 cs->dpl = 0;
2660 cs->p = 1;
2661 cs->d = 1;
2662 cs->avl = 0;
2663
2664 set_desc_base(ss, 0);
2665 set_desc_limit(ss, 0xfffff);
2666 ss->g = 1;
2667 ss->s = 1;
2668 ss->type = 0x03;
2669 ss->d = 1;
2670 ss->dpl = 0;
2671 ss->p = 1;
2672 ss->l = 0;
2673 ss->avl = 0;
2674}
2675
2676static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2677{
2678 u32 eax, ebx, ecx, edx;
2679
2680 eax = ecx = 0;
2681 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2682 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2683 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2684 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2685}
2686
2687static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2688{
2689 const struct x86_emulate_ops *ops = ctxt->ops;
2690 u32 eax, ebx, ecx, edx;
2695
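	/*
	 * SYSCALL is architecturally always available in 64-bit mode; the
	 * vendor checks below only matter for compatibility/legacy modes.
	 */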
2696 if (ctxt->mode == X86EMUL_MODE_PROT64)
2697 return true;
2698
2699 eax = 0x00000000;
2700 ecx = 0x00000000;
2701 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2709
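	/*
	 * Intel CPUs accept SYSCALL only in 64-bit mode, so refuse it for a
	 * GenuineIntel guest running outside long mode; the AMD vendor
	 * strings (including "AMDisbetter!") allow it.
	 */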
2710 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2711 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2712 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2713 return false;
2714
2715
2716 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2717 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2718 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2719 return true;
2720
2721
2722 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2723 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2724 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2725 return true;
2726
2727
2728 return false;
2729}
2730
2731static int em_syscall(struct x86_emulate_ctxt *ctxt)
2732{
2733 const struct x86_emulate_ops *ops = ctxt->ops;
2734 struct desc_struct cs, ss;
2735 u64 msr_data;
2736 u16 cs_sel, ss_sel;
2737 u64 efer = 0;
2738
2739
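	/* SYSCALL raises #UD in real mode and VM86 mode. */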
2740 if (ctxt->mode == X86EMUL_MODE_REAL ||
2741 ctxt->mode == X86EMUL_MODE_VM86)
2742 return emulate_ud(ctxt);
2743
2744 if (!em_syscall_is_enabled(ctxt))
2745 return emulate_ud(ctxt);
2746
2747 ops->get_msr(ctxt, MSR_EFER, &efer);
2748 setup_syscalls_segments(ctxt, &cs, &ss);
2749
2750 if (!(efer & EFER_SCE))
2751 return emulate_ud(ctxt);
2752
2753 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2754 msr_data >>= 32;
2755 cs_sel = (u16)(msr_data & 0xfffc);
2756 ss_sel = (u16)(msr_data + 8);
2757
2758 if (efer & EFER_LMA) {
2759 cs.d = 0;
2760 cs.l = 1;
2761 }
2762 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2763 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2764
2765 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2766 if (efer & EFER_LMA) {
2767#ifdef CONFIG_X86_64
2768 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2769
2770 ops->get_msr(ctxt,
2771 ctxt->mode == X86EMUL_MODE_PROT64 ?
2772 MSR_LSTAR : MSR_CSTAR, &msr_data);
2773 ctxt->_eip = msr_data;
2774
2775 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2776 ctxt->eflags &= ~msr_data;
2777 ctxt->eflags |= X86_EFLAGS_FIXED;
2778#endif
2779 } else {
2780
2781 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2782 ctxt->_eip = (u32)msr_data;
2783
2784 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2785 }
2786
2787 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2788 return X86EMUL_CONTINUE;
2789}
2790
2791static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2792{
2793 const struct x86_emulate_ops *ops = ctxt->ops;
2794 struct desc_struct cs, ss;
2795 u64 msr_data;
2796 u16 cs_sel, ss_sel;
2797 u64 efer = 0;
2798
2799 ops->get_msr(ctxt, MSR_EFER, &efer);
2800
2801 if (ctxt->mode == X86EMUL_MODE_REAL)
2802 return emulate_gp(ctxt, 0);
2803
2804
2805
2806
2807
2808 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2809 && !vendor_intel(ctxt))
2810 return emulate_ud(ctxt);
2811
2812
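	/* SYSENTER emulation in 64-bit mode is not implemented. */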
2813 if (ctxt->mode == X86EMUL_MODE_PROT64)
2814 return X86EMUL_UNHANDLEABLE;
2815
2816 setup_syscalls_segments(ctxt, &cs, &ss);
2817
2818 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2819 if ((msr_data & 0xfffc) == 0x0)
2820 return emulate_gp(ctxt, 0);
2821
2822 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2823 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2824 ss_sel = cs_sel + 8;
2825 if (efer & EFER_LMA) {
2826 cs.d = 0;
2827 cs.l = 1;
2828 }
2829
2830 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2831 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2832
2833 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2834 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2835
2836 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2837 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2838 (u32)msr_data;
2839
2840 return X86EMUL_CONTINUE;
2841}
2842
2843static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2844{
2845 const struct x86_emulate_ops *ops = ctxt->ops;
2846 struct desc_struct cs, ss;
2847 u64 msr_data, rcx, rdx;
2848 int usermode;
2849 u16 cs_sel = 0, ss_sel = 0;
2850
2851
2852 if (ctxt->mode == X86EMUL_MODE_REAL ||
2853 ctxt->mode == X86EMUL_MODE_VM86)
2854 return emulate_gp(ctxt, 0);
2855
2856 setup_syscalls_segments(ctxt, &cs, &ss);
2857
2858 if ((ctxt->rex_prefix & 0x8) != 0x0)
2859 usermode = X86EMUL_MODE_PROT64;
2860 else
2861 usermode = X86EMUL_MODE_PROT32;
2862
2863 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2864 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2865
2866 cs.dpl = 3;
2867 ss.dpl = 3;
2868 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2869 switch (usermode) {
2870 case X86EMUL_MODE_PROT32:
2871 cs_sel = (u16)(msr_data + 16);
2872 if ((msr_data & 0xfffc) == 0x0)
2873 return emulate_gp(ctxt, 0);
2874 ss_sel = (u16)(msr_data + 24);
2875 rcx = (u32)rcx;
2876 rdx = (u32)rdx;
2877 break;
2878 case X86EMUL_MODE_PROT64:
2879 cs_sel = (u16)(msr_data + 32);
2880 if (msr_data == 0x0)
2881 return emulate_gp(ctxt, 0);
2882 ss_sel = cs_sel + 8;
2883 cs.d = 0;
2884 cs.l = 1;
2885 if (emul_is_noncanonical_address(rcx, ctxt) ||
2886 emul_is_noncanonical_address(rdx, ctxt))
2887 return emulate_gp(ctxt, 0);
2888 break;
2889 }
2890 cs_sel |= SEGMENT_RPL_MASK;
2891 ss_sel |= SEGMENT_RPL_MASK;
2892
2893 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2894 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2895
2896 ctxt->_eip = rdx;
2897 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2898
2899 return X86EMUL_CONTINUE;
2900}
2901
2902static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2903{
2904 int iopl;
2905 if (ctxt->mode == X86EMUL_MODE_REAL)
2906 return false;
2907 if (ctxt->mode == X86EMUL_MODE_VM86)
2908 return true;
2909 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2910 return ctxt->ops->cpl(ctxt) > iopl;
2911}
2912
2913#define VMWARE_PORT_VMPORT (0x5658)
2914#define VMWARE_PORT_VMRPC (0x5659)
2915
2916static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2917 u16 port, u16 len)
2918{
2919 const struct x86_emulate_ops *ops = ctxt->ops;
2920 struct desc_struct tr_seg;
2921 u32 base3;
2922 int r;
2923 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2924 unsigned mask = (1 << len) - 1;
2925 unsigned long base;
2930
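	/*
	 * VMware allows access to these ports even if denied by the TSS I/O
	 * permission bitmap; mimic that behaviour when the backdoor is on.
	 */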
2931 if (enable_vmware_backdoor &&
2932 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2933 return true;
2934
2935 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2936 if (!tr_seg.p)
2937 return false;
2938 if (desc_limit_scaled(&tr_seg) < 103)
2939 return false;
2940 base = get_desc_base(&tr_seg);
2941#ifdef CONFIG_X86_64
2942 base |= ((u64)base3) << 32;
2943#endif
2944 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2945 if (r != X86EMUL_CONTINUE)
2946 return false;
2947 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2948 return false;
2949 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2950 if (r != X86EMUL_CONTINUE)
2951 return false;
2952 if ((perm >> bit_idx) & mask)
2953 return false;
2954 return true;
2955}
2956
2957static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2958 u16 port, u16 len)
2959{
2960 if (ctxt->perm_ok)
2961 return true;
2962
2963 if (emulator_bad_iopl(ctxt))
2964 if (!emulator_io_port_access_allowed(ctxt, port, len))
2965 return false;
2966
2967 ctxt->perm_ok = true;
2968
2969 return true;
2970}
2971
2972static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2973{
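	/*
	 * Intel CPUs handle the string registers in a quirky way when ECX is
	 * zero because of REP-string optimizations: mimic that by clearing
	 * RCX and truncating RSI/RDI to 32 bits for the affected opcodes.
	 */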
2978#ifdef CONFIG_X86_64
2979 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2980 return;
2981
2982 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2983
2984 switch (ctxt->b) {
2985 case 0xa4:
2986 case 0xa5:
2987 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2988
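		/* MOVS uses both RSI and RDI; fall through to truncate RDI too. */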
2989 case 0xaa:
2990 case 0xab:
2991 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2992 }
2993#endif
2994}
2995
2996static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2997 struct tss_segment_16 *tss)
2998{
2999 tss->ip = ctxt->_eip;
3000 tss->flag = ctxt->eflags;
3001 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3002 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3003 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3004 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3005 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3006 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3007 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3008 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3009
3010 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3011 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3012 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3013 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3014 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3015}
3016
3017static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3018 struct tss_segment_16 *tss)
3019{
3020 int ret;
3021 u8 cpl;
3022
3023 ctxt->_eip = tss->ip;
3024 ctxt->eflags = tss->flag | 2;
3025 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3026 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3027 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3028 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3029 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3030 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3031 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3032 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3033
3034
3035
3036
3037
3038 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3039 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3040 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3041 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3042 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3043
3044 cpl = tss->cs & 3;
3049
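	/*
	 * Now load the segment descriptors; a fault at this point is handled
	 * in the context of the new task.
	 */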
3050 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3051 X86_TRANSFER_TASK_SWITCH, NULL);
3052 if (ret != X86EMUL_CONTINUE)
3053 return ret;
3054 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3055 X86_TRANSFER_TASK_SWITCH, NULL);
3056 if (ret != X86EMUL_CONTINUE)
3057 return ret;
3058 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3059 X86_TRANSFER_TASK_SWITCH, NULL);
3060 if (ret != X86EMUL_CONTINUE)
3061 return ret;
3062 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3063 X86_TRANSFER_TASK_SWITCH, NULL);
3064 if (ret != X86EMUL_CONTINUE)
3065 return ret;
3066 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3067 X86_TRANSFER_TASK_SWITCH, NULL);
3068 if (ret != X86EMUL_CONTINUE)
3069 return ret;
3070
3071 return X86EMUL_CONTINUE;
3072}
3073
3074static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3075 u16 tss_selector, u16 old_tss_sel,
3076 ulong old_tss_base, struct desc_struct *new_desc)
3077{
3078 struct tss_segment_16 tss_seg;
3079 int ret;
3080 u32 new_tss_base = get_desc_base(new_desc);
3081
3082 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3083 if (ret != X86EMUL_CONTINUE)
3084 return ret;
3085
3086 save_state_to_tss16(ctxt, &tss_seg);
3087
3088 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3089 if (ret != X86EMUL_CONTINUE)
3090 return ret;
3091
3092 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3093 if (ret != X86EMUL_CONTINUE)
3094 return ret;
3095
3096 if (old_tss_sel != 0xffff) {
3097 tss_seg.prev_task_link = old_tss_sel;
3098
3099 ret = linear_write_system(ctxt, new_tss_base,
3100 &tss_seg.prev_task_link,
3101 sizeof(tss_seg.prev_task_link));
3102 if (ret != X86EMUL_CONTINUE)
3103 return ret;
3104 }
3105
3106 return load_state_from_tss16(ctxt, &tss_seg);
3107}
3108
3109static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3110 struct tss_segment_32 *tss)
3111{
3112
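	/* CR3 and the LDT selector are deliberately not saved here. */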
3113 tss->eip = ctxt->_eip;
3114 tss->eflags = ctxt->eflags;
3115 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3116 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3117 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3118 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3119 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3120 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3121 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3122 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3123
3124 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3125 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3126 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3127 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3128 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3129 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3130}
3131
3132static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3133 struct tss_segment_32 *tss)
3134{
3135 int ret;
3136 u8 cpl;
3137
3138 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3139 return emulate_gp(ctxt, 0);
3140 ctxt->_eip = tss->eip;
3141 ctxt->eflags = tss->eflags | 2;
3142
3143
3144 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3145 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3146 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3147 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3148 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3149 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3150 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3151 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3152
3153
3154
3155
3156
3157
3158 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3159 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3160 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3161 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3162 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3163 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3164 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3170
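	/*
	 * If we're switching between Protected Mode and VM86, update
	 * ctxt->mode first so the selectors below are interpreted correctly.
	 */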
3171 if (ctxt->eflags & X86_EFLAGS_VM) {
3172 ctxt->mode = X86EMUL_MODE_VM86;
3173 cpl = 3;
3174 } else {
3175 ctxt->mode = X86EMUL_MODE_PROT32;
3176 cpl = tss->cs & 3;
3177 }
3178
3179
3180
3181
3182
3183 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3184 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3185 if (ret != X86EMUL_CONTINUE)
3186 return ret;
3187 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3188 X86_TRANSFER_TASK_SWITCH, NULL);
3189 if (ret != X86EMUL_CONTINUE)
3190 return ret;
3191 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3192 X86_TRANSFER_TASK_SWITCH, NULL);
3193 if (ret != X86EMUL_CONTINUE)
3194 return ret;
3195 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3196 X86_TRANSFER_TASK_SWITCH, NULL);
3197 if (ret != X86EMUL_CONTINUE)
3198 return ret;
3199 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3200 X86_TRANSFER_TASK_SWITCH, NULL);
3201 if (ret != X86EMUL_CONTINUE)
3202 return ret;
3203 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3204 X86_TRANSFER_TASK_SWITCH, NULL);
3205 if (ret != X86EMUL_CONTINUE)
3206 return ret;
3207 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3208 X86_TRANSFER_TASK_SWITCH, NULL);
3209
3210 return ret;
3211}
3212
3213static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3214 u16 tss_selector, u16 old_tss_sel,
3215 ulong old_tss_base, struct desc_struct *new_desc)
3216{
3217 struct tss_segment_32 tss_seg;
3218 int ret;
3219 u32 new_tss_base = get_desc_base(new_desc);
3220 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3221 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3222
3223 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3224 if (ret != X86EMUL_CONTINUE)
3225 return ret;
3226
3227 save_state_to_tss32(ctxt, &tss_seg);
3228
3229
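	/*
	 * Only the dynamic fields (EIP/EFLAGS, general-purpose registers and
	 * segment selectors) are written back to the old TSS.
	 */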
3230 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3231 ldt_sel_offset - eip_offset);
3232 if (ret != X86EMUL_CONTINUE)
3233 return ret;
3234
3235 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3236 if (ret != X86EMUL_CONTINUE)
3237 return ret;
3238
3239 if (old_tss_sel != 0xffff) {
3240 tss_seg.prev_task_link = old_tss_sel;
3241
3242 ret = linear_write_system(ctxt, new_tss_base,
3243 &tss_seg.prev_task_link,
3244 sizeof(tss_seg.prev_task_link));
3245 if (ret != X86EMUL_CONTINUE)
3246 return ret;
3247 }
3248
3249 return load_state_from_tss32(ctxt, &tss_seg);
3250}
3251
3252static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3253 u16 tss_selector, int idt_index, int reason,
3254 bool has_error_code, u32 error_code)
3255{
3256 const struct x86_emulate_ops *ops = ctxt->ops;
3257 struct desc_struct curr_tss_desc, next_tss_desc;
3258 int ret;
3259 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3260 ulong old_tss_base =
3261 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3262 u32 desc_limit;
3263 ulong desc_addr, dr7;
3264
3265
3266
3267 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3268 if (ret != X86EMUL_CONTINUE)
3269 return ret;
3270 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3271 if (ret != X86EMUL_CONTINUE)
3272 return ret;
3283
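	/*
	 * Privilege checks only apply to a task switch through a task gate
	 * reached via CALL/JMP/INT: the gate DPL must not be exceeded by the
	 * selector RPL or the current CPL.  IRET- and exception-initiated
	 * switches are not checked here.
	 */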
3284 if (reason == TASK_SWITCH_GATE) {
3285 if (idt_index != -1) {
3286
3287 struct desc_struct task_gate_desc;
3288 int dpl;
3289
3290 ret = read_interrupt_descriptor(ctxt, idt_index,
3291 &task_gate_desc);
3292 if (ret != X86EMUL_CONTINUE)
3293 return ret;
3294
3295 dpl = task_gate_desc.dpl;
3296 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3297 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3298 }
3299 }
3300
3301 desc_limit = desc_limit_scaled(&next_tss_desc);
3302 if (!next_tss_desc.p ||
3303 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3304 desc_limit < 0x2b)) {
3305 return emulate_ts(ctxt, tss_selector & 0xfffc);
3306 }
3307
3308 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3309 curr_tss_desc.type &= ~(1 << 1);
3310 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3311 }
3312
3313 if (reason == TASK_SWITCH_IRET)
3314 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3315
3316
3317
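	/*
	 * The back link to the previous task is only written for CALL and
	 * gate-initiated switches (when NT will be set); 0xffff tells the
	 * task-switch helpers below to skip it.
	 */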
3318 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3319 old_tss_sel = 0xffff;
3320
3321 if (next_tss_desc.type & 8)
3322 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3323 old_tss_base, &next_tss_desc);
3324 else
3325 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3326 old_tss_base, &next_tss_desc);
3327 if (ret != X86EMUL_CONTINUE)
3328 return ret;
3329
3330 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3331 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3332
3333 if (reason != TASK_SWITCH_IRET) {
3334 next_tss_desc.type |= (1 << 1);
3335 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3336 }
3337
3338 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3339 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3340
3341 if (has_error_code) {
3342 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3343 ctxt->lock_prefix = 0;
3344 ctxt->src.val = (unsigned long) error_code;
3345 ret = em_push(ctxt);
3346 }
3347
3348 ops->get_dr(ctxt, 7, &dr7);
3349 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3350
3351 return ret;
3352}
3353
3354int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3355 u16 tss_selector, int idt_index, int reason,
3356 bool has_error_code, u32 error_code)
3357{
3358 int rc;
3359
3360 invalidate_registers(ctxt);
3361 ctxt->_eip = ctxt->eip;
3362 ctxt->dst.type = OP_NONE;
3363
3364 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3365 has_error_code, error_code);
3366
3367 if (rc == X86EMUL_CONTINUE) {
3368 ctxt->eip = ctxt->_eip;
3369 writeback_registers(ctxt);
3370 }
3371
3372 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3373}
3374
3375static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3376 struct operand *op)
3377{
3378 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3379
3380 register_address_increment(ctxt, reg, df * op->bytes);
3381 op->addr.mem.ea = register_address(ctxt, reg);
3382}
3383
3384static int em_das(struct x86_emulate_ctxt *ctxt)
3385{
3386 u8 al, old_al;
3387 bool af, cf, old_cf;
3388
3389 cf = ctxt->eflags & X86_EFLAGS_CF;
3390 al = ctxt->dst.val;
3391
3392 old_al = al;
3393 old_cf = cf;
3394 cf = false;
3395 af = ctxt->eflags & X86_EFLAGS_AF;
3396 if ((al & 0x0f) > 9 || af) {
3397 al -= 6;
3398 cf = old_cf | (al >= 250);
3399 af = true;
3400 } else {
3401 af = false;
3402 }
3403 if (old_al > 0x99 || old_cf) {
3404 al -= 0x60;
3405 cf = true;
3406 }
3407
3408 ctxt->dst.val = al;
3409
3410 ctxt->src.type = OP_IMM;
3411 ctxt->src.val = 0;
3412 ctxt->src.bytes = 1;
3413 fastop(ctxt, em_or);
3414 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3415 if (cf)
3416 ctxt->eflags |= X86_EFLAGS_CF;
3417 if (af)
3418 ctxt->eflags |= X86_EFLAGS_AF;
3419 return X86EMUL_CONTINUE;
3420}
3421
3422static int em_aam(struct x86_emulate_ctxt *ctxt)
3423{
3424 u8 al, ah;
3425
3426 if (ctxt->src.val == 0)
3427 return emulate_de(ctxt);
3428
3429 al = ctxt->dst.val & 0xff;
3430 ah = al / ctxt->src.val;
3431 al %= ctxt->src.val;
3432
3433 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3434
3435
3436 ctxt->src.type = OP_IMM;
3437 ctxt->src.val = 0;
3438 ctxt->src.bytes = 1;
3439 fastop(ctxt, em_or);
3440
3441 return X86EMUL_CONTINUE;
3442}
3443
3444static int em_aad(struct x86_emulate_ctxt *ctxt)
3445{
3446 u8 al = ctxt->dst.val & 0xff;
3447 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3448
3449 al = (al + (ah * ctxt->src.val)) & 0xff;
3450
3451 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3452
3453
3454 ctxt->src.type = OP_IMM;
3455 ctxt->src.val = 0;
3456 ctxt->src.bytes = 1;
3457 fastop(ctxt, em_or);
3458
3459 return X86EMUL_CONTINUE;
3460}
3461
3462static int em_call(struct x86_emulate_ctxt *ctxt)
3463{
3464 int rc;
3465 long rel = ctxt->src.val;
3466
3467 ctxt->src.val = (unsigned long)ctxt->_eip;
3468 rc = jmp_rel(ctxt, rel);
3469 if (rc != X86EMUL_CONTINUE)
3470 return rc;
3471 return em_push(ctxt);
3472}
3473
3474static int em_call_far(struct x86_emulate_ctxt *ctxt)
3475{
3476 u16 sel, old_cs;
3477 ulong old_eip;
3478 int rc;
3479 struct desc_struct old_desc, new_desc;
3480 const struct x86_emulate_ops *ops = ctxt->ops;
3481 int cpl = ctxt->ops->cpl(ctxt);
3482 enum x86emul_mode prev_mode = ctxt->mode;
3483
3484 old_eip = ctxt->_eip;
3485 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3486
3487 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3488 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3489 X86_TRANSFER_CALL_JMP, &new_desc);
3490 if (rc != X86EMUL_CONTINUE)
3491 return rc;
3492
3493 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3494 if (rc != X86EMUL_CONTINUE)
3495 goto fail;
3496
3497 ctxt->src.val = old_cs;
3498 rc = em_push(ctxt);
3499 if (rc != X86EMUL_CONTINUE)
3500 goto fail;
3501
3502 ctxt->src.val = old_eip;
3503 rc = em_push(ctxt);
3504
3505
3506 if (rc != X86EMUL_CONTINUE) {
3507 pr_warn_once("faulting far call emulation tainted memory\n");
3508 goto fail;
3509 }
3510 return rc;
3511fail:
3512 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3513 ctxt->mode = prev_mode;
3514 return rc;
3515
3516}
3517
3518static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3519{
3520 int rc;
3521 unsigned long eip;
3522
3523 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3524 if (rc != X86EMUL_CONTINUE)
3525 return rc;
3526 rc = assign_eip_near(ctxt, eip);
3527 if (rc != X86EMUL_CONTINUE)
3528 return rc;
3529 rsp_increment(ctxt, ctxt->src.val);
3530 return X86EMUL_CONTINUE;
3531}
3532
3533static int em_xchg(struct x86_emulate_ctxt *ctxt)
3534{
3535
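	/*
	 * Write the register source back here; the memory destination is
	 * written by the common writeback code with an implied LOCK
	 * (lock_prefix is set below).
	 */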
3536 ctxt->src.val = ctxt->dst.val;
3537 write_register_operand(&ctxt->src);
3538
3539
3540 ctxt->dst.val = ctxt->src.orig_val;
3541 ctxt->lock_prefix = 1;
3542 return X86EMUL_CONTINUE;
3543}
3544
3545static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3546{
3547 ctxt->dst.val = ctxt->src2.val;
3548 return fastop(ctxt, em_imul);
3549}
3550
3551static int em_cwd(struct x86_emulate_ctxt *ctxt)
3552{
3553 ctxt->dst.type = OP_REG;
3554 ctxt->dst.bytes = ctxt->src.bytes;
3555 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3556 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3557
3558 return X86EMUL_CONTINUE;
3559}
3560
3561static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3562{
3563 u64 tsc_aux = 0;
3564
3565 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3566 return emulate_gp(ctxt, 0);
3567 ctxt->dst.val = tsc_aux;
3568 return X86EMUL_CONTINUE;
3569}
3570
3571static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3572{
3573 u64 tsc = 0;
3574
3575 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3576 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3577 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3578 return X86EMUL_CONTINUE;
3579}
3580
3581static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3582{
3583 u64 pmc;
3584
3585 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3586 return emulate_gp(ctxt, 0);
3587 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3588 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3589 return X86EMUL_CONTINUE;
3590}
3591
3592static int em_mov(struct x86_emulate_ctxt *ctxt)
3593{
3594 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3595 return X86EMUL_CONTINUE;
3596}
3597
3598#define FFL(x) bit(X86_FEATURE_##x)
3599
3600static int em_movbe(struct x86_emulate_ctxt *ctxt)
3601{
3602 u32 ebx, ecx, edx, eax = 1;
3603 u16 tmp;
3607
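	/* MOVBE must be exposed in the guest's CPUID (leaf 1, ECX) or it #UDs. */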
3608 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3609 if (!(ecx & FFL(MOVBE)))
3610 return emulate_ud(ctxt);
3611
3612 switch (ctxt->op_bytes) {
3613 case 2:
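		/*
		 * With a 16-bit operand MOVBE leaves the upper word of the
		 * destination register unchanged, so only the low 16 bits are
		 * byte-swapped here.
		 */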
3622 tmp = (u16)ctxt->src.val;
3623 ctxt->dst.val &= ~0xffffUL;
3624 ctxt->dst.val |= (unsigned long)swab16(tmp);
3625 break;
3626 case 4:
3627 ctxt->dst.val = swab32((u32)ctxt->src.val);
3628 break;
3629 case 8:
3630 ctxt->dst.val = swab64(ctxt->src.val);
3631 break;
3632 default:
3633 BUG();
3634 }
3635 return X86EMUL_CONTINUE;
3636}
3637
3638static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3639{
3640 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3641 return emulate_gp(ctxt, 0);
3642
3643
3644 ctxt->dst.type = OP_NONE;
3645 return X86EMUL_CONTINUE;
3646}
3647
3648static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3649{
3650 unsigned long val;
3651
3652 if (ctxt->mode == X86EMUL_MODE_PROT64)
3653 val = ctxt->src.val & ~0ULL;
3654 else
3655 val = ctxt->src.val & ~0U;
3656
3657
3658 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3659 return emulate_gp(ctxt, 0);
3660
3661
3662 ctxt->dst.type = OP_NONE;
3663 return X86EMUL_CONTINUE;
3664}
3665
3666static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3667{
3668 u64 msr_data;
3669
3670 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3671 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3672 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3673 return emulate_gp(ctxt, 0);
3674
3675 return X86EMUL_CONTINUE;
3676}
3677
3678static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3679{
3680 u64 msr_data;
3681
3682 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3683 return emulate_gp(ctxt, 0);
3684
3685 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3686 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3687 return X86EMUL_CONTINUE;
3688}
3689
3690static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3691{
3692 if (segment > VCPU_SREG_GS &&
3693 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3694 ctxt->ops->cpl(ctxt) > 0)
3695 return emulate_gp(ctxt, 0);
3696
3697 ctxt->dst.val = get_segment_selector(ctxt, segment);
3698 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3699 ctxt->dst.bytes = 2;
3700 return X86EMUL_CONTINUE;
3701}
3702
3703static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3704{
3705 if (ctxt->modrm_reg > VCPU_SREG_GS)
3706 return emulate_ud(ctxt);
3707
3708 return em_store_sreg(ctxt, ctxt->modrm_reg);
3709}
3710
3711static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3712{
3713 u16 sel = ctxt->src.val;
3714
3715 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3716 return emulate_ud(ctxt);
3717
3718 if (ctxt->modrm_reg == VCPU_SREG_SS)
3719 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3720
3721
3722 ctxt->dst.type = OP_NONE;
3723 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3724}
3725
3726static int em_sldt(struct x86_emulate_ctxt *ctxt)
3727{
3728 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3729}
3730
3731static int em_lldt(struct x86_emulate_ctxt *ctxt)
3732{
3733 u16 sel = ctxt->src.val;
3734
3735
3736 ctxt->dst.type = OP_NONE;
3737 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3738}
3739
3740static int em_str(struct x86_emulate_ctxt *ctxt)
3741{
3742 return em_store_sreg(ctxt, VCPU_SREG_TR);
3743}
3744
3745static int em_ltr(struct x86_emulate_ctxt *ctxt)
3746{
3747 u16 sel = ctxt->src.val;
3748
3749
3750 ctxt->dst.type = OP_NONE;
3751 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3752}
3753
3754static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3755{
3756 int rc;
3757 ulong linear;
3758
3759 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3760 if (rc == X86EMUL_CONTINUE)
3761 ctxt->ops->invlpg(ctxt, linear);
3762
3763 ctxt->dst.type = OP_NONE;
3764 return X86EMUL_CONTINUE;
3765}
3766
3767static int em_clts(struct x86_emulate_ctxt *ctxt)
3768{
3769 ulong cr0;
3770
3771 cr0 = ctxt->ops->get_cr(ctxt, 0);
3772 cr0 &= ~X86_CR0_TS;
3773 ctxt->ops->set_cr(ctxt, 0, cr0);
3774 return X86EMUL_CONTINUE;
3775}
3776
3777static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3778{
3779 int rc = ctxt->ops->fix_hypercall(ctxt);
3780
3781 if (rc != X86EMUL_CONTINUE)
3782 return rc;
3783
3784
3785 ctxt->_eip = ctxt->eip;
3786
3787 ctxt->dst.type = OP_NONE;
3788 return X86EMUL_CONTINUE;
3789}
3790
3791static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3792 void (*get)(struct x86_emulate_ctxt *ctxt,
3793 struct desc_ptr *ptr))
3794{
3795 struct desc_ptr desc_ptr;
3796
3797 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3798 ctxt->ops->cpl(ctxt) > 0)
3799 return emulate_gp(ctxt, 0);
3800
3801 if (ctxt->mode == X86EMUL_MODE_PROT64)
3802 ctxt->op_bytes = 8;
3803 get(ctxt, &desc_ptr);
3804 if (ctxt->op_bytes == 2) {
3805 ctxt->op_bytes = 4;
3806 desc_ptr.address &= 0x00ffffff;
3807 }
3808
3809 ctxt->dst.type = OP_NONE;
3810 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3811 &desc_ptr, 2 + ctxt->op_bytes);
3812}
3813
3814static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3815{
3816 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3817}
3818
3819static int em_sidt(struct x86_emulate_ctxt *ctxt)
3820{
3821 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3822}
3823
3824static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3825{
3826 struct desc_ptr desc_ptr;
3827 int rc;
3828
3829 if (ctxt->mode == X86EMUL_MODE_PROT64)
3830 ctxt->op_bytes = 8;
3831 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3832 &desc_ptr.size, &desc_ptr.address,
3833 ctxt->op_bytes);
3834 if (rc != X86EMUL_CONTINUE)
3835 return rc;
3836 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3837 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3838 return emulate_gp(ctxt, 0);
3839 if (lgdt)
3840 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3841 else
3842 ctxt->ops->set_idt(ctxt, &desc_ptr);
3843
3844 ctxt->dst.type = OP_NONE;
3845 return X86EMUL_CONTINUE;
3846}
3847
3848static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3849{
3850 return em_lgdt_lidt(ctxt, true);
3851}
3852
3853static int em_lidt(struct x86_emulate_ctxt *ctxt)
3854{
3855 return em_lgdt_lidt(ctxt, false);
3856}
3857
3858static int em_smsw(struct x86_emulate_ctxt *ctxt)
3859{
3860 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3861 ctxt->ops->cpl(ctxt) > 0)
3862 return emulate_gp(ctxt, 0);
3863
3864 if (ctxt->dst.type == OP_MEM)
3865 ctxt->dst.bytes = 2;
3866 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3867 return X86EMUL_CONTINUE;
3868}
3869
3870static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3871{
3872 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3873 | (ctxt->src.val & 0x0f));
3874 ctxt->dst.type = OP_NONE;
3875 return X86EMUL_CONTINUE;
3876}
3877
3878static int em_loop(struct x86_emulate_ctxt *ctxt)
3879{
3880 int rc = X86EMUL_CONTINUE;
3881
3882 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3883 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3884 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3885 rc = jmp_rel(ctxt, ctxt->src.val);
3886
3887 return rc;
3888}
3889
3890static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3891{
3892 int rc = X86EMUL_CONTINUE;
3893
3894 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3895 rc = jmp_rel(ctxt, ctxt->src.val);
3896
3897 return rc;
3898}
3899
3900static int em_in(struct x86_emulate_ctxt *ctxt)
3901{
3902 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3903 &ctxt->dst.val))
3904 return X86EMUL_IO_NEEDED;
3905
3906 return X86EMUL_CONTINUE;
3907}
3908
3909static int em_out(struct x86_emulate_ctxt *ctxt)
3910{
3911 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3912 &ctxt->src.val, 1);
3913
3914 ctxt->dst.type = OP_NONE;
3915 return X86EMUL_CONTINUE;
3916}
3917
3918static int em_cli(struct x86_emulate_ctxt *ctxt)
3919{
3920 if (emulator_bad_iopl(ctxt))
3921 return emulate_gp(ctxt, 0);
3922
3923 ctxt->eflags &= ~X86_EFLAGS_IF;
3924 return X86EMUL_CONTINUE;
3925}
3926
3927static int em_sti(struct x86_emulate_ctxt *ctxt)
3928{
3929 if (emulator_bad_iopl(ctxt))
3930 return emulate_gp(ctxt, 0);
3931
3932 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3933 ctxt->eflags |= X86_EFLAGS_IF;
3934 return X86EMUL_CONTINUE;
3935}
3936
3937static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3938{
3939 u32 eax, ebx, ecx, edx;
3940 u64 msr = 0;
3941
3942 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3943 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3944 ctxt->ops->cpl(ctxt)) {
3945 return emulate_gp(ctxt, 0);
3946 }
3947
3948 eax = reg_read(ctxt, VCPU_REGS_RAX);
3949 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3950 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3951 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3952 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3953 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3954 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3955 return X86EMUL_CONTINUE;
3956}
3957
3958static int em_sahf(struct x86_emulate_ctxt *ctxt)
3959{
3960 u32 flags;
3961
3962 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3963 X86_EFLAGS_SF;
3964 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3965
3966 ctxt->eflags &= ~0xffUL;
3967 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3968 return X86EMUL_CONTINUE;
3969}
3970
3971static int em_lahf(struct x86_emulate_ctxt *ctxt)
3972{
3973 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3974 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3975 return X86EMUL_CONTINUE;
3976}
3977
3978static int em_bswap(struct x86_emulate_ctxt *ctxt)
3979{
3980 switch (ctxt->op_bytes) {
3981#ifdef CONFIG_X86_64
3982 case 8:
3983 asm("bswap %0" : "+r"(ctxt->dst.val));
3984 break;
3985#endif
3986 default:
3987 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3988 break;
3989 }
3990 return X86EMUL_CONTINUE;
3991}
3992
3993static int em_clflush(struct x86_emulate_ctxt *ctxt)
3994{
3995
3996 return X86EMUL_CONTINUE;
3997}
3998
3999static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4000{
4001 ctxt->dst.val = (s32) ctxt->src.val;
4002 return X86EMUL_CONTINUE;
4003}
4004
4005static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4006{
4007 u32 eax = 1, ebx, ecx = 0, edx;
4008
4009 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4010 if (!(edx & FFL(FXSR)))
4011 return emulate_ud(ctxt);
4012
4013 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4014 return emulate_nm(ctxt);
4015
4016
4017
4018
4019
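	/* FXSAVE/FXRSTOR are not emulated in 64-bit mode. */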
4020 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4021 return X86EMUL_UNHANDLEABLE;
4022
4023 return X86EMUL_CONTINUE;
4024}
4029
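/*
 * Without CR4.OSFXSR the hardware does not save/restore XMM0-7, but MXCSR
 * (which precedes xmm_space in struct fxregs_state) is always saved; hence
 * the variable state size computed below.
 */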
4030static size_t __fxstate_size(int nregs)
4031{
4032 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4033}
4034
4035static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4036{
4037 bool cr4_osfxsr;
4038 if (ctxt->mode == X86EMUL_MODE_PROT64)
4039 return __fxstate_size(16);
4040
4041 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4042 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4043}
4062
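/*
 * The FXSAVE/FXRSTOR image layout depends on the operating mode.  The
 * emulation below always uses the host's native FXSAVE format and only
 * copies out/in the portion the guest expects (see fxstate_size()); the
 * remaining registers are preserved across FXRSTOR via fxregs_fixup().
 */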
4063static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4064{
4065 struct fxregs_state fx_state;
4066 int rc;
4067
4068 rc = check_fxsr(ctxt);
4069 if (rc != X86EMUL_CONTINUE)
4070 return rc;
4071
4072 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4073
4074 if (rc != X86EMUL_CONTINUE)
4075 return rc;
4076
4077 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4078 fxstate_size(ctxt));
4079}
4087
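/*
 * FXRSTOR restores the full 512-byte image, so fill the part the guest did
 * not provide with the current hardware values; those registers are then
 * effectively left unchanged.
 */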
4088static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4089 const size_t used_size)
4090{
4091 struct fxregs_state fx_tmp;
4092 int rc;
4093
4094 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4095 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4096 __fxstate_size(16) - used_size);
4097
4098 return rc;
4099}
4100
4101static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4102{
4103 struct fxregs_state fx_state;
4104 int rc;
4105 size_t size;
4106
4107 rc = check_fxsr(ctxt);
4108 if (rc != X86EMUL_CONTINUE)
4109 return rc;
4110
4111 size = fxstate_size(ctxt);
4112 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4113 if (rc != X86EMUL_CONTINUE)
4114 return rc;
4115
4116 if (size < __fxstate_size(16)) {
4117 rc = fxregs_fixup(&fx_state, size);
4118 if (rc != X86EMUL_CONTINUE)
4119 goto out;
4120 }
4121
4122 if (fx_state.mxcsr >> 16) {
4123 rc = emulate_gp(ctxt, 0);
4124 goto out;
4125 }
4126
4127 if (rc == X86EMUL_CONTINUE)
4128 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4129
4130out:
4131 return rc;
4132}
4133
4134static bool valid_cr(int nr)
4135{
4136 switch (nr) {
4137 case 0:
4138 case 2 ... 4:
4139 case 8:
4140 return true;
4141 default:
4142 return false;
4143 }
4144}
4145
4146static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4147{
4148 if (!valid_cr(ctxt->modrm_reg))
4149 return emulate_ud(ctxt);
4150
4151 return X86EMUL_CONTINUE;
4152}
4153
4154static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4155{
4156 u64 new_val = ctxt->src.val64;
4157 int cr = ctxt->modrm_reg;
4158 u64 efer = 0;
4159
4160 static u64 cr_reserved_bits[] = {
4161 0xffffffff00000000ULL,
4162 0, 0, 0,
4163 CR4_RESERVED_BITS,
4164 0, 0, 0,
4165 CR8_RESERVED_BITS,
4166 };
4167
4168 if (!valid_cr(cr))
4169 return emulate_ud(ctxt);
4170
4171 if (new_val & cr_reserved_bits[cr])
4172 return emulate_gp(ctxt, 0);
4173
4174 switch (cr) {
4175 case 0: {
4176 u64 cr4;
4177 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4178 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4179 return emulate_gp(ctxt, 0);
4180
4181 cr4 = ctxt->ops->get_cr(ctxt, 4);
4182 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4183
4184 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4185 !(cr4 & X86_CR4_PAE))
4186 return emulate_gp(ctxt, 0);
4187
4188 break;
4189 }
4190 case 3: {
4191 u64 rsvd = 0;
4192
4193 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4194 if (efer & EFER_LMA) {
4195 u64 maxphyaddr;
4196 u32 eax, ebx, ecx, edx;
4197
4198 eax = 0x80000008;
4199 ecx = 0;
4200 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4201 &edx, false))
4202 maxphyaddr = eax & 0xff;
4203 else
4204 maxphyaddr = 36;
4205 rsvd = rsvd_bits(maxphyaddr, 63);
4206 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4207 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4208 }
4209
4210 if (new_val & rsvd)
4211 return emulate_gp(ctxt, 0);
4212
4213 break;
4214 }
4215 case 4: {
4216 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4217
4218 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4219 return emulate_gp(ctxt, 0);
4220
4221 break;
4222 }
4223 }
4224
4225 return X86EMUL_CONTINUE;
4226}
4227
4228static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4229{
4230 unsigned long dr7;
4231
4232 ctxt->ops->get_dr(ctxt, 7, &dr7);
4233
4234
4235 return dr7 & (1 << 13);
4236}
4237
4238static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4239{
4240 int dr = ctxt->modrm_reg;
4241 u64 cr4;
4242
4243 if (dr > 7)
4244 return emulate_ud(ctxt);
4245
4246 cr4 = ctxt->ops->get_cr(ctxt, 4);
4247 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4248 return emulate_ud(ctxt);
4249
4250 if (check_dr7_gd(ctxt)) {
4251 ulong dr6;
4252
4253 ctxt->ops->get_dr(ctxt, 6, &dr6);
4254 dr6 &= ~15;
4255 dr6 |= DR6_BD | DR6_RTM;
4256 ctxt->ops->set_dr(ctxt, 6, dr6);
4257 return emulate_db(ctxt);
4258 }
4259
4260 return X86EMUL_CONTINUE;
4261}
4262
4263static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4264{
4265 u64 new_val = ctxt->src.val64;
4266 int dr = ctxt->modrm_reg;
4267
4268 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4269 return emulate_gp(ctxt, 0);
4270
4271 return check_dr_read(ctxt);
4272}
4273
4274static int check_svme(struct x86_emulate_ctxt *ctxt)
4275{
4276 u64 efer = 0;
4277
4278 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4279
4280 if (!(efer & EFER_SVME))
4281 return emulate_ud(ctxt);
4282
4283 return X86EMUL_CONTINUE;
4284}
4285
4286static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4287{
4288 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4289
4290
4291 if (rax & 0xffff000000000000ULL)
4292 return emulate_gp(ctxt, 0);
4293
4294 return check_svme(ctxt);
4295}
4296
4297static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4298{
4299 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4300
4301 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4302 return emulate_ud(ctxt);
4303
4304 return X86EMUL_CONTINUE;
4305}
4306
4307static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4308{
4309 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4310 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4315
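	/*
	 * VMware exposes pseudo-PMCs that may be read via RDPMC from ring 3
	 * even with CR4.PCE clear; allow that when the backdoor is enabled.
	 */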
4316 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4317 return X86EMUL_CONTINUE;
4318
4319 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4320 ctxt->ops->check_pmc(ctxt, rcx))
4321 return emulate_gp(ctxt, 0);
4322
4323 return X86EMUL_CONTINUE;
4324}
4325
4326static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4327{
4328 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4329 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4330 return emulate_gp(ctxt, 0);
4331
4332 return X86EMUL_CONTINUE;
4333}
4334
4335static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4336{
4337 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4338 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4339 return emulate_gp(ctxt, 0);
4340
4341 return X86EMUL_CONTINUE;
4342}
4343
4344#define D(_y) { .flags = (_y) }
4345#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4346#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4347 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4348#define N D(NotImpl)
4349#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4350#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4351#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4352#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4353#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4354#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4355#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4356#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4357#define II(_f, _e, _i) \
4358 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4359#define IIP(_f, _e, _i, _p) \
4360 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4361 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4362#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4363
4364#define D2bv(_f) D((_f) | ByteOp), D(_f)
4365#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4366#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4367#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4368#define I2bvIP(_f, _e, _i, _p) \
4369 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4370
4371#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4372 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4373 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4374
4375static const struct opcode group7_rm0[] = {
4376 N,
4377 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4378 N, N, N, N, N, N,
4379};
4380
4381static const struct opcode group7_rm1[] = {
4382 DI(SrcNone | Priv, monitor),
4383 DI(SrcNone | Priv, mwait),
4384 N, N, N, N, N, N,
4385};
4386
4387static const struct opcode group7_rm3[] = {
4388 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4389 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4390 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4391 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4392 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4393 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4394 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4395 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4396};
4397
4398static const struct opcode group7_rm7[] = {
4399 N,
4400 DIP(SrcNone, rdtscp, check_rdtsc),
4401 N, N, N, N, N, N,
4402};
4403
4404static const struct opcode group1[] = {
4405 F(Lock, em_add),
4406 F(Lock | PageTable, em_or),
4407 F(Lock, em_adc),
4408 F(Lock, em_sbb),
4409 F(Lock | PageTable, em_and),
4410 F(Lock, em_sub),
4411 F(Lock, em_xor),
4412 F(NoWrite, em_cmp),
4413};
4414
4415static const struct opcode group1A[] = {
4416 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4417};
4418
4419static const struct opcode group2[] = {
4420 F(DstMem | ModRM, em_rol),
4421 F(DstMem | ModRM, em_ror),
4422 F(DstMem | ModRM, em_rcl),
4423 F(DstMem | ModRM, em_rcr),
4424 F(DstMem | ModRM, em_shl),
4425 F(DstMem | ModRM, em_shr),
4426 F(DstMem | ModRM, em_shl), /* /6 behaves as SAL, i.e. the same as SHL */
4427 F(DstMem | ModRM, em_sar),
4428};
4429
4430static const struct opcode group3[] = {
4431 F(DstMem | SrcImm | NoWrite, em_test),
4432 F(DstMem | SrcImm | NoWrite, em_test), /* /1 behaves like TEST (/0) */
4433 F(DstMem | SrcNone | Lock, em_not),
4434 F(DstMem | SrcNone | Lock, em_neg),
4435 F(DstXacc | Src2Mem, em_mul_ex),
4436 F(DstXacc | Src2Mem, em_imul_ex),
4437 F(DstXacc | Src2Mem, em_div_ex),
4438 F(DstXacc | Src2Mem, em_idiv_ex),
4439};
4440
4441static const struct opcode group4[] = {
4442 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4443 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4444 N, N, N, N, N, N,
4445};
4446
4447static const struct opcode group5[] = {
4448 F(DstMem | SrcNone | Lock, em_inc),
4449 F(DstMem | SrcNone | Lock, em_dec),
4450 I(SrcMem | NearBranch, em_call_near_abs),
4451 I(SrcMemFAddr | ImplicitOps, em_call_far),
4452 I(SrcMem | NearBranch, em_jmp_abs),
4453 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4454 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4455};
4456
4457static const struct opcode group6[] = {
4458 II(Prot | DstMem, em_sldt, sldt),
4459 II(Prot | DstMem, em_str, str),
4460 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4461 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4462 N, N, N, N,
4463};
4464
4465static const struct group_dual group7 = { {
4466 II(Mov | DstMem, em_sgdt, sgdt),
4467 II(Mov | DstMem, em_sidt, sidt),
4468 II(SrcMem | Priv, em_lgdt, lgdt),
4469 II(SrcMem | Priv, em_lidt, lidt),
4470 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4471 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4472 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4473}, {
4474 EXT(0, group7_rm0),
4475 EXT(0, group7_rm1),
4476 N, EXT(0, group7_rm3),
4477 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4478 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4479 EXT(0, group7_rm7),
4480} };
4481
4482static const struct opcode group8[] = {
4483 N, N, N, N,
4484 F(DstMem | SrcImmByte | NoWrite, em_bt),
4485 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4486 F(DstMem | SrcImmByte | Lock, em_btr),
4487 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4488};
4489
4490/*
4491 * The "memory" destination is actually always a register, since we come
4492 * from the register case of group9.
4493 */
4494static const struct gprefix pfx_0f_c7_7 = {
4495 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4496};
4497
4498
4499static const struct group_dual group9 = { {
4500 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4501}, {
4502 N, N, N, N, N, N, N,
4503 GP(0, &pfx_0f_c7_7),
4504} };
4505
4506static const struct opcode group11[] = {
4507 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4508 X7(D(Undefined)),
4509};
4510
4511static const struct gprefix pfx_0f_ae_7 = {
4512 I(SrcMem | ByteOp, em_clflush), N, N, N,
4513};
4514
4515static const struct group_dual group15 = { {
4516 I(ModRM | Aligned16, em_fxsave),
4517 I(ModRM | Aligned16, em_fxrstor),
4518 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4519}, {
4520 N, N, N, N, N, N, N, N,
4521} };
4522
4523static const struct gprefix pfx_0f_6f_0f_7f = {
4524 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4525};
4526
4527static const struct instr_dual instr_dual_0f_2b = {
4528 I(0, em_mov), N
4529};
4530
4531static const struct gprefix pfx_0f_2b = {
4532 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4533};
4534
4535static const struct gprefix pfx_0f_10_0f_11 = {
4536 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4537};
4538
4539static const struct gprefix pfx_0f_28_0f_29 = {
4540 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4541};
4542
4543static const struct gprefix pfx_0f_e7 = {
4544 N, I(Sse, em_mov), N, N,
4545};
4546
4547static const struct escape escape_d9 = { {
4548 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4549}, {
4550 /* 0xC0 - 0xC7 */
4551 N, N, N, N, N, N, N, N,
4552 /* 0xC8 - 0xCF */
4553 N, N, N, N, N, N, N, N,
4554 /* 0xD0 - 0xD7 */
4555 N, N, N, N, N, N, N, N,
4556 /* 0xD8 - 0xDF */
4557 N, N, N, N, N, N, N, N,
4558 /* 0xE0 - 0xE7 */
4559 N, N, N, N, N, N, N, N,
4560 /* 0xE8 - 0xEF */
4561 N, N, N, N, N, N, N, N,
4562 /* 0xF0 - 0xF7 */
4563 N, N, N, N, N, N, N, N,
4564 /* 0xF8 - 0xFF */
4565 N, N, N, N, N, N, N, N,
4566} };
4567
4568static const struct escape escape_db = { {
4569 N, N, N, N, N, N, N, N,
4570}, {
4571 /* 0xC0 - 0xC7 */
4572 N, N, N, N, N, N, N, N,
4573 /* 0xC8 - 0xCF */
4574 N, N, N, N, N, N, N, N,
4575 /* 0xD0 - 0xD7 */
4576 N, N, N, N, N, N, N, N,
4577 /* 0xD8 - 0xDF */
4578 N, N, N, N, N, N, N, N,
4579 /* 0xE0 - 0xE7 */
4580 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4581 /* 0xE8 - 0xEF */
4582 N, N, N, N, N, N, N, N,
4583 /* 0xF0 - 0xF7 */
4584 N, N, N, N, N, N, N, N,
4585 /* 0xF8 - 0xFF */
4586 N, N, N, N, N, N, N, N,
4587} };
4588
4589static const struct escape escape_dd = { {
4590 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4591}, {
4592 /* 0xC0 - 0xC7 */
4593 N, N, N, N, N, N, N, N,
4594 /* 0xC8 - 0xCF */
4595 N, N, N, N, N, N, N, N,
4596 /* 0xD0 - 0xD7 */
4597 N, N, N, N, N, N, N, N,
4598 /* 0xD8 - 0xDF */
4599 N, N, N, N, N, N, N, N,
4600 /* 0xE0 - 0xE7 */
4601 N, N, N, N, N, N, N, N,
4602 /* 0xE8 - 0xEF */
4603 N, N, N, N, N, N, N, N,
4604 /* 0xF0 - 0xF7 */
4605 N, N, N, N, N, N, N, N,
4606 /* 0xF8 - 0xFF */
4607 N, N, N, N, N, N, N, N,
4608} };
4609
4610static const struct instr_dual instr_dual_0f_c3 = {
4611 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4612};
4613
4614static const struct mode_dual mode_dual_63 = {
4615 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4616};
4617
4618static const struct opcode opcode_table[256] = {
4619 /* 0x00 - 0x07 */
4620 F6ALU(Lock, em_add),
4621 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4622 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4623 /* 0x08 - 0x0F */
4624 F6ALU(Lock | PageTable, em_or),
4625 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4626 N,
4627 /* 0x10 - 0x17 */
4628 F6ALU(Lock, em_adc),
4629 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4630 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4631 /* 0x18 - 0x1F */
4632 F6ALU(Lock, em_sbb),
4633 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4634 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4635 /* 0x20 - 0x27 */
4636 F6ALU(Lock | PageTable, em_and), N, N,
4637 /* 0x28 - 0x2F */
4638 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4639 /* 0x30 - 0x37 */
4640 F6ALU(Lock, em_xor), N, N,
4641 /* 0x38 - 0x3F */
4642 F6ALU(NoWrite, em_cmp), N, N,
4643 /* 0x40 - 0x4F */
4644 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4645 /* 0x50 - 0x57 */
4646 X8(I(SrcReg | Stack, em_push)),
4647 /* 0x58 - 0x5F */
4648 X8(I(DstReg | Stack, em_pop)),
4649 /* 0x60 - 0x67 */
4650 I(ImplicitOps | Stack | No64, em_pusha),
4651 I(ImplicitOps | Stack | No64, em_popa),
4652 N, MD(ModRM, &mode_dual_63),
4653 N, N, N, N,
4654 /* 0x68 - 0x6F */
4655 I(SrcImm | Mov | Stack, em_push),
4656 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4657 I(SrcImmByte | Mov | Stack, em_push),
4658 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4659 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4660 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
4661 /* 0x70 - 0x7F */
4662 X16(D(SrcImmByte | NearBranch)),
4663 /* 0x80 - 0x87 */
4664 G(ByteOp | DstMem | SrcImm, group1),
4665 G(DstMem | SrcImm, group1),
4666 G(ByteOp | DstMem | SrcImm | No64, group1),
4667 G(DstMem | SrcImmByte, group1),
4668 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4669 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4670 /* 0x88 - 0x8F */
4671 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4672 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4673 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4674 D(ModRM | SrcMem | NoAccess | DstReg),
4675 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4676 G(0, group1A),
4677 /* 0x90 - 0x97 */
4678 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4679 /* 0x98 - 0x9F */
4680 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4681 I(SrcImmFAddr | No64, em_call_far), N,
4682 II(ImplicitOps | Stack, em_pushf, pushf),
4683 II(ImplicitOps | Stack, em_popf, popf),
4684 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4685 /* 0xA0 - 0xA7 */
4686 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4687 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4688 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4689 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4690 /* 0xA8 - 0xAF */
4691 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4692 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4693 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4694 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4695 /* 0xB0 - 0xB7 */
4696 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4697 /* 0xB8 - 0xBF */
4698 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4699 /* 0xC0 - 0xC7 */
4700 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4701 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4702 I(ImplicitOps | NearBranch, em_ret),
4703 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4704 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4705 G(ByteOp, group11), G(0, group11),
4706 /* 0xC8 - 0xCF */
4707 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4708 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4709 I(ImplicitOps, em_ret_far),
4710 D(ImplicitOps), DI(SrcImmByte, intn),
4711 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4712 /* 0xD0 - 0xD7 */
4713 G(Src2One | ByteOp, group2), G(Src2One, group2),
4714 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4715 I(DstAcc | SrcImmUByte | No64, em_aam),
4716 I(DstAcc | SrcImmUByte | No64, em_aad),
4717 F(DstAcc | ByteOp | No64, em_salc),
4718 I(DstAcc | SrcXLat | ByteOp, em_mov),
4719 /* 0xD8 - 0xDF */
4720 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4721 /* 0xE0 - 0xE7 */
4722 X3(I(SrcImmByte | NearBranch, em_loop)),
4723 I(SrcImmByte | NearBranch, em_jcxz),
4724 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4725 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4726 /* 0xE8 - 0xEF */
4727 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4728 I(SrcImmFAddr | No64, em_jmp_far),
4729 D(SrcImmByte | ImplicitOps | NearBranch),
4730 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4731 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4732 /* 0xF0 - 0xF7 */
4733 N, DI(ImplicitOps, icebp), N, N,
4734 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4735 G(ByteOp, group3), G(0, group3),
4736 /* 0xF8 - 0xFF */
4737 D(ImplicitOps), D(ImplicitOps),
4738 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4739 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4740};
4741
4742static const struct opcode twobyte_table[256] = {
4743 /* 0x00 - 0x0F */
4744 G(0, group6), GD(0, &group7), N, N,
4745 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4746 II(ImplicitOps | Priv, em_clts, clts), N,
4747 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4748 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4749 /* 0x10 - 0x1F */
4750 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4751 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4752 N, N, N, N, N, N,
4753 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4754 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4755 /* 0x20 - 0x2F */
4756 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4757 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4758 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4759 check_cr_write),
4760 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4761 check_dr_write),
4762 N, N, N, N,
4763 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4764 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4765 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4766 N, N, N, N,
4767 /* 0x30 - 0x3F */
4768 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4769 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4770 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4771 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4772 I(ImplicitOps | EmulateOnUD, em_sysenter),
4773 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4774 N, N,
4775 N, N, N, N, N, N, N, N,
4776 /* 0x40 - 0x4F */
4777 X16(D(DstReg | SrcMem | ModRM)),
4778 /* 0x50 - 0x5F */
4779 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4780 /* 0x60 - 0x6F */
4781 N, N, N, N,
4782 N, N, N, N,
4783 N, N, N, N,
4784 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4785 /* 0x70 - 0x7F */
4786 N, N, N, N,
4787 N, N, N, N,
4788 N, N, N, N,
4789 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4790 /* 0x80 - 0x8F */
4791 X16(D(SrcImm | NearBranch)),
4792 /* 0x90 - 0x9F */
4793 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4794 /* 0xA0 - 0xA7 */
4795 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4796 II(ImplicitOps, em_cpuid, cpuid),
4797 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4798 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4799 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4800 /* 0xA8 - 0xAF */
4801 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4802 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4803 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4804 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4805 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4806 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4807 /* 0xB0 - 0xB7 */
4808 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4809 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4810 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4811 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4812 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4813 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4814 /* 0xB8 - 0xBF */
4815 N, N,
4816 G(BitOp, group8),
4817 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4818 I(DstReg | SrcMem | ModRM, em_bsf_c),
4819 I(DstReg | SrcMem | ModRM, em_bsr_c),
4820 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4821 /* 0xC0 - 0xC7 */
4822 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4823 N, ID(0, &instr_dual_0f_c3),
4824 N, N, N, GD(0, &group9),
4825 /* 0xC8 - 0xCF */
4826 X8(I(DstReg, em_bswap)),
4827 /* 0xD0 - 0xDF */
4828 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4829 /* 0xE0 - 0xEF */
4830 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4831 N, N, N, N, N, N, N, N,
4832 /* 0xF0 - 0xFF */
4833 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4834};
4835
4836static const struct instr_dual instr_dual_0f_38_f0 = {
4837 I(DstReg | SrcMem | Mov, em_movbe), N
4838};
4839
4840static const struct instr_dual instr_dual_0f_38_f1 = {
4841 I(DstMem | SrcReg | Mov, em_movbe), N
4842};
4843
4844static const struct gprefix three_byte_0f_38_f0 = {
4845 ID(0, &instr_dual_0f_38_f0), N, N, N
4846};
4847
4848static const struct gprefix three_byte_0f_38_f1 = {
4849 ID(0, &instr_dual_0f_38_f1), N, N, N
4850};
4851
4852/*
4853 * Three-byte (0x0F 0x38) opcode map: only the MOVBE encodings at
4854 * 0xF0/0xF1 are emulated here.
4855 */
4856static const struct opcode opcode_map_0f_38[256] = {
4857 /* 0x00 - 0x7F */
4858 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4859 /* 0x80 - 0xEF */
4860 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4861 /* 0xF0 - 0xF1 */
4862 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4863 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4864 /* 0xF2 - 0xFF */
4865 N, N, X4(N), X8(N)
4866};
4867
4868#undef D
4869#undef N
4870#undef G
4871#undef GD
4872#undef I
4873#undef GP
4874#undef EXT
4875#undef MD
4876#undef ID
4877
4878#undef D2bv
4879#undef D2bvIP
4880#undef I2bv
4881#undef I2bvIP
4882#undef F6ALU
4883
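/*
 * Descriptive note (added): immediate operands are fetched at the
 * instruction's operand size but capped at 4 bytes; only OpImm64 (the
 * 0xB8..0xBF MOV reg, imm64 forms) passes the full op_bytes to decode_imm().
 */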
4884static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4885{
4886 unsigned size;
4887
4888 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4889 if (size == 8)
4890 size = 4;
4891 return size;
4892}
4893
4894static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4895 unsigned size, bool sign_extension)
4896{
4897 int rc = X86EMUL_CONTINUE;
4898
4899 op->type = OP_IMM;
4900 op->bytes = size;
4901 op->addr.mem.ea = ctxt->_eip;
4902
4903 switch (op->bytes) {
4904 case 1:
4905 op->val = insn_fetch(s8, ctxt);
4906 break;
4907 case 2:
4908 op->val = insn_fetch(s16, ctxt);
4909 break;
4910 case 4:
4911 op->val = insn_fetch(s32, ctxt);
4912 break;
4913 case 8:
4914 op->val = insn_fetch(s64, ctxt);
4915 break;
4916 }
4917 if (!sign_extension) {
4918 switch (op->bytes) {
4919 case 1:
4920 op->val &= 0xff;
4921 break;
4922 case 2:
4923 op->val &= 0xffff;
4924 break;
4925 case 4:
4926 op->val &= 0xffffffff;
4927 break;
4928 }
4929 }
4930done:
4931 return rc;
4932}
4933
4934static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4935 unsigned d)
4936{
4937 int rc = X86EMUL_CONTINUE;
4938
4939 switch (d) {
4940 case OpReg:
4941 decode_register_operand(ctxt, op);
4942 break;
4943 case OpImmUByte:
4944 rc = decode_imm(ctxt, op, 1, false);
4945 break;
4946 case OpMem:
4947 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4948 mem_common:
4949 *op = ctxt->memop;
4950 ctxt->memopp = op;
4951 if (ctxt->d & BitOp)
4952 fetch_bit_operand(ctxt);
4953 op->orig_val = op->val;
4954 break;
4955 case OpMem64:
4956 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4957 goto mem_common;
4958 case OpAcc:
4959 op->type = OP_REG;
4960 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4961 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4962 fetch_register_operand(op);
4963 op->orig_val = op->val;
4964 break;
4965 case OpAccLo:
4966 op->type = OP_REG;
4967 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4968 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4969 fetch_register_operand(op);
4970 op->orig_val = op->val;
4971 break;
4972 case OpAccHi:
4973 if (ctxt->d & ByteOp) {
4974 op->type = OP_NONE;
4975 break;
4976 }
4977 op->type = OP_REG;
4978 op->bytes = ctxt->op_bytes;
4979 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4980 fetch_register_operand(op);
4981 op->orig_val = op->val;
4982 break;
4983 case OpDI:
4984 op->type = OP_MEM;
4985 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4986 op->addr.mem.ea =
4987 register_address(ctxt, VCPU_REGS_RDI);
4988 op->addr.mem.seg = VCPU_SREG_ES;
4989 op->val = 0;
4990 op->count = 1;
4991 break;
4992 case OpDX:
4993 op->type = OP_REG;
4994 op->bytes = 2;
4995 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4996 fetch_register_operand(op);
4997 break;
4998 case OpCL:
4999 op->type = OP_IMM;
5000 op->bytes = 1;
5001 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5002 break;
5003 case OpImmByte:
5004 rc = decode_imm(ctxt, op, 1, true);
5005 break;
5006 case OpOne:
5007 op->type = OP_IMM;
5008 op->bytes = 1;
5009 op->val = 1;
5010 break;
5011 case OpImm:
5012 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5013 break;
5014 case OpImm64:
5015 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5016 break;
5017 case OpMem8:
5018 ctxt->memop.bytes = 1;
5019 if (ctxt->memop.type == OP_REG) {
5020 ctxt->memop.addr.reg = decode_register(ctxt,
5021 ctxt->modrm_rm, true);
5022 fetch_register_operand(&ctxt->memop);
5023 }
5024 goto mem_common;
5025 case OpMem16:
5026 ctxt->memop.bytes = 2;
5027 goto mem_common;
5028 case OpMem32:
5029 ctxt->memop.bytes = 4;
5030 goto mem_common;
5031 case OpImmU16:
5032 rc = decode_imm(ctxt, op, 2, false);
5033 break;
5034 case OpImmU:
5035 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5036 break;
5037 case OpSI:
5038 op->type = OP_MEM;
5039 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5040 op->addr.mem.ea =
5041 register_address(ctxt, VCPU_REGS_RSI);
5042 op->addr.mem.seg = ctxt->seg_override;
5043 op->val = 0;
5044 op->count = 1;
5045 break;
5046 case OpXLat:
5047 op->type = OP_MEM;
5048 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5049 op->addr.mem.ea =
5050 address_mask(ctxt,
5051 reg_read(ctxt, VCPU_REGS_RBX) +
5052 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5053 op->addr.mem.seg = ctxt->seg_override;
5054 op->val = 0;
5055 break;
5056 case OpImmFAddr:
5057 op->type = OP_IMM;
5058 op->addr.mem.ea = ctxt->_eip;
5059 op->bytes = ctxt->op_bytes + 2;
5060 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5061 break;
5062 case OpMemFAddr:
5063 ctxt->memop.bytes = ctxt->op_bytes + 2;
5064 goto mem_common;
5065 case OpES:
5066 op->type = OP_IMM;
5067 op->val = VCPU_SREG_ES;
5068 break;
5069 case OpCS:
5070 op->type = OP_IMM;
5071 op->val = VCPU_SREG_CS;
5072 break;
5073 case OpSS:
5074 op->type = OP_IMM;
5075 op->val = VCPU_SREG_SS;
5076 break;
5077 case OpDS:
5078 op->type = OP_IMM;
5079 op->val = VCPU_SREG_DS;
5080 break;
5081 case OpFS:
5082 op->type = OP_IMM;
5083 op->val = VCPU_SREG_FS;
5084 break;
5085 case OpGS:
5086 op->type = OP_IMM;
5087 op->val = VCPU_SREG_GS;
5088 break;
5089 case OpImplicit:
5090 /* Special instructions do their own operand decoding. */
5091 default:
5092 op->type = OP_NONE;
5093 break;
5094 }
5095
5096done:
5097 return rc;
5098}
5099
5100int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5101{
5102 int rc = X86EMUL_CONTINUE;
5103 int mode = ctxt->mode;
5104 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5105 bool op_prefix = false;
5106 bool has_seg_override = false;
5107 struct opcode opcode;
5108 u16 dummy;
5109 struct desc_struct desc;
5110
5111 ctxt->memop.type = OP_NONE;
5112 ctxt->memopp = NULL;
5113 ctxt->_eip = ctxt->eip;
5114 ctxt->fetch.ptr = ctxt->fetch.data;
5115 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5116 ctxt->opcode_len = 1;
5117 if (insn_len > 0)
5118 memcpy(ctxt->fetch.data, insn, insn_len);
5119 else {
5120 rc = __do_insn_fetch_bytes(ctxt, 1);
5121 if (rc != X86EMUL_CONTINUE)
5122 return rc;
5123 }
5124
5125 switch (mode) {
5126 case X86EMUL_MODE_REAL:
5127 case X86EMUL_MODE_VM86:
5128 def_op_bytes = def_ad_bytes = 2;
5129 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5130 if (desc.d)
5131 def_op_bytes = def_ad_bytes = 4;
5132 break;
5133 case X86EMUL_MODE_PROT16:
5134 def_op_bytes = def_ad_bytes = 2;
5135 break;
5136 case X86EMUL_MODE_PROT32:
5137 def_op_bytes = def_ad_bytes = 4;
5138 break;
5139#ifdef CONFIG_X86_64
5140 case X86EMUL_MODE_PROT64:
5141 def_op_bytes = 4;
5142 def_ad_bytes = 8;
5143 break;
5144#endif
5145 default:
5146 return EMULATION_FAILED;
5147 }
5148
5149 ctxt->op_bytes = def_op_bytes;
5150 ctxt->ad_bytes = def_ad_bytes;
5151
5152 /* Legacy prefixes. */
5153 for (;;) {
5154 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5155 case 0x66: /* operand-size override */
5156 op_prefix = true;
5157 /* switch between 2/4 bytes */
5158 ctxt->op_bytes = def_op_bytes ^ 6;
5159 break;
5160 case 0x67: /* address-size override */
5161 if (mode == X86EMUL_MODE_PROT64)
5162 /* switch between 4/8 bytes */
5163 ctxt->ad_bytes = def_ad_bytes ^ 12;
5164 else
5165 /* switch between 2/4 bytes */
5166 ctxt->ad_bytes = def_ad_bytes ^ 6;
5167 break;
5168 case 0x26: /* ES override */
5169 case 0x2e: /* CS override */
5170 case 0x36: /* SS override */
5171 case 0x3e: /* DS override */
5172 has_seg_override = true;
5173 ctxt->seg_override = (ctxt->b >> 3) & 3;
5174 break;
5175 case 0x64: /* FS override */
5176 case 0x65: /* GS override */
5177 has_seg_override = true;
5178 ctxt->seg_override = ctxt->b & 7;
5179 break;
5180 case 0x40 ... 0x4f: /* REX */
5181 if (mode != X86EMUL_MODE_PROT64)
5182 goto done_prefixes;
5183 ctxt->rex_prefix = ctxt->b;
5184 continue;
5185 case 0xf0: /* LOCK */
5186 ctxt->lock_prefix = 1;
5187 break;
5188 case 0xf2: /* REPNE/REPNZ */
5189 case 0xf3: /* REP/REPE/REPZ */
5190 ctxt->rep_prefix = ctxt->b;
5191 break;
5192 default:
5193 goto done_prefixes;
5194 }
5195 /*
5196 * Any legacy prefix after a REX prefix nullifies its effect.
5197 */
5198 ctxt->rex_prefix = 0;
5199 }
5200
5201done_prefixes:
5202
5203 /* REX prefix. */
5204 if (ctxt->rex_prefix & 8)
5205 ctxt->op_bytes = 8; /* REX.W */
5206
5207 /* Opcode byte(s). */
5208 opcode = opcode_table[ctxt->b];
5209 /* Two-byte opcode? */
5210 if (ctxt->b == 0x0f) {
5211 ctxt->opcode_len = 2;
5212 ctxt->b = insn_fetch(u8, ctxt);
5213 opcode = twobyte_table[ctxt->b];
5214
5215 /* 0x0F 0x38 opcode map */
5216 if (ctxt->b == 0x38) {
5217 ctxt->opcode_len = 3;
5218 ctxt->b = insn_fetch(u8, ctxt);
5219 opcode = opcode_map_0f_38[ctxt->b];
5220 }
5221 }
5222 ctxt->d = opcode.flags;
5223
5224 if (ctxt->d & ModRM)
5225 ctxt->modrm = insn_fetch(u8, ctxt);
5226
5227 /* vex-prefix instructions are not implemented */
5228 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5229 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5230 ctxt->d = NotImpl;
5231 }
5232
5233 while (ctxt->d & GroupMask) {
5234 switch (ctxt->d & GroupMask) {
5235 case Group:
5236 goffset = (ctxt->modrm >> 3) & 7;
5237 opcode = opcode.u.group[goffset];
5238 break;
5239 case GroupDual:
5240 goffset = (ctxt->modrm >> 3) & 7;
5241 if ((ctxt->modrm >> 6) == 3)
5242 opcode = opcode.u.gdual->mod3[goffset];
5243 else
5244 opcode = opcode.u.gdual->mod012[goffset];
5245 break;
5246 case RMExt:
5247 goffset = ctxt->modrm & 7;
5248 opcode = opcode.u.group[goffset];
5249 break;
5250 case Prefix:
5251 if (ctxt->rep_prefix && op_prefix)
5252 return EMULATION_FAILED;
5253 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5254 switch (simd_prefix) {
5255 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5256 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5257 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5258 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5259 }
5260 break;
5261 case Escape:
5262 if (ctxt->modrm > 0xbf)
5263 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5264 else
5265 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5266 break;
5267 case InstrDual:
5268 if ((ctxt->modrm >> 6) == 3)
5269 opcode = opcode.u.idual->mod3;
5270 else
5271 opcode = opcode.u.idual->mod012;
5272 break;
5273 case ModeDual:
5274 if (ctxt->mode == X86EMUL_MODE_PROT64)
5275 opcode = opcode.u.mdual->mode64;
5276 else
5277 opcode = opcode.u.mdual->mode32;
5278 break;
5279 default:
5280 return EMULATION_FAILED;
5281 }
5282
5283 ctxt->d &= ~(u64)GroupMask;
5284 ctxt->d |= opcode.flags;
5285 }
5286
5287 /* Unrecognised? */
5288 if (ctxt->d == 0)
5289 return EMULATION_FAILED;
5290
5291 ctxt->execute = opcode.u.execute;
5292
5293 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5294 return EMULATION_FAILED;
5295
5296 if (unlikely(ctxt->d &
5297 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5298 No16))) {
5299 /*
5300 * These are copied unconditionally here, and checked unconditionally
5301 * in x86_emulate_insn().
5302 */
5303 ctxt->check_perm = opcode.check_perm;
5304 ctxt->intercept = opcode.intercept;
5305
5306 if (ctxt->d & NotImpl)
5307 return EMULATION_FAILED;
5308
5309 if (mode == X86EMUL_MODE_PROT64) {
5310 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5311 ctxt->op_bytes = 8;
5312 else if (ctxt->d & NearBranch)
5313 ctxt->op_bytes = 8;
5314 }
5315
5316 if (ctxt->d & Op3264) {
5317 if (mode == X86EMUL_MODE_PROT64)
5318 ctxt->op_bytes = 8;
5319 else
5320 ctxt->op_bytes = 4;
5321 }
5322
5323 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5324 ctxt->op_bytes = 4;
5325
5326 if (ctxt->d & Sse)
5327 ctxt->op_bytes = 16;
5328 else if (ctxt->d & Mmx)
5329 ctxt->op_bytes = 8;
5330 }
5331
5332 /* ModRM and SIB bytes. */
5333 if (ctxt->d & ModRM) {
5334 rc = decode_modrm(ctxt, &ctxt->memop);
5335 if (!has_seg_override) {
5336 has_seg_override = true;
5337 ctxt->seg_override = ctxt->modrm_seg;
5338 }
5339 } else if (ctxt->d & MemAbs)
5340 rc = decode_abs(ctxt, &ctxt->memop);
5341 if (rc != X86EMUL_CONTINUE)
5342 goto done;
5343
5344 if (!has_seg_override)
5345 ctxt->seg_override = VCPU_SREG_DS;
5346
5347 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5348
5349 /*
5350 * Decode and fetch the source operand: register, memory
5351 * or immediate.
5352 */
5353 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5354 if (rc != X86EMUL_CONTINUE)
5355 goto done;
5356
5357 /*
5358 * Decode and fetch the second source operand: register, memory
5359 * or immediate.
5360 */
5361 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5362 if (rc != X86EMUL_CONTINUE)
5363 goto done;
5364
5365 /* Decode and fetch the destination operand: register or memory. */
5366 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5367
5368 if (ctxt->rip_relative && likely(ctxt->memopp))
5369 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5370 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5371
5372done:
5373 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5374}
5375
5376bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5377{
5378 return ctxt->d & PageTable;
5379}
5380
5381static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5382{
5383 /*
5384 * The second termination condition only applies to REPE/REPZ and
5385 * REPNE/REPNZ. Check which repeat prefix is in use and test the
5386 * corresponding condition:
5387 * - if REPE/REPZ and ZF = 0 then done
5388 * - if REPNE/REPNZ and ZF = 1 then done
5389 */
5390 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5391 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5392 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5393 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5394 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5395 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5396 return true;
5397
5398 return false;
5399}
5400
5401static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5402{
5403 int rc;
5404
5405 rc = asm_safe("fwait");
5406
5407 if (unlikely(rc != X86EMUL_CONTINUE))
5408 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5409
5410 return X86EMUL_CONTINUE;
5411}
5412
5413static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5414 struct operand *op)
5415{
5416 if (op->type == OP_MM)
5417 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5418}
5419
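/*
 * Descriptive note (added): fop initially points at the byte-sized asm stub;
 * the 2/4/8-byte variants live FASTOP_SIZE bytes further on. The stub
 * computes the result and flags, which are merged back into eflags. A stub
 * that faults (e.g. a division error) is expected to return with fop
 * cleared, which is turned into an injected #DE below.
 */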
5420static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5421{
5422 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5423
5424 if (!(ctxt->d & ByteOp))
5425 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5426
5427 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5428 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5429 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5430 : "c"(ctxt->src2.val));
5431
5432 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5433 if (!fop)
5434 return emulate_de(ctxt);
5435 return X86EMUL_CONTINUE;
5436}
5437
5438void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5439{
5440 memset(&ctxt->rip_relative, 0,
5441 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5442
5443 ctxt->io_read.pos = 0;
5444 ctxt->io_read.end = 0;
5445 ctxt->mem_read.end = 0;
5446}
5447
5448int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5449{
5450 const struct x86_emulate_ops *ops = ctxt->ops;
5451 int rc = X86EMUL_CONTINUE;
5452 int saved_dst_type = ctxt->dst.type;
5453 unsigned emul_flags;
5454
5455 ctxt->mem_read.pos = 0;
5456
5457 /* LOCK prefix is allowed only with some instructions */
5458 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5459 rc = emulate_ud(ctxt);
5460 goto done;
5461 }
5462
5463 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5464 rc = emulate_ud(ctxt);
5465 goto done;
5466 }
5467
5468 emul_flags = ctxt->ops->get_hflags(ctxt);
5469 if (unlikely(ctxt->d &
5470 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5471 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5472 (ctxt->d & Undefined)) {
5473 rc = emulate_ud(ctxt);
5474 goto done;
5475 }
5476
5477 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5478 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5479 rc = emulate_ud(ctxt);
5480 goto done;
5481 }
5482
5483 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5484 rc = emulate_nm(ctxt);
5485 goto done;
5486 }
5487
5488 if (ctxt->d & Mmx) {
5489 rc = flush_pending_x87_faults(ctxt);
5490 if (rc != X86EMUL_CONTINUE)
5491 goto done;
5492 /*
5493 * Now that we know the fpu is exception safe, we can fetch
5494 * operands from it.
5495 */
5496 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5497 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5498 if (!(ctxt->d & Mov))
5499 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5500 }
5501
5502 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5503 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5504 X86_ICPT_PRE_EXCEPT);
5505 if (rc != X86EMUL_CONTINUE)
5506 goto done;
5507 }
5508
5509 /* Instruction can only be executed in protected mode */
5510 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5511 rc = emulate_ud(ctxt);
5512 goto done;
5513 }
5514
5515 /* Privileged instruction can be executed only in CPL=0 */
5516 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5517 if (ctxt->d & PrivUD)
5518 rc = emulate_ud(ctxt);
5519 else
5520 rc = emulate_gp(ctxt, 0);
5521 goto done;
5522 }
5523
5524 /* Do instruction specific permission checks */
5525 if (ctxt->d & CheckPerm) {
5526 rc = ctxt->check_perm(ctxt);
5527 if (rc != X86EMUL_CONTINUE)
5528 goto done;
5529 }
5530
5531 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5532 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5533 X86_ICPT_POST_EXCEPT);
5534 if (rc != X86EMUL_CONTINUE)
5535 goto done;
5536 }
5537
5538 if (ctxt->rep_prefix && (ctxt->d & String)) {
5539 /* All REP prefixes have the same first termination condition */
5540 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5541 string_registers_quirk(ctxt);
5542 ctxt->eip = ctxt->_eip;
5543 ctxt->eflags &= ~X86_EFLAGS_RF;
5544 goto done;
5545 }
5546 }
5547 }
5548
5549 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5550 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5551 ctxt->src.valptr, ctxt->src.bytes);
5552 if (rc != X86EMUL_CONTINUE)
5553 goto done;
5554 ctxt->src.orig_val64 = ctxt->src.val64;
5555 }
5556
5557 if (ctxt->src2.type == OP_MEM) {
5558 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5559 &ctxt->src2.val, ctxt->src2.bytes);
5560 if (rc != X86EMUL_CONTINUE)
5561 goto done;
5562 }
5563
5564 if ((ctxt->d & DstMask) == ImplicitOps)
5565 goto special_insn;
5566
5567
5568 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5569 /* optimisation - avoid slow emulated read if Mov */
5570 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5571 &ctxt->dst.val, ctxt->dst.bytes);
5572 if (rc != X86EMUL_CONTINUE) {
5573 if (!(ctxt->d & NoWrite) &&
5574 rc == X86EMUL_PROPAGATE_FAULT &&
5575 ctxt->exception.vector == PF_VECTOR)
5576 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5577 goto done;
5578 }
5579 }
5580
5581 ctxt->dst.orig_val64 = ctxt->dst.val64;
5582
5583special_insn:
5584
5585 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5586 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5587 X86_ICPT_POST_MEMACCESS);
5588 if (rc != X86EMUL_CONTINUE)
5589 goto done;
5590 }
5591
5592 if (ctxt->rep_prefix && (ctxt->d & String))
5593 ctxt->eflags |= X86_EFLAGS_RF;
5594 else
5595 ctxt->eflags &= ~X86_EFLAGS_RF;
5596
5597 if (ctxt->execute) {
5598 if (ctxt->d & Fastop) {
5599 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5600 rc = fastop(ctxt, fop);
5601 if (rc != X86EMUL_CONTINUE)
5602 goto done;
5603 goto writeback;
5604 }
5605 rc = ctxt->execute(ctxt);
5606 if (rc != X86EMUL_CONTINUE)
5607 goto done;
5608 goto writeback;
5609 }
5610
5611 if (ctxt->opcode_len == 2)
5612 goto twobyte_insn;
5613 else if (ctxt->opcode_len == 3)
5614 goto threebyte_insn;
5615
5616 switch (ctxt->b) {
5617 case 0x70 ... 0x7f: /* jcc (short) */
5618 if (test_cc(ctxt->b, ctxt->eflags))
5619 rc = jmp_rel(ctxt, ctxt->src.val);
5620 break;
5621 case 0x8d: /* lea r16/r32, m */
5622 ctxt->dst.val = ctxt->src.addr.mem.ea;
5623 break;
5624 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5625 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5626 ctxt->dst.type = OP_NONE;
5627 else
5628 rc = em_xchg(ctxt);
5629 break;
5630 case 0x98: /* cbw/cwde/cdqe */
5631 switch (ctxt->op_bytes) {
5632 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5633 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5634 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5635 }
5636 break;
5637 case 0xcc: /* int3 */
5638 rc = emulate_int(ctxt, 3);
5639 break;
5640 case 0xcd: /* int n */
5641 rc = emulate_int(ctxt, ctxt->src.val);
5642 break;
5643 case 0xce: /* into */
5644 if (ctxt->eflags & X86_EFLAGS_OF)
5645 rc = emulate_int(ctxt, 4);
5646 break;
5647 case 0xe9: /* jmp rel */
5648 case 0xeb: /* jmp rel short */
5649 rc = jmp_rel(ctxt, ctxt->src.val);
5650 ctxt->dst.type = OP_NONE;
5651 break;
5652 case 0xf4: /* hlt */
5653 ctxt->ops->halt(ctxt);
5654 break;
5655 case 0xf5: /* cmc */
5656 /* complement carry flag from eflags reg */
5657 ctxt->eflags ^= X86_EFLAGS_CF;
5658 break;
5659 case 0xf8: /* clc */
5660 ctxt->eflags &= ~X86_EFLAGS_CF;
5661 break;
5662 case 0xf9: /* stc */
5663 ctxt->eflags |= X86_EFLAGS_CF;
5664 break;
5665 case 0xfc: /* cld */
5666 ctxt->eflags &= ~X86_EFLAGS_DF;
5667 break;
5668 case 0xfd: /* std */
5669 ctxt->eflags |= X86_EFLAGS_DF;
5670 break;
5671 default:
5672 goto cannot_emulate;
5673 }
5674
5675 if (rc != X86EMUL_CONTINUE)
5676 goto done;
5677
5678writeback:
5679 if (ctxt->d & SrcWrite) {
5680 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5681 rc = writeback(ctxt, &ctxt->src);
5682 if (rc != X86EMUL_CONTINUE)
5683 goto done;
5684 }
5685 if (!(ctxt->d & NoWrite)) {
5686 rc = writeback(ctxt, &ctxt->dst);
5687 if (rc != X86EMUL_CONTINUE)
5688 goto done;
5689 }
5690
5691 /*
5692 * restore dst type in case the decoding will be reused
5693 * (happens for string instructions)
5694 */
5695 ctxt->dst.type = saved_dst_type;
5696
5697 if ((ctxt->d & SrcMask) == SrcSI)
5698 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5699
5700 if ((ctxt->d & DstMask) == DstDI)
5701 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5702
5703 if (ctxt->rep_prefix && (ctxt->d & String)) {
5704 unsigned int count;
5705 struct read_cache *r = &ctxt->io_read;
5706 if ((ctxt->d & SrcMask) == SrcSI)
5707 count = ctxt->src.count;
5708 else
5709 count = ctxt->dst.count;
5710 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5711
5712 if (!string_insn_completed(ctxt)) {
5713 /*
5714 * Re-enter guest when pio read ahead buffer is empty or,
5715 * if it is not used, after each 1024 iterations.
5716 */
5717 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5718 (r->end == 0 || r->end != r->pos)) {
5719 /*
5720 * Reset read cache. Usually happens before
5721 * decode, but since instruction is restarted
5722 * we have to do it here.
5723 */
5724 ctxt->mem_read.end = 0;
5725 writeback_registers(ctxt);
5726 return EMULATION_RESTART;
5727 }
5728 goto done; /* skip rip writeback */
5729 }
5730 ctxt->eflags &= ~X86_EFLAGS_RF;
5731 }
5732
5733 ctxt->eip = ctxt->_eip;
5734
5735done:
5736 if (rc == X86EMUL_PROPAGATE_FAULT) {
5737 WARN_ON(ctxt->exception.vector > 0x1f);
5738 ctxt->have_exception = true;
5739 }
5740 if (rc == X86EMUL_INTERCEPTED)
5741 return EMULATION_INTERCEPTED;
5742
5743 if (rc == X86EMUL_CONTINUE)
5744 writeback_registers(ctxt);
5745
5746 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5747
5748twobyte_insn:
5749 switch (ctxt->b) {
5750 case 0x09: /* wbinvd */
5751 (ctxt->ops->wbinvd)(ctxt);
5752 break;
5753 case 0x08: /* invd */
5754 case 0x0d: /* GrpP (prefetch) */
5755 case 0x18: /* Grp16 (prefetch/nop) */
5756 case 0x1f: /* nop */
5757 break;
5758 case 0x20: /* mov cr, reg */
5759 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5760 break;
5761 case 0x21: /* mov from dr to reg */
5762 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5763 break;
5764 case 0x40 ... 0x4f: /* cmov */
5765 if (test_cc(ctxt->b, ctxt->eflags))
5766 ctxt->dst.val = ctxt->src.val;
5767 else if (ctxt->op_bytes != 4)
5768 ctxt->dst.type = OP_NONE;
5769 break;
5770 case 0x80 ... 0x8f: /* jcc rel (near) */
5771 if (test_cc(ctxt->b, ctxt->eflags))
5772 rc = jmp_rel(ctxt, ctxt->src.val);
5773 break;
5774 case 0x90 ... 0x9f: /* setcc r/m8 */
5775 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5776 break;
5777 case 0xb6 ... 0xb7: /* movzx */
5778 ctxt->dst.bytes = ctxt->op_bytes;
5779 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5780 : (u16) ctxt->src.val;
5781 break;
5782 case 0xbe ... 0xbf: /* movsx */
5783 ctxt->dst.bytes = ctxt->op_bytes;
5784 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5785 (s16) ctxt->src.val;
5786 break;
5787 default:
5788 goto cannot_emulate;
5789 }
5790
5791threebyte_insn:
5792
5793 if (rc != X86EMUL_CONTINUE)
5794 goto done;
5795
5796 goto writeback;
5797
5798cannot_emulate:
5799 return EMULATION_FAILED;
5800}
5801
5802void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5803{
5804 invalidate_registers(ctxt);
5805}
5806
5807void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5808{
5809 writeback_registers(ctxt);
5810}
5811
5812bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5813{
5814 if (ctxt->rep_prefix && (ctxt->d & String))
5815 return false;
5816
5817 if (ctxt->d & TwoMemOp)
5818 return false;
5819
5820 return true;
5821}
5822