/*
 * emulate.c - x86 instruction decoder and emulator used by KVM for
 * instructions that cannot be executed directly by the hardware
 * (MMIO accesses, task switches, real and VM86 mode, etc.).
 */
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
38#define OpNone 0ull
39#define OpImplicit 1ull
40#define OpReg 2ull
41#define OpMem 3ull
42#define OpAcc 4ull
43#define OpDI 5ull
44#define OpMem64 6ull
45#define OpImmUByte 7ull
46#define OpDX 8ull
47#define OpCL 9ull
48#define OpImmByte 10ull
49#define OpOne 11ull
50#define OpImm 12ull
51#define OpMem16 13ull
52#define OpMem32 14ull
53#define OpImmU 15ull
54#define OpSI 16ull
55#define OpImmFAddr 17ull
56#define OpMemFAddr 18ull
57#define OpImmU16 19ull
58#define OpES 20ull
59#define OpCS 21ull
60#define OpSS 22ull
61#define OpDS 23ull
62#define OpFS 24ull
63#define OpGS 25ull
64#define OpMem8 26ull
65#define OpImm64 27ull
66#define OpXLat 28ull
67#define OpAccLo 29ull
68#define OpAccHi 30ull
69
70#define OpBits 5
71#define OpMask ((1ull << OpBits) - 1)

/*
 * Instruction decode flags, packed into the 56-bit 'flags' field of
 * struct opcode.  The low bits hold the destination, source and second
 * source operand types (the Op* values above, shifted by DstShift,
 * SrcShift and Src2Shift); the remaining bits describe properties of the
 * instruction that the decoder and the execution loop check for.
 */

/* Operand size: byte operand, or the specified/overridden operand size. */
83#define ByteOp (1<<0)
84
85#define DstShift 1
86#define ImplicitOps (OpImplicit << DstShift)
87#define DstReg (OpReg << DstShift)
88#define DstMem (OpMem << DstShift)
89#define DstAcc (OpAcc << DstShift)
90#define DstDI (OpDI << DstShift)
91#define DstMem64 (OpMem64 << DstShift)
92#define DstMem16 (OpMem16 << DstShift)
93#define DstImmUByte (OpImmUByte << DstShift)
94#define DstDX (OpDX << DstShift)
95#define DstAccLo (OpAccLo << DstShift)
96#define DstMask (OpMask << DstShift)
97
98#define SrcShift 6
99#define SrcNone (OpNone << SrcShift)
100#define SrcReg (OpReg << SrcShift)
101#define SrcMem (OpMem << SrcShift)
102#define SrcMem16 (OpMem16 << SrcShift)
103#define SrcMem32 (OpMem32 << SrcShift)
104#define SrcImm (OpImm << SrcShift)
105#define SrcImmByte (OpImmByte << SrcShift)
106#define SrcOne (OpOne << SrcShift)
107#define SrcImmUByte (OpImmUByte << SrcShift)
108#define SrcImmU (OpImmU << SrcShift)
109#define SrcSI (OpSI << SrcShift)
110#define SrcXLat (OpXLat << SrcShift)
111#define SrcImmFAddr (OpImmFAddr << SrcShift)
112#define SrcMemFAddr (OpMemFAddr << SrcShift)
113#define SrcAcc (OpAcc << SrcShift)
114#define SrcImmU16 (OpImmU16 << SrcShift)
115#define SrcImm64 (OpImm64 << SrcShift)
116#define SrcDX (OpDX << SrcShift)
117#define SrcMem8 (OpMem8 << SrcShift)
118#define SrcAccHi (OpAccHi << SrcShift)
119#define SrcMask (OpMask << SrcShift)
120#define BitOp (1<<11)
121#define MemAbs (1<<12)
122#define String (1<<13)
123#define Stack (1<<14)
124#define GroupMask (7<<15)
125#define Group (1<<15)
126#define GroupDual (2<<15)
127#define Prefix (3<<15)
128#define RMExt (4<<15)
129#define Escape (5<<15)
130#define InstrDual (6<<15)
131#define ModeDual (7<<15)
132#define Sse (1<<18)
133
134#define ModRM (1<<19)
135
136#define Mov (1<<20)
137
138#define Prot (1<<21)
139#define EmulateOnUD (1<<22)
140#define NoAccess (1<<23)
141#define Op3264 (1<<24)
142#define Undefined (1<<25)
143#define Lock (1<<26)
144#define Priv (1<<27)
145#define No64 (1<<28)
146#define PageTable (1 << 29)
147#define NotImpl (1 << 30)
148
149#define Src2Shift (31)
150#define Src2None (OpNone << Src2Shift)
151#define Src2Mem (OpMem << Src2Shift)
152#define Src2CL (OpCL << Src2Shift)
153#define Src2ImmByte (OpImmByte << Src2Shift)
154#define Src2One (OpOne << Src2Shift)
155#define Src2Imm (OpImm << Src2Shift)
156#define Src2ES (OpES << Src2Shift)
157#define Src2CS (OpCS << Src2Shift)
158#define Src2SS (OpSS << Src2Shift)
159#define Src2DS (OpDS << Src2Shift)
160#define Src2FS (OpFS << Src2Shift)
161#define Src2GS (OpGS << Src2Shift)
162#define Src2Mask (OpMask << Src2Shift)
163#define Mmx ((u64)1 << 40)
164#define AlignMask ((u64)7 << 41)
165#define Aligned ((u64)1 << 41)
166#define Unaligned ((u64)2 << 41)
167#define Avx ((u64)3 << 41)
168#define Aligned16 ((u64)4 << 41)
169#define Fastop ((u64)1 << 44)
170#define NoWrite ((u64)1 << 45)
171#define SrcWrite ((u64)1 << 46)
172#define NoMod ((u64)1 << 47)
173#define Intercept ((u64)1 << 48)
174#define CheckPerm ((u64)1 << 49)
175#define PrivUD ((u64)1 << 51)
176#define NearBranch ((u64)1 << 52)
177#define No16 ((u64)1 << 53)
178#define IncSP ((u64)1 << 54)
179#define TwoMemOp ((u64)1 << 55)
180
181#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
182
183#define X2(x...) x, x
184#define X3(x...) X2(x), x
185#define X4(x...) X2(x), X2(x)
186#define X5(x...) X4(x), x
187#define X6(x...) X4(x), X2(x)
188#define X7(x...) X4(x), X3(x)
189#define X8(x...) X4(x), X4(x)
190#define X16(x...) X8(x), X8(x)
191
192#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
193#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in: fastop pointer, out: zero if exception)
 *
 * Each variant is FASTOP_SIZE bytes long and aligned, so the entry point
 * for a given operand size can be computed from the base symbol instead
 * of going through a jump table.
 *
 * fastop functions are declared as taking a never-defined fastop
 * parameter, so they can't be called from C directly.
 */
212struct fastop;
213
214struct opcode {
215 u64 flags : 56;
216 u64 intercept : 8;
217 union {
218 int (*execute)(struct x86_emulate_ctxt *ctxt);
219 const struct opcode *group;
220 const struct group_dual *gdual;
221 const struct gprefix *gprefix;
222 const struct escape *esc;
223 const struct instr_dual *idual;
224 const struct mode_dual *mdual;
225 void (*fastop)(struct fastop *fake);
226 } u;
227 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
228};
229
230struct group_dual {
231 struct opcode mod012[8];
232 struct opcode mod3[8];
233};
234
235struct gprefix {
236 struct opcode pfx_no;
237 struct opcode pfx_66;
238 struct opcode pfx_f2;
239 struct opcode pfx_f3;
240};
241
242struct escape {
243 struct opcode op[8];
244 struct opcode high[64];
245};
246
247struct instr_dual {
248 struct opcode mod012;
249 struct opcode mod3;
250};
251
252struct mode_dual {
253 struct opcode mode32;
254 struct opcode mode64;
255};
256
257#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
258
259enum x86_transfer_type {
260 X86_TRANSFER_NONE,
261 X86_TRANSFER_CALL_JMP,
262 X86_TRANSFER_RET,
263 X86_TRANSFER_TASK_SWITCH,
264};
265
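/*
 * General-purpose registers are read lazily from the vcpu and cached in
 * ctxt->_regs: regs_valid tracks which entries have been read and
 * regs_dirty which ones writeback_registers() must flush back.
 */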
266static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
267{
268 if (!(ctxt->regs_valid & (1 << nr))) {
269 ctxt->regs_valid |= 1 << nr;
270 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
271 }
272 return ctxt->_regs[nr];
273}
274
275static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
276{
277 ctxt->regs_valid |= 1 << nr;
278 ctxt->regs_dirty |= 1 << nr;
279 return &ctxt->_regs[nr];
280}
281
282static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
283{
284 reg_read(ctxt, nr);
285 return reg_write(ctxt, nr);
286}
287
288static void writeback_registers(struct x86_emulate_ctxt *ctxt)
289{
290 unsigned reg;
291
292 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
293 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
294}
295
296static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
297{
298 ctxt->regs_dirty = 0;
299 ctxt->regs_valid = 0;
300}

/*
 * EFLAGS bits that the emulated arithmetic instructions may read and
 * update.
 */
306#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
307 X86_EFLAGS_PF|X86_EFLAGS_CF)
308
309#ifdef CONFIG_X86_64
310#define ON64(x) x
311#else
312#define ON64(x)
313#endif
314
315static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
316
317#define FOP_FUNC(name) \
318 ".align " __stringify(FASTOP_SIZE) " \n\t" \
319 ".type " name ", @function \n\t" \
320 name ":\n\t"
321
322#define FOP_RET "ret \n\t"
323
324#define FOP_START(op) \
325 extern void em_##op(struct fastop *fake); \
326 asm(".pushsection .text, \"ax\" \n\t" \
327 ".global em_" #op " \n\t" \
328 FOP_FUNC("em_" #op)
329
330#define FOP_END \
331 ".popsection")
332
333#define FOPNOP() \
334 FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
335 FOP_RET
336
337#define FOP1E(op, dst) \
338 FOP_FUNC(#op "_" #dst) \
339 "10: " #op " %" #dst " \n\t" FOP_RET
340
341#define FOP1EEX(op, dst) \
342 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
343
344#define FASTOP1(op) \
345 FOP_START(op) \
346 FOP1E(op##b, al) \
347 FOP1E(op##w, ax) \
348 FOP1E(op##l, eax) \
349 ON64(FOP1E(op##q, rax)) \
350 FOP_END

/* 1-operand, using src2 (for MUL/DIV with a register or memory operand) */
353#define FASTOP1SRC2(op, name) \
354 FOP_START(name) \
355 FOP1E(op, cl) \
356 FOP1E(op, cx) \
357 FOP1E(op, ecx) \
358 ON64(FOP1E(op, rcx)) \
359 FOP_END

/* 1-operand, using src2, with a fault fixup (for DIV/IDIV) */
362#define FASTOP1SRC2EX(op, name) \
363 FOP_START(name) \
364 FOP1EEX(op, cl) \
365 FOP1EEX(op, cx) \
366 FOP1EEX(op, ecx) \
367 ON64(FOP1EEX(op, rcx)) \
368 FOP_END
369
370#define FOP2E(op, dst, src) \
371 FOP_FUNC(#op "_" #dst "_" #src) \
372 #op " %" #src ", %" #dst " \n\t" FOP_RET
373
374#define FASTOP2(op) \
375 FOP_START(op) \
376 FOP2E(op##b, al, dl) \
377 FOP2E(op##w, ax, dx) \
378 FOP2E(op##l, eax, edx) \
379 ON64(FOP2E(op##q, rax, rdx)) \
380 FOP_END

/* 2-operand, no byte-sized variant */
383#define FASTOP2W(op) \
384 FOP_START(op) \
385 FOPNOP() \
386 FOP2E(op##w, ax, dx) \
387 FOP2E(op##l, eax, edx) \
388 ON64(FOP2E(op##q, rax, rdx)) \
389 FOP_END

/* 2-operand, the shift/rotate count is in CL */
392#define FASTOP2CL(op) \
393 FOP_START(op) \
394 FOP2E(op##b, al, cl) \
395 FOP2E(op##w, ax, cl) \
396 FOP2E(op##l, eax, cl) \
397 ON64(FOP2E(op##q, rax, cl)) \
398 FOP_END

/* 2-operand, source and destination reversed */
401#define FASTOP2R(op, name) \
402 FOP_START(name) \
403 FOP2E(op##b, dl, al) \
404 FOP2E(op##w, dx, ax) \
405 FOP2E(op##l, edx, eax) \
406 ON64(FOP2E(op##q, rdx, rax)) \
407 FOP_END
408
409#define FOP3E(op, dst, src, src2) \
410 FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
411 #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, no byte-sized variant, src2 is CL (SHLD/SHRD) */
414#define FASTOP3WCL(op) \
415 FOP_START(op) \
416 FOPNOP() \
417 FOP3E(op##w, ax, dx, cl) \
418 FOP3E(op##l, eax, edx, cl) \
419 ON64(FOP3E(op##q, rax, rdx, cl)) \
420 FOP_END

/* SETcc: one 4-byte-aligned stub per condition code, indexed by test_cc() */
423#define FOP_SETCC(op) \
424 ".align 4 \n\t" \
425 ".type " #op ", @function \n\t" \
426 #op ": \n\t" \
427 #op " %al \n\t" \
428 FOP_RET
429
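/*
 * Common fault fixup for the *_EX fastops: clear %esi (which holds the
 * fastop pointer) so that the caller can see that the operation faulted.
 */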
430asm(".pushsection .fixup, \"ax\"\n"
431 ".global kvm_fastop_exception \n"
432 "kvm_fastop_exception: xor %esi, %esi; ret\n"
433 ".popsection");
434
435FOP_START(setcc)
436FOP_SETCC(seto)
437FOP_SETCC(setno)
438FOP_SETCC(setc)
439FOP_SETCC(setnc)
440FOP_SETCC(setz)
441FOP_SETCC(setnz)
442FOP_SETCC(setbe)
443FOP_SETCC(setnbe)
444FOP_SETCC(sets)
445FOP_SETCC(setns)
446FOP_SETCC(setp)
447FOP_SETCC(setnp)
448FOP_SETCC(setl)
449FOP_SETCC(setnl)
450FOP_SETCC(setle)
451FOP_SETCC(setnle)
452FOP_END;
453
454FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
455FOP_END;

/*
 * Run a single instruction with an exception fixup; evaluates to
 * X86EMUL_UNHANDLEABLE if the instruction faults and X86EMUL_CONTINUE
 * otherwise.
 */
461#define asm_safe(insn, inoutclob...) \
462({ \
463 int _fault = 0; \
464 \
465 asm volatile("1:" insn "\n" \
466 "2:\n" \
467 ".pushsection .fixup, \"ax\"\n" \
468 "3: movl $1, %[_fault]\n" \
469 " jmp 2b\n" \
470 ".popsection\n" \
471 _ASM_EXTABLE(1b, 3b) \
472 : [_fault] "+qm"(_fault) inoutclob ); \
473 \
474 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
475})
476
477static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
478 enum x86_intercept intercept,
479 enum x86_intercept_stage stage)
480{
481 struct x86_instruction_info info = {
482 .intercept = intercept,
483 .rep_prefix = ctxt->rep_prefix,
484 .modrm_mod = ctxt->modrm_mod,
485 .modrm_reg = ctxt->modrm_reg,
486 .modrm_rm = ctxt->modrm_rm,
487 .src_val = ctxt->src.val64,
488 .dst_val = ctxt->dst.val64,
489 .src_bytes = ctxt->src.bytes,
490 .dst_bytes = ctxt->dst.bytes,
491 .ad_bytes = ctxt->ad_bytes,
492 .next_rip = ctxt->eip,
493 };
494
495 return ctxt->ops->intercept(ctxt, &info, stage);
496}
497
498static void assign_masked(ulong *dest, ulong src, ulong mask)
499{
500 *dest = (*dest & ~mask) | (src & mask);
501}
502
503static void assign_register(unsigned long *reg, u64 val, int bytes)
504{
505
506 switch (bytes) {
507 case 1:
508 *(u8 *)reg = (u8)val;
509 break;
510 case 2:
511 *(u16 *)reg = (u16)val;
512 break;
513 case 4:
514 *reg = (u32)val;
515 break;
516 case 8:
517 *reg = val;
518 break;
519 }
520}
521
522static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
523{
524 return (1UL << (ctxt->ad_bytes << 3)) - 1;
525}
526
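/*
 * Mask of valid stack-pointer bits: all 64 in long mode, otherwise 32 or
 * 16 bits depending on SS.D.
 */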
527static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
528{
529 u16 sel;
530 struct desc_struct ss;
531
532 if (ctxt->mode == X86EMUL_MODE_PROT64)
533 return ~0UL;
534 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
535 return ~0U >> ((ss.d ^ 1) * 16);
536}
537
538static int stack_size(struct x86_emulate_ctxt *ctxt)
539{
540 return (__fls(stack_mask(ctxt)) + 1) >> 3;
541}
542
543
544static inline unsigned long
545address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
546{
547 if (ctxt->ad_bytes == sizeof(unsigned long))
548 return reg;
549 else
550 return reg & ad_mask(ctxt);
551}
552
553static inline unsigned long
554register_address(struct x86_emulate_ctxt *ctxt, int reg)
555{
556 return address_mask(ctxt, reg_read(ctxt, reg));
557}
558
559static void masked_increment(ulong *reg, ulong mask, int inc)
560{
561 assign_masked(reg, *reg + inc, mask);
562}
563
564static inline void
565register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
566{
567 ulong *preg = reg_rmw(ctxt, reg);
568
569 assign_register(preg, *preg + inc, ctxt->ad_bytes);
570}
571
572static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
573{
574 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
575}
576
577static u32 desc_limit_scaled(struct desc_struct *desc)
578{
579 u32 limit = get_desc_limit(desc);
580
581 return desc->g ? (limit << 12) | 0xfff : limit;
582}
583
584static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
585{
586 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
587 return 0;
588
589 return ctxt->ops->get_cached_segment_base(ctxt, seg);
590}
591
592static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
593 u32 error, bool valid)
594{
595 WARN_ON(vec > 0x1f);
596 ctxt->exception.vector = vec;
597 ctxt->exception.error_code = error;
598 ctxt->exception.error_code_valid = valid;
599 return X86EMUL_PROPAGATE_FAULT;
600}
601
602static int emulate_db(struct x86_emulate_ctxt *ctxt)
603{
604 return emulate_exception(ctxt, DB_VECTOR, 0, false);
605}
606
607static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
608{
609 return emulate_exception(ctxt, GP_VECTOR, err, true);
610}
611
612static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
613{
614 return emulate_exception(ctxt, SS_VECTOR, err, true);
615}
616
617static int emulate_ud(struct x86_emulate_ctxt *ctxt)
618{
619 return emulate_exception(ctxt, UD_VECTOR, 0, false);
620}
621
622static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
623{
624 return emulate_exception(ctxt, TS_VECTOR, err, true);
625}
626
627static int emulate_de(struct x86_emulate_ctxt *ctxt)
628{
629 return emulate_exception(ctxt, DE_VECTOR, 0, false);
630}
631
632static int emulate_nm(struct x86_emulate_ctxt *ctxt)
633{
634 return emulate_exception(ctxt, NM_VECTOR, 0, false);
635}
636
637static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
638{
639 u16 selector;
640 struct desc_struct desc;
641
642 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
643 return selector;
644}
645
646static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
647 unsigned seg)
648{
649 u16 dummy;
650 u32 base3;
651 struct desc_struct desc;
652
653 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
654 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
655}

/*
 * Alignment rules for memory operands of 16 bytes or more: Unaligned and
 * Avx-encoded accesses are never checked, Aligned16 always requires
 * 16-byte alignment, and Aligned (the default) requires alignment to the
 * operand size.
 */
666static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
667{
668 u64 alignment = ctxt->d & AlignMask;
669
670 if (likely(size < 16))
671 return 1;
672
673 switch (alignment) {
674 case Unaligned:
675 case Avx:
676 return 1;
677 case Aligned16:
678 return 16;
679 case Aligned:
680 default:
681 return size;
682 }
683}
684
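/*
 * Translate a segmented address into a linear address, applying the
 * canonical, segment-limit and alignment checks appropriate for the given
 * mode.  *max_size returns how many bytes are accessible starting at the
 * given address.
 */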
685static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
686 struct segmented_address addr,
687 unsigned *max_size, unsigned size,
688 bool write, bool fetch,
689 enum x86emul_mode mode, ulong *linear)
690{
691 struct desc_struct desc;
692 bool usable;
693 ulong la;
694 u32 lim;
695 u16 sel;
696 u8 va_bits;
697
698 la = seg_base(ctxt, addr.seg) + addr.ea;
699 *max_size = 0;
700 switch (mode) {
701 case X86EMUL_MODE_PROT64:
702 *linear = la;
703 va_bits = ctxt_virt_addr_bits(ctxt);
704 if (get_canonical(la, va_bits) != la)
705 goto bad;
706
707 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
708 if (size > *max_size)
709 goto bad;
710 break;
711 default:
712 *linear = la = (u32)la;
713 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
714 addr.seg);
715 if (!usable)
716 goto bad;
717
718 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
719 || !(desc.type & 2)) && write)
720 goto bad;
721
722 if (!fetch && (desc.type & 8) && !(desc.type & 2))
723 goto bad;
724 lim = desc_limit_scaled(&desc);
725 if (!(desc.type & 8) && (desc.type & 4)) {
726
727 if (addr.ea <= lim)
728 goto bad;
729 lim = desc.d ? 0xffffffff : 0xffff;
730 }
731 if (addr.ea > lim)
732 goto bad;
733 if (lim == 0xffffffff)
734 *max_size = ~0u;
735 else {
736 *max_size = (u64)lim + 1 - addr.ea;
737 if (size > *max_size)
738 goto bad;
739 }
740 break;
741 }
742 if (la & (insn_alignment(ctxt, size) - 1))
743 return emulate_gp(ctxt, 0);
744 return X86EMUL_CONTINUE;
745bad:
746 if (addr.seg == VCPU_SREG_SS)
747 return emulate_ss(ctxt, 0);
748 else
749 return emulate_gp(ctxt, 0);
750}
751
752static int linearize(struct x86_emulate_ctxt *ctxt,
753 struct segmented_address addr,
754 unsigned size, bool write,
755 ulong *linear)
756{
757 unsigned max_size;
758 return __linearize(ctxt, addr, &max_size, size, write, false,
759 ctxt->mode, linear);
760}
761
762static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
763 enum x86emul_mode mode)
764{
765 ulong linear;
766 int rc;
767 unsigned max_size;
768 struct segmented_address addr = { .seg = VCPU_SREG_CS,
769 .ea = dst };
770
771 if (ctxt->op_bytes != sizeof(unsigned long))
772 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
773 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
774 if (rc == X86EMUL_CONTINUE)
775 ctxt->_eip = addr.ea;
776 return rc;
777}
778
779static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
780{
781 return assign_eip(ctxt, dst, ctxt->mode);
782}
783
784static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
785 const struct desc_struct *cs_desc)
786{
787 enum x86emul_mode mode = ctxt->mode;
788 int rc;
789
790#ifdef CONFIG_X86_64
791 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
792 if (cs_desc->l) {
793 u64 efer = 0;
794
795 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
796 if (efer & EFER_LMA)
797 mode = X86EMUL_MODE_PROT64;
798 } else
799 mode = X86EMUL_MODE_PROT32;
800 }
801#endif
802 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
803 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
804 rc = assign_eip(ctxt, dst, mode);
805 if (rc == X86EMUL_CONTINUE)
806 ctxt->mode = mode;
807 return rc;
808}
809
810static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
811{
812 return assign_eip_near(ctxt, ctxt->_eip + rel);
813}
814
815static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
816 void *data, unsigned size)
817{
818 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
819}
820
821static int linear_write_system(struct x86_emulate_ctxt *ctxt,
822 ulong linear, void *data,
823 unsigned int size)
824{
825 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
826}
827
828static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
829 struct segmented_address addr,
830 void *data,
831 unsigned size)
832{
833 int rc;
834 ulong linear;
835
836 rc = linearize(ctxt, addr, size, false, &linear);
837 if (rc != X86EMUL_CONTINUE)
838 return rc;
839 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
840}
841
842static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
843 struct segmented_address addr,
844 void *data,
845 unsigned int size)
846{
847 int rc;
848 ulong linear;
849
850 rc = linearize(ctxt, addr, size, true, &linear);
851 if (rc != X86EMUL_CONTINUE)
852 return rc;
853 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
854}

/*
 * Prefetch the remaining bytes of the instruction into the fetch cache,
 * without crossing a page boundary.
 */
860static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
861{
862 int rc;
863 unsigned size, max_size;
864 unsigned long linear;
865 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
866 struct segmented_address addr = { .seg = VCPU_SREG_CS,
867 .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We just
	 * have to avoid going beyond the 15-byte limit, the end of the
	 * segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
879 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
880 &linear);
881 if (unlikely(rc != X86EMUL_CONTINUE))
882 return rc;
883
884 size = min_t(unsigned, 15UL ^ cur_size, max_size);
885 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages, and one chunk has
	 * already been fetched at the start of decode, so if we still
	 * cannot satisfy op_size we must have hit the 15-byte limit or the
	 * end of the segment: raise #GP.
	 */
893 if (unlikely(size < op_size))
894 return emulate_gp(ctxt, 0);
895
896 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
897 size, &ctxt->exception);
898 if (unlikely(rc != X86EMUL_CONTINUE))
899 return rc;
900 ctxt->fetch.end += size;
901 return X86EMUL_CONTINUE;
902}
903
904static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
905 unsigned size)
906{
907 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
908
909 if (unlikely(done_size < size))
910 return __do_insn_fetch_bytes(ctxt, size - done_size);
911 else
912 return X86EMUL_CONTINUE;
913}

/* Fetch the next chunk of the instruction being emulated. */
916#define insn_fetch(_type, _ctxt) \
917({ _type _x; \
918 \
919 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
920 if (rc != X86EMUL_CONTINUE) \
921 goto done; \
922 ctxt->_eip += sizeof(_type); \
923 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
924 ctxt->fetch.ptr += sizeof(_type); \
925 _x; \
926})
927
928#define insn_fetch_arr(_arr, _size, _ctxt) \
929({ \
930 rc = do_insn_fetch_bytes(_ctxt, _size); \
931 if (rc != X86EMUL_CONTINUE) \
932 goto done; \
933 ctxt->_eip += (_size); \
934 memcpy(_arr, ctxt->fetch.ptr, _size); \
935 ctxt->fetch.ptr += (_size); \
936})

/*
 * Given the 'reg' portion of a ModRM byte, return a pointer to the
 * corresponding cached register.  For byte operands without a REX prefix,
 * register numbers 4-7 address AH/CH/DH/BH.
 */
943static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
944 int byteop)
945{
946 void *p;
947 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
948
949 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
950 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
951 else
952 p = reg_rmw(ctxt, modrm_reg);
953 return p;
954}
955
956static int read_descriptor(struct x86_emulate_ctxt *ctxt,
957 struct segmented_address addr,
958 u16 *size, unsigned long *address, int op_bytes)
959{
960 int rc;
961
962 if (op_bytes == 2)
963 op_bytes = 3;
964 *address = 0;
965 rc = segmented_read_std(ctxt, addr, size, 2);
966 if (rc != X86EMUL_CONTINUE)
967 return rc;
968 addr.ea += 2;
969 rc = segmented_read_std(ctxt, addr, address, op_bytes);
970 return rc;
971}
972
973FASTOP2(add);
974FASTOP2(or);
975FASTOP2(adc);
976FASTOP2(sbb);
977FASTOP2(and);
978FASTOP2(sub);
979FASTOP2(xor);
980FASTOP2(cmp);
981FASTOP2(test);
982
983FASTOP1SRC2(mul, mul_ex);
984FASTOP1SRC2(imul, imul_ex);
985FASTOP1SRC2EX(div, div_ex);
986FASTOP1SRC2EX(idiv, idiv_ex);
987
988FASTOP3WCL(shld);
989FASTOP3WCL(shrd);
990
991FASTOP2W(imul);
992
993FASTOP1(not);
994FASTOP1(neg);
995FASTOP1(inc);
996FASTOP1(dec);
997
998FASTOP2CL(rol);
999FASTOP2CL(ror);
1000FASTOP2CL(rcl);
1001FASTOP2CL(rcr);
1002FASTOP2CL(shl);
1003FASTOP2CL(shr);
1004FASTOP2CL(sar);
1005
1006FASTOP2W(bsf);
1007FASTOP2W(bsr);
1008FASTOP2W(bt);
1009FASTOP2W(bts);
1010FASTOP2W(btr);
1011FASTOP2W(btc);
1012
1013FASTOP2(xadd);
1014
1015FASTOP2R(cmp, cmp_r);
1016
1017static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1018{
	/* If the source is zero, skip writeback but still update the flags. */
1020 if (ctxt->src.val == 0)
1021 ctxt->dst.type = OP_NONE;
1022 return fastop(ctxt, em_bsf);
1023}
1024
1025static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1026{
	/* If the source is zero, skip writeback but still update the flags. */
1028 if (ctxt->src.val == 0)
1029 ctxt->dst.type = OP_NONE;
1030 return fastop(ctxt, em_bsr);
1031}
1032
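/*
 * Evaluate a condition code: load the guest's flags and call the matching
 * SETcc stub (4 bytes each, see FOP_SETCC) to get the result in AL.
 */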
1033static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1034{
1035 u8 rc;
1036 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1037
1038 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1039 asm("push %[flags]; popf; " CALL_NOSPEC
1040 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1041 return rc;
1042}
1043
1044static void fetch_register_operand(struct operand *op)
1045{
1046 switch (op->bytes) {
1047 case 1:
1048 op->val = *(u8 *)op->addr.reg;
1049 break;
1050 case 2:
1051 op->val = *(u16 *)op->addr.reg;
1052 break;
1053 case 4:
1054 op->val = *(u32 *)op->addr.reg;
1055 break;
1056 case 8:
1057 op->val = *(u64 *)op->addr.reg;
1058 break;
1059 }
1060}
1061
1062static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1063{
1064 switch (reg) {
1065 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1066 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1067 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1068 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1069 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1070 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1071 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1072 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1073#ifdef CONFIG_X86_64
1074 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1075 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1076 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1077 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1078 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1079 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1080 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1081 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1082#endif
1083 default: BUG();
1084 }
1085}
1086
1087static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1088 int reg)
1089{
1090 switch (reg) {
1091 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1092 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1093 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1094 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1095 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1096 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1097 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1098 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1099#ifdef CONFIG_X86_64
1100 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1101 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1102 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1103 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1104 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1105 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1106 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1107 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1108#endif
1109 default: BUG();
1110 }
1111}
1112
1113static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1114{
1115 switch (reg) {
1116 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1117 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1118 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1119 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1120 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1121 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1122 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1123 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1124 default: BUG();
1125 }
1126}
1127
1128static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1129{
1130 switch (reg) {
1131 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1132 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1133 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1134 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1135 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1136 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1137 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1138 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1139 default: BUG();
1140 }
1141}
1142
1143static int em_fninit(struct x86_emulate_ctxt *ctxt)
1144{
1145 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1146 return emulate_nm(ctxt);
1147
1148 asm volatile("fninit");
1149 return X86EMUL_CONTINUE;
1150}
1151
1152static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1153{
1154 u16 fcw;
1155
1156 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1157 return emulate_nm(ctxt);
1158
1159 asm volatile("fnstcw %0": "+m"(fcw));
1160
1161 ctxt->dst.val = fcw;
1162
1163 return X86EMUL_CONTINUE;
1164}
1165
1166static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1167{
1168 u16 fsw;
1169
1170 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1171 return emulate_nm(ctxt);
1172
1173 asm volatile("fnstsw %0": "+m"(fsw));
1174
1175 ctxt->dst.val = fsw;
1176
1177 return X86EMUL_CONTINUE;
1178}
1179
1180static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1181 struct operand *op)
1182{
1183 unsigned reg = ctxt->modrm_reg;
1184
1185 if (!(ctxt->d & ModRM))
1186 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1187
1188 if (ctxt->d & Sse) {
1189 op->type = OP_XMM;
1190 op->bytes = 16;
1191 op->addr.xmm = reg;
1192 read_sse_reg(ctxt, &op->vec_val, reg);
1193 return;
1194 }
1195 if (ctxt->d & Mmx) {
1196 reg &= 7;
1197 op->type = OP_MM;
1198 op->bytes = 8;
1199 op->addr.mm = reg;
1200 return;
1201 }
1202
1203 op->type = OP_REG;
1204 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1205 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1206
1207 fetch_register_operand(op);
1208 op->orig_val = op->val;
1209}
1210
1211static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1212{
1213 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1214 ctxt->modrm_seg = VCPU_SREG_SS;
1215}
1216
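/*
 * Decode the ModRM byte (plus any SIB byte and displacement) into either a
 * register operand or a memory operand's effective address and segment.
 */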
1217static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1218 struct operand *op)
1219{
1220 u8 sib;
1221 int index_reg, base_reg, scale;
1222 int rc = X86EMUL_CONTINUE;
1223 ulong modrm_ea = 0;
1224
1225 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1226 index_reg = (ctxt->rex_prefix << 2) & 8;
1227 base_reg = (ctxt->rex_prefix << 3) & 8;
1228
1229 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1230 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1231 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1232 ctxt->modrm_seg = VCPU_SREG_DS;
1233
1234 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1235 op->type = OP_REG;
1236 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1237 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1238 ctxt->d & ByteOp);
1239 if (ctxt->d & Sse) {
1240 op->type = OP_XMM;
1241 op->bytes = 16;
1242 op->addr.xmm = ctxt->modrm_rm;
1243 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1244 return rc;
1245 }
1246 if (ctxt->d & Mmx) {
1247 op->type = OP_MM;
1248 op->bytes = 8;
1249 op->addr.mm = ctxt->modrm_rm & 7;
1250 return rc;
1251 }
1252 fetch_register_operand(op);
1253 return rc;
1254 }
1255
1256 op->type = OP_MEM;
1257
1258 if (ctxt->ad_bytes == 2) {
1259 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1260 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1261 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1262 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
1265 switch (ctxt->modrm_mod) {
1266 case 0:
1267 if (ctxt->modrm_rm == 6)
1268 modrm_ea += insn_fetch(u16, ctxt);
1269 break;
1270 case 1:
1271 modrm_ea += insn_fetch(s8, ctxt);
1272 break;
1273 case 2:
1274 modrm_ea += insn_fetch(u16, ctxt);
1275 break;
1276 }
1277 switch (ctxt->modrm_rm) {
1278 case 0:
1279 modrm_ea += bx + si;
1280 break;
1281 case 1:
1282 modrm_ea += bx + di;
1283 break;
1284 case 2:
1285 modrm_ea += bp + si;
1286 break;
1287 case 3:
1288 modrm_ea += bp + di;
1289 break;
1290 case 4:
1291 modrm_ea += si;
1292 break;
1293 case 5:
1294 modrm_ea += di;
1295 break;
1296 case 6:
1297 if (ctxt->modrm_mod != 0)
1298 modrm_ea += bp;
1299 break;
1300 case 7:
1301 modrm_ea += bx;
1302 break;
1303 }
1304 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1305 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1306 ctxt->modrm_seg = VCPU_SREG_SS;
1307 modrm_ea = (u16)modrm_ea;
1308 } else {
		/* 32/64-bit ModR/M decode. */
1310 if ((ctxt->modrm_rm & 7) == 4) {
1311 sib = insn_fetch(u8, ctxt);
1312 index_reg |= (sib >> 3) & 7;
1313 base_reg |= sib & 7;
1314 scale = sib >> 6;
1315
1316 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1317 modrm_ea += insn_fetch(s32, ctxt);
1318 else {
1319 modrm_ea += reg_read(ctxt, base_reg);
1320 adjust_modrm_seg(ctxt, base_reg);
1321
1322 if ((ctxt->d & IncSP) &&
1323 base_reg == VCPU_REGS_RSP)
1324 modrm_ea += ctxt->op_bytes;
1325 }
1326 if (index_reg != 4)
1327 modrm_ea += reg_read(ctxt, index_reg) << scale;
1328 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1329 modrm_ea += insn_fetch(s32, ctxt);
1330 if (ctxt->mode == X86EMUL_MODE_PROT64)
1331 ctxt->rip_relative = 1;
1332 } else {
1333 base_reg = ctxt->modrm_rm;
1334 modrm_ea += reg_read(ctxt, base_reg);
1335 adjust_modrm_seg(ctxt, base_reg);
1336 }
1337 switch (ctxt->modrm_mod) {
1338 case 1:
1339 modrm_ea += insn_fetch(s8, ctxt);
1340 break;
1341 case 2:
1342 modrm_ea += insn_fetch(s32, ctxt);
1343 break;
1344 }
1345 }
1346 op->addr.mem.ea = modrm_ea;
1347 if (ctxt->ad_bytes != 8)
1348 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1349
1350done:
1351 return rc;
1352}
1353
1354static int decode_abs(struct x86_emulate_ctxt *ctxt,
1355 struct operand *op)
1356{
1357 int rc = X86EMUL_CONTINUE;
1358
1359 op->type = OP_MEM;
1360 switch (ctxt->ad_bytes) {
1361 case 2:
1362 op->addr.mem.ea = insn_fetch(u16, ctxt);
1363 break;
1364 case 4:
1365 op->addr.mem.ea = insn_fetch(u32, ctxt);
1366 break;
1367 case 8:
1368 op->addr.mem.ea = insn_fetch(u64, ctxt);
1369 break;
1370 }
1371done:
1372 return rc;
1373}
1374
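/*
 * For bit-test instructions (BT/BTS/BTR/BTC) with a memory destination and
 * a register bit index, fold the byte offset of the bit into the effective
 * address and keep only the in-operand bit offset in the source operand.
 */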
1375static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1376{
1377 long sv = 0, mask;
1378
1379 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1380 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1381
1382 if (ctxt->src.bytes == 2)
1383 sv = (s16)ctxt->src.val & (s16)mask;
1384 else if (ctxt->src.bytes == 4)
1385 sv = (s32)ctxt->src.val & (s32)mask;
1386 else
1387 sv = (s64)ctxt->src.val & (s64)mask;
1388
1389 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1390 ctxt->dst.addr.mem.ea + (sv >> 3));
1391 }
1392
1393
1394 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1395}
1396
1397static int read_emulated(struct x86_emulate_ctxt *ctxt,
1398 unsigned long addr, void *dest, unsigned size)
1399{
1400 int rc;
1401 struct read_cache *mc = &ctxt->mem_read;
1402
1403 if (mc->pos < mc->end)
1404 goto read_cached;
1405
1406 WARN_ON((mc->end + size) >= sizeof(mc->data));
1407
1408 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1409 &ctxt->exception);
1410 if (rc != X86EMUL_CONTINUE)
1411 return rc;
1412
1413 mc->end += size;
1414
1415read_cached:
1416 memcpy(dest, mc->data + mc->pos, size);
1417 mc->pos += size;
1418 return X86EMUL_CONTINUE;
1419}
1420
1421static int segmented_read(struct x86_emulate_ctxt *ctxt,
1422 struct segmented_address addr,
1423 void *data,
1424 unsigned size)
1425{
1426 int rc;
1427 ulong linear;
1428
1429 rc = linearize(ctxt, addr, size, false, &linear);
1430 if (rc != X86EMUL_CONTINUE)
1431 return rc;
1432 return read_emulated(ctxt, linear, data, size);
1433}
1434
1435static int segmented_write(struct x86_emulate_ctxt *ctxt,
1436 struct segmented_address addr,
1437 const void *data,
1438 unsigned size)
1439{
1440 int rc;
1441 ulong linear;
1442
1443 rc = linearize(ctxt, addr, size, true, &linear);
1444 if (rc != X86EMUL_CONTINUE)
1445 return rc;
1446 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1447 &ctxt->exception);
1448}
1449
1450static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1451 struct segmented_address addr,
1452 const void *orig_data, const void *data,
1453 unsigned size)
1454{
1455 int rc;
1456 ulong linear;
1457
1458 rc = linearize(ctxt, addr, size, true, &linear);
1459 if (rc != X86EMUL_CONTINUE)
1460 return rc;
1461 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1462 size, &ctxt->exception);
1463}
1464
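/*
 * Read from an I/O port.  For REP string input, several elements (bounded
 * by the io_read cache and the current page) are read in one go and then
 * consumed from the cache on subsequent iterations.
 */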
1465static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1466 unsigned int size, unsigned short port,
1467 void *dest)
1468{
1469 struct read_cache *rc = &ctxt->io_read;
1470
1471 if (rc->pos == rc->end) {
1472 unsigned int in_page, n;
1473 unsigned int count = ctxt->rep_prefix ?
1474 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1475 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1476 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1477 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1478 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1479 if (n == 0)
1480 n = 1;
1481 rc->pos = rc->end = 0;
1482 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1483 return 0;
1484 rc->end = n * size;
1485 }
1486
1487 if (ctxt->rep_prefix && (ctxt->d & String) &&
1488 !(ctxt->eflags & X86_EFLAGS_DF)) {
1489 ctxt->dst.data = rc->data + rc->pos;
1490 ctxt->dst.type = OP_MEM_STR;
1491 ctxt->dst.count = (rc->end - rc->pos) / size;
1492 rc->pos = rc->end;
1493 } else {
1494 memcpy(dest, rc->data + rc->pos, size);
1495 rc->pos += size;
1496 }
1497 return 1;
1498}
1499
1500static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1501 u16 index, struct desc_struct *desc)
1502{
1503 struct desc_ptr dt;
1504 ulong addr;
1505
1506 ctxt->ops->get_idt(ctxt, &dt);
1507
1508 if (dt.size < index * 8 + 7)
1509 return emulate_gp(ctxt, index << 3 | 0x2);
1510
1511 addr = dt.address + index * 8;
1512 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1513}
1514
1515static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1516 u16 selector, struct desc_ptr *dt)
1517{
1518 const struct x86_emulate_ops *ops = ctxt->ops;
1519 u32 base3 = 0;
1520
1521 if (selector & 1 << 2) {
1522 struct desc_struct desc;
1523 u16 sel;
1524
1525 memset(dt, 0, sizeof(*dt));
1526 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1527 VCPU_SREG_LDTR))
1528 return;
1529
1530 dt->size = desc_limit_scaled(&desc);
1531 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1532 } else
1533 ops->get_gdt(ctxt, dt);
1534}
1535
1536static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1537 u16 selector, ulong *desc_addr_p)
1538{
1539 struct desc_ptr dt;
1540 u16 index = selector >> 3;
1541 ulong addr;
1542
1543 get_descriptor_table_ptr(ctxt, selector, &dt);
1544
1545 if (dt.size < index * 8 + 7)
1546 return emulate_gp(ctxt, selector & 0xfffc);
1547
1548 addr = dt.address + index * 8;
1549
1550#ifdef CONFIG_X86_64
1551 if (addr >> 32 != 0) {
1552 u64 efer = 0;
1553
1554 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1555 if (!(efer & EFER_LMA))
1556 addr &= (u32)-1;
1557 }
1558#endif
1559
1560 *desc_addr_p = addr;
1561 return X86EMUL_CONTINUE;
1562}
1563
1564
1565static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1566 u16 selector, struct desc_struct *desc,
1567 ulong *desc_addr_p)
1568{
1569 int rc;
1570
1571 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1572 if (rc != X86EMUL_CONTINUE)
1573 return rc;
1574
1575 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1576}
1577
1578
1579static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1580 u16 selector, struct desc_struct *desc)
1581{
1582 int rc;
1583 ulong addr;
1584
1585 rc = get_descriptor_ptr(ctxt, selector, &addr);
1586 if (rc != X86EMUL_CONTINUE)
1587 return rc;
1588
1589 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1590}
1591
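/*
 * Load a segment register: read and validate the descriptor, applying the
 * privilege and type checks required for the target segment and the kind
 * of control transfer, then commit it via ->set_segment().
 */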
1592static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1593 u16 selector, int seg, u8 cpl,
1594 enum x86_transfer_type transfer,
1595 struct desc_struct *desc)
1596{
1597 struct desc_struct seg_desc, old_desc;
1598 u8 dpl, rpl;
1599 unsigned err_vec = GP_VECTOR;
1600 u32 err_code = 0;
1601 bool null_selector = !(selector & ~0x3);
1602 ulong desc_addr;
1603 int ret;
1604 u16 dummy;
1605 u32 base3 = 0;
1606
1607 memset(&seg_desc, 0, sizeof(seg_desc));
1608
1609 if (ctxt->mode == X86EMUL_MODE_REAL) {
		/*
		 * In real mode, load the selector as-is and set the cached
		 * base to selector << 4; the limit and attributes are left
		 * untouched.
		 */
1612 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1613 set_desc_base(&seg_desc, selector << 4);
1614 goto load;
1615 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 mode: build a flat 64 KiB, writable, DPL-3 segment. */
1617 set_desc_base(&seg_desc, selector << 4);
1618 set_desc_limit(&seg_desc, 0xffff);
1619 seg_desc.type = 3;
1620 seg_desc.p = 1;
1621 seg_desc.s = 1;
1622 seg_desc.dpl = 3;
1623 goto load;
1624 }
1625
1626 rpl = selector & 3;

	/* The TSS selector must reference the GDT (TI bit clear). */
1629 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1630 goto exception;

	/*
	 * A NULL selector is not allowed for TR or CS, nor for SS outside
	 * 64-bit mode.
	 */
1633 if (null_selector) {
1634 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1635 goto exception;
1636
1637 if (seg == VCPU_SREG_SS) {
1638 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1639 goto exception;

			/*
			 * Fabricate an expand-up, writable data segment with
			 * DPL = CPL so that ->set_segment() sees a consistent
			 * SS.DPL when a NULL SS is loaded in 64-bit mode.
			 */
1645 seg_desc.type = 3;
1646 seg_desc.p = 1;
1647 seg_desc.s = 1;
1648 seg_desc.dpl = cpl;
1649 seg_desc.d = 1;
1650 seg_desc.g = 1;
1651 }

		/* Skip the remaining descriptor checks. */
1654 goto load;
1655 }
1656
1657 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1658 if (ret != X86EMUL_CONTINUE)
1659 return ret;
1660
1661 err_code = selector & 0xfffc;
1662 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1663 GP_VECTOR;

	/* A system descriptor cannot be loaded into an ordinary segment register. */
1666 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1667 if (transfer == X86_TRANSFER_CALL_JMP)
1668 return X86EMUL_UNHANDLEABLE;
1669 goto exception;
1670 }
1671
1672 if (!seg_desc.p) {
1673 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1674 goto exception;
1675 }
1676
1677 dpl = seg_desc.dpl;
1678
1679 switch (seg) {
1680 case VCPU_SREG_SS:
		/*
		 * SS must be a writable data segment, and both its RPL and
		 * DPL must equal CPL.
		 */
1685 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1686 goto exception;
1687 break;
1688 case VCPU_SREG_CS:
1689 if (!(seg_desc.type & 8))
1690 goto exception;
1691
1692 if (seg_desc.type & 4) {
1693
1694 if (dpl > cpl)
1695 goto exception;
1696 } else {
1697
1698 if (rpl > cpl || dpl != cpl)
1699 goto exception;
1700 }
1701
1702 if (seg_desc.d && seg_desc.l) {
1703 u64 efer = 0;
1704
1705 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1706 if (efer & EFER_LMA)
1707 goto exception;
1708 }

		/* The RPL of the loaded CS always reflects CPL. */
1711 selector = (selector & 0xfffc) | cpl;
1712 break;
1713 case VCPU_SREG_TR:
1714 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1715 goto exception;
1716 old_desc = seg_desc;
1717 seg_desc.type |= 2;
1718 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1719 sizeof(seg_desc), &ctxt->exception);
1720 if (ret != X86EMUL_CONTINUE)
1721 return ret;
1722 break;
1723 case VCPU_SREG_LDTR:
1724 if (seg_desc.s || seg_desc.type != 2)
1725 goto exception;
1726 break;
1727 default:
		/*
		 * DS/ES/FS/GS: reject execute-only code segments; for data
		 * and non-conforming code segments, at least one of RPL and
		 * CPL must be no greater than DPL.
		 */
1733 if ((seg_desc.type & 0xa) == 0x8 ||
1734 (((seg_desc.type & 0xc) != 0xc) &&
1735 (rpl > dpl && cpl > dpl)))
1736 goto exception;
1737 break;
1738 }
1739
1740 if (seg_desc.s) {
1741
1742 if (!(seg_desc.type & 1)) {
1743 seg_desc.type |= 1;
1744 ret = write_segment_descriptor(ctxt, selector,
1745 &seg_desc);
1746 if (ret != X86EMUL_CONTINUE)
1747 return ret;
1748 }
1749 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1750 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1751 if (ret != X86EMUL_CONTINUE)
1752 return ret;
1753 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1754 ((u64)base3 << 32), ctxt))
1755 return emulate_gp(ctxt, 0);
1756 }
1757load:
1758 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1759 if (desc)
1760 *desc = seg_desc;
1761 return X86EMUL_CONTINUE;
1762exception:
1763 return emulate_exception(ctxt, err_vec, err_code, true);
1764}
1765
1766static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1767 u16 selector, int seg)
1768{
1769 u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * A NULL SS selector with RPL 3 cannot be loaded by MOV/POP/LSS in
	 * 64-bit mode; __load_segment_descriptor() still accepts it because
	 * 64-bit interrupt delivery may legitimately produce SS = 3, so the
	 * check is done here instead.
	 */
1781 if (seg == VCPU_SREG_SS && selector == 3 &&
1782 ctxt->mode == X86EMUL_MODE_PROT64)
1783 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1784
1785 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1786 X86_TRANSFER_NONE, NULL);
1787}
1788
1789static void write_register_operand(struct operand *op)
1790{
1791 return assign_register(op->addr.reg, op->val, op->bytes);
1792}
1793
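/*
 * Write an instruction's result operand back to its destination: register,
 * memory (using cmpxchg when the instruction carried a LOCK prefix), MMX or
 * SSE register.
 */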
1794static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1795{
1796 switch (op->type) {
1797 case OP_REG:
1798 write_register_operand(op);
1799 break;
1800 case OP_MEM:
1801 if (ctxt->lock_prefix)
1802 return segmented_cmpxchg(ctxt,
1803 op->addr.mem,
1804 &op->orig_val,
1805 &op->val,
1806 op->bytes);
1807 else
1808 return segmented_write(ctxt,
1809 op->addr.mem,
1810 &op->val,
1811 op->bytes);
1812 break;
1813 case OP_MEM_STR:
1814 return segmented_write(ctxt,
1815 op->addr.mem,
1816 op->data,
1817 op->bytes * op->count);
1818 break;
1819 case OP_XMM:
1820 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1821 break;
1822 case OP_MM:
1823 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1824 break;
1825 case OP_NONE:
		/* No writeback. */
1827 break;
1828 default:
1829 break;
1830 }
1831 return X86EMUL_CONTINUE;
1832}
1833
1834static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1835{
1836 struct segmented_address addr;
1837
1838 rsp_increment(ctxt, -bytes);
1839 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1840 addr.seg = VCPU_SREG_SS;
1841
1842 return segmented_write(ctxt, addr, data, bytes);
1843}
1844
1845static int em_push(struct x86_emulate_ctxt *ctxt)
1846{
	/* Disable writeback: the stack write is the only side effect. */
1848 ctxt->dst.type = OP_NONE;
1849 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1850}
1851
1852static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1853 void *dest, int len)
1854{
1855 int rc;
1856 struct segmented_address addr;
1857
1858 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1859 addr.seg = VCPU_SREG_SS;
1860 rc = segmented_read(ctxt, addr, dest, len);
1861 if (rc != X86EMUL_CONTINUE)
1862 return rc;
1863
1864 rsp_increment(ctxt, len);
1865 return rc;
1866}
1867
1868static int em_pop(struct x86_emulate_ctxt *ctxt)
1869{
1870 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1871}
1872
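/*
 * POPF: only the flag bits permitted by the current mode, CPL and IOPL may
 * change; the rest of EFLAGS is preserved.
 */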
1873static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1874 void *dest, int len)
1875{
1876 int rc;
1877 unsigned long val, change_mask;
1878 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1879 int cpl = ctxt->ops->cpl(ctxt);
1880
1881 rc = emulate_pop(ctxt, &val, len);
1882 if (rc != X86EMUL_CONTINUE)
1883 return rc;
1884
1885 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1886 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1887 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1888 X86_EFLAGS_AC | X86_EFLAGS_ID;
1889
1890 switch(ctxt->mode) {
1891 case X86EMUL_MODE_PROT64:
1892 case X86EMUL_MODE_PROT32:
1893 case X86EMUL_MODE_PROT16:
1894 if (cpl == 0)
1895 change_mask |= X86_EFLAGS_IOPL;
1896 if (cpl <= iopl)
1897 change_mask |= X86_EFLAGS_IF;
1898 break;
1899 case X86EMUL_MODE_VM86:
1900 if (iopl < 3)
1901 return emulate_gp(ctxt, 0);
1902 change_mask |= X86_EFLAGS_IF;
1903 break;
1904 default:
1905 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1906 break;
1907 }
1908
1909 *(unsigned long *)dest =
1910 (ctxt->eflags & ~change_mask) | (val & change_mask);
1911
1912 return rc;
1913}
1914
1915static int em_popf(struct x86_emulate_ctxt *ctxt)
1916{
1917 ctxt->dst.type = OP_REG;
1918 ctxt->dst.addr.reg = &ctxt->eflags;
1919 ctxt->dst.bytes = ctxt->op_bytes;
1920 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1921}
1922
1923static int em_enter(struct x86_emulate_ctxt *ctxt)
1924{
1925 int rc;
1926 unsigned frame_size = ctxt->src.val;
1927 unsigned nesting_level = ctxt->src2.val & 31;
1928 ulong rbp;
1929
1930 if (nesting_level)
1931 return X86EMUL_UNHANDLEABLE;
1932
1933 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1934 rc = push(ctxt, &rbp, stack_size(ctxt));
1935 if (rc != X86EMUL_CONTINUE)
1936 return rc;
1937 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1938 stack_mask(ctxt));
1939 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1940 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1941 stack_mask(ctxt));
1942 return X86EMUL_CONTINUE;
1943}
1944
1945static int em_leave(struct x86_emulate_ctxt *ctxt)
1946{
1947 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1948 stack_mask(ctxt));
1949 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1950}
1951
1952static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1953{
1954 int seg = ctxt->src2.val;
1955
1956 ctxt->src.val = get_segment_selector(ctxt, seg);
1957 if (ctxt->op_bytes == 4) {
1958 rsp_increment(ctxt, -2);
1959 ctxt->op_bytes = 2;
1960 }
1961
1962 return em_push(ctxt);
1963}
1964
1965static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1966{
1967 int seg = ctxt->src2.val;
1968 unsigned long selector;
1969 int rc;
1970
1971 rc = emulate_pop(ctxt, &selector, 2);
1972 if (rc != X86EMUL_CONTINUE)
1973 return rc;
1974
1975 if (ctxt->modrm_reg == VCPU_SREG_SS)
1976 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1977 if (ctxt->op_bytes > 2)
1978 rsp_increment(ctxt, ctxt->op_bytes - 2);
1979
1980 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1981 return rc;
1982}
1983
1984static int em_pusha(struct x86_emulate_ctxt *ctxt)
1985{
1986 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1987 int rc = X86EMUL_CONTINUE;
1988 int reg = VCPU_REGS_RAX;
1989
1990 while (reg <= VCPU_REGS_RDI) {
1991 (reg == VCPU_REGS_RSP) ?
1992 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1993
1994 rc = em_push(ctxt);
1995 if (rc != X86EMUL_CONTINUE)
1996 return rc;
1997
1998 ++reg;
1999 }
2000
2001 return rc;
2002}
2003
2004static int em_pushf(struct x86_emulate_ctxt *ctxt)
2005{
2006 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2007 return em_push(ctxt);
2008}
2009
2010static int em_popa(struct x86_emulate_ctxt *ctxt)
2011{
2012 int rc = X86EMUL_CONTINUE;
2013 int reg = VCPU_REGS_RDI;
2014 u32 val;
2015
2016 while (reg >= VCPU_REGS_RAX) {
2017 if (reg == VCPU_REGS_RSP) {
2018 rsp_increment(ctxt, ctxt->op_bytes);
2019 --reg;
2020 }
2021
2022 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2023 if (rc != X86EMUL_CONTINUE)
2024 break;
2025 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2026 --reg;
2027 }
2028 return rc;
2029}
2030
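/*
 * Real-mode interrupt/exception delivery: push FLAGS, CS and IP, clear
 * IF/TF/AC, then load CS:IP from the selected IVT entry.
 */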
2031static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2032{
2033 const struct x86_emulate_ops *ops = ctxt->ops;
2034 int rc;
2035 struct desc_ptr dt;
2036 gva_t cs_addr;
2037 gva_t eip_addr;
2038 u16 cs, eip;
2039
2040
2041 ctxt->src.val = ctxt->eflags;
2042 rc = em_push(ctxt);
2043 if (rc != X86EMUL_CONTINUE)
2044 return rc;
2045
2046 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2047
2048 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2049 rc = em_push(ctxt);
2050 if (rc != X86EMUL_CONTINUE)
2051 return rc;
2052
2053 ctxt->src.val = ctxt->_eip;
2054 rc = em_push(ctxt);
2055 if (rc != X86EMUL_CONTINUE)
2056 return rc;
2057
2058 ops->get_idt(ctxt, &dt);
2059
2060 eip_addr = dt.address + (irq << 2);
2061 cs_addr = dt.address + (irq << 2) + 2;
2062
2063 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2064 if (rc != X86EMUL_CONTINUE)
2065 return rc;
2066
2067 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2068 if (rc != X86EMUL_CONTINUE)
2069 return rc;
2070
2071 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2072 if (rc != X86EMUL_CONTINUE)
2073 return rc;
2074
2075 ctxt->_eip = eip;
2076
2077 return rc;
2078}
2079
2080int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2081{
2082 int rc;
2083
2084 invalidate_registers(ctxt);
2085 rc = __emulate_int_real(ctxt, irq);
2086 if (rc == X86EMUL_CONTINUE)
2087 writeback_registers(ctxt);
2088 return rc;
2089}
2090
2091static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2092{
2093 switch(ctxt->mode) {
2094 case X86EMUL_MODE_REAL:
2095 return __emulate_int_real(ctxt, irq);
2096 case X86EMUL_MODE_VM86:
2097 case X86EMUL_MODE_PROT16:
2098 case X86EMUL_MODE_PROT32:
2099 case X86EMUL_MODE_PROT64:
2100 default:
		/* Protected-mode and VM86 interrupt injection is not emulated. */
2102 return X86EMUL_UNHANDLEABLE;
2103 }
2104}
2105
2106static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2107{
2108 int rc = X86EMUL_CONTINUE;
2109 unsigned long temp_eip = 0;
2110 unsigned long temp_eflags = 0;
2111 unsigned long cs = 0;
2112 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2113 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2114 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2115 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2116 X86_EFLAGS_AC | X86_EFLAGS_ID |
2117 X86_EFLAGS_FIXED;
2118 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2119 X86_EFLAGS_VIP;
2120
2121
2122
2123 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2124
2125 if (rc != X86EMUL_CONTINUE)
2126 return rc;
2127
2128 if (temp_eip & ~0xffff)
2129 return emulate_gp(ctxt, 0);
2130
2131 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2132
2133 if (rc != X86EMUL_CONTINUE)
2134 return rc;
2135
2136 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2137
2138 if (rc != X86EMUL_CONTINUE)
2139 return rc;
2140
2141 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2142
2143 if (rc != X86EMUL_CONTINUE)
2144 return rc;
2145
2146 ctxt->_eip = temp_eip;
2147
2148 if (ctxt->op_bytes == 4)
2149 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2150 else if (ctxt->op_bytes == 2) {
2151 ctxt->eflags &= ~0xffff;
2152 ctxt->eflags |= temp_eflags;
2153 }
2154
2155 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2156 ctxt->eflags |= X86_EFLAGS_FIXED;
2157 ctxt->ops->set_nmi_mask(ctxt, false);
2158
2159 return rc;
2160}
2161
2162static int em_iret(struct x86_emulate_ctxt *ctxt)
2163{
2164 switch(ctxt->mode) {
2165 case X86EMUL_MODE_REAL:
2166 return emulate_iret_real(ctxt);
2167 case X86EMUL_MODE_VM86:
2168 case X86EMUL_MODE_PROT16:
2169 case X86EMUL_MODE_PROT32:
2170 case X86EMUL_MODE_PROT64:
2171 default:
		/* IRET is only emulated in real mode. */
2173 return X86EMUL_UNHANDLEABLE;
2174 }
2175}
2176
2177static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2178{
2179 int rc;
2180 unsigned short sel;
2181 struct desc_struct new_desc;
2182 u8 cpl = ctxt->ops->cpl(ctxt);
2183
2184 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2185
2186 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2187 X86_TRANSFER_CALL_JMP,
2188 &new_desc);
2189 if (rc != X86EMUL_CONTINUE)
2190 return rc;
2191
2192 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling (restoring the old CS) is not implemented. */
2194 if (rc != X86EMUL_CONTINUE)
2195 return X86EMUL_UNHANDLEABLE;
2196
2197 return rc;
2198}
2199
2200static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2201{
2202 return assign_eip_near(ctxt, ctxt->src.val);
2203}
2204
2205static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2206{
2207 int rc;
2208 long int old_eip;
2209
2210 old_eip = ctxt->_eip;
2211 rc = assign_eip_near(ctxt, ctxt->src.val);
2212 if (rc != X86EMUL_CONTINUE)
2213 return rc;
2214 ctxt->src.val = old_eip;
2215 rc = em_push(ctxt);
2216 return rc;
2217}
2218
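/*
 * CMPXCHG8B: compare EDX:EAX against the 64-bit memory operand; on a match
 * set ZF and store ECX:EBX, otherwise clear ZF and load the old value into
 * EDX:EAX.
 */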
2219static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2220{
2221 u64 old = ctxt->dst.orig_val64;
2222
2223 if (ctxt->dst.bytes == 16)
2224 return X86EMUL_UNHANDLEABLE;
2225
2226 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2227 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2228 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2229 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2230 ctxt->eflags &= ~X86_EFLAGS_ZF;
2231 } else {
2232 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2233 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2234
2235 ctxt->eflags |= X86_EFLAGS_ZF;
2236 }
2237 return X86EMUL_CONTINUE;
2238}
2239
2240static int em_ret(struct x86_emulate_ctxt *ctxt)
2241{
2242 int rc;
2243 unsigned long eip;
2244
2245 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2246 if (rc != X86EMUL_CONTINUE)
2247 return rc;
2248
2249 return assign_eip_near(ctxt, eip);
2250}
2251
2252static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2253{
2254 int rc;
2255 unsigned long eip, cs;
2256 int cpl = ctxt->ops->cpl(ctxt);
2257 struct desc_struct new_desc;
2258
2259 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2260 if (rc != X86EMUL_CONTINUE)
2261 return rc;
2262 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2263 if (rc != X86EMUL_CONTINUE)
2264 return rc;
2265
2266 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2267 return X86EMUL_UNHANDLEABLE;
2268 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2269 X86_TRANSFER_RET,
2270 &new_desc);
2271 if (rc != X86EMUL_CONTINUE)
2272 return rc;
2273 rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling (restoring the old CS) is not implemented. */
2275 if (rc != X86EMUL_CONTINUE)
2276 return X86EMUL_UNHANDLEABLE;
2277
2278 return rc;
2279}
2280
2281static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2282{
2283 int rc;
2284
2285 rc = em_ret_far(ctxt);
2286 if (rc != X86EMUL_CONTINUE)
2287 return rc;
2288 rsp_increment(ctxt, ctxt->src.val);
2289 return X86EMUL_CONTINUE;
2290}
2291
2292static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2293{
2294 /* Save real source value, then compare EAX against destination. */
2295 ctxt->dst.orig_val = ctxt->dst.val;
2296 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2297 ctxt->src.orig_val = ctxt->src.val;
2298 ctxt->src.val = ctxt->dst.orig_val;
2299 fastop(ctxt, em_cmp);
2300
2301 if (ctxt->eflags & X86_EFLAGS_ZF) {
2302 /* Success: write back to memory; no update of EAX */
2303 ctxt->src.type = OP_NONE;
2304 ctxt->dst.val = ctxt->src.orig_val;
2305 } else {
2306 /* Failure: write the value we saw to EAX. */
2307 ctxt->src.type = OP_REG;
2308 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2309 ctxt->src.val = ctxt->dst.orig_val;
2310 /* Create write-cycle to dest by writing the same value */
2311 ctxt->dst.val = ctxt->dst.orig_val;
2312 }
2313 return X86EMUL_CONTINUE;
2314}
2315
2316static int em_lseg(struct x86_emulate_ctxt *ctxt)
2317{
2318 int seg = ctxt->src2.val;
2319 unsigned short sel;
2320 int rc;
2321
2322 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2323
2324 rc = load_segment_descriptor(ctxt, sel, seg);
2325 if (rc != X86EMUL_CONTINUE)
2326 return rc;
2327
2328 ctxt->dst.val = ctxt->src.val;
2329 return rc;
2330}
2331
2332static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2333{
2334#ifdef CONFIG_X86_64
2335 u32 eax, ebx, ecx, edx;
2336
2337 eax = 0x80000001;
2338 ecx = 0;
2339 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2340 return edx & bit(X86_FEATURE_LM);
2341#else
2342 return false;
2343#endif
2344}
2345
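/*
 * Unpack the G/D/L/AVL/P/DPL/S/type attribute bits from the packed
 * 32-bit flag word used in the SMRAM segment-state layout.
 */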
2346static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2347{
2348 desc->g = (flags >> 23) & 1;
2349 desc->d = (flags >> 22) & 1;
2350 desc->l = (flags >> 21) & 1;
2351 desc->avl = (flags >> 20) & 1;
2352 desc->p = (flags >> 15) & 1;
2353 desc->dpl = (flags >> 13) & 3;
2354 desc->s = (flags >> 12) & 1;
2355 desc->type = (flags >> 8) & 15;
2356}
2357
2358static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2359 int n)
2360{
2361 struct desc_struct desc;
2362 int offset;
2363 u16 selector;
2364
2365 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2366
2367 if (n < 3)
2368 offset = 0x7f84 + n * 12;
2369 else
2370 offset = 0x7f2c + (n - 3) * 12;
2371
2372 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2373 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2374 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2375 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2376 return X86EMUL_CONTINUE;
2377}
2378
2379#ifdef CONFIG_X86_64
2380static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2381 int n)
2382{
2383 struct desc_struct desc;
2384 int offset;
2385 u16 selector;
2386 u32 base3;
2387
2388 offset = 0x7e00 + n * 16;
2389
2390 selector = GET_SMSTATE(u16, smstate, offset);
2391 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2392 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2393 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2394 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2395
2396 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2397 return X86EMUL_CONTINUE;
2398}
2399#endif
2400
2401static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2402 u64 cr0, u64 cr3, u64 cr4)
2403{
2404 int bad;
2405 u64 pcid;
2406
2407 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2408 pcid = 0;
2409 if (cr4 & X86_CR4_PCIDE) {
2410 pcid = cr3 & 0xfff;
2411 cr3 &= ~0xfff;
2412 }
2413
2414 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2415 if (bad)
2416 return X86EMUL_UNHANDLEABLE;
2417
2418 /*
2419 * Write CR4 with PCIDE clear first, then CR0. CR4.PCIDE and the
2420 * PCID bits of CR3 are restored last, once the rest of the
2421 * control-register state is in place.
2422 */
2423 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2424 if (bad)
2425 return X86EMUL_UNHANDLEABLE;
2426
2427 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2428 if (bad)
2429 return X86EMUL_UNHANDLEABLE;
2430
2431 if (cr4 & X86_CR4_PCIDE) {
2432 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2433 if (bad)
2434 return X86EMUL_UNHANDLEABLE;
2435 if (pcid) {
2436 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2437 if (bad)
2438 return X86EMUL_UNHANDLEABLE;
2439 }
2440
2441 }
2442
2443 return X86EMUL_CONTINUE;
2444}
2445
2446static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2447 const char *smstate)
2448{
2449 struct desc_struct desc;
2450 struct desc_ptr dt;
2451 u16 selector;
2452 u32 val, cr0, cr3, cr4;
2453 int i;
2454
2455 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2456 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2457 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2458 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2459
2460 for (i = 0; i < 8; i++)
2461 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2462
2463 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2464 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2465 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2466 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2467
2468 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2469 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2470 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2471 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2472 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2473
2474 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2475 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2476 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2477 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2478 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2479
2480 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2481 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2482 ctxt->ops->set_gdt(ctxt, &dt);
2483
2484 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2485 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2486 ctxt->ops->set_idt(ctxt, &dt);
2487
2488 for (i = 0; i < 6; i++) {
2489 int r = rsm_load_seg_32(ctxt, smstate, i);
2490 if (r != X86EMUL_CONTINUE)
2491 return r;
2492 }
2493
2494 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2495
2496 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2497
2498 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2499}
2500
2501#ifdef CONFIG_X86_64
2502static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2503 const char *smstate)
2504{
2505 struct desc_struct desc;
2506 struct desc_ptr dt;
2507 u64 val, cr0, cr3, cr4;
2508 u32 base3;
2509 u16 selector;
2510 int i, r;
2511
2512 for (i = 0; i < 16; i++)
2513 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2514
2515 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2516 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2517
2518 val = GET_SMSTATE(u32, smstate, 0x7f68);
2519 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2520 val = GET_SMSTATE(u32, smstate, 0x7f60);
2521 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2522
2523 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2524 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2525 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2526 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2527 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2528 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2529
2530 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2531 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2532 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2533 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2534 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2535 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2536
2537 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2538 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2539 ctxt->ops->set_idt(ctxt, &dt);
2540
2541 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2542 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2543 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2544 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2545 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2546 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2547
2548 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2549 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2550 ctxt->ops->set_gdt(ctxt, &dt);
2551
2552 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2553 if (r != X86EMUL_CONTINUE)
2554 return r;
2555
2556 for (i = 0; i < 6; i++) {
2557 r = rsm_load_seg_64(ctxt, smstate, i);
2558 if (r != X86EMUL_CONTINUE)
2559 return r;
2560 }
2561
2562 return X86EMUL_CONTINUE;
2563}
2564#endif
2565
2566static int em_rsm(struct x86_emulate_ctxt *ctxt)
2567{
2568 unsigned long cr0, cr4, efer;
2569 char buf[512];
2570 u64 smbase;
2571 int ret;
2572
2573 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2574 return emulate_ud(ctxt);
2575
2576 smbase = ctxt->ops->get_smbase(ctxt);
2577
2578 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2579 if (ret != X86EMUL_CONTINUE)
2580 return X86EMUL_UNHANDLEABLE;
2581
2582 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2583 ctxt->ops->set_nmi_mask(ctxt, false);
2584
2585 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2586 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2587
2588 /*
2589 * Get back to real mode, to prepare a safe state in which to load
2590 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2591 * supports long mode.
2592 */
2593 if (emulator_has_longmode(ctxt)) {
2594 struct desc_struct cs_desc;
2595
2596 /* Zero CR4.PCIDE before CR0.PG. */
2597 cr4 = ctxt->ops->get_cr(ctxt, 4);
2598 if (cr4 & X86_CR4_PCIDE)
2599 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2600
2601 /* Load CS with a fresh code segment, selector 0. */
2602 memset(&cs_desc, 0, sizeof(cs_desc));
2603 cs_desc.type = 0xb;
2604 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2605 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2606 }
2607
2608 /* Clear CR0.PE and CR0.PG; in the 64-bit case this also clears EFER.LMA. */
2609 cr0 = ctxt->ops->get_cr(ctxt, 0);
2610 if (cr0 & X86_CR0_PE)
2611 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2612
2613 if (emulator_has_longmode(ctxt)) {
2614 /* Clear CR4.PAE before clearing EFER.LME. */
2615 cr4 = ctxt->ops->get_cr(ctxt, 4);
2616 if (cr4 & X86_CR4_PAE)
2617 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2618
2619 /* And finally clear EFER, dropping LME as well. */
2620 efer = 0;
2621 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2622 }
2623
2624 /*
2625 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2626 * vCPU state (e.g. enter guest mode) before loading state from the
2627 * SMM state-save area.
2628 */
2629 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2630 return X86EMUL_UNHANDLEABLE;
2631
2632#ifdef CONFIG_X86_64
2633 if (emulator_has_longmode(ctxt))
2634 ret = rsm_load_state_64(ctxt, buf);
2635 else
2636#endif
2637 ret = rsm_load_state_32(ctxt, buf);
2638
2639 if (ret != X86EMUL_CONTINUE) {
2640 /* FIXME: a failed RSM should arguably cause a shutdown (triple fault) */
2641 return X86EMUL_UNHANDLEABLE;
2642 }
2643
2644 ctxt->ops->post_leave_smm(ctxt);
2645
2646 return X86EMUL_CONTINUE;
2647}
2648
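/*
 * Build the flat CS/SS descriptors installed by the SYSCALL/SYSENTER
 * family: base 0, 4 GiB limit, DPL 0, CS as a 32-bit code segment and
 * SS as a writable data segment. Callers tweak .l/.d/.dpl afterwards
 * for 64-bit or user-mode targets.
 */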
2649static void
2650setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2651 struct desc_struct *cs, struct desc_struct *ss)
2652{
2653 cs->l = 0;
2654 set_desc_base(cs, 0);
2655 cs->g = 1;
2656 set_desc_limit(cs, 0xfffff);
2657 cs->type = 0x0b;
2658 cs->s = 1;
2659 cs->dpl = 0;
2660 cs->p = 1;
2661 cs->d = 1;
2662 cs->avl = 0;
2663
2664 set_desc_base(ss, 0);
2665 set_desc_limit(ss, 0xfffff);
2666 ss->g = 1;
2667 ss->s = 1;
2668 ss->type = 0x03;
2669 ss->d = 1;
2670 ss->dpl = 0;
2671 ss->p = 1;
2672 ss->l = 0;
2673 ss->avl = 0;
2674}
2675
2676static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2677{
2678 u32 eax, ebx, ecx, edx;
2679
2680 eax = ecx = 0;
2681 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2682 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2683 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2684 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2685}
2686
2687static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2688{
2689 const struct x86_emulate_ops *ops = ctxt->ops;
2690 u32 eax, ebx, ecx, edx;
2691
2692 /*
2693 * SYSCALL should always be enabled in long mode, so only become
2694 * vendor specific (via CPUID) if other modes are active.
2695 */
2696 if (ctxt->mode == X86EMUL_MODE_PROT64)
2697 return true;
2698
2699 eax = 0x00000000;
2700 ecx = 0x00000000;
2701 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2702
2703 /*
2704 * Intel ("GenuineIntel"): Intel CPUs only support SYSCALL in 64-bit
2705 * long mode, so a 64-bit guest running a 32-bit compat application
2706 * gets #UD. That could be papered over by emulating the AMD
2707 * behaviour, but AMD CPUs cannot behave like Intel, so keep the
2708 * Intel response and refuse to emulate SYSCALL here.
2709 */
2710 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2711 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2712 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2713 return false;
2714
2715 /* AMD ("AuthenticAMD") */
2716 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2717 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2718 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2719 return true;
2720
2721 /* AMD ("AMDisbetter!") */
2722 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2723 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2724 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2725 return true;
2726
2727 /* Hygon ("HygonGenuine") */
2728 if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
2729 ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
2730 edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
2731 return true;
2732
2733 /*
2734 * default: not Intel, not AMD, not Hygon - apply Intel's stricter
2735 * behaviour and refuse to emulate SYSCALL.
2736 */
2737 return false;
2738}
2739
2740static int em_syscall(struct x86_emulate_ctxt *ctxt)
2741{
2742 const struct x86_emulate_ops *ops = ctxt->ops;
2743 struct desc_struct cs, ss;
2744 u64 msr_data;
2745 u16 cs_sel, ss_sel;
2746 u64 efer = 0;
2747
2748 /* syscall is not available in real mode */
2749 if (ctxt->mode == X86EMUL_MODE_REAL ||
2750 ctxt->mode == X86EMUL_MODE_VM86)
2751 return emulate_ud(ctxt);
2752
2753 if (!(em_syscall_is_enabled(ctxt)))
2754 return emulate_ud(ctxt);
2755
2756 ops->get_msr(ctxt, MSR_EFER, &efer);
2757 setup_syscalls_segments(ctxt, &cs, &ss);
2758
2759 if (!(efer & EFER_SCE))
2760 return emulate_ud(ctxt);
2761
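/* MSR_STAR[47:32] supplies the SYSCALL CS selector; SS is the next GDT entry. */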
2762 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2763 msr_data >>= 32;
2764 cs_sel = (u16)(msr_data & 0xfffc);
2765 ss_sel = (u16)(msr_data + 8);
2766
2767 if (efer & EFER_LMA) {
2768 cs.d = 0;
2769 cs.l = 1;
2770 }
2771 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2772 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2773
2774 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2775 if (efer & EFER_LMA) {
2776#ifdef CONFIG_X86_64
2777 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2778
2779 ops->get_msr(ctxt,
2780 ctxt->mode == X86EMUL_MODE_PROT64 ?
2781 MSR_LSTAR : MSR_CSTAR, &msr_data);
2782 ctxt->_eip = msr_data;
2783
2784 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2785 ctxt->eflags &= ~msr_data;
2786 ctxt->eflags |= X86_EFLAGS_FIXED;
2787#endif
2788 } else {
2789 /* legacy mode */
2790 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2791 ctxt->_eip = (u32)msr_data;
2792
2793 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2794 }
2795
2796 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2797 return X86EMUL_CONTINUE;
2798}
2799
2800static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2801{
2802 const struct x86_emulate_ops *ops = ctxt->ops;
2803 struct desc_struct cs, ss;
2804 u64 msr_data;
2805 u16 cs_sel, ss_sel;
2806 u64 efer = 0;
2807
2808 ops->get_msr(ctxt, MSR_EFER, &efer);
2809
2810 if (ctxt->mode == X86EMUL_MODE_REAL)
2811 return emulate_gp(ctxt, 0);
2812
2813 /*
2814 * SYSENTER is #UD in compat mode (EFER.LMA set, not 64-bit) on
2815 * non-Intel CPUs, although it works in legacy mode; follow that.
2816 */
2817 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2818 && !vendor_intel(ctxt))
2819 return emulate_ud(ctxt);
2820
2821 /* SYSENTER emulation is not implemented for 64-bit mode. */
2822 if (ctxt->mode == X86EMUL_MODE_PROT64)
2823 return X86EMUL_UNHANDLEABLE;
2824
2825 setup_syscalls_segments(ctxt, &cs, &ss);
2826
2827 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2828 if ((msr_data & 0xfffc) == 0x0)
2829 return emulate_gp(ctxt, 0);
2830
2831 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2832 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2833 ss_sel = cs_sel + 8;
2834 if (efer & EFER_LMA) {
2835 cs.d = 0;
2836 cs.l = 1;
2837 }
2838
2839 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2840 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2841
2842 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2843 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2844
2845 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2846 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2847 (u32)msr_data;
2848
2849 return X86EMUL_CONTINUE;
2850}
2851
2852static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2853{
2854 const struct x86_emulate_ops *ops = ctxt->ops;
2855 struct desc_struct cs, ss;
2856 u64 msr_data, rcx, rdx;
2857 int usermode;
2858 u16 cs_sel = 0, ss_sel = 0;
2859
2860 /* inject #GP if in real mode or Virtual 8086 mode */
2861 if (ctxt->mode == X86EMUL_MODE_REAL ||
2862 ctxt->mode == X86EMUL_MODE_VM86)
2863 return emulate_gp(ctxt, 0);
2864
2865 setup_syscalls_segments(ctxt, &cs, &ss);
2866
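/* REX.W selects the 64-bit form of SYSEXIT, returning to 64-bit user mode. */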
2867 if ((ctxt->rex_prefix & 0x8) != 0x0)
2868 usermode = X86EMUL_MODE_PROT64;
2869 else
2870 usermode = X86EMUL_MODE_PROT32;
2871
2872 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2873 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2874
2875 cs.dpl = 3;
2876 ss.dpl = 3;
2877 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2878 switch (usermode) {
2879 case X86EMUL_MODE_PROT32:
2880 cs_sel = (u16)(msr_data + 16);
2881 if ((msr_data & 0xfffc) == 0x0)
2882 return emulate_gp(ctxt, 0);
2883 ss_sel = (u16)(msr_data + 24);
2884 rcx = (u32)rcx;
2885 rdx = (u32)rdx;
2886 break;
2887 case X86EMUL_MODE_PROT64:
2888 cs_sel = (u16)(msr_data + 32);
2889 if (msr_data == 0x0)
2890 return emulate_gp(ctxt, 0);
2891 ss_sel = cs_sel + 8;
2892 cs.d = 0;
2893 cs.l = 1;
2894 if (emul_is_noncanonical_address(rcx, ctxt) ||
2895 emul_is_noncanonical_address(rdx, ctxt))
2896 return emulate_gp(ctxt, 0);
2897 break;
2898 }
2899 cs_sel |= SEGMENT_RPL_MASK;
2900 ss_sel |= SEGMENT_RPL_MASK;
2901
2902 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2903 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2904
2905 ctxt->_eip = rdx;
2906 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2907
2908 return X86EMUL_CONTINUE;
2909}
2910
2911static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2912{
2913 int iopl;
2914 if (ctxt->mode == X86EMUL_MODE_REAL)
2915 return false;
2916 if (ctxt->mode == X86EMUL_MODE_VM86)
2917 return true;
2918 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2919 return ctxt->ops->cpl(ctxt) > iopl;
2920}
2921
2922#define VMWARE_PORT_VMPORT (0x5658)
2923#define VMWARE_PORT_VMRPC (0x5659)
2924
2925static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2926 u16 port, u16 len)
2927{
2928 const struct x86_emulate_ops *ops = ctxt->ops;
2929 struct desc_struct tr_seg;
2930 u32 base3;
2931 int r;
2932 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2933 unsigned mask = (1 << len) - 1;
2934 unsigned long base;
2935
2936 /*
2937 * VMware allows access to these ports even if denied
2938 * by TSS I/O permission bitmap. Mimic behavior.
2939 */
2940 if (enable_vmware_backdoor &&
2941 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2942 return true;
2943
2944 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2945 if (!tr_seg.p)
2946 return false;
2947 if (desc_limit_scaled(&tr_seg) < 103)
2948 return false;
2949 base = get_desc_base(&tr_seg);
2950#ifdef CONFIG_X86_64
2951 base |= ((u64)base3) << 32;
2952#endif
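/*
 * The I/O bitmap offset sits at byte 102 of the TSS; the access is allowed
 * only if every bitmap bit covering the 'len' ports starting at 'port' is
 * clear.
 */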
2953 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2954 if (r != X86EMUL_CONTINUE)
2955 return false;
2956 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2957 return false;
2958 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2959 if (r != X86EMUL_CONTINUE)
2960 return false;
2961 if ((perm >> bit_idx) & mask)
2962 return false;
2963 return true;
2964}
2965
2966static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2967 u16 port, u16 len)
2968{
2969 if (ctxt->perm_ok)
2970 return true;
2971
2972 if (emulator_bad_iopl(ctxt))
2973 if (!emulator_io_port_access_allowed(ctxt, port, len))
2974 return false;
2975
2976 ctxt->perm_ok = true;
2977
2978 return true;
2979}
2980
2981static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2982{
2983 /*
2984 * Intel CPUs mask the counter and pointers in quite strange
2985 * manner when ECX is zero due to REP-string optimizations.
2986 */
2987#ifdef CONFIG_X86_64
2988 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2989 return;
2990
2991 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2992
2993 switch (ctxt->b) {
2994 case 0xa4:
2995 case 0xa5:
2996 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2997 /* fall through */
2998 case 0xaa:
2999 case 0xab:
3000 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
3001 }
3002#endif
3003}
3004
3005static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
3006 struct tss_segment_16 *tss)
3007{
3008 tss->ip = ctxt->_eip;
3009 tss->flag = ctxt->eflags;
3010 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3011 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3012 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3013 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3014 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3015 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3016 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3017 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3018
3019 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3020 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3021 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3022 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3023 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3024}
3025
3026static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3027 struct tss_segment_16 *tss)
3028{
3029 int ret;
3030 u8 cpl;
3031
3032 ctxt->_eip = tss->ip;
3033 ctxt->eflags = tss->flag | 2;
3034 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3035 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3036 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3037 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3038 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3039 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3040 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3041 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3042
3043 /*
3044 * SDM says that segment selectors are loaded before segment
3045 * descriptors.
3046 */
3047 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3048 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3049 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3050 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3051 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3052
3053 cpl = tss->cs & 3;
3054 /*
3055 * Now load segment descriptors. If a fault happens at this stage
3056 * it is handled in the context of the new task.
3057 */
3058
3059 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3060 X86_TRANSFER_TASK_SWITCH, NULL);
3061 if (ret != X86EMUL_CONTINUE)
3062 return ret;
3063 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3064 X86_TRANSFER_TASK_SWITCH, NULL);
3065 if (ret != X86EMUL_CONTINUE)
3066 return ret;
3067 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3068 X86_TRANSFER_TASK_SWITCH, NULL);
3069 if (ret != X86EMUL_CONTINUE)
3070 return ret;
3071 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3072 X86_TRANSFER_TASK_SWITCH, NULL);
3073 if (ret != X86EMUL_CONTINUE)
3074 return ret;
3075 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3076 X86_TRANSFER_TASK_SWITCH, NULL);
3077 if (ret != X86EMUL_CONTINUE)
3078 return ret;
3079
3080 return X86EMUL_CONTINUE;
3081}
3082
3083static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3084 u16 tss_selector, u16 old_tss_sel,
3085 ulong old_tss_base, struct desc_struct *new_desc)
3086{
3087 struct tss_segment_16 tss_seg;
3088 int ret;
3089 u32 new_tss_base = get_desc_base(new_desc);
3090
3091 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3092 if (ret != X86EMUL_CONTINUE)
3093 return ret;
3094
3095 save_state_to_tss16(ctxt, &tss_seg);
3096
3097 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3098 if (ret != X86EMUL_CONTINUE)
3099 return ret;
3100
3101 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3102 if (ret != X86EMUL_CONTINUE)
3103 return ret;
3104
3105 if (old_tss_sel != 0xffff) {
3106 tss_seg.prev_task_link = old_tss_sel;
3107
3108 ret = linear_write_system(ctxt, new_tss_base,
3109 &tss_seg.prev_task_link,
3110 sizeof(tss_seg.prev_task_link));
3111 if (ret != X86EMUL_CONTINUE)
3112 return ret;
3113 }
3114
3115 return load_state_from_tss16(ctxt, &tss_seg);
3116}
3117
3118static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3119 struct tss_segment_32 *tss)
3120{
3121 /* CR3 and the LDT selector are deliberately not saved here. */
3122 tss->eip = ctxt->_eip;
3123 tss->eflags = ctxt->eflags;
3124 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3125 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3126 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3127 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3128 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3129 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3130 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3131 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3132
3133 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3134 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3135 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3136 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3137 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3138 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3139}
3140
3141static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3142 struct tss_segment_32 *tss)
3143{
3144 int ret;
3145 u8 cpl;
3146
3147 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3148 return emulate_gp(ctxt, 0);
3149 ctxt->_eip = tss->eip;
3150 ctxt->eflags = tss->eflags | 2;
3151
3152 /* General-purpose registers */
3153 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3154 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3155 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3156 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3157 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3158 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3159 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3160 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3161
3162 /*
3163 * SDM says that segment selectors are loaded before segment
3164 * descriptors. This is important because CPL checks will
3165 * use CS.RPL.
3166 */
3167 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3168 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3169 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3170 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3171 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3172 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3173 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3174
3175 /*
3176 * If we're switching between Protected Mode and VM86, we need to make
3177 * sure to update the mode before loading the segment descriptors so
3178 * that the selectors are interpreted correctly.
3179 */
3180 if (ctxt->eflags & X86_EFLAGS_VM) {
3181 ctxt->mode = X86EMUL_MODE_VM86;
3182 cpl = 3;
3183 } else {
3184 ctxt->mode = X86EMUL_MODE_PROT32;
3185 cpl = tss->cs & 3;
3186 }
3187
3188 /*
3189 * Now load segment descriptors. If a fault happens at this stage
3190 * it is handled in the context of the new task.
3191 */
3192 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3193 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3194 if (ret != X86EMUL_CONTINUE)
3195 return ret;
3196 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3197 X86_TRANSFER_TASK_SWITCH, NULL);
3198 if (ret != X86EMUL_CONTINUE)
3199 return ret;
3200 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3201 X86_TRANSFER_TASK_SWITCH, NULL);
3202 if (ret != X86EMUL_CONTINUE)
3203 return ret;
3204 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3205 X86_TRANSFER_TASK_SWITCH, NULL);
3206 if (ret != X86EMUL_CONTINUE)
3207 return ret;
3208 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3209 X86_TRANSFER_TASK_SWITCH, NULL);
3210 if (ret != X86EMUL_CONTINUE)
3211 return ret;
3212 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3213 X86_TRANSFER_TASK_SWITCH, NULL);
3214 if (ret != X86EMUL_CONTINUE)
3215 return ret;
3216 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3217 X86_TRANSFER_TASK_SWITCH, NULL);
3218
3219 return ret;
3220}
3221
3222static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3223 u16 tss_selector, u16 old_tss_sel,
3224 ulong old_tss_base, struct desc_struct *new_desc)
3225{
3226 struct tss_segment_32 tss_seg;
3227 int ret;
3228 u32 new_tss_base = get_desc_base(new_desc);
3229 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3230 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3231
3232 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3233 if (ret != X86EMUL_CONTINUE)
3234 return ret;
3235
3236 save_state_to_tss32(ctxt, &tss_seg);
3237
3238 /* Write back only the dynamic fields: EIP through the segment selectors. */
3239 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3240 ldt_sel_offset - eip_offset);
3241 if (ret != X86EMUL_CONTINUE)
3242 return ret;
3243
3244 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3245 if (ret != X86EMUL_CONTINUE)
3246 return ret;
3247
3248 if (old_tss_sel != 0xffff) {
3249 tss_seg.prev_task_link = old_tss_sel;
3250
3251 ret = linear_write_system(ctxt, new_tss_base,
3252 &tss_seg.prev_task_link,
3253 sizeof(tss_seg.prev_task_link));
3254 if (ret != X86EMUL_CONTINUE)
3255 return ret;
3256 }
3257
3258 return load_state_from_tss32(ctxt, &tss_seg);
3259}
3260
3261static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3262 u16 tss_selector, int idt_index, int reason,
3263 bool has_error_code, u32 error_code)
3264{
3265 const struct x86_emulate_ops *ops = ctxt->ops;
3266 struct desc_struct curr_tss_desc, next_tss_desc;
3267 int ret;
3268 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3269 ulong old_tss_base =
3270 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3271 u32 desc_limit;
3272 ulong desc_addr, dr7;
3273
3274
3275
3276 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3277 if (ret != X86EMUL_CONTINUE)
3278 return ret;
3279 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3280 if (ret != X86EMUL_CONTINUE)
3281 return ret;
3282
3283
3284 /*
3285 * Privilege checks: only a task switch through a task gate reached
3286 * via INT n (idt_index != -1) is checked here, against the DPL of
3287 * the gate. Switches caused by IRET, or by JMP/CALL directly to a
3288 * TSS or task gate, are either architecturally unchecked or were
3289 * already checked by hardware before the exit that got us here.
3290 */
3291
3292
3293 if (reason == TASK_SWITCH_GATE) {
3294 if (idt_index != -1) {
3295 /* Task gate reached via INT n: check the DPL of the gate. */
3296 struct desc_struct task_gate_desc;
3297 int dpl;
3298
3299 ret = read_interrupt_descriptor(ctxt, idt_index,
3300 &task_gate_desc);
3301 if (ret != X86EMUL_CONTINUE)
3302 return ret;
3303
3304 dpl = task_gate_desc.dpl;
3305 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3306 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3307 }
3308 }
3309
3310 desc_limit = desc_limit_scaled(&next_tss_desc);
3311 if (!next_tss_desc.p ||
3312 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3313 desc_limit < 0x2b)) {
3314 return emulate_ts(ctxt, tss_selector & 0xfffc);
3315 }
3316
3317 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3318 curr_tss_desc.type &= ~(1 << 1);
3319 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3320 }
3321
3322 if (reason == TASK_SWITCH_IRET)
3323 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3324
3325 /* Set a back link to the previous task only for CALL and gate-based
3326 switches; otherwise old_tss_sel is marked unused (0xffff). */
3327 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3328 old_tss_sel = 0xffff;
3329
3330 if (next_tss_desc.type & 8)
3331 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3332 old_tss_base, &next_tss_desc);
3333 else
3334 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3335 old_tss_base, &next_tss_desc);
3336 if (ret != X86EMUL_CONTINUE)
3337 return ret;
3338
3339 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3340 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3341
3342 if (reason != TASK_SWITCH_IRET) {
3343 next_tss_desc.type |= (1 << 1);
3344 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3345 }
3346
3347 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3348 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3349
3350 if (has_error_code) {
3351 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3352 ctxt->lock_prefix = 0;
3353 ctxt->src.val = (unsigned long) error_code;
3354 ret = em_push(ctxt);
3355 }
3356
3357 ops->get_dr(ctxt, 7, &dr7);
3358 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3359
3360 return ret;
3361}
3362
3363int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3364 u16 tss_selector, int idt_index, int reason,
3365 bool has_error_code, u32 error_code)
3366{
3367 int rc;
3368
3369 invalidate_registers(ctxt);
3370 ctxt->_eip = ctxt->eip;
3371 ctxt->dst.type = OP_NONE;
3372
3373 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3374 has_error_code, error_code);
3375
3376 if (rc == X86EMUL_CONTINUE) {
3377 ctxt->eip = ctxt->_eip;
3378 writeback_registers(ctxt);
3379 }
3380
3381 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3382}
3383
3384static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3385 struct operand *op)
3386{
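/* EFLAGS.DF selects whether string operations advance or retreat through memory. */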
3387 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3388
3389 register_address_increment(ctxt, reg, df * op->bytes);
3390 op->addr.mem.ea = register_address(ctxt, reg);
3391}
3392
3393static int em_das(struct x86_emulate_ctxt *ctxt)
3394{
3395 u8 al, old_al;
3396 bool af, cf, old_cf;
3397
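/*
 * DAS: decimal-adjust AL after a packed-BCD subtraction. Subtract 6 if the
 * low nibble exceeds 9 or AF was set, then 0x60 if the original AL exceeded
 * 0x99 or CF was set, updating AF and CF to match.
 */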
3398 cf = ctxt->eflags & X86_EFLAGS_CF;
3399 al = ctxt->dst.val;
3400
3401 old_al = al;
3402 old_cf = cf;
3403 cf = false;
3404 af = ctxt->eflags & X86_EFLAGS_AF;
3405 if ((al & 0x0f) > 9 || af) {
3406 al -= 6;
3407 cf = old_cf | (al >= 250);
3408 af = true;
3409 } else {
3410 af = false;
3411 }
3412 if (old_al > 0x99 || old_cf) {
3413 al -= 0x60;
3414 cf = true;
3415 }
3416
3417 ctxt->dst.val = al;
3418 /* Set PF, ZF, SF */
3419 ctxt->src.type = OP_IMM;
3420 ctxt->src.val = 0;
3421 ctxt->src.bytes = 1;
3422 fastop(ctxt, em_or);
3423 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3424 if (cf)
3425 ctxt->eflags |= X86_EFLAGS_CF;
3426 if (af)
3427 ctxt->eflags |= X86_EFLAGS_AF;
3428 return X86EMUL_CONTINUE;
3429}
3430
3431static int em_aam(struct x86_emulate_ctxt *ctxt)
3432{
3433 u8 al, ah;
3434
3435 if (ctxt->src.val == 0)
3436 return emulate_de(ctxt);
3437
3438 al = ctxt->dst.val & 0xff;
3439 ah = al / ctxt->src.val;
3440 al %= ctxt->src.val;
3441
3442 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3443
3444 /* Set PF, ZF, SF */
3445 ctxt->src.type = OP_IMM;
3446 ctxt->src.val = 0;
3447 ctxt->src.bytes = 1;
3448 fastop(ctxt, em_or);
3449
3450 return X86EMUL_CONTINUE;
3451}
3452
3453static int em_aad(struct x86_emulate_ctxt *ctxt)
3454{
3455 u8 al = ctxt->dst.val & 0xff;
3456 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3457
3458 al = (al + (ah * ctxt->src.val)) & 0xff;
3459
3460 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3461
3462 /* Set PF, ZF, SF */
3463 ctxt->src.type = OP_IMM;
3464 ctxt->src.val = 0;
3465 ctxt->src.bytes = 1;
3466 fastop(ctxt, em_or);
3467
3468 return X86EMUL_CONTINUE;
3469}
3470
3471static int em_call(struct x86_emulate_ctxt *ctxt)
3472{
3473 int rc;
3474 long rel = ctxt->src.val;
3475
3476 ctxt->src.val = (unsigned long)ctxt->_eip;
3477 rc = jmp_rel(ctxt, rel);
3478 if (rc != X86EMUL_CONTINUE)
3479 return rc;
3480 return em_push(ctxt);
3481}
3482
3483static int em_call_far(struct x86_emulate_ctxt *ctxt)
3484{
3485 u16 sel, old_cs;
3486 ulong old_eip;
3487 int rc;
3488 struct desc_struct old_desc, new_desc;
3489 const struct x86_emulate_ops *ops = ctxt->ops;
3490 int cpl = ctxt->ops->cpl(ctxt);
3491 enum x86emul_mode prev_mode = ctxt->mode;
3492
3493 old_eip = ctxt->_eip;
3494 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3495
3496 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3497 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3498 X86_TRANSFER_CALL_JMP, &new_desc);
3499 if (rc != X86EMUL_CONTINUE)
3500 return rc;
3501
3502 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3503 if (rc != X86EMUL_CONTINUE)
3504 goto fail;
3505
3506 ctxt->src.val = old_cs;
3507 rc = em_push(ctxt);
3508 if (rc != X86EMUL_CONTINUE)
3509 goto fail;
3510
3511 ctxt->src.val = old_eip;
3512 rc = em_push(ctxt);
3513
3514
3515 if (rc != X86EMUL_CONTINUE) {
3516 pr_warn_once("faulting far call emulation tainted memory\n");
3517 goto fail;
3518 }
3519 return rc;
3520fail:
3521 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3522 ctxt->mode = prev_mode;
3523 return rc;
3524
3525}
3526
3527static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3528{
3529 int rc;
3530 unsigned long eip;
3531
3532 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3533 if (rc != X86EMUL_CONTINUE)
3534 return rc;
3535 rc = assign_eip_near(ctxt, eip);
3536 if (rc != X86EMUL_CONTINUE)
3537 return rc;
3538 rsp_increment(ctxt, ctxt->src.val);
3539 return X86EMUL_CONTINUE;
3540}
3541
3542static int em_xchg(struct x86_emulate_ctxt *ctxt)
3543{
3544 /* Write back the register source. */
3545 ctxt->src.val = ctxt->dst.val;
3546 write_register_operand(&ctxt->src);
3547
3548 /* Write back the memory destination with implicit LOCK prefix. */
3549 ctxt->dst.val = ctxt->src.orig_val;
3550 ctxt->lock_prefix = 1;
3551 return X86EMUL_CONTINUE;
3552}
3553
3554static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3555{
3556 ctxt->dst.val = ctxt->src2.val;
3557 return fastop(ctxt, em_imul);
3558}
3559
3560static int em_cwd(struct x86_emulate_ctxt *ctxt)
3561{
3562 ctxt->dst.type = OP_REG;
3563 ctxt->dst.bytes = ctxt->src.bytes;
3564 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
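/*
 * CWD/CDQ/CQO: fill RDX with the sign of the source. (sign_bit - 1) is zero
 * for negative values and all-ones otherwise; inverting it therefore yields
 * all-ones for negative sources and zero for non-negative ones.
 */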
3565 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3566
3567 return X86EMUL_CONTINUE;
3568}
3569
3570static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3571{
3572 u64 tsc_aux = 0;
3573
3574 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3575 return emulate_gp(ctxt, 0);
3576 ctxt->dst.val = tsc_aux;
3577 return X86EMUL_CONTINUE;
3578}
3579
3580static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3581{
3582 u64 tsc = 0;
3583
3584 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3585 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3586 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3587 return X86EMUL_CONTINUE;
3588}
3589
3590static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3591{
3592 u64 pmc;
3593
3594 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3595 return emulate_gp(ctxt, 0);
3596 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3597 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3598 return X86EMUL_CONTINUE;
3599}
3600
3601static int em_mov(struct x86_emulate_ctxt *ctxt)
3602{
3603 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3604 return X86EMUL_CONTINUE;
3605}
3606
3607#define FFL(x) bit(X86_FEATURE_##x)
3608
3609static int em_movbe(struct x86_emulate_ctxt *ctxt)
3610{
3611 u32 ebx, ecx, edx, eax = 1;
3612 u16 tmp;
3613
3614 /*
3615 * Check that MOVBE is set in the guest-visible CPUID leaf.
3616 */
3617 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3618 if (!(ecx & FFL(MOVBE)))
3619 return emulate_ud(ctxt);
3620
3621 switch (ctxt->op_bytes) {
3622 case 2:
3623 /*
3624 * From the MOVBE definition: "...When the operand size is 16 bits,
3625 * the upper word of the destination register remains unchanged
3626 * ..."
3627 *
3628 * Casting both ->valptr and ->val to u16 would break strict-aliasing
3629 * rules, so the 16-bit case is done almost by hand.
3630 */
3631 tmp = (u16)ctxt->src.val;
3632 ctxt->dst.val &= ~0xffffUL;
3633 ctxt->dst.val |= (unsigned long)swab16(tmp);
3634 break;
3635 case 4:
3636 ctxt->dst.val = swab32((u32)ctxt->src.val);
3637 break;
3638 case 8:
3639 ctxt->dst.val = swab64(ctxt->src.val);
3640 break;
3641 default:
3642 BUG();
3643 }
3644 return X86EMUL_CONTINUE;
3645}
3646
3647static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3648{
3649 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3650 return emulate_gp(ctxt, 0);
3651
3652 /* Disable writeback. */
3653 ctxt->dst.type = OP_NONE;
3654 return X86EMUL_CONTINUE;
3655}
3656
3657static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3658{
3659 unsigned long val;
3660
3661 if (ctxt->mode == X86EMUL_MODE_PROT64)
3662 val = ctxt->src.val & ~0ULL;
3663 else
3664 val = ctxt->src.val & ~0U;
3665
3666 /* The #UD condition has already been handled during decode. */
3667 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3668 return emulate_gp(ctxt, 0);
3669
3670 /* Disable writeback. */
3671 ctxt->dst.type = OP_NONE;
3672 return X86EMUL_CONTINUE;
3673}
3674
3675static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3676{
3677 u64 msr_data;
3678
3679 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3680 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3681 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3682 return emulate_gp(ctxt, 0);
3683
3684 return X86EMUL_CONTINUE;
3685}
3686
3687static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3688{
3689 u64 msr_data;
3690
3691 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3692 return emulate_gp(ctxt, 0);
3693
3694 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3695 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3696 return X86EMUL_CONTINUE;
3697}
3698
3699static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3700{
3701 if (segment > VCPU_SREG_GS &&
3702 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3703 ctxt->ops->cpl(ctxt) > 0)
3704 return emulate_gp(ctxt, 0);
3705
3706 ctxt->dst.val = get_segment_selector(ctxt, segment);
3707 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3708 ctxt->dst.bytes = 2;
3709 return X86EMUL_CONTINUE;
3710}
3711
3712static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3713{
3714 if (ctxt->modrm_reg > VCPU_SREG_GS)
3715 return emulate_ud(ctxt);
3716
3717 return em_store_sreg(ctxt, ctxt->modrm_reg);
3718}
3719
3720static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3721{
3722 u16 sel = ctxt->src.val;
3723
3724 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3725 return emulate_ud(ctxt);
3726
3727 if (ctxt->modrm_reg == VCPU_SREG_SS)
3728 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3729
3730 /* Disable writeback. */
3731 ctxt->dst.type = OP_NONE;
3732 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3733}
3734
3735static int em_sldt(struct x86_emulate_ctxt *ctxt)
3736{
3737 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3738}
3739
3740static int em_lldt(struct x86_emulate_ctxt *ctxt)
3741{
3742 u16 sel = ctxt->src.val;
3743
3744 /* Disable writeback. */
3745 ctxt->dst.type = OP_NONE;
3746 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3747}
3748
3749static int em_str(struct x86_emulate_ctxt *ctxt)
3750{
3751 return em_store_sreg(ctxt, VCPU_SREG_TR);
3752}
3753
3754static int em_ltr(struct x86_emulate_ctxt *ctxt)
3755{
3756 u16 sel = ctxt->src.val;
3757
3758 /* Disable writeback. */
3759 ctxt->dst.type = OP_NONE;
3760 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3761}
3762
3763static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3764{
3765 int rc;
3766 ulong linear;
3767
3768 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3769 if (rc == X86EMUL_CONTINUE)
3770 ctxt->ops->invlpg(ctxt, linear);
3771 /* Disable writeback. */
3772 ctxt->dst.type = OP_NONE;
3773 return X86EMUL_CONTINUE;
3774}
3775
3776static int em_clts(struct x86_emulate_ctxt *ctxt)
3777{
3778 ulong cr0;
3779
3780 cr0 = ctxt->ops->get_cr(ctxt, 0);
3781 cr0 &= ~X86_CR0_TS;
3782 ctxt->ops->set_cr(ctxt, 0, cr0);
3783 return X86EMUL_CONTINUE;
3784}
3785
3786static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3787{
3788 int rc = ctxt->ops->fix_hypercall(ctxt);
3789
3790 if (rc != X86EMUL_CONTINUE)
3791 return rc;
3792
3793
3794 ctxt->_eip = ctxt->eip;
3795
3796 ctxt->dst.type = OP_NONE;
3797 return X86EMUL_CONTINUE;
3798}
3799
3800static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3801 void (*get)(struct x86_emulate_ctxt *ctxt,
3802 struct desc_ptr *ptr))
3803{
3804 struct desc_ptr desc_ptr;
3805
3806 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3807 ctxt->ops->cpl(ctxt) > 0)
3808 return emulate_gp(ctxt, 0);
3809
3810 if (ctxt->mode == X86EMUL_MODE_PROT64)
3811 ctxt->op_bytes = 8;
3812 get(ctxt, &desc_ptr);
3813 if (ctxt->op_bytes == 2) {
3814 ctxt->op_bytes = 4;
3815 desc_ptr.address &= 0x00ffffff;
3816 }
3817
3818 ctxt->dst.type = OP_NONE;
3819 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3820 &desc_ptr, 2 + ctxt->op_bytes);
3821}
3822
3823static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3824{
3825 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3826}
3827
3828static int em_sidt(struct x86_emulate_ctxt *ctxt)
3829{
3830 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3831}
3832
3833static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3834{
3835 struct desc_ptr desc_ptr;
3836 int rc;
3837
3838 if (ctxt->mode == X86EMUL_MODE_PROT64)
3839 ctxt->op_bytes = 8;
3840 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3841 &desc_ptr.size, &desc_ptr.address,
3842 ctxt->op_bytes);
3843 if (rc != X86EMUL_CONTINUE)
3844 return rc;
3845 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3846 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3847 return emulate_gp(ctxt, 0);
3848 if (lgdt)
3849 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3850 else
3851 ctxt->ops->set_idt(ctxt, &desc_ptr);
3852
3853 ctxt->dst.type = OP_NONE;
3854 return X86EMUL_CONTINUE;
3855}
3856
3857static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3858{
3859 return em_lgdt_lidt(ctxt, true);
3860}
3861
3862static int em_lidt(struct x86_emulate_ctxt *ctxt)
3863{
3864 return em_lgdt_lidt(ctxt, false);
3865}
3866
3867static int em_smsw(struct x86_emulate_ctxt *ctxt)
3868{
3869 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3870 ctxt->ops->cpl(ctxt) > 0)
3871 return emulate_gp(ctxt, 0);
3872
3873 if (ctxt->dst.type == OP_MEM)
3874 ctxt->dst.bytes = 2;
3875 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3876 return X86EMUL_CONTINUE;
3877}
3878
3879static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3880{
3881 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3882 | (ctxt->src.val & 0x0f));
3883 ctxt->dst.type = OP_NONE;
3884 return X86EMUL_CONTINUE;
3885}
3886
3887static int em_loop(struct x86_emulate_ctxt *ctxt)
3888{
3889 int rc = X86EMUL_CONTINUE;
3890
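/*
 * LOOP/LOOPE/LOOPNE: decrement (r/e)CX and branch while it is non-zero.
 * For 0xe0/0xe1 the ZF condition is tested by XORing the opcode with 5,
 * which maps LOOPNE/LOOPE onto the standard NZ/Z condition codes.
 */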
3891 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3892 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3893 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3894 rc = jmp_rel(ctxt, ctxt->src.val);
3895
3896 return rc;
3897}
3898
3899static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3900{
3901 int rc = X86EMUL_CONTINUE;
3902
3903 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3904 rc = jmp_rel(ctxt, ctxt->src.val);
3905
3906 return rc;
3907}
3908
3909static int em_in(struct x86_emulate_ctxt *ctxt)
3910{
3911 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3912 &ctxt->dst.val))
3913 return X86EMUL_IO_NEEDED;
3914
3915 return X86EMUL_CONTINUE;
3916}
3917
3918static int em_out(struct x86_emulate_ctxt *ctxt)
3919{
3920 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3921 &ctxt->src.val, 1);
3922
3923 ctxt->dst.type = OP_NONE;
3924 return X86EMUL_CONTINUE;
3925}
3926
3927static int em_cli(struct x86_emulate_ctxt *ctxt)
3928{
3929 if (emulator_bad_iopl(ctxt))
3930 return emulate_gp(ctxt, 0);
3931
3932 ctxt->eflags &= ~X86_EFLAGS_IF;
3933 return X86EMUL_CONTINUE;
3934}
3935
3936static int em_sti(struct x86_emulate_ctxt *ctxt)
3937{
3938 if (emulator_bad_iopl(ctxt))
3939 return emulate_gp(ctxt, 0);
3940
3941 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3942 ctxt->eflags |= X86_EFLAGS_IF;
3943 return X86EMUL_CONTINUE;
3944}
3945
3946static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3947{
3948 u32 eax, ebx, ecx, edx;
3949 u64 msr = 0;
3950
3951 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3952 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3953 ctxt->ops->cpl(ctxt)) {
3954 return emulate_gp(ctxt, 0);
3955 }
3956
3957 eax = reg_read(ctxt, VCPU_REGS_RAX);
3958 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3959 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3960 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3961 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3962 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3963 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3964 return X86EMUL_CONTINUE;
3965}
3966
3967static int em_sahf(struct x86_emulate_ctxt *ctxt)
3968{
3969 u32 flags;
3970
3971 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3972 X86_EFLAGS_SF;
3973 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3974
3975 ctxt->eflags &= ~0xffUL;
3976 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3977 return X86EMUL_CONTINUE;
3978}
3979
3980static int em_lahf(struct x86_emulate_ctxt *ctxt)
3981{
3982 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3983 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3984 return X86EMUL_CONTINUE;
3985}
3986
3987static int em_bswap(struct x86_emulate_ctxt *ctxt)
3988{
3989 switch (ctxt->op_bytes) {
3990#ifdef CONFIG_X86_64
3991 case 8:
3992 asm("bswap %0" : "+r"(ctxt->dst.val));
3993 break;
3994#endif
3995 default:
3996 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3997 break;
3998 }
3999 return X86EMUL_CONTINUE;
4000}
4001
4002static int em_clflush(struct x86_emulate_ctxt *ctxt)
4003{
4004 /* Emulate CLFLUSH as a no-op, regardless of CPUID. */
4005 return X86EMUL_CONTINUE;
4006}
4007
4008static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4009{
4010 ctxt->dst.val = (s32) ctxt->src.val;
4011 return X86EMUL_CONTINUE;
4012}
4013
4014static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4015{
4016 u32 eax = 1, ebx, ecx = 0, edx;
4017
4018 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4019 if (!(edx & FFL(FXSR)))
4020 return emulate_ud(ctxt);
4021
4022 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4023 return emulate_nm(ctxt);
4024
4025 /*
4026 * Emulating FXSAVE/FXRSTOR from 64-bit mode would need the
4027 * fxsave64/fxrstor64 forms; that case should not be hit, so punt.
4028 */
4029 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4030 return X86EMUL_UNHANDLEABLE;
4031
4032 return X86EMUL_CONTINUE;
4033}
4034
4035
4036
4037
4038
4039static size_t __fxstate_size(int nregs)
4040{
4041 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4042}
4043
4044static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4045{
4046 bool cr4_osfxsr;
4047 if (ctxt->mode == X86EMUL_MODE_PROT64)
4048 return __fxstate_size(16);
4049
4050 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4051 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4052}
4053
4054/*
4055 * FXSAVE and FXRSTOR image layouts differ between 16/32-bit and 64-bit
4056 * mode, and in 64-bit mode they additionally depend on REX.W.
4057 * check_fxsr() only lets non-64-bit guest modes reach these handlers,
4058 * so a single host fxsave/fxrstor is enough to emulate them.
4059 *
4060 * em_fxsave() copies out only fxstate_size() bytes: the legacy
4061 * x87/MXCSR area, plus XMM0-7 when CR4.OSFXSR is set.
4062 *
4063 * em_fxrstor() reads the same truncated image back and, when it is
4064 * shorter than a full struct fxregs_state, fills the tail with the
4065 * host's current values (fxregs_fixup()) so that FXRSTOR leaves the
4066 * registers the guest image did not cover unchanged.
4067 *
4068 * Opcodes: 0f ae /0 FXSAVE
4069 *          0f ae /1 FXRSTOR
4070 */
4071
4072static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4073{
4074 struct fxregs_state fx_state;
4075 int rc;
4076
4077 rc = check_fxsr(ctxt);
4078 if (rc != X86EMUL_CONTINUE)
4079 return rc;
4080
4081 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4082
4083 if (rc != X86EMUL_CONTINUE)
4084 return rc;
4085
4086 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4087 fxstate_size(ctxt));
4088}
4089
4090/*
4091 * The guest image may be shorter than a full struct fxregs_state (no
4092 * XMM registers, or only XMM0-7). Fill the remainder from a fresh
4093 * FXSAVE of the current register values so that the caller's FXRSTOR
4094 * leaves those registers unchanged. noinline keeps the second
4095 * fxregs_state buffer off the callers' stack.
4096 */
4097static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4098 const size_t used_size)
4099{
4100 struct fxregs_state fx_tmp;
4101 int rc;
4102
4103 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4104 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4105 __fxstate_size(16) - used_size);
4106
4107 return rc;
4108}
4109
4110static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4111{
4112 struct fxregs_state fx_state;
4113 int rc;
4114 size_t size;
4115
4116 rc = check_fxsr(ctxt);
4117 if (rc != X86EMUL_CONTINUE)
4118 return rc;
4119
4120 size = fxstate_size(ctxt);
4121 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4122 if (rc != X86EMUL_CONTINUE)
4123 return rc;
4124
4125 if (size < __fxstate_size(16)) {
4126 rc = fxregs_fixup(&fx_state, size);
4127 if (rc != X86EMUL_CONTINUE)
4128 goto out;
4129 }
4130
4131 if (fx_state.mxcsr >> 16) {
4132 rc = emulate_gp(ctxt, 0);
4133 goto out;
4134 }
4135
4136 if (rc == X86EMUL_CONTINUE)
4137 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4138
4139out:
4140 return rc;
4141}
4142
4143static bool valid_cr(int nr)
4144{
4145 switch (nr) {
4146 case 0:
4147 case 2 ... 4:
4148 case 8:
4149 return true;
4150 default:
4151 return false;
4152 }
4153}
4154
4155static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4156{
4157 if (!valid_cr(ctxt->modrm_reg))
4158 return emulate_ud(ctxt);
4159
4160 return X86EMUL_CONTINUE;
4161}
4162
4163static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4164{
4165 u64 new_val = ctxt->src.val64;
4166 int cr = ctxt->modrm_reg;
4167 u64 efer = 0;
4168
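/* Reserved-bit masks indexed by CR number; setting any of these bits #GPs. */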
4169 static u64 cr_reserved_bits[] = {
4170 0xffffffff00000000ULL,
4171 0, 0, 0,
4172 CR4_RESERVED_BITS,
4173 0, 0, 0,
4174 CR8_RESERVED_BITS,
4175 };
4176
4177 if (!valid_cr(cr))
4178 return emulate_ud(ctxt);
4179
4180 if (new_val & cr_reserved_bits[cr])
4181 return emulate_gp(ctxt, 0);
4182
4183 switch (cr) {
4184 case 0: {
4185 u64 cr4;
4186 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4187 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4188 return emulate_gp(ctxt, 0);
4189
4190 cr4 = ctxt->ops->get_cr(ctxt, 4);
4191 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4192
4193 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4194 !(cr4 & X86_CR4_PAE))
4195 return emulate_gp(ctxt, 0);
4196
4197 break;
4198 }
4199 case 3: {
4200 u64 rsvd = 0;
4201
4202 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4203 if (efer & EFER_LMA) {
4204 u64 maxphyaddr;
4205 u32 eax, ebx, ecx, edx;
4206
4207 eax = 0x80000008;
4208 ecx = 0;
4209 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4210 &edx, false))
4211 maxphyaddr = eax & 0xff;
4212 else
4213 maxphyaddr = 36;
4214 rsvd = rsvd_bits(maxphyaddr, 63);
4215 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4216 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4217 }
4218
4219 if (new_val & rsvd)
4220 return emulate_gp(ctxt, 0);
4221
4222 break;
4223 }
4224 case 4: {
4225 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4226
4227 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4228 return emulate_gp(ctxt, 0);
4229
4230 break;
4231 }
4232 }
4233
4234 return X86EMUL_CONTINUE;
4235}
4236
4237static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4238{
4239 unsigned long dr7;
4240
4241 ctxt->ops->get_dr(ctxt, 7, &dr7);
4242
4243 /* DR7.GD (general-detect enable) is bit 13. */
4244 return dr7 & (1 << 13);
4245}
4246
4247static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4248{
4249 int dr = ctxt->modrm_reg;
4250 u64 cr4;
4251
4252 if (dr > 7)
4253 return emulate_ud(ctxt);
4254
4255 cr4 = ctxt->ops->get_cr(ctxt, 4);
4256 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4257 return emulate_ud(ctxt);
4258
4259 if (check_dr7_gd(ctxt)) {
4260 ulong dr6;
4261
4262 ctxt->ops->get_dr(ctxt, 6, &dr6);
4263 dr6 &= ~15;
4264 dr6 |= DR6_BD | DR6_RTM;
4265 ctxt->ops->set_dr(ctxt, 6, dr6);
4266 return emulate_db(ctxt);
4267 }
4268
4269 return X86EMUL_CONTINUE;
4270}
4271
4272static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4273{
4274 u64 new_val = ctxt->src.val64;
4275 int dr = ctxt->modrm_reg;
4276
4277 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4278 return emulate_gp(ctxt, 0);
4279
4280 return check_dr_read(ctxt);
4281}
4282
4283static int check_svme(struct x86_emulate_ctxt *ctxt)
4284{
4285 u64 efer = 0;
4286
4287 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4288
4289 if (!(efer & EFER_SVME))
4290 return emulate_ud(ctxt);
4291
4292 return X86EMUL_CONTINUE;
4293}
4294
4295static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4296{
4297 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4298
4299
4300 if (rax & 0xffff000000000000ULL)
4301 return emulate_gp(ctxt, 0);
4302
4303 return check_svme(ctxt);
4304}
4305
4306static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4307{
4308 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4309
4310 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4311 return emulate_ud(ctxt);
4312
4313 return X86EMUL_CONTINUE;
4314}
4315
4316static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4317{
4318 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4319 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4320
4321 /*
4322 * VMware allows reads of these pseudo-PMCs via RDPMC even at CPL 3
4323 * with CR4.PCE clear, so skip the architectural checks for them.
4324 */
4325 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4326 return X86EMUL_CONTINUE;
4327
4328 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4329 ctxt->ops->check_pmc(ctxt, rcx))
4330 return emulate_gp(ctxt, 0);
4331
4332 return X86EMUL_CONTINUE;
4333}
4334
4335static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4336{
4337 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4338 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4339 return emulate_gp(ctxt, 0);
4340
4341 return X86EMUL_CONTINUE;
4342}
4343
4344static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4345{
4346 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4347 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4348 return emulate_gp(ctxt, 0);
4349
4350 return X86EMUL_CONTINUE;
4351}
4352
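/*
 * Opcode-table constructors: D() carries decode flags only, N marks a slot
 * as not implemented, I()/F() attach an em_*() or fastop handler, II()/DI()
 * add an intercept and the *IP() variants a permission check, while
 * G/GD/EXT/E/ID/MD/GP redirect decode into group, dual, escape or
 * mandatory-prefix sub-tables. The 2bv macros emit a ByteOp entry followed
 * by the word/long form.
 */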
4353#define D(_y) { .flags = (_y) }
4354#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4355#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4356 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4357#define N D(NotImpl)
4358#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4359#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4360#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4361#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4362#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4363#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4364#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4365#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4366#define II(_f, _e, _i) \
4367 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4368#define IIP(_f, _e, _i, _p) \
4369 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4370 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4371#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4372
4373#define D2bv(_f) D((_f) | ByteOp), D(_f)
4374#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4375#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4376#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4377#define I2bvIP(_f, _e, _i, _p) \
4378 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4379
4380#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4381 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4382 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4383
4384static const struct opcode group7_rm0[] = {
4385 N,
4386 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4387 N, N, N, N, N, N,
4388};
4389
4390static const struct opcode group7_rm1[] = {
4391 DI(SrcNone | Priv, monitor),
4392 DI(SrcNone | Priv, mwait),
4393 N, N, N, N, N, N,
4394};
4395
4396static const struct opcode group7_rm3[] = {
4397 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4398 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4399 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4400 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4401 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4402 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4403 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4404 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4405};
4406
4407static const struct opcode group7_rm7[] = {
4408 N,
4409 DIP(SrcNone, rdtscp, check_rdtsc),
4410 N, N, N, N, N, N,
4411};
4412
4413static const struct opcode group1[] = {
4414 F(Lock, em_add),
4415 F(Lock | PageTable, em_or),
4416 F(Lock, em_adc),
4417 F(Lock, em_sbb),
4418 F(Lock | PageTable, em_and),
4419 F(Lock, em_sub),
4420 F(Lock, em_xor),
4421 F(NoWrite, em_cmp),
4422};
4423
4424static const struct opcode group1A[] = {
4425 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4426};
4427
4428static const struct opcode group2[] = {
4429 F(DstMem | ModRM, em_rol),
4430 F(DstMem | ModRM, em_ror),
4431 F(DstMem | ModRM, em_rcl),
4432 F(DstMem | ModRM, em_rcr),
4433 F(DstMem | ModRM, em_shl),
4434 F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl), /* /6 is SAL, an alias for SHL */
4436 F(DstMem | ModRM, em_sar),
4437};
4438
4439static const struct opcode group3[] = {
4440 F(DstMem | SrcImm | NoWrite, em_test),
4441 F(DstMem | SrcImm | NoWrite, em_test),
4442 F(DstMem | SrcNone | Lock, em_not),
4443 F(DstMem | SrcNone | Lock, em_neg),
4444 F(DstXacc | Src2Mem, em_mul_ex),
4445 F(DstXacc | Src2Mem, em_imul_ex),
4446 F(DstXacc | Src2Mem, em_div_ex),
4447 F(DstXacc | Src2Mem, em_idiv_ex),
4448};
4449
4450static const struct opcode group4[] = {
4451 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4452 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4453 N, N, N, N, N, N,
4454};
4455
4456static const struct opcode group5[] = {
4457 F(DstMem | SrcNone | Lock, em_inc),
4458 F(DstMem | SrcNone | Lock, em_dec),
4459 I(SrcMem | NearBranch, em_call_near_abs),
4460 I(SrcMemFAddr | ImplicitOps, em_call_far),
4461 I(SrcMem | NearBranch, em_jmp_abs),
4462 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4463 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4464};
4465
4466static const struct opcode group6[] = {
4467 II(Prot | DstMem, em_sldt, sldt),
4468 II(Prot | DstMem, em_str, str),
4469 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4470 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4471 N, N, N, N,
4472};
4473
4474static const struct group_dual group7 = { {
4475 II(Mov | DstMem, em_sgdt, sgdt),
4476 II(Mov | DstMem, em_sidt, sidt),
4477 II(SrcMem | Priv, em_lgdt, lgdt),
4478 II(SrcMem | Priv, em_lidt, lidt),
4479 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4480 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4481 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4482}, {
4483 EXT(0, group7_rm0),
4484 EXT(0, group7_rm1),
4485 N, EXT(0, group7_rm3),
4486 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4487 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4488 EXT(0, group7_rm7),
4489} };
4490
4491static const struct opcode group8[] = {
4492 N, N, N, N,
4493 F(DstMem | SrcImmByte | NoWrite, em_bt),
4494 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4495 F(DstMem | SrcImmByte | Lock, em_btr),
4496 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4497};
4498
/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
4503static const struct gprefix pfx_0f_c7_7 = {
4504 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4505};
4506
4507
4508static const struct group_dual group9 = { {
4509 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4510}, {
4511 N, N, N, N, N, N, N,
4512 GP(0, &pfx_0f_c7_7),
4513} };
4514
4515static const struct opcode group11[] = {
4516 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4517 X7(D(Undefined)),
4518};
4519
4520static const struct gprefix pfx_0f_ae_7 = {
4521 I(SrcMem | ByteOp, em_clflush), N, N, N,
4522};
4523
4524static const struct group_dual group15 = { {
4525 I(ModRM | Aligned16, em_fxsave),
4526 I(ModRM | Aligned16, em_fxrstor),
4527 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4528}, {
4529 N, N, N, N, N, N, N, N,
4530} };
4531
4532static const struct gprefix pfx_0f_6f_0f_7f = {
4533 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4534};
4535
4536static const struct instr_dual instr_dual_0f_2b = {
4537 I(0, em_mov), N
4538};
4539
4540static const struct gprefix pfx_0f_2b = {
4541 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4542};
4543
4544static const struct gprefix pfx_0f_10_0f_11 = {
4545 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4546};
4547
4548static const struct gprefix pfx_0f_28_0f_29 = {
4549 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4550};
4551
4552static const struct gprefix pfx_0f_e7 = {
4553 N, I(Sse, em_mov), N, N,
4554};
4555
4556static const struct escape escape_d9 = { {
4557 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4558}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
4575} };
4576
4577static const struct escape escape_db = { {
4578 N, N, N, N, N, N, N, N,
4579}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
4596} };
4597
4598static const struct escape escape_dd = { {
4599 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4600}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
4617} };
4618
4619static const struct instr_dual instr_dual_0f_c3 = {
4620 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4621};
4622
4623static const struct mode_dual mode_dual_63 = {
4624 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4625};
4626
4627static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
4629 F6ALU(Lock, em_add),
4630 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4631 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
4633 F6ALU(Lock | PageTable, em_or),
4634 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4635 N,
	/* 0x10 - 0x17 */
4637 F6ALU(Lock, em_adc),
4638 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4639 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
4641 F6ALU(Lock, em_sbb),
4642 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4643 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
4645 F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
4647 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
4649 F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
4651 F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
4653 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
4655 X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
4657 X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
4659 I(ImplicitOps | Stack | No64, em_pusha),
4660 I(ImplicitOps | Stack | No64, em_popa),
4661 N, MD(ModRM, &mode_dual_63),
4662 N, N, N, N,
	/* 0x68 - 0x6F */
4664 I(SrcImm | Mov | Stack, em_push),
4665 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4666 I(SrcImmByte | Mov | Stack, em_push),
4667 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4668 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4669 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
	/* 0x70 - 0x7F */
4671 X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
4673 G(ByteOp | DstMem | SrcImm, group1),
4674 G(DstMem | SrcImm, group1),
4675 G(ByteOp | DstMem | SrcImm | No64, group1),
4676 G(DstMem | SrcImmByte, group1),
4677 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4678 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
4680 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4681 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4682 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4683 D(ModRM | SrcMem | NoAccess | DstReg),
4684 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4685 G(0, group1A),
	/* 0x90 - 0x97 */
4687 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
4689 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4690 I(SrcImmFAddr | No64, em_call_far), N,
4691 II(ImplicitOps | Stack, em_pushf, pushf),
4692 II(ImplicitOps | Stack, em_popf, popf),
4693 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
4695 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4696 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4697 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4698 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	/* 0xA8 - 0xAF */
4700 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4701 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4702 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4703 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
4705 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
4707 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
4709 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4710 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4711 I(ImplicitOps | NearBranch, em_ret),
4712 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4713 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4714 G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
4716 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4717 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4718 I(ImplicitOps, em_ret_far),
4719 D(ImplicitOps), DI(SrcImmByte, intn),
4720 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
4722 G(Src2One | ByteOp, group2), G(Src2One, group2),
4723 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4724 I(DstAcc | SrcImmUByte | No64, em_aam),
4725 I(DstAcc | SrcImmUByte | No64, em_aad),
4726 F(DstAcc | ByteOp | No64, em_salc),
4727 I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
4729 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
4731 X3(I(SrcImmByte | NearBranch, em_loop)),
4732 I(SrcImmByte | NearBranch, em_jcxz),
4733 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4734 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
4736 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4737 I(SrcImmFAddr | No64, em_jmp_far),
4738 D(SrcImmByte | ImplicitOps | NearBranch),
4739 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4740 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
4742 N, DI(ImplicitOps, icebp), N, N,
4743 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4744 G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
4746 D(ImplicitOps), D(ImplicitOps),
4747 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4748 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4749};
4750
4751static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
4753 G(0, group6), GD(0, &group7), N, N,
4754 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4755 II(ImplicitOps | Priv, em_clts, clts), N,
4756 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4757 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
4759 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4760 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4761 N, N, N, N, N, N,
4762 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4763 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
4765 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4766 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4767 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4768 check_cr_write),
4769 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4770 check_dr_write),
4771 N, N, N, N,
4772 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4773 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4774 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4775 N, N, N, N,
	/* 0x30 - 0x3F */
4777 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4778 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4779 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4780 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4781 I(ImplicitOps | EmulateOnUD, em_sysenter),
4782 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4783 N, N,
4784 N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
4786 X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
4788 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
4790 N, N, N, N,
4791 N, N, N, N,
4792 N, N, N, N,
4793 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
4795 N, N, N, N,
4796 N, N, N, N,
4797 N, N, N, N,
4798 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
4800 X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
4802 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
	/* 0xA0 - 0xA7 */
4804 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4805 II(ImplicitOps, em_cpuid, cpuid),
4806 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4807 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4808 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
4810 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4811 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4812 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4813 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4814 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4815 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
4817 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4818 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4819 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4820 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4821 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4822 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
4824 N, N,
4825 G(BitOp, group8),
4826 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4827 I(DstReg | SrcMem | ModRM, em_bsf_c),
4828 I(DstReg | SrcMem | ModRM, em_bsr_c),
4829 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
4831 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4832 N, ID(0, &instr_dual_0f_c3),
4833 N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
4835 X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
4837 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
4839 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4840 N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
4842 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4843};
4844
4845static const struct instr_dual instr_dual_0f_38_f0 = {
4846 I(DstReg | SrcMem | Mov, em_movbe), N
4847};
4848
4849static const struct instr_dual instr_dual_0f_38_f1 = {
4850 I(DstMem | SrcReg | Mov, em_movbe), N
4851};
4852
4853static const struct gprefix three_byte_0f_38_f0 = {
4854 ID(0, &instr_dual_0f_38_f0), N, N, N
4855};
4856
4857static const struct gprefix three_byte_0f_38_f1 = {
4858 ID(0, &instr_dual_0f_38_f1), N, N, N
4859};
4860
/*
 * The 0F 38 opcode map: indexed by the third opcode byte; each populated
 * entry then dispatches on the mandatory (66/F2/F3) prefix.
 */
4865static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
4867 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
4869 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
4871 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4872 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
4874 N, N, X4(N), X8(N)
4875};
4876
4877#undef D
4878#undef N
4879#undef G
4880#undef GD
4881#undef I
4882#undef GP
4883#undef EXT
4884#undef MD
4885#undef ID
4886
4887#undef D2bv
4888#undef D2bvIP
4889#undef I2bv
4890#undef I2bvIP
#undef F6ALU
4892
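/*
 * Number of bytes of immediate to fetch: 1 for ByteOp, otherwise the operand
 * size capped at 4, since 64-bit operand-size instructions take a
 * sign-extended 32-bit immediate (OpImm64 is handled separately).
 */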
4893static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4894{
4895 unsigned size;
4896
4897 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4898 if (size == 8)
4899 size = 4;
4900 return size;
4901}
4902
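/*
 * Fetch an immediate operand of 'size' bytes from the instruction stream at
 * the current _eip; the value is fetched sign-extended and masked back down
 * to 'size' bytes when sign_extension is false.
 */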
4903static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4904 unsigned size, bool sign_extension)
4905{
4906 int rc = X86EMUL_CONTINUE;
4907
4908 op->type = OP_IMM;
4909 op->bytes = size;
4910 op->addr.mem.ea = ctxt->_eip;
4911
4912 switch (op->bytes) {
4913 case 1:
4914 op->val = insn_fetch(s8, ctxt);
4915 break;
4916 case 2:
4917 op->val = insn_fetch(s16, ctxt);
4918 break;
4919 case 4:
4920 op->val = insn_fetch(s32, ctxt);
4921 break;
4922 case 8:
4923 op->val = insn_fetch(s64, ctxt);
4924 break;
4925 }
4926 if (!sign_extension) {
4927 switch (op->bytes) {
4928 case 1:
4929 op->val &= 0xff;
4930 break;
4931 case 2:
4932 op->val &= 0xffff;
4933 break;
4934 case 4:
4935 op->val &= 0xffffffff;
4936 break;
4937 }
4938 }
4939done:
4940 return rc;
4941}
4942
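/*
 * Decode one operand selected by an Op* value from the opcode flags:
 * general registers, memory operands (shared through ctxt->memop), fetched
 * immediates, implied accumulator/DX/CL/one values, string-op SI/DI
 * pointers and segment-register constants.
 */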
4943static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4944 unsigned d)
4945{
4946 int rc = X86EMUL_CONTINUE;
4947
4948 switch (d) {
4949 case OpReg:
4950 decode_register_operand(ctxt, op);
4951 break;
4952 case OpImmUByte:
4953 rc = decode_imm(ctxt, op, 1, false);
4954 break;
4955 case OpMem:
4956 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4957 mem_common:
4958 *op = ctxt->memop;
4959 ctxt->memopp = op;
4960 if (ctxt->d & BitOp)
4961 fetch_bit_operand(ctxt);
4962 op->orig_val = op->val;
4963 break;
4964 case OpMem64:
4965 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4966 goto mem_common;
4967 case OpAcc:
4968 op->type = OP_REG;
4969 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4970 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4971 fetch_register_operand(op);
4972 op->orig_val = op->val;
4973 break;
4974 case OpAccLo:
4975 op->type = OP_REG;
4976 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4977 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4978 fetch_register_operand(op);
4979 op->orig_val = op->val;
4980 break;
4981 case OpAccHi:
4982 if (ctxt->d & ByteOp) {
4983 op->type = OP_NONE;
4984 break;
4985 }
4986 op->type = OP_REG;
4987 op->bytes = ctxt->op_bytes;
4988 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4989 fetch_register_operand(op);
4990 op->orig_val = op->val;
4991 break;
4992 case OpDI:
4993 op->type = OP_MEM;
4994 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4995 op->addr.mem.ea =
4996 register_address(ctxt, VCPU_REGS_RDI);
4997 op->addr.mem.seg = VCPU_SREG_ES;
4998 op->val = 0;
4999 op->count = 1;
5000 break;
5001 case OpDX:
5002 op->type = OP_REG;
5003 op->bytes = 2;
5004 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5005 fetch_register_operand(op);
5006 break;
5007 case OpCL:
5008 op->type = OP_IMM;
5009 op->bytes = 1;
5010 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5011 break;
5012 case OpImmByte:
5013 rc = decode_imm(ctxt, op, 1, true);
5014 break;
5015 case OpOne:
5016 op->type = OP_IMM;
5017 op->bytes = 1;
5018 op->val = 1;
5019 break;
5020 case OpImm:
5021 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5022 break;
5023 case OpImm64:
5024 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5025 break;
5026 case OpMem8:
5027 ctxt->memop.bytes = 1;
5028 if (ctxt->memop.type == OP_REG) {
5029 ctxt->memop.addr.reg = decode_register(ctxt,
5030 ctxt->modrm_rm, true);
5031 fetch_register_operand(&ctxt->memop);
5032 }
5033 goto mem_common;
5034 case OpMem16:
5035 ctxt->memop.bytes = 2;
5036 goto mem_common;
5037 case OpMem32:
5038 ctxt->memop.bytes = 4;
5039 goto mem_common;
5040 case OpImmU16:
5041 rc = decode_imm(ctxt, op, 2, false);
5042 break;
5043 case OpImmU:
5044 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5045 break;
5046 case OpSI:
5047 op->type = OP_MEM;
5048 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5049 op->addr.mem.ea =
5050 register_address(ctxt, VCPU_REGS_RSI);
5051 op->addr.mem.seg = ctxt->seg_override;
5052 op->val = 0;
5053 op->count = 1;
5054 break;
5055 case OpXLat:
5056 op->type = OP_MEM;
5057 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5058 op->addr.mem.ea =
5059 address_mask(ctxt,
5060 reg_read(ctxt, VCPU_REGS_RBX) +
5061 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5062 op->addr.mem.seg = ctxt->seg_override;
5063 op->val = 0;
5064 break;
5065 case OpImmFAddr:
5066 op->type = OP_IMM;
5067 op->addr.mem.ea = ctxt->_eip;
5068 op->bytes = ctxt->op_bytes + 2;
5069 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5070 break;
5071 case OpMemFAddr:
5072 ctxt->memop.bytes = ctxt->op_bytes + 2;
5073 goto mem_common;
5074 case OpES:
5075 op->type = OP_IMM;
5076 op->val = VCPU_SREG_ES;
5077 break;
5078 case OpCS:
5079 op->type = OP_IMM;
5080 op->val = VCPU_SREG_CS;
5081 break;
5082 case OpSS:
5083 op->type = OP_IMM;
5084 op->val = VCPU_SREG_SS;
5085 break;
5086 case OpDS:
5087 op->type = OP_IMM;
5088 op->val = VCPU_SREG_DS;
5089 break;
5090 case OpFS:
5091 op->type = OP_IMM;
5092 op->val = VCPU_SREG_FS;
5093 break;
5094 case OpGS:
5095 op->type = OP_IMM;
5096 op->val = VCPU_SREG_GS;
5097 break;
5098 case OpImplicit:
		/* Special instructions do their own operand decoding. */
5100 default:
5101 op->type = OP_NONE;
5102 break;
5103 }
5104
5105done:
5106 return rc;
5107}
5108
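/*
 * Decode the instruction at ctxt->eip: establish default operand/address
 * sizes for the current mode, consume legacy and REX prefixes, look the
 * opcode up in the one-, two- or three-byte tables, resolve any group,
 * prefix, escape or mode/instruction-dual indirection, and finally decode
 * the ModRM byte and the source, second-source and destination operands.
 */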
5109int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5110{
5111 int rc = X86EMUL_CONTINUE;
5112 int mode = ctxt->mode;
5113 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5114 bool op_prefix = false;
5115 bool has_seg_override = false;
5116 struct opcode opcode;
5117 u16 dummy;
5118 struct desc_struct desc;
5119
5120 ctxt->memop.type = OP_NONE;
5121 ctxt->memopp = NULL;
5122 ctxt->_eip = ctxt->eip;
5123 ctxt->fetch.ptr = ctxt->fetch.data;
5124 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5125 ctxt->opcode_len = 1;
5126 if (insn_len > 0)
5127 memcpy(ctxt->fetch.data, insn, insn_len);
5128 else {
5129 rc = __do_insn_fetch_bytes(ctxt, 1);
5130 if (rc != X86EMUL_CONTINUE)
5131 return rc;
5132 }
5133
5134 switch (mode) {
5135 case X86EMUL_MODE_REAL:
5136 case X86EMUL_MODE_VM86:
5137 def_op_bytes = def_ad_bytes = 2;
5138 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5139 if (desc.d)
5140 def_op_bytes = def_ad_bytes = 4;
5141 break;
5142 case X86EMUL_MODE_PROT16:
5143 def_op_bytes = def_ad_bytes = 2;
5144 break;
5145 case X86EMUL_MODE_PROT32:
5146 def_op_bytes = def_ad_bytes = 4;
5147 break;
5148#ifdef CONFIG_X86_64
5149 case X86EMUL_MODE_PROT64:
5150 def_op_bytes = 4;
5151 def_ad_bytes = 8;
5152 break;
5153#endif
5154 default:
5155 return EMULATION_FAILED;
5156 }
5157
5158 ctxt->op_bytes = def_op_bytes;
5159 ctxt->ad_bytes = def_ad_bytes;
5160
	/* Legacy prefixes. */
5162 for (;;) {
5163 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5164 case 0x66:
5165 op_prefix = true;
			/* switch between 2/4 bytes */
5167 ctxt->op_bytes = def_op_bytes ^ 6;
5168 break;
5169 case 0x67:
5170 if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
5172 ctxt->ad_bytes = def_ad_bytes ^ 12;
5173 else
				/* switch between 2/4 bytes */
5175 ctxt->ad_bytes = def_ad_bytes ^ 6;
5176 break;
5177 case 0x26:
5178 case 0x2e:
5179 case 0x36:
5180 case 0x3e:
5181 has_seg_override = true;
5182 ctxt->seg_override = (ctxt->b >> 3) & 3;
5183 break;
5184 case 0x64:
5185 case 0x65:
5186 has_seg_override = true;
5187 ctxt->seg_override = ctxt->b & 7;
5188 break;
5189 case 0x40 ... 0x4f:
5190 if (mode != X86EMUL_MODE_PROT64)
5191 goto done_prefixes;
5192 ctxt->rex_prefix = ctxt->b;
5193 continue;
5194 case 0xf0:
5195 ctxt->lock_prefix = 1;
5196 break;
5197 case 0xf2:
5198 case 0xf3:
5199 ctxt->rep_prefix = ctxt->b;
5200 break;
5201 default:
5202 goto done_prefixes;
5203 }
5204
		/* Any legacy prefix after a REX prefix nullifies its effect. */

5207 ctxt->rex_prefix = 0;
5208 }
5209
5210done_prefixes:
5211
	/* REX prefix. */
5213 if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
5215
	/* Opcode byte(s). */
5217 opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
5219 if (ctxt->b == 0x0f) {
5220 ctxt->opcode_len = 2;
5221 ctxt->b = insn_fetch(u8, ctxt);
5222 opcode = twobyte_table[ctxt->b];
5223
		/* 0F 38 opcode map */
5225 if (ctxt->b == 0x38) {
5226 ctxt->opcode_len = 3;
5227 ctxt->b = insn_fetch(u8, ctxt);
5228 opcode = opcode_map_0f_38[ctxt->b];
5229 }
5230 }
5231 ctxt->d = opcode.flags;
5232
5233 if (ctxt->d & ModRM)
5234 ctxt->modrm = insn_fetch(u8, ctxt);
5235
	/* vex-prefix instructions are not implemented */
5237 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5238 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5239 ctxt->d = NotImpl;
5240 }
5241
5242 while (ctxt->d & GroupMask) {
5243 switch (ctxt->d & GroupMask) {
5244 case Group:
5245 goffset = (ctxt->modrm >> 3) & 7;
5246 opcode = opcode.u.group[goffset];
5247 break;
5248 case GroupDual:
5249 goffset = (ctxt->modrm >> 3) & 7;
5250 if ((ctxt->modrm >> 6) == 3)
5251 opcode = opcode.u.gdual->mod3[goffset];
5252 else
5253 opcode = opcode.u.gdual->mod012[goffset];
5254 break;
5255 case RMExt:
5256 goffset = ctxt->modrm & 7;
5257 opcode = opcode.u.group[goffset];
5258 break;
5259 case Prefix:
5260 if (ctxt->rep_prefix && op_prefix)
5261 return EMULATION_FAILED;
5262 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5263 switch (simd_prefix) {
5264 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5265 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5266 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5267 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5268 }
5269 break;
5270 case Escape:
5271 if (ctxt->modrm > 0xbf)
5272 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5273 else
5274 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5275 break;
5276 case InstrDual:
5277 if ((ctxt->modrm >> 6) == 3)
5278 opcode = opcode.u.idual->mod3;
5279 else
5280 opcode = opcode.u.idual->mod012;
5281 break;
5282 case ModeDual:
5283 if (ctxt->mode == X86EMUL_MODE_PROT64)
5284 opcode = opcode.u.mdual->mode64;
5285 else
5286 opcode = opcode.u.mdual->mode32;
5287 break;
5288 default:
5289 return EMULATION_FAILED;
5290 }
5291
5292 ctxt->d &= ~(u64)GroupMask;
5293 ctxt->d |= opcode.flags;
5294 }
5295
	/* Unrecognised? */
5297 if (ctxt->d == 0)
5298 return EMULATION_FAILED;
5299
5300 ctxt->execute = opcode.u.execute;
5301
5302 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5303 return EMULATION_FAILED;
5304
5305 if (unlikely(ctxt->d &
5306 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5307 No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn().
		 */
5312 ctxt->check_perm = opcode.check_perm;
5313 ctxt->intercept = opcode.intercept;
5314
5315 if (ctxt->d & NotImpl)
5316 return EMULATION_FAILED;
5317
5318 if (mode == X86EMUL_MODE_PROT64) {
5319 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5320 ctxt->op_bytes = 8;
5321 else if (ctxt->d & NearBranch)
5322 ctxt->op_bytes = 8;
5323 }
5324
5325 if (ctxt->d & Op3264) {
5326 if (mode == X86EMUL_MODE_PROT64)
5327 ctxt->op_bytes = 8;
5328 else
5329 ctxt->op_bytes = 4;
5330 }
5331
5332 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5333 ctxt->op_bytes = 4;
5334
5335 if (ctxt->d & Sse)
5336 ctxt->op_bytes = 16;
5337 else if (ctxt->d & Mmx)
5338 ctxt->op_bytes = 8;
5339 }
5340
	/* ModRM and SIB bytes. */
5342 if (ctxt->d & ModRM) {
5343 rc = decode_modrm(ctxt, &ctxt->memop);
5344 if (!has_seg_override) {
5345 has_seg_override = true;
5346 ctxt->seg_override = ctxt->modrm_seg;
5347 }
5348 } else if (ctxt->d & MemAbs)
5349 rc = decode_abs(ctxt, &ctxt->memop);
5350 if (rc != X86EMUL_CONTINUE)
5351 goto done;
5352
5353 if (!has_seg_override)
5354 ctxt->seg_override = VCPU_SREG_DS;
5355
5356 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5357
	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
5362 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5363 if (rc != X86EMUL_CONTINUE)
5364 goto done;
5365
	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
5370 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5371 if (rc != X86EMUL_CONTINUE)
5372 goto done;
5373
	/* Decode and fetch the destination operand: register or memory. */
5375 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5376
5377 if (ctxt->rip_relative && likely(ctxt->memopp))
5378 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5379 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5380
5381done:
5382 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5383}
5384
5385bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5386{
5387 return ctxt->d & PageTable;
5388}
5389
5390static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5391{
	/*
	 * The second termination condition applies only to REPE/REPZ and
	 * REPNE/REPNZ.  For CMPS (0xa6/0xa7) and SCAS (0xae/0xaf) the string
	 * operation is also complete when:
	 *	- REPE/REPZ is used and ZF == 0, or
	 *	- REPNE/REPNZ is used and ZF == 1.
	 */
5399 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5400 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5401 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5402 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5403 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5404 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5405 return true;
5406
5407 return false;
5408}
5409
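/*
 * Deliver any pending x87 exception before touching FPU state: execute
 * FWAIT and turn a fault it raises into a #MF for the guest.
 */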
5410static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5411{
5412 int rc;
5413
5414 rc = asm_safe("fwait");
5415
5416 if (unlikely(rc != X86EMUL_CONTINUE))
5417 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5418
5419 return X86EMUL_CONTINUE;
5420}
5421
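/* Load the MMX register backing an OP_MM operand, if there is one. */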
5422static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5423 struct operand *op)
5424{
5425 if (op->type == OP_MM)
5426 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5427}
5428
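/*
 * Invoke a fastop stub: the stubs for one operation form a table of
 * FASTOP_SIZE-byte entries indexed by log2 of the destination size, with
 * byte operations using the base entry directly.  Guest arithmetic flags
 * (plus IF) are installed around the indirect call and harvested afterwards;
 * a stub that comes back with fop == NULL faulted, which is reported as #DE.
 */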
5429static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5430{
5431 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5432
5433 if (!(ctxt->d & ByteOp))
5434 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5435
5436 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5437 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5438 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5439 : "c"(ctxt->src2.val));
5440
5441 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5442 if (!fop)
5443 return emulate_de(ctxt);
5444 return X86EMUL_CONTINUE;
5445}
5446
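/*
 * Reset the per-instruction decode state: everything between rip_relative
 * and modrm in struct x86_emulate_ctxt is cleared in one memset, so the
 * field ordering there matters.
 */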
5447void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5448{
5449 memset(&ctxt->rip_relative, 0,
5450 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5451
5452 ctxt->io_read.pos = 0;
5453 ctxt->io_read.end = 0;
5454 ctxt->mem_read.end = 0;
5455}
5456
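/*
 * Execute a previously decoded instruction: run the privilege, mode,
 * intercept and per-instruction permission checks, read memory operands,
 * dispatch to the execute/fastop handler or the opcode switches below, then
 * write back results, advance the string iteration state and update RIP.
 */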
5457int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5458{
5459 const struct x86_emulate_ops *ops = ctxt->ops;
5460 int rc = X86EMUL_CONTINUE;
5461 int saved_dst_type = ctxt->dst.type;
5462 unsigned emul_flags;
5463
5464 ctxt->mem_read.pos = 0;
5465
	/* LOCK prefix is allowed only with some instructions */
5467 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5468 rc = emulate_ud(ctxt);
5469 goto done;
5470 }
5471
5472 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5473 rc = emulate_ud(ctxt);
5474 goto done;
5475 }
5476
5477 emul_flags = ctxt->ops->get_hflags(ctxt);
5478 if (unlikely(ctxt->d &
5479 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5480 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5481 (ctxt->d & Undefined)) {
5482 rc = emulate_ud(ctxt);
5483 goto done;
5484 }
5485
5486 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5487 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5488 rc = emulate_ud(ctxt);
5489 goto done;
5490 }
5491
5492 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5493 rc = emulate_nm(ctxt);
5494 goto done;
5495 }
5496
5497 if (ctxt->d & Mmx) {
5498 rc = flush_pending_x87_faults(ctxt);
5499 if (rc != X86EMUL_CONTINUE)
5500 goto done;
5501
			/*
			 * Now that we know the fpu is exception safe, we can
			 * fetch operands from it.
			 */
5505 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5506 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5507 if (!(ctxt->d & Mov))
5508 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5509 }
5510
5511 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5512 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5513 X86_ICPT_PRE_EXCEPT);
5514 if (rc != X86EMUL_CONTINUE)
5515 goto done;
5516 }
5517
		/* Instruction can only be executed in protected mode */
5519 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5520 rc = emulate_ud(ctxt);
5521 goto done;
5522 }
5523
		/* Privileged instruction can be executed only in CPL=0 */
5525 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5526 if (ctxt->d & PrivUD)
5527 rc = emulate_ud(ctxt);
5528 else
5529 rc = emulate_gp(ctxt, 0);
5530 goto done;
5531 }
5532
		/* Do instruction specific permission checks */
5534 if (ctxt->d & CheckPerm) {
5535 rc = ctxt->check_perm(ctxt);
5536 if (rc != X86EMUL_CONTINUE)
5537 goto done;
5538 }
5539
5540 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5541 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5542 X86_ICPT_POST_EXCEPT);
5543 if (rc != X86EMUL_CONTINUE)
5544 goto done;
5545 }
5546
5547 if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
5549 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5550 string_registers_quirk(ctxt);
5551 ctxt->eip = ctxt->_eip;
5552 ctxt->eflags &= ~X86_EFLAGS_RF;
5553 goto done;
5554 }
5555 }
5556 }
5557
5558 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5559 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5560 ctxt->src.valptr, ctxt->src.bytes);
5561 if (rc != X86EMUL_CONTINUE)
5562 goto done;
5563 ctxt->src.orig_val64 = ctxt->src.val64;
5564 }
5565
5566 if (ctxt->src2.type == OP_MEM) {
5567 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5568 &ctxt->src2.val, ctxt->src2.bytes);
5569 if (rc != X86EMUL_CONTINUE)
5570 goto done;
5571 }
5572
5573 if ((ctxt->d & DstMask) == ImplicitOps)
5574 goto special_insn;
5575
5576
5577 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
5579 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5580 &ctxt->dst.val, ctxt->dst.bytes);
5581 if (rc != X86EMUL_CONTINUE) {
5582 if (!(ctxt->d & NoWrite) &&
5583 rc == X86EMUL_PROPAGATE_FAULT &&
5584 ctxt->exception.vector == PF_VECTOR)
5585 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5586 goto done;
5587 }
5588 }
5589
5590 ctxt->dst.orig_val64 = ctxt->dst.val64;
5591
5592special_insn:
5593
5594 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5595 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5596 X86_ICPT_POST_MEMACCESS);
5597 if (rc != X86EMUL_CONTINUE)
5598 goto done;
5599 }
5600
5601 if (ctxt->rep_prefix && (ctxt->d & String))
5602 ctxt->eflags |= X86_EFLAGS_RF;
5603 else
5604 ctxt->eflags &= ~X86_EFLAGS_RF;
5605
5606 if (ctxt->execute) {
5607 if (ctxt->d & Fastop) {
5608 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5609 rc = fastop(ctxt, fop);
5610 if (rc != X86EMUL_CONTINUE)
5611 goto done;
5612 goto writeback;
5613 }
5614 rc = ctxt->execute(ctxt);
5615 if (rc != X86EMUL_CONTINUE)
5616 goto done;
5617 goto writeback;
5618 }
5619
5620 if (ctxt->opcode_len == 2)
5621 goto twobyte_insn;
5622 else if (ctxt->opcode_len == 3)
5623 goto threebyte_insn;
5624
5625 switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
5627 if (test_cc(ctxt->b, ctxt->eflags))
5628 rc = jmp_rel(ctxt, ctxt->src.val);
5629 break;
	case 0x8d: /* lea r16/r32, m */
5631 ctxt->dst.val = ctxt->src.addr.mem.ea;
5632 break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5634 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5635 ctxt->dst.type = OP_NONE;
5636 else
5637 rc = em_xchg(ctxt);
5638 break;
	case 0x98: /* cbw/cwde/cdqe */
5640 switch (ctxt->op_bytes) {
5641 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5642 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5643 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5644 }
5645 break;
	case 0xcc: /* int3 */
5647 rc = emulate_int(ctxt, 3);
5648 break;
	case 0xcd: /* int n */
5650 rc = emulate_int(ctxt, ctxt->src.val);
5651 break;
	case 0xce: /* into */
5653 if (ctxt->eflags & X86_EFLAGS_OF)
5654 rc = emulate_int(ctxt, 4);
5655 break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
5658 rc = jmp_rel(ctxt, ctxt->src.val);
5659 ctxt->dst.type = OP_NONE;
5660 break;
	case 0xf4: /* hlt */
5662 ctxt->ops->halt(ctxt);
5663 break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
5666 ctxt->eflags ^= X86_EFLAGS_CF;
5667 break;
	case 0xf8: /* clc */
5669 ctxt->eflags &= ~X86_EFLAGS_CF;
5670 break;
	case 0xf9: /* stc */
5672 ctxt->eflags |= X86_EFLAGS_CF;
5673 break;
	case 0xfc: /* cld */
5675 ctxt->eflags &= ~X86_EFLAGS_DF;
5676 break;
	case 0xfd: /* std */
5678 ctxt->eflags |= X86_EFLAGS_DF;
5679 break;
5680 default:
5681 goto cannot_emulate;
5682 }
5683
5684 if (rc != X86EMUL_CONTINUE)
5685 goto done;
5686
5687writeback:
5688 if (ctxt->d & SrcWrite) {
5689 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5690 rc = writeback(ctxt, &ctxt->src);
5691 if (rc != X86EMUL_CONTINUE)
5692 goto done;
5693 }
5694 if (!(ctxt->d & NoWrite)) {
5695 rc = writeback(ctxt, &ctxt->dst);
5696 if (rc != X86EMUL_CONTINUE)
5697 goto done;
5698 }
5699
	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
5704 ctxt->dst.type = saved_dst_type;
5705
5706 if ((ctxt->d & SrcMask) == SrcSI)
5707 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5708
5709 if ((ctxt->d & DstMask) == DstDI)
5710 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5711
5712 if (ctxt->rep_prefix && (ctxt->d & String)) {
5713 unsigned int count;
5714 struct read_cache *r = &ctxt->io_read;
5715 if ((ctxt->d & SrcMask) == SrcSI)
5716 count = ctxt->src.count;
5717 else
5718 count = ctxt->dst.count;
5719 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5720
5721 if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not being used, after each
			 * 1024 iterations.
			 */
5726 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5727 (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache: this normally happens
				 * before decode, but a restarted instruction
				 * skips the decode step, so it must be done
				 * here instead.
				 */
5733 ctxt->mem_read.end = 0;
5734 writeback_registers(ctxt);
5735 return EMULATION_RESTART;
5736 }
5737 goto done;
5738 }
5739 ctxt->eflags &= ~X86_EFLAGS_RF;
5740 }
5741
5742 ctxt->eip = ctxt->_eip;
5743
5744done:
5745 if (rc == X86EMUL_PROPAGATE_FAULT) {
5746 WARN_ON(ctxt->exception.vector > 0x1f);
5747 ctxt->have_exception = true;
5748 }
5749 if (rc == X86EMUL_INTERCEPTED)
5750 return EMULATION_INTERCEPTED;
5751
5752 if (rc == X86EMUL_CONTINUE)
5753 writeback_registers(ctxt);
5754
5755 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5756
5757twobyte_insn:
5758 switch (ctxt->b) {
	case 0x09: /* wbinvd */
5760 (ctxt->ops->wbinvd)(ctxt);
5761 break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
5766 break;
	case 0x20: /* mov cr, reg */
5768 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5769 break;
	case 0x21: /* mov from dr to reg */
5771 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5772 break;
	case 0x40 ... 0x4f: /* cmov */
5774 if (test_cc(ctxt->b, ctxt->eflags))
5775 ctxt->dst.val = ctxt->src.val;
5776 else if (ctxt->op_bytes != 4)
5777 ctxt->dst.type = OP_NONE;
5778 break;
	case 0x80 ... 0x8f: /* jcc (near) */
5780 if (test_cc(ctxt->b, ctxt->eflags))
5781 rc = jmp_rel(ctxt, ctxt->src.val);
5782 break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
5784 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5785 break;
	case 0xb6 ... 0xb7: /* movzx */
5787 ctxt->dst.bytes = ctxt->op_bytes;
5788 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5789 : (u16) ctxt->src.val;
5790 break;
	case 0xbe ... 0xbf: /* movsx */
5792 ctxt->dst.bytes = ctxt->op_bytes;
5793 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5794 (s16) ctxt->src.val;
5795 break;
5796 default:
5797 goto cannot_emulate;
5798 }
5799
5800threebyte_insn:
5801
5802 if (rc != X86EMUL_CONTINUE)
5803 goto done;
5804
5805 goto writeback;
5806
5807cannot_emulate:
5808 return EMULATION_FAILED;
5809}
5810
5811void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5812{
5813 invalidate_registers(ctxt);
5814}
5815
5816void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5817{
5818 writeback_registers(ctxt);
5819}
5820
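/*
 * Whether a cached guest physical address may be reused for this
 * instruction: rep-prefixed string instructions and TwoMemOp instructions
 * touch more than one memory location, so they must not use it.
 */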
5821bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5822{
5823 if (ctxt->rep_prefix && (ctxt->d & String))
5824 return false;
5825
5826 if (ctxt->d & TwoMemOp)
5827 return false;
5828
5829 return true;
5830}
5831