1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/kvm_host.h>
22#include "kvm_cache_regs.h"
23#include <asm/kvm_emulate.h>
24#include <linux/stringify.h>
25#include <asm/debugreg.h>
26#include <asm/nospec-branch.h>
27
28#include "x86.h"
29#include "tss.h"
30#include "mmu.h"
31#include "pmu.h"
32
33
34
35
36#define OpNone 0ull
37#define OpImplicit 1ull
38#define OpReg 2ull
39#define OpMem 3ull
40#define OpAcc 4ull
41#define OpDI 5ull
42#define OpMem64 6ull
43#define OpImmUByte 7ull
44#define OpDX 8ull
45#define OpCL 9ull
46#define OpImmByte 10ull
47#define OpOne 11ull
48#define OpImm 12ull
49#define OpMem16 13ull
50#define OpMem32 14ull
51#define OpImmU 15ull
52#define OpSI 16ull
53#define OpImmFAddr 17ull
54#define OpMemFAddr 18ull
55#define OpImmU16 19ull
56#define OpES 20ull
57#define OpCS 21ull
58#define OpSS 22ull
59#define OpDS 23ull
60#define OpFS 24ull
61#define OpGS 25ull
62#define OpMem8 26ull
63#define OpImm64 27ull
64#define OpXLat 28ull
65#define OpAccLo 29ull
66#define OpAccHi 30ull
67
68#define OpBits 5
69#define OpMask ((1ull << OpBits) - 1)
70
71
72
73
74
75
76
77
78
79
80
81#define ByteOp (1<<0)
82
83#define DstShift 1
84#define ImplicitOps (OpImplicit << DstShift)
85#define DstReg (OpReg << DstShift)
86#define DstMem (OpMem << DstShift)
87#define DstAcc (OpAcc << DstShift)
88#define DstDI (OpDI << DstShift)
89#define DstMem64 (OpMem64 << DstShift)
90#define DstMem16 (OpMem16 << DstShift)
91#define DstImmUByte (OpImmUByte << DstShift)
92#define DstDX (OpDX << DstShift)
93#define DstAccLo (OpAccLo << DstShift)
94#define DstMask (OpMask << DstShift)
95
96#define SrcShift 6
97#define SrcNone (OpNone << SrcShift)
98#define SrcReg (OpReg << SrcShift)
99#define SrcMem (OpMem << SrcShift)
100#define SrcMem16 (OpMem16 << SrcShift)
101#define SrcMem32 (OpMem32 << SrcShift)
102#define SrcImm (OpImm << SrcShift)
103#define SrcImmByte (OpImmByte << SrcShift)
104#define SrcOne (OpOne << SrcShift)
105#define SrcImmUByte (OpImmUByte << SrcShift)
106#define SrcImmU (OpImmU << SrcShift)
107#define SrcSI (OpSI << SrcShift)
108#define SrcXLat (OpXLat << SrcShift)
109#define SrcImmFAddr (OpImmFAddr << SrcShift)
110#define SrcMemFAddr (OpMemFAddr << SrcShift)
111#define SrcAcc (OpAcc << SrcShift)
112#define SrcImmU16 (OpImmU16 << SrcShift)
113#define SrcImm64 (OpImm64 << SrcShift)
114#define SrcDX (OpDX << SrcShift)
115#define SrcMem8 (OpMem8 << SrcShift)
116#define SrcAccHi (OpAccHi << SrcShift)
117#define SrcMask (OpMask << SrcShift)
118#define BitOp (1<<11)
119#define MemAbs (1<<12)
120#define String (1<<13)
121#define Stack (1<<14)
122#define GroupMask (7<<15)
123#define Group (1<<15)
124#define GroupDual (2<<15)
125#define Prefix (3<<15)
126#define RMExt (4<<15)
127#define Escape (5<<15)
128#define InstrDual (6<<15)
129#define ModeDual (7<<15)
130#define Sse (1<<18)
131
132#define ModRM (1<<19)
133
134#define Mov (1<<20)
135
136#define Prot (1<<21)
137#define EmulateOnUD (1<<22)
138#define NoAccess (1<<23)
139#define Op3264 (1<<24)
140#define Undefined (1<<25)
141#define Lock (1<<26)
142#define Priv (1<<27)
143#define No64 (1<<28)
144#define PageTable (1 << 29)
145#define NotImpl (1 << 30)
146
147#define Src2Shift (31)
148#define Src2None (OpNone << Src2Shift)
149#define Src2Mem (OpMem << Src2Shift)
150#define Src2CL (OpCL << Src2Shift)
151#define Src2ImmByte (OpImmByte << Src2Shift)
152#define Src2One (OpOne << Src2Shift)
153#define Src2Imm (OpImm << Src2Shift)
154#define Src2ES (OpES << Src2Shift)
155#define Src2CS (OpCS << Src2Shift)
156#define Src2SS (OpSS << Src2Shift)
157#define Src2DS (OpDS << Src2Shift)
158#define Src2FS (OpFS << Src2Shift)
159#define Src2GS (OpGS << Src2Shift)
160#define Src2Mask (OpMask << Src2Shift)
161#define Mmx ((u64)1 << 40)
162#define AlignMask ((u64)7 << 41)
163#define Aligned ((u64)1 << 41)
164#define Unaligned ((u64)2 << 41)
165#define Avx ((u64)3 << 41)
166#define Aligned16 ((u64)4 << 41)
167#define Fastop ((u64)1 << 44)
168#define NoWrite ((u64)1 << 45)
169#define SrcWrite ((u64)1 << 46)
170#define NoMod ((u64)1 << 47)
171#define Intercept ((u64)1 << 48)
172#define CheckPerm ((u64)1 << 49)
173#define PrivUD ((u64)1 << 51)
174#define NearBranch ((u64)1 << 52)
175#define No16 ((u64)1 << 53)
176#define IncSP ((u64)1 << 54)
177#define TwoMemOp ((u64)1 << 55)
178
179#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
180
181#define X2(x...) x, x
182#define X3(x...) X2(x), x
183#define X4(x...) X2(x), X2(x)
184#define X5(x...) X4(x), x
185#define X6(x...) X4(x), X2(x)
186#define X7(x...) X4(x), X3(x)
187#define X8(x...) X4(x), X4(x)
188#define X16(x...) X8(x), X8(x)
189
190#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
191#define FASTOP_SIZE 8
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210struct fastop;
211
212struct opcode {
213 u64 flags : 56;
214 u64 intercept : 8;
215 union {
216 int (*execute)(struct x86_emulate_ctxt *ctxt);
217 const struct opcode *group;
218 const struct group_dual *gdual;
219 const struct gprefix *gprefix;
220 const struct escape *esc;
221 const struct instr_dual *idual;
222 const struct mode_dual *mdual;
223 void (*fastop)(struct fastop *fake);
224 } u;
225 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
226};
227
228struct group_dual {
229 struct opcode mod012[8];
230 struct opcode mod3[8];
231};
232
233struct gprefix {
234 struct opcode pfx_no;
235 struct opcode pfx_66;
236 struct opcode pfx_f2;
237 struct opcode pfx_f3;
238};
239
240struct escape {
241 struct opcode op[8];
242 struct opcode high[64];
243};
244
245struct instr_dual {
246 struct opcode mod012;
247 struct opcode mod3;
248};
249
250struct mode_dual {
251 struct opcode mode32;
252 struct opcode mode64;
253};
254
255#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
256
257enum x86_transfer_type {
258 X86_TRANSFER_NONE,
259 X86_TRANSFER_CALL_JMP,
260 X86_TRANSFER_RET,
261 X86_TRANSFER_TASK_SWITCH,
262};
263
264static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
265{
266 if (!(ctxt->regs_valid & (1 << nr))) {
267 ctxt->regs_valid |= 1 << nr;
268 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
269 }
270 return ctxt->_regs[nr];
271}
272
273static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
274{
275 ctxt->regs_valid |= 1 << nr;
276 ctxt->regs_dirty |= 1 << nr;
277 return &ctxt->_regs[nr];
278}
279
280static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
281{
282 reg_read(ctxt, nr);
283 return reg_write(ctxt, nr);
284}
285
286static void writeback_registers(struct x86_emulate_ctxt *ctxt)
287{
288 unsigned reg;
289
290 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
291 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
292}
293
294static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
295{
296 ctxt->regs_dirty = 0;
297 ctxt->regs_valid = 0;
298}
299
300
301
302
303
304#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
305 X86_EFLAGS_PF|X86_EFLAGS_CF)
306
307#ifdef CONFIG_X86_64
308#define ON64(x) x
309#else
310#define ON64(x)
311#endif
312
313static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
314
315#define __FOP_FUNC(name) \
316 ".align " __stringify(FASTOP_SIZE) " \n\t" \
317 ".type " name ", @function \n\t" \
318 name ":\n\t"
319
320#define FOP_FUNC(name) \
321 __FOP_FUNC(#name)
322
323#define __FOP_RET(name) \
324 "ret \n\t" \
325 ".size " name ", .-" name "\n\t"
326
327#define FOP_RET(name) \
328 __FOP_RET(#name)
329
330#define FOP_START(op) \
331 extern void em_##op(struct fastop *fake); \
332 asm(".pushsection .text, \"ax\" \n\t" \
333 ".global em_" #op " \n\t" \
334 ".align " __stringify(FASTOP_SIZE) " \n\t" \
335 "em_" #op ":\n\t"
336
337#define FOP_END \
338 ".popsection")
339
340#define __FOPNOP(name) \
341 __FOP_FUNC(name) \
342 __FOP_RET(name)
343
344#define FOPNOP() \
345 __FOPNOP(__stringify(__UNIQUE_ID(nop)))
346
347#define FOP1E(op, dst) \
348 __FOP_FUNC(#op "_" #dst) \
349 "10: " #op " %" #dst " \n\t" \
350 __FOP_RET(#op "_" #dst)
351
352#define FOP1EEX(op, dst) \
353 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
354
355#define FASTOP1(op) \
356 FOP_START(op) \
357 FOP1E(op##b, al) \
358 FOP1E(op##w, ax) \
359 FOP1E(op##l, eax) \
360 ON64(FOP1E(op##q, rax)) \
361 FOP_END
362
363
364#define FASTOP1SRC2(op, name) \
365 FOP_START(name) \
366 FOP1E(op, cl) \
367 FOP1E(op, cx) \
368 FOP1E(op, ecx) \
369 ON64(FOP1E(op, rcx)) \
370 FOP_END
371
372
373#define FASTOP1SRC2EX(op, name) \
374 FOP_START(name) \
375 FOP1EEX(op, cl) \
376 FOP1EEX(op, cx) \
377 FOP1EEX(op, ecx) \
378 ON64(FOP1EEX(op, rcx)) \
379 FOP_END
380
381#define FOP2E(op, dst, src) \
382 __FOP_FUNC(#op "_" #dst "_" #src) \
383 #op " %" #src ", %" #dst " \n\t" \
384 __FOP_RET(#op "_" #dst "_" #src)
385
386#define FASTOP2(op) \
387 FOP_START(op) \
388 FOP2E(op##b, al, dl) \
389 FOP2E(op##w, ax, dx) \
390 FOP2E(op##l, eax, edx) \
391 ON64(FOP2E(op##q, rax, rdx)) \
392 FOP_END
393
394
395#define FASTOP2W(op) \
396 FOP_START(op) \
397 FOPNOP() \
398 FOP2E(op##w, ax, dx) \
399 FOP2E(op##l, eax, edx) \
400 ON64(FOP2E(op##q, rax, rdx)) \
401 FOP_END
402
403
404#define FASTOP2CL(op) \
405 FOP_START(op) \
406 FOP2E(op##b, al, cl) \
407 FOP2E(op##w, ax, cl) \
408 FOP2E(op##l, eax, cl) \
409 ON64(FOP2E(op##q, rax, cl)) \
410 FOP_END
411
412
413#define FASTOP2R(op, name) \
414 FOP_START(name) \
415 FOP2E(op##b, dl, al) \
416 FOP2E(op##w, dx, ax) \
417 FOP2E(op##l, edx, eax) \
418 ON64(FOP2E(op##q, rdx, rax)) \
419 FOP_END
420
421#define FOP3E(op, dst, src, src2) \
422 __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
423 #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
424 __FOP_RET(#op "_" #dst "_" #src "_" #src2)
425
426
427#define FASTOP3WCL(op) \
428 FOP_START(op) \
429 FOPNOP() \
430 FOP3E(op##w, ax, dx, cl) \
431 FOP3E(op##l, eax, edx, cl) \
432 ON64(FOP3E(op##q, rax, rdx, cl)) \
433 FOP_END
434
435
436#define FOP_SETCC(op) \
437 ".align 4 \n\t" \
438 ".type " #op ", @function \n\t" \
439 #op ": \n\t" \
440 #op " %al \n\t" \
441 __FOP_RET(#op)
442
443asm(".pushsection .fixup, \"ax\"\n"
444 ".global kvm_fastop_exception \n"
445 "kvm_fastop_exception: xor %esi, %esi; ret\n"
446 ".popsection");
447
448FOP_START(setcc)
449FOP_SETCC(seto)
450FOP_SETCC(setno)
451FOP_SETCC(setc)
452FOP_SETCC(setnc)
453FOP_SETCC(setz)
454FOP_SETCC(setnz)
455FOP_SETCC(setbe)
456FOP_SETCC(setnbe)
457FOP_SETCC(sets)
458FOP_SETCC(setns)
459FOP_SETCC(setp)
460FOP_SETCC(setnp)
461FOP_SETCC(setl)
462FOP_SETCC(setnl)
463FOP_SETCC(setle)
464FOP_SETCC(setnle)
465FOP_END;
466
467FOP_START(salc)
468FOP_FUNC(salc)
469"pushf; sbb %al, %al; popf \n\t"
470FOP_RET(salc)
471FOP_END;
472
473
474
475
476
477#define asm_safe(insn, inoutclob...) \
478({ \
479 int _fault = 0; \
480 \
481 asm volatile("1:" insn "\n" \
482 "2:\n" \
483 ".pushsection .fixup, \"ax\"\n" \
484 "3: movl $1, %[_fault]\n" \
485 " jmp 2b\n" \
486 ".popsection\n" \
487 _ASM_EXTABLE(1b, 3b) \
488 : [_fault] "+qm"(_fault) inoutclob ); \
489 \
490 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
491})
492
493static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
494 enum x86_intercept intercept,
495 enum x86_intercept_stage stage)
496{
497 struct x86_instruction_info info = {
498 .intercept = intercept,
499 .rep_prefix = ctxt->rep_prefix,
500 .modrm_mod = ctxt->modrm_mod,
501 .modrm_reg = ctxt->modrm_reg,
502 .modrm_rm = ctxt->modrm_rm,
503 .src_val = ctxt->src.val64,
504 .dst_val = ctxt->dst.val64,
505 .src_bytes = ctxt->src.bytes,
506 .dst_bytes = ctxt->dst.bytes,
507 .ad_bytes = ctxt->ad_bytes,
508 .next_rip = ctxt->eip,
509 };
510
511 return ctxt->ops->intercept(ctxt, &info, stage);
512}
513
514static void assign_masked(ulong *dest, ulong src, ulong mask)
515{
516 *dest = (*dest & ~mask) | (src & mask);
517}
518
519static void assign_register(unsigned long *reg, u64 val, int bytes)
520{
521
522 switch (bytes) {
523 case 1:
524 *(u8 *)reg = (u8)val;
525 break;
526 case 2:
527 *(u16 *)reg = (u16)val;
528 break;
529 case 4:
530 *reg = (u32)val;
531 break;
532 case 8:
533 *reg = val;
534 break;
535 }
536}
537
538static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
539{
540 return (1UL << (ctxt->ad_bytes << 3)) - 1;
541}
542
543static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
544{
545 u16 sel;
546 struct desc_struct ss;
547
548 if (ctxt->mode == X86EMUL_MODE_PROT64)
549 return ~0UL;
550 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
551 return ~0U >> ((ss.d ^ 1) * 16);
552}
553
554static int stack_size(struct x86_emulate_ctxt *ctxt)
555{
556 return (__fls(stack_mask(ctxt)) + 1) >> 3;
557}
558
559
560static inline unsigned long
561address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
562{
563 if (ctxt->ad_bytes == sizeof(unsigned long))
564 return reg;
565 else
566 return reg & ad_mask(ctxt);
567}
568
569static inline unsigned long
570register_address(struct x86_emulate_ctxt *ctxt, int reg)
571{
572 return address_mask(ctxt, reg_read(ctxt, reg));
573}
574
575static void masked_increment(ulong *reg, ulong mask, int inc)
576{
577 assign_masked(reg, *reg + inc, mask);
578}
579
580static inline void
581register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
582{
583 ulong *preg = reg_rmw(ctxt, reg);
584
585 assign_register(preg, *preg + inc, ctxt->ad_bytes);
586}
587
588static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
589{
590 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
591}
592
593static u32 desc_limit_scaled(struct desc_struct *desc)
594{
595 u32 limit = get_desc_limit(desc);
596
597 return desc->g ? (limit << 12) | 0xfff : limit;
598}
599
600static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
601{
602 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
603 return 0;
604
605 return ctxt->ops->get_cached_segment_base(ctxt, seg);
606}
607
608static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
609 u32 error, bool valid)
610{
611 WARN_ON(vec > 0x1f);
612 ctxt->exception.vector = vec;
613 ctxt->exception.error_code = error;
614 ctxt->exception.error_code_valid = valid;
615 return X86EMUL_PROPAGATE_FAULT;
616}
617
618static int emulate_db(struct x86_emulate_ctxt *ctxt)
619{
620 return emulate_exception(ctxt, DB_VECTOR, 0, false);
621}
622
623static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
624{
625 return emulate_exception(ctxt, GP_VECTOR, err, true);
626}
627
628static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
629{
630 return emulate_exception(ctxt, SS_VECTOR, err, true);
631}
632
633static int emulate_ud(struct x86_emulate_ctxt *ctxt)
634{
635 return emulate_exception(ctxt, UD_VECTOR, 0, false);
636}
637
638static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
639{
640 return emulate_exception(ctxt, TS_VECTOR, err, true);
641}
642
643static int emulate_de(struct x86_emulate_ctxt *ctxt)
644{
645 return emulate_exception(ctxt, DE_VECTOR, 0, false);
646}
647
648static int emulate_nm(struct x86_emulate_ctxt *ctxt)
649{
650 return emulate_exception(ctxt, NM_VECTOR, 0, false);
651}
652
653static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
654{
655 u16 selector;
656 struct desc_struct desc;
657
658 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
659 return selector;
660}
661
662static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
663 unsigned seg)
664{
665 u16 dummy;
666 u32 base3;
667 struct desc_struct desc;
668
669 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
670 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
671}
672
673
674
675
676
677
678
679
680
681
682static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
683{
684 u64 alignment = ctxt->d & AlignMask;
685
686 if (likely(size < 16))
687 return 1;
688
689 switch (alignment) {
690 case Unaligned:
691 case Avx:
692 return 1;
693 case Aligned16:
694 return 16;
695 case Aligned:
696 default:
697 return size;
698 }
699}
700
701static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
702 struct segmented_address addr,
703 unsigned *max_size, unsigned size,
704 bool write, bool fetch,
705 enum x86emul_mode mode, ulong *linear)
706{
707 struct desc_struct desc;
708 bool usable;
709 ulong la;
710 u32 lim;
711 u16 sel;
712 u8 va_bits;
713
714 la = seg_base(ctxt, addr.seg) + addr.ea;
715 *max_size = 0;
716 switch (mode) {
717 case X86EMUL_MODE_PROT64:
718 *linear = la;
719 va_bits = ctxt_virt_addr_bits(ctxt);
720 if (get_canonical(la, va_bits) != la)
721 goto bad;
722
723 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
724 if (size > *max_size)
725 goto bad;
726 break;
727 default:
728 *linear = la = (u32)la;
729 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
730 addr.seg);
731 if (!usable)
732 goto bad;
733
734 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
735 || !(desc.type & 2)) && write)
736 goto bad;
737
738 if (!fetch && (desc.type & 8) && !(desc.type & 2))
739 goto bad;
740 lim = desc_limit_scaled(&desc);
741 if (!(desc.type & 8) && (desc.type & 4)) {
742
743 if (addr.ea <= lim)
744 goto bad;
745 lim = desc.d ? 0xffffffff : 0xffff;
746 }
747 if (addr.ea > lim)
748 goto bad;
749 if (lim == 0xffffffff)
750 *max_size = ~0u;
751 else {
752 *max_size = (u64)lim + 1 - addr.ea;
753 if (size > *max_size)
754 goto bad;
755 }
756 break;
757 }
758 if (la & (insn_alignment(ctxt, size) - 1))
759 return emulate_gp(ctxt, 0);
760 return X86EMUL_CONTINUE;
761bad:
762 if (addr.seg == VCPU_SREG_SS)
763 return emulate_ss(ctxt, 0);
764 else
765 return emulate_gp(ctxt, 0);
766}
767
768static int linearize(struct x86_emulate_ctxt *ctxt,
769 struct segmented_address addr,
770 unsigned size, bool write,
771 ulong *linear)
772{
773 unsigned max_size;
774 return __linearize(ctxt, addr, &max_size, size, write, false,
775 ctxt->mode, linear);
776}
777
778static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
779 enum x86emul_mode mode)
780{
781 ulong linear;
782 int rc;
783 unsigned max_size;
784 struct segmented_address addr = { .seg = VCPU_SREG_CS,
785 .ea = dst };
786
787 if (ctxt->op_bytes != sizeof(unsigned long))
788 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
789 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
790 if (rc == X86EMUL_CONTINUE)
791 ctxt->_eip = addr.ea;
792 return rc;
793}
794
795static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
796{
797 return assign_eip(ctxt, dst, ctxt->mode);
798}
799
800static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
801 const struct desc_struct *cs_desc)
802{
803 enum x86emul_mode mode = ctxt->mode;
804 int rc;
805
806#ifdef CONFIG_X86_64
807 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
808 if (cs_desc->l) {
809 u64 efer = 0;
810
811 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
812 if (efer & EFER_LMA)
813 mode = X86EMUL_MODE_PROT64;
814 } else
815 mode = X86EMUL_MODE_PROT32;
816 }
817#endif
818 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
819 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
820 rc = assign_eip(ctxt, dst, mode);
821 if (rc == X86EMUL_CONTINUE)
822 ctxt->mode = mode;
823 return rc;
824}
825
826static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
827{
828 return assign_eip_near(ctxt, ctxt->_eip + rel);
829}
830
831static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
832 void *data, unsigned size)
833{
834 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
835}
836
837static int linear_write_system(struct x86_emulate_ctxt *ctxt,
838 ulong linear, void *data,
839 unsigned int size)
840{
841 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
842}
843
844static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
845 struct segmented_address addr,
846 void *data,
847 unsigned size)
848{
849 int rc;
850 ulong linear;
851
852 rc = linearize(ctxt, addr, size, false, &linear);
853 if (rc != X86EMUL_CONTINUE)
854 return rc;
855 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
856}
857
858static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
859 struct segmented_address addr,
860 void *data,
861 unsigned int size)
862{
863 int rc;
864 ulong linear;
865
866 rc = linearize(ctxt, addr, size, true, &linear);
867 if (rc != X86EMUL_CONTINUE)
868 return rc;
869 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
870}
871
872
873
874
875
876static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
877{
878 int rc;
879 unsigned size, max_size;
880 unsigned long linear;
881 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
882 struct segmented_address addr = { .seg = VCPU_SREG_CS,
883 .ea = ctxt->eip + cur_size };
884
885
886
887
888
889
890
891
892
893
894
895 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
896 &linear);
897 if (unlikely(rc != X86EMUL_CONTINUE))
898 return rc;
899
900 size = min_t(unsigned, 15UL ^ cur_size, max_size);
901 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
902
903
904
905
906
907
908
909 if (unlikely(size < op_size))
910 return emulate_gp(ctxt, 0);
911
912 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
913 size, &ctxt->exception);
914 if (unlikely(rc != X86EMUL_CONTINUE))
915 return rc;
916 ctxt->fetch.end += size;
917 return X86EMUL_CONTINUE;
918}
919
920static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
921 unsigned size)
922{
923 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
924
925 if (unlikely(done_size < size))
926 return __do_insn_fetch_bytes(ctxt, size - done_size);
927 else
928 return X86EMUL_CONTINUE;
929}
930
931
932#define insn_fetch(_type, _ctxt) \
933({ _type _x; \
934 \
935 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
936 if (rc != X86EMUL_CONTINUE) \
937 goto done; \
938 ctxt->_eip += sizeof(_type); \
939 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
940 ctxt->fetch.ptr += sizeof(_type); \
941 _x; \
942})
943
944#define insn_fetch_arr(_arr, _size, _ctxt) \
945({ \
946 rc = do_insn_fetch_bytes(_ctxt, _size); \
947 if (rc != X86EMUL_CONTINUE) \
948 goto done; \
949 ctxt->_eip += (_size); \
950 memcpy(_arr, ctxt->fetch.ptr, _size); \
951 ctxt->fetch.ptr += (_size); \
952})
953
954
955
956
957
958
959static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
960 int byteop)
961{
962 void *p;
963 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
964
965 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
966 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
967 else
968 p = reg_rmw(ctxt, modrm_reg);
969 return p;
970}
971
972static int read_descriptor(struct x86_emulate_ctxt *ctxt,
973 struct segmented_address addr,
974 u16 *size, unsigned long *address, int op_bytes)
975{
976 int rc;
977
978 if (op_bytes == 2)
979 op_bytes = 3;
980 *address = 0;
981 rc = segmented_read_std(ctxt, addr, size, 2);
982 if (rc != X86EMUL_CONTINUE)
983 return rc;
984 addr.ea += 2;
985 rc = segmented_read_std(ctxt, addr, address, op_bytes);
986 return rc;
987}
988
989FASTOP2(add);
990FASTOP2(or);
991FASTOP2(adc);
992FASTOP2(sbb);
993FASTOP2(and);
994FASTOP2(sub);
995FASTOP2(xor);
996FASTOP2(cmp);
997FASTOP2(test);
998
999FASTOP1SRC2(mul, mul_ex);
1000FASTOP1SRC2(imul, imul_ex);
1001FASTOP1SRC2EX(div, div_ex);
1002FASTOP1SRC2EX(idiv, idiv_ex);
1003
1004FASTOP3WCL(shld);
1005FASTOP3WCL(shrd);
1006
1007FASTOP2W(imul);
1008
1009FASTOP1(not);
1010FASTOP1(neg);
1011FASTOP1(inc);
1012FASTOP1(dec);
1013
1014FASTOP2CL(rol);
1015FASTOP2CL(ror);
1016FASTOP2CL(rcl);
1017FASTOP2CL(rcr);
1018FASTOP2CL(shl);
1019FASTOP2CL(shr);
1020FASTOP2CL(sar);
1021
1022FASTOP2W(bsf);
1023FASTOP2W(bsr);
1024FASTOP2W(bt);
1025FASTOP2W(bts);
1026FASTOP2W(btr);
1027FASTOP2W(btc);
1028
1029FASTOP2(xadd);
1030
1031FASTOP2R(cmp, cmp_r);
1032
1033static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1034{
1035
1036 if (ctxt->src.val == 0)
1037 ctxt->dst.type = OP_NONE;
1038 return fastop(ctxt, em_bsf);
1039}
1040
1041static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1042{
1043
1044 if (ctxt->src.val == 0)
1045 ctxt->dst.type = OP_NONE;
1046 return fastop(ctxt, em_bsr);
1047}
1048
1049static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1050{
1051 u8 rc;
1052 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1053
1054 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1055 asm("push %[flags]; popf; " CALL_NOSPEC
1056 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1057 return rc;
1058}
1059
1060static void fetch_register_operand(struct operand *op)
1061{
1062 switch (op->bytes) {
1063 case 1:
1064 op->val = *(u8 *)op->addr.reg;
1065 break;
1066 case 2:
1067 op->val = *(u16 *)op->addr.reg;
1068 break;
1069 case 4:
1070 op->val = *(u32 *)op->addr.reg;
1071 break;
1072 case 8:
1073 op->val = *(u64 *)op->addr.reg;
1074 break;
1075 }
1076}
1077
1078static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1079{
1080 switch (reg) {
1081 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1082 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1083 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1084 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1085 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1086 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1087 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1088 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1089#ifdef CONFIG_X86_64
1090 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1091 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1092 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1093 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1094 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1095 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1096 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1097 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1098#endif
1099 default: BUG();
1100 }
1101}
1102
1103static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1104 int reg)
1105{
1106 switch (reg) {
1107 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1108 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1109 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1110 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1111 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1112 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1113 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1114 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1115#ifdef CONFIG_X86_64
1116 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1117 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1118 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1119 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1120 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1121 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1122 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1123 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1124#endif
1125 default: BUG();
1126 }
1127}
1128
1129static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1130{
1131 switch (reg) {
1132 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1133 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1134 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1135 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1136 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1137 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1138 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1139 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1140 default: BUG();
1141 }
1142}
1143
1144static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1145{
1146 switch (reg) {
1147 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1148 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1149 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1150 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1151 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1152 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1153 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1154 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1155 default: BUG();
1156 }
1157}
1158
1159static int em_fninit(struct x86_emulate_ctxt *ctxt)
1160{
1161 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1162 return emulate_nm(ctxt);
1163
1164 asm volatile("fninit");
1165 return X86EMUL_CONTINUE;
1166}
1167
1168static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1169{
1170 u16 fcw;
1171
1172 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1173 return emulate_nm(ctxt);
1174
1175 asm volatile("fnstcw %0": "+m"(fcw));
1176
1177 ctxt->dst.val = fcw;
1178
1179 return X86EMUL_CONTINUE;
1180}
1181
1182static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1183{
1184 u16 fsw;
1185
1186 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1187 return emulate_nm(ctxt);
1188
1189 asm volatile("fnstsw %0": "+m"(fsw));
1190
1191 ctxt->dst.val = fsw;
1192
1193 return X86EMUL_CONTINUE;
1194}
1195
1196static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1197 struct operand *op)
1198{
1199 unsigned reg = ctxt->modrm_reg;
1200
1201 if (!(ctxt->d & ModRM))
1202 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1203
1204 if (ctxt->d & Sse) {
1205 op->type = OP_XMM;
1206 op->bytes = 16;
1207 op->addr.xmm = reg;
1208 read_sse_reg(ctxt, &op->vec_val, reg);
1209 return;
1210 }
1211 if (ctxt->d & Mmx) {
1212 reg &= 7;
1213 op->type = OP_MM;
1214 op->bytes = 8;
1215 op->addr.mm = reg;
1216 return;
1217 }
1218
1219 op->type = OP_REG;
1220 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1221 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1222
1223 fetch_register_operand(op);
1224 op->orig_val = op->val;
1225}
1226
1227static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1228{
1229 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1230 ctxt->modrm_seg = VCPU_SREG_SS;
1231}
1232
1233static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1234 struct operand *op)
1235{
1236 u8 sib;
1237 int index_reg, base_reg, scale;
1238 int rc = X86EMUL_CONTINUE;
1239 ulong modrm_ea = 0;
1240
1241 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1242 index_reg = (ctxt->rex_prefix << 2) & 8;
1243 base_reg = (ctxt->rex_prefix << 3) & 8;
1244
1245 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1246 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1247 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1248 ctxt->modrm_seg = VCPU_SREG_DS;
1249
1250 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1251 op->type = OP_REG;
1252 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1253 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1254 ctxt->d & ByteOp);
1255 if (ctxt->d & Sse) {
1256 op->type = OP_XMM;
1257 op->bytes = 16;
1258 op->addr.xmm = ctxt->modrm_rm;
1259 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1260 return rc;
1261 }
1262 if (ctxt->d & Mmx) {
1263 op->type = OP_MM;
1264 op->bytes = 8;
1265 op->addr.mm = ctxt->modrm_rm & 7;
1266 return rc;
1267 }
1268 fetch_register_operand(op);
1269 return rc;
1270 }
1271
1272 op->type = OP_MEM;
1273
1274 if (ctxt->ad_bytes == 2) {
1275 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1276 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1277 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1278 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1279
1280
1281 switch (ctxt->modrm_mod) {
1282 case 0:
1283 if (ctxt->modrm_rm == 6)
1284 modrm_ea += insn_fetch(u16, ctxt);
1285 break;
1286 case 1:
1287 modrm_ea += insn_fetch(s8, ctxt);
1288 break;
1289 case 2:
1290 modrm_ea += insn_fetch(u16, ctxt);
1291 break;
1292 }
1293 switch (ctxt->modrm_rm) {
1294 case 0:
1295 modrm_ea += bx + si;
1296 break;
1297 case 1:
1298 modrm_ea += bx + di;
1299 break;
1300 case 2:
1301 modrm_ea += bp + si;
1302 break;
1303 case 3:
1304 modrm_ea += bp + di;
1305 break;
1306 case 4:
1307 modrm_ea += si;
1308 break;
1309 case 5:
1310 modrm_ea += di;
1311 break;
1312 case 6:
1313 if (ctxt->modrm_mod != 0)
1314 modrm_ea += bp;
1315 break;
1316 case 7:
1317 modrm_ea += bx;
1318 break;
1319 }
1320 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1321 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1322 ctxt->modrm_seg = VCPU_SREG_SS;
1323 modrm_ea = (u16)modrm_ea;
1324 } else {
1325
1326 if ((ctxt->modrm_rm & 7) == 4) {
1327 sib = insn_fetch(u8, ctxt);
1328 index_reg |= (sib >> 3) & 7;
1329 base_reg |= sib & 7;
1330 scale = sib >> 6;
1331
1332 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1333 modrm_ea += insn_fetch(s32, ctxt);
1334 else {
1335 modrm_ea += reg_read(ctxt, base_reg);
1336 adjust_modrm_seg(ctxt, base_reg);
1337
1338 if ((ctxt->d & IncSP) &&
1339 base_reg == VCPU_REGS_RSP)
1340 modrm_ea += ctxt->op_bytes;
1341 }
1342 if (index_reg != 4)
1343 modrm_ea += reg_read(ctxt, index_reg) << scale;
1344 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1345 modrm_ea += insn_fetch(s32, ctxt);
1346 if (ctxt->mode == X86EMUL_MODE_PROT64)
1347 ctxt->rip_relative = 1;
1348 } else {
1349 base_reg = ctxt->modrm_rm;
1350 modrm_ea += reg_read(ctxt, base_reg);
1351 adjust_modrm_seg(ctxt, base_reg);
1352 }
1353 switch (ctxt->modrm_mod) {
1354 case 1:
1355 modrm_ea += insn_fetch(s8, ctxt);
1356 break;
1357 case 2:
1358 modrm_ea += insn_fetch(s32, ctxt);
1359 break;
1360 }
1361 }
1362 op->addr.mem.ea = modrm_ea;
1363 if (ctxt->ad_bytes != 8)
1364 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1365
1366done:
1367 return rc;
1368}
1369
1370static int decode_abs(struct x86_emulate_ctxt *ctxt,
1371 struct operand *op)
1372{
1373 int rc = X86EMUL_CONTINUE;
1374
1375 op->type = OP_MEM;
1376 switch (ctxt->ad_bytes) {
1377 case 2:
1378 op->addr.mem.ea = insn_fetch(u16, ctxt);
1379 break;
1380 case 4:
1381 op->addr.mem.ea = insn_fetch(u32, ctxt);
1382 break;
1383 case 8:
1384 op->addr.mem.ea = insn_fetch(u64, ctxt);
1385 break;
1386 }
1387done:
1388 return rc;
1389}
1390
1391static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1392{
1393 long sv = 0, mask;
1394
1395 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1396 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1397
1398 if (ctxt->src.bytes == 2)
1399 sv = (s16)ctxt->src.val & (s16)mask;
1400 else if (ctxt->src.bytes == 4)
1401 sv = (s32)ctxt->src.val & (s32)mask;
1402 else
1403 sv = (s64)ctxt->src.val & (s64)mask;
1404
1405 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1406 ctxt->dst.addr.mem.ea + (sv >> 3));
1407 }
1408
1409
1410 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1411}
1412
1413static int read_emulated(struct x86_emulate_ctxt *ctxt,
1414 unsigned long addr, void *dest, unsigned size)
1415{
1416 int rc;
1417 struct read_cache *mc = &ctxt->mem_read;
1418
1419 if (mc->pos < mc->end)
1420 goto read_cached;
1421
1422 WARN_ON((mc->end + size) >= sizeof(mc->data));
1423
1424 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1425 &ctxt->exception);
1426 if (rc != X86EMUL_CONTINUE)
1427 return rc;
1428
1429 mc->end += size;
1430
1431read_cached:
1432 memcpy(dest, mc->data + mc->pos, size);
1433 mc->pos += size;
1434 return X86EMUL_CONTINUE;
1435}
1436
1437static int segmented_read(struct x86_emulate_ctxt *ctxt,
1438 struct segmented_address addr,
1439 void *data,
1440 unsigned size)
1441{
1442 int rc;
1443 ulong linear;
1444
1445 rc = linearize(ctxt, addr, size, false, &linear);
1446 if (rc != X86EMUL_CONTINUE)
1447 return rc;
1448 return read_emulated(ctxt, linear, data, size);
1449}
1450
1451static int segmented_write(struct x86_emulate_ctxt *ctxt,
1452 struct segmented_address addr,
1453 const void *data,
1454 unsigned size)
1455{
1456 int rc;
1457 ulong linear;
1458
1459 rc = linearize(ctxt, addr, size, true, &linear);
1460 if (rc != X86EMUL_CONTINUE)
1461 return rc;
1462 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1463 &ctxt->exception);
1464}
1465
1466static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1467 struct segmented_address addr,
1468 const void *orig_data, const void *data,
1469 unsigned size)
1470{
1471 int rc;
1472 ulong linear;
1473
1474 rc = linearize(ctxt, addr, size, true, &linear);
1475 if (rc != X86EMUL_CONTINUE)
1476 return rc;
1477 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1478 size, &ctxt->exception);
1479}
1480
1481static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1482 unsigned int size, unsigned short port,
1483 void *dest)
1484{
1485 struct read_cache *rc = &ctxt->io_read;
1486
1487 if (rc->pos == rc->end) {
1488 unsigned int in_page, n;
1489 unsigned int count = ctxt->rep_prefix ?
1490 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1491 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1492 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1493 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1494 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1495 if (n == 0)
1496 n = 1;
1497 rc->pos = rc->end = 0;
1498 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1499 return 0;
1500 rc->end = n * size;
1501 }
1502
1503 if (ctxt->rep_prefix && (ctxt->d & String) &&
1504 !(ctxt->eflags & X86_EFLAGS_DF)) {
1505 ctxt->dst.data = rc->data + rc->pos;
1506 ctxt->dst.type = OP_MEM_STR;
1507 ctxt->dst.count = (rc->end - rc->pos) / size;
1508 rc->pos = rc->end;
1509 } else {
1510 memcpy(dest, rc->data + rc->pos, size);
1511 rc->pos += size;
1512 }
1513 return 1;
1514}
1515
1516static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1517 u16 index, struct desc_struct *desc)
1518{
1519 struct desc_ptr dt;
1520 ulong addr;
1521
1522 ctxt->ops->get_idt(ctxt, &dt);
1523
1524 if (dt.size < index * 8 + 7)
1525 return emulate_gp(ctxt, index << 3 | 0x2);
1526
1527 addr = dt.address + index * 8;
1528 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1529}
1530
1531static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1532 u16 selector, struct desc_ptr *dt)
1533{
1534 const struct x86_emulate_ops *ops = ctxt->ops;
1535 u32 base3 = 0;
1536
1537 if (selector & 1 << 2) {
1538 struct desc_struct desc;
1539 u16 sel;
1540
1541 memset(dt, 0, sizeof(*dt));
1542 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1543 VCPU_SREG_LDTR))
1544 return;
1545
1546 dt->size = desc_limit_scaled(&desc);
1547 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1548 } else
1549 ops->get_gdt(ctxt, dt);
1550}
1551
1552static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1553 u16 selector, ulong *desc_addr_p)
1554{
1555 struct desc_ptr dt;
1556 u16 index = selector >> 3;
1557 ulong addr;
1558
1559 get_descriptor_table_ptr(ctxt, selector, &dt);
1560
1561 if (dt.size < index * 8 + 7)
1562 return emulate_gp(ctxt, selector & 0xfffc);
1563
1564 addr = dt.address + index * 8;
1565
1566#ifdef CONFIG_X86_64
1567 if (addr >> 32 != 0) {
1568 u64 efer = 0;
1569
1570 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1571 if (!(efer & EFER_LMA))
1572 addr &= (u32)-1;
1573 }
1574#endif
1575
1576 *desc_addr_p = addr;
1577 return X86EMUL_CONTINUE;
1578}
1579
1580
1581static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1582 u16 selector, struct desc_struct *desc,
1583 ulong *desc_addr_p)
1584{
1585 int rc;
1586
1587 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1588 if (rc != X86EMUL_CONTINUE)
1589 return rc;
1590
1591 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1592}
1593
1594
1595static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1596 u16 selector, struct desc_struct *desc)
1597{
1598 int rc;
1599 ulong addr;
1600
1601 rc = get_descriptor_ptr(ctxt, selector, &addr);
1602 if (rc != X86EMUL_CONTINUE)
1603 return rc;
1604
1605 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1606}
1607
1608static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1609 u16 selector, int seg, u8 cpl,
1610 enum x86_transfer_type transfer,
1611 struct desc_struct *desc)
1612{
1613 struct desc_struct seg_desc, old_desc;
1614 u8 dpl, rpl;
1615 unsigned err_vec = GP_VECTOR;
1616 u32 err_code = 0;
1617 bool null_selector = !(selector & ~0x3);
1618 ulong desc_addr;
1619 int ret;
1620 u16 dummy;
1621 u32 base3 = 0;
1622
1623 memset(&seg_desc, 0, sizeof(seg_desc));
1624
1625 if (ctxt->mode == X86EMUL_MODE_REAL) {
1626
1627
1628 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1629 set_desc_base(&seg_desc, selector << 4);
1630 goto load;
1631 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1632
1633 set_desc_base(&seg_desc, selector << 4);
1634 set_desc_limit(&seg_desc, 0xffff);
1635 seg_desc.type = 3;
1636 seg_desc.p = 1;
1637 seg_desc.s = 1;
1638 seg_desc.dpl = 3;
1639 goto load;
1640 }
1641
1642 rpl = selector & 3;
1643
1644
1645 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1646 goto exception;
1647
1648
1649 if (null_selector) {
1650 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1651 goto exception;
1652
1653 if (seg == VCPU_SREG_SS) {
1654 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1655 goto exception;
1656
1657
1658
1659
1660
1661 seg_desc.type = 3;
1662 seg_desc.p = 1;
1663 seg_desc.s = 1;
1664 seg_desc.dpl = cpl;
1665 seg_desc.d = 1;
1666 seg_desc.g = 1;
1667 }
1668
1669
1670 goto load;
1671 }
1672
1673 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1674 if (ret != X86EMUL_CONTINUE)
1675 return ret;
1676
1677 err_code = selector & 0xfffc;
1678 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1679 GP_VECTOR;
1680
1681
1682 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1683 if (transfer == X86_TRANSFER_CALL_JMP)
1684 return X86EMUL_UNHANDLEABLE;
1685 goto exception;
1686 }
1687
1688 if (!seg_desc.p) {
1689 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1690 goto exception;
1691 }
1692
1693 dpl = seg_desc.dpl;
1694
1695 switch (seg) {
1696 case VCPU_SREG_SS:
1697
1698
1699
1700
1701 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1702 goto exception;
1703 break;
1704 case VCPU_SREG_CS:
1705 if (!(seg_desc.type & 8))
1706 goto exception;
1707
1708 if (seg_desc.type & 4) {
1709
1710 if (dpl > cpl)
1711 goto exception;
1712 } else {
1713
1714 if (rpl > cpl || dpl != cpl)
1715 goto exception;
1716 }
1717
1718 if (seg_desc.d && seg_desc.l) {
1719 u64 efer = 0;
1720
1721 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1722 if (efer & EFER_LMA)
1723 goto exception;
1724 }
1725
1726
1727 selector = (selector & 0xfffc) | cpl;
1728 break;
1729 case VCPU_SREG_TR:
1730 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1731 goto exception;
1732 old_desc = seg_desc;
1733 seg_desc.type |= 2;
1734 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1735 sizeof(seg_desc), &ctxt->exception);
1736 if (ret != X86EMUL_CONTINUE)
1737 return ret;
1738 break;
1739 case VCPU_SREG_LDTR:
1740 if (seg_desc.s || seg_desc.type != 2)
1741 goto exception;
1742 break;
1743 default:
1744
1745
1746
1747
1748
1749 if ((seg_desc.type & 0xa) == 0x8 ||
1750 (((seg_desc.type & 0xc) != 0xc) &&
1751 (rpl > dpl && cpl > dpl)))
1752 goto exception;
1753 break;
1754 }
1755
1756 if (seg_desc.s) {
1757
1758 if (!(seg_desc.type & 1)) {
1759 seg_desc.type |= 1;
1760 ret = write_segment_descriptor(ctxt, selector,
1761 &seg_desc);
1762 if (ret != X86EMUL_CONTINUE)
1763 return ret;
1764 }
1765 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1766 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1767 if (ret != X86EMUL_CONTINUE)
1768 return ret;
1769 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1770 ((u64)base3 << 32), ctxt))
1771 return emulate_gp(ctxt, 0);
1772 }
1773load:
1774 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1775 if (desc)
1776 *desc = seg_desc;
1777 return X86EMUL_CONTINUE;
1778exception:
1779 return emulate_exception(ctxt, err_vec, err_code, true);
1780}
1781
1782static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1783 u16 selector, int seg)
1784{
1785 u8 cpl = ctxt->ops->cpl(ctxt);
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797 if (seg == VCPU_SREG_SS && selector == 3 &&
1798 ctxt->mode == X86EMUL_MODE_PROT64)
1799 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1800
1801 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1802 X86_TRANSFER_NONE, NULL);
1803}
1804
1805static void write_register_operand(struct operand *op)
1806{
1807 return assign_register(op->addr.reg, op->val, op->bytes);
1808}
1809
1810static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1811{
1812 switch (op->type) {
1813 case OP_REG:
1814 write_register_operand(op);
1815 break;
1816 case OP_MEM:
1817 if (ctxt->lock_prefix)
1818 return segmented_cmpxchg(ctxt,
1819 op->addr.mem,
1820 &op->orig_val,
1821 &op->val,
1822 op->bytes);
1823 else
1824 return segmented_write(ctxt,
1825 op->addr.mem,
1826 &op->val,
1827 op->bytes);
1828 break;
1829 case OP_MEM_STR:
1830 return segmented_write(ctxt,
1831 op->addr.mem,
1832 op->data,
1833 op->bytes * op->count);
1834 break;
1835 case OP_XMM:
1836 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1837 break;
1838 case OP_MM:
1839 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1840 break;
1841 case OP_NONE:
1842
1843 break;
1844 default:
1845 break;
1846 }
1847 return X86EMUL_CONTINUE;
1848}
1849
1850static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1851{
1852 struct segmented_address addr;
1853
1854 rsp_increment(ctxt, -bytes);
1855 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1856 addr.seg = VCPU_SREG_SS;
1857
1858 return segmented_write(ctxt, addr, data, bytes);
1859}
1860
1861static int em_push(struct x86_emulate_ctxt *ctxt)
1862{
1863
1864 ctxt->dst.type = OP_NONE;
1865 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1866}
1867
1868static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1869 void *dest, int len)
1870{
1871 int rc;
1872 struct segmented_address addr;
1873
1874 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1875 addr.seg = VCPU_SREG_SS;
1876 rc = segmented_read(ctxt, addr, dest, len);
1877 if (rc != X86EMUL_CONTINUE)
1878 return rc;
1879
1880 rsp_increment(ctxt, len);
1881 return rc;
1882}
1883
1884static int em_pop(struct x86_emulate_ctxt *ctxt)
1885{
1886 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1887}
1888
1889static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1890 void *dest, int len)
1891{
1892 int rc;
1893 unsigned long val, change_mask;
1894 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1895 int cpl = ctxt->ops->cpl(ctxt);
1896
1897 rc = emulate_pop(ctxt, &val, len);
1898 if (rc != X86EMUL_CONTINUE)
1899 return rc;
1900
1901 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1902 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1903 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1904 X86_EFLAGS_AC | X86_EFLAGS_ID;
1905
1906 switch(ctxt->mode) {
1907 case X86EMUL_MODE_PROT64:
1908 case X86EMUL_MODE_PROT32:
1909 case X86EMUL_MODE_PROT16:
1910 if (cpl == 0)
1911 change_mask |= X86_EFLAGS_IOPL;
1912 if (cpl <= iopl)
1913 change_mask |= X86_EFLAGS_IF;
1914 break;
1915 case X86EMUL_MODE_VM86:
1916 if (iopl < 3)
1917 return emulate_gp(ctxt, 0);
1918 change_mask |= X86_EFLAGS_IF;
1919 break;
1920 default:
1921 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1922 break;
1923 }
1924
1925 *(unsigned long *)dest =
1926 (ctxt->eflags & ~change_mask) | (val & change_mask);
1927
1928 return rc;
1929}
1930
1931static int em_popf(struct x86_emulate_ctxt *ctxt)
1932{
1933 ctxt->dst.type = OP_REG;
1934 ctxt->dst.addr.reg = &ctxt->eflags;
1935 ctxt->dst.bytes = ctxt->op_bytes;
1936 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1937}
1938
1939static int em_enter(struct x86_emulate_ctxt *ctxt)
1940{
1941 int rc;
1942 unsigned frame_size = ctxt->src.val;
1943 unsigned nesting_level = ctxt->src2.val & 31;
1944 ulong rbp;
1945
1946 if (nesting_level)
1947 return X86EMUL_UNHANDLEABLE;
1948
1949 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1950 rc = push(ctxt, &rbp, stack_size(ctxt));
1951 if (rc != X86EMUL_CONTINUE)
1952 return rc;
1953 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1954 stack_mask(ctxt));
1955 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1956 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1957 stack_mask(ctxt));
1958 return X86EMUL_CONTINUE;
1959}
1960
1961static int em_leave(struct x86_emulate_ctxt *ctxt)
1962{
1963 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1964 stack_mask(ctxt));
1965 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1966}
1967
1968static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1969{
1970 int seg = ctxt->src2.val;
1971
1972 ctxt->src.val = get_segment_selector(ctxt, seg);
1973 if (ctxt->op_bytes == 4) {
1974 rsp_increment(ctxt, -2);
1975 ctxt->op_bytes = 2;
1976 }
1977
1978 return em_push(ctxt);
1979}
1980
1981static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1982{
1983 int seg = ctxt->src2.val;
1984 unsigned long selector;
1985 int rc;
1986
1987 rc = emulate_pop(ctxt, &selector, 2);
1988 if (rc != X86EMUL_CONTINUE)
1989 return rc;
1990
1991 if (ctxt->modrm_reg == VCPU_SREG_SS)
1992 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1993 if (ctxt->op_bytes > 2)
1994 rsp_increment(ctxt, ctxt->op_bytes - 2);
1995
1996 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1997 return rc;
1998}
1999
2000static int em_pusha(struct x86_emulate_ctxt *ctxt)
2001{
2002 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
2003 int rc = X86EMUL_CONTINUE;
2004 int reg = VCPU_REGS_RAX;
2005
2006 while (reg <= VCPU_REGS_RDI) {
2007 (reg == VCPU_REGS_RSP) ?
2008 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
2009
2010 rc = em_push(ctxt);
2011 if (rc != X86EMUL_CONTINUE)
2012 return rc;
2013
2014 ++reg;
2015 }
2016
2017 return rc;
2018}
2019
2020static int em_pushf(struct x86_emulate_ctxt *ctxt)
2021{
2022 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2023 return em_push(ctxt);
2024}
2025
2026static int em_popa(struct x86_emulate_ctxt *ctxt)
2027{
2028 int rc = X86EMUL_CONTINUE;
2029 int reg = VCPU_REGS_RDI;
2030 u32 val;
2031
2032 while (reg >= VCPU_REGS_RAX) {
2033 if (reg == VCPU_REGS_RSP) {
2034 rsp_increment(ctxt, ctxt->op_bytes);
2035 --reg;
2036 }
2037
2038 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2039 if (rc != X86EMUL_CONTINUE)
2040 break;
2041 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2042 --reg;
2043 }
2044 return rc;
2045}
2046
2047static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2048{
2049 const struct x86_emulate_ops *ops = ctxt->ops;
2050 int rc;
2051 struct desc_ptr dt;
2052 gva_t cs_addr;
2053 gva_t eip_addr;
2054 u16 cs, eip;
2055
2056
2057 ctxt->src.val = ctxt->eflags;
2058 rc = em_push(ctxt);
2059 if (rc != X86EMUL_CONTINUE)
2060 return rc;
2061
2062 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2063
2064 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2065 rc = em_push(ctxt);
2066 if (rc != X86EMUL_CONTINUE)
2067 return rc;
2068
2069 ctxt->src.val = ctxt->_eip;
2070 rc = em_push(ctxt);
2071 if (rc != X86EMUL_CONTINUE)
2072 return rc;
2073
2074 ops->get_idt(ctxt, &dt);
2075
2076 eip_addr = dt.address + (irq << 2);
2077 cs_addr = dt.address + (irq << 2) + 2;
2078
2079 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2080 if (rc != X86EMUL_CONTINUE)
2081 return rc;
2082
2083 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2084 if (rc != X86EMUL_CONTINUE)
2085 return rc;
2086
2087 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2088 if (rc != X86EMUL_CONTINUE)
2089 return rc;
2090
2091 ctxt->_eip = eip;
2092
2093 return rc;
2094}
2095
2096int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2097{
2098 int rc;
2099
2100 invalidate_registers(ctxt);
2101 rc = __emulate_int_real(ctxt, irq);
2102 if (rc == X86EMUL_CONTINUE)
2103 writeback_registers(ctxt);
2104 return rc;
2105}
2106
2107static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2108{
2109 switch(ctxt->mode) {
2110 case X86EMUL_MODE_REAL:
2111 return __emulate_int_real(ctxt, irq);
2112 case X86EMUL_MODE_VM86:
2113 case X86EMUL_MODE_PROT16:
2114 case X86EMUL_MODE_PROT32:
2115 case X86EMUL_MODE_PROT64:
2116 default:
2117
2118 return X86EMUL_UNHANDLEABLE;
2119 }
2120}
2121
2122static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2123{
2124 int rc = X86EMUL_CONTINUE;
2125 unsigned long temp_eip = 0;
2126 unsigned long temp_eflags = 0;
2127 unsigned long cs = 0;
2128 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2129 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2130 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2131 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2132 X86_EFLAGS_AC | X86_EFLAGS_ID |
2133 X86_EFLAGS_FIXED;
2134 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2135 X86_EFLAGS_VIP;
2136
2137
2138
2139 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2140
2141 if (rc != X86EMUL_CONTINUE)
2142 return rc;
2143
2144 if (temp_eip & ~0xffff)
2145 return emulate_gp(ctxt, 0);
2146
2147 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2148
2149 if (rc != X86EMUL_CONTINUE)
2150 return rc;
2151
2152 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2153
2154 if (rc != X86EMUL_CONTINUE)
2155 return rc;
2156
2157 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2158
2159 if (rc != X86EMUL_CONTINUE)
2160 return rc;
2161
2162 ctxt->_eip = temp_eip;
2163
2164 if (ctxt->op_bytes == 4)
2165 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2166 else if (ctxt->op_bytes == 2) {
2167 ctxt->eflags &= ~0xffff;
2168 ctxt->eflags |= temp_eflags;
2169 }
2170
2171 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2172 ctxt->eflags |= X86_EFLAGS_FIXED;
2173 ctxt->ops->set_nmi_mask(ctxt, false);
2174
2175 return rc;
2176}
2177
2178static int em_iret(struct x86_emulate_ctxt *ctxt)
2179{
2180 switch(ctxt->mode) {
2181 case X86EMUL_MODE_REAL:
2182 return emulate_iret_real(ctxt);
2183 case X86EMUL_MODE_VM86:
2184 case X86EMUL_MODE_PROT16:
2185 case X86EMUL_MODE_PROT32:
2186 case X86EMUL_MODE_PROT64:
2187 default:
2188
2189 return X86EMUL_UNHANDLEABLE;
2190 }
2191}
2192
2193static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2194{
2195 int rc;
2196 unsigned short sel;
2197 struct desc_struct new_desc;
2198 u8 cpl = ctxt->ops->cpl(ctxt);
2199
2200 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2201
2202 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2203 X86_TRANSFER_CALL_JMP,
2204 &new_desc);
2205 if (rc != X86EMUL_CONTINUE)
2206 return rc;
2207
2208 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2209
2210 if (rc != X86EMUL_CONTINUE)
2211 return X86EMUL_UNHANDLEABLE;
2212
2213 return rc;
2214}
2215
2216static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2217{
2218 return assign_eip_near(ctxt, ctxt->src.val);
2219}
2220
2221static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2222{
2223 int rc;
2224 long int old_eip;
2225
2226 old_eip = ctxt->_eip;
2227 rc = assign_eip_near(ctxt, ctxt->src.val);
2228 if (rc != X86EMUL_CONTINUE)
2229 return rc;
2230 ctxt->src.val = old_eip;
2231 rc = em_push(ctxt);
2232 return rc;
2233}
2234
2235static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2236{
2237 u64 old = ctxt->dst.orig_val64;
2238
2239 if (ctxt->dst.bytes == 16)
2240 return X86EMUL_UNHANDLEABLE;
2241
2242 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2243 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2244 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2245 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2246 ctxt->eflags &= ~X86_EFLAGS_ZF;
2247 } else {
2248 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2249 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2250
2251 ctxt->eflags |= X86_EFLAGS_ZF;
2252 }
2253 return X86EMUL_CONTINUE;
2254}
2255
2256static int em_ret(struct x86_emulate_ctxt *ctxt)
2257{
2258 int rc;
2259 unsigned long eip;
2260
2261 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2262 if (rc != X86EMUL_CONTINUE)
2263 return rc;
2264
2265 return assign_eip_near(ctxt, eip);
2266}
2267
2268static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2269{
2270 int rc;
2271 unsigned long eip, cs;
2272 int cpl = ctxt->ops->cpl(ctxt);
2273 struct desc_struct new_desc;
2274
2275 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2276 if (rc != X86EMUL_CONTINUE)
2277 return rc;
2278 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2279 if (rc != X86EMUL_CONTINUE)
2280 return rc;
2281
2282 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2283 return X86EMUL_UNHANDLEABLE;
2284 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2285 X86_TRANSFER_RET,
2286 &new_desc);
2287 if (rc != X86EMUL_CONTINUE)
2288 return rc;
2289 rc = assign_eip_far(ctxt, eip, &new_desc);
2290
2291 if (rc != X86EMUL_CONTINUE)
2292 return X86EMUL_UNHANDLEABLE;
2293
2294 return rc;
2295}
2296
2297static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2298{
2299 int rc;
2300
2301 rc = em_ret_far(ctxt);
2302 if (rc != X86EMUL_CONTINUE)
2303 return rc;
2304 rsp_increment(ctxt, ctxt->src.val);
2305 return X86EMUL_CONTINUE;
2306}
2307
2308static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2309{
2310
2311 ctxt->dst.orig_val = ctxt->dst.val;
2312 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2313 ctxt->src.orig_val = ctxt->src.val;
2314 ctxt->src.val = ctxt->dst.orig_val;
2315 fastop(ctxt, em_cmp);
2316
2317 if (ctxt->eflags & X86_EFLAGS_ZF) {
2318
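/* ZF set: the compare matched, so the destination gets the new (original source) value. */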
2319 ctxt->src.type = OP_NONE;
2320 ctxt->dst.val = ctxt->src.orig_val;
2321 } else {
2322
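/* ZF clear: load the observed destination value into rAX and rewrite the destination unchanged. */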
2323 ctxt->src.type = OP_REG;
2324 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2325 ctxt->src.val = ctxt->dst.orig_val;
2326
2327 ctxt->dst.val = ctxt->dst.orig_val;
2328 }
2329 return X86EMUL_CONTINUE;
2330}
2331
2332static int em_lseg(struct x86_emulate_ctxt *ctxt)
2333{
2334 int seg = ctxt->src2.val;
2335 unsigned short sel;
2336 int rc;
2337
2338 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2339
2340 rc = load_segment_descriptor(ctxt, sel, seg);
2341 if (rc != X86EMUL_CONTINUE)
2342 return rc;
2343
2344 ctxt->dst.val = ctxt->src.val;
2345 return rc;
2346}
2347
2348static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2349{
2350#ifdef CONFIG_X86_64
2351 u32 eax, ebx, ecx, edx;
2352
2353 eax = 0x80000001;
2354 ecx = 0;
2355 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2356 return edx & bit(X86_FEATURE_LM);
2357#else
2358 return false;
2359#endif
2360}
2361
2362static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2363{
2364 desc->g = (flags >> 23) & 1;
2365 desc->d = (flags >> 22) & 1;
2366 desc->l = (flags >> 21) & 1;
2367 desc->avl = (flags >> 20) & 1;
2368 desc->p = (flags >> 15) & 1;
2369 desc->dpl = (flags >> 13) & 3;
2370 desc->s = (flags >> 12) & 1;
2371 desc->type = (flags >> 8) & 15;
2372}
2373
2374static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2375 int n)
2376{
2377 struct desc_struct desc;
2378 int offset;
2379 u16 selector;
2380
2381 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2382
2383 if (n < 3)
2384 offset = 0x7f84 + n * 12;
2385 else
2386 offset = 0x7f2c + (n - 3) * 12;
2387
2388 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2389 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2390 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2391 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2392 return X86EMUL_CONTINUE;
2393}
2394
2395#ifdef CONFIG_X86_64
2396static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2397 int n)
2398{
2399 struct desc_struct desc;
2400 int offset;
2401 u16 selector;
2402 u32 base3;
2403
2404 offset = 0x7e00 + n * 16;
2405
2406 selector = GET_SMSTATE(u16, smstate, offset);
2407 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2408 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2409 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2410 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2411
2412 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2413 return X86EMUL_CONTINUE;
2414}
2415#endif
2416
2417static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2418 u64 cr0, u64 cr3, u64 cr4)
2419{
2420 int bad;
2421 u64 pcid;
2422
2423
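/* CR4.PCIDE can only be set while CR3[11:0] is zero, so strip the PCID from CR3 here and restore it last. */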
2424 pcid = 0;
2425 if (cr4 & X86_CR4_PCIDE) {
2426 pcid = cr3 & 0xfff;
2427 cr3 &= ~0xfff;
2428 }
2429
2430 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2431 if (bad)
2432 return X86EMUL_UNHANDLEABLE;
2433
2434
2435
2436
2437
2438
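/* Load CR4 without PCIDE first (PAE must be in place before CR0.PG), then CR0; PCIDE is turned back on afterwards. */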
2439 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2440 if (bad)
2441 return X86EMUL_UNHANDLEABLE;
2442
2443 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2444 if (bad)
2445 return X86EMUL_UNHANDLEABLE;
2446
2447 if (cr4 & X86_CR4_PCIDE) {
2448 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2449 if (bad)
2450 return X86EMUL_UNHANDLEABLE;
2451 if (pcid) {
2452 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2453 if (bad)
2454 return X86EMUL_UNHANDLEABLE;
2455 }
2456
2457 }
2458
2459 return X86EMUL_CONTINUE;
2460}
2461
2462static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2463 const char *smstate)
2464{
2465 struct desc_struct desc;
2466 struct desc_ptr dt;
2467 u16 selector;
2468 u32 val, cr0, cr3, cr4;
2469 int i;
2470
2471 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2472 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2473 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2474 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2475
2476 for (i = 0; i < 8; i++)
2477 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2478
2479 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2480 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2481 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2482 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2483
2484 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2485 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2486 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2487 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2488 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2489
2490 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2491 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2492 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2493 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2494 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2495
2496 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2497 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2498 ctxt->ops->set_gdt(ctxt, &dt);
2499
2500 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2501 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2502 ctxt->ops->set_idt(ctxt, &dt);
2503
2504 for (i = 0; i < 6; i++) {
2505 int r = rsm_load_seg_32(ctxt, smstate, i);
2506 if (r != X86EMUL_CONTINUE)
2507 return r;
2508 }
2509
2510 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2511
2512 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2513
2514 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2515}
2516
2517#ifdef CONFIG_X86_64
2518static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2519 const char *smstate)
2520{
2521 struct desc_struct desc;
2522 struct desc_ptr dt;
2523 u64 val, cr0, cr3, cr4;
2524 u32 base3;
2525 u16 selector;
2526 int i, r;
2527
2528 for (i = 0; i < 16; i++)
2529 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2530
2531 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2532 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2533
2534 val = GET_SMSTATE(u32, smstate, 0x7f68);
2535 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2536 val = GET_SMSTATE(u32, smstate, 0x7f60);
2537 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2538
2539 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2540 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2541 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2542 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2543 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2544 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2545
2546 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2547 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2548 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2549 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2550 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2551 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2552
2553 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2554 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2555 ctxt->ops->set_idt(ctxt, &dt);
2556
2557 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2558 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2559 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2560 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2561 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2562 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2563
2564 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2565 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2566 ctxt->ops->set_gdt(ctxt, &dt);
2567
2568 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2569 if (r != X86EMUL_CONTINUE)
2570 return r;
2571
2572 for (i = 0; i < 6; i++) {
2573 r = rsm_load_seg_64(ctxt, smstate, i);
2574 if (r != X86EMUL_CONTINUE)
2575 return r;
2576 }
2577
2578 return X86EMUL_CONTINUE;
2579}
2580#endif
2581
2582static int em_rsm(struct x86_emulate_ctxt *ctxt)
2583{
2584 unsigned long cr0, cr4, efer;
2585 char buf[512];
2586 u64 smbase;
2587 int ret;
2588
2589 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2590 return emulate_ud(ctxt);
2591
2592 smbase = ctxt->ops->get_smbase(ctxt);
2593
2594 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2595 if (ret != X86EMUL_CONTINUE)
2596 return X86EMUL_UNHANDLEABLE;
2597
2598 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2599 ctxt->ops->set_nmi_mask(ctxt, false);
2600
2601 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2602 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2603
2604
2605
2606
2607
2608
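/* Get the CPU into a safe, paging-off state before loading the saved CR0/CR3/CR4/EFER from SMRAM. */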
2609 if (emulator_has_longmode(ctxt)) {
2610 struct desc_struct cs_desc;
2611
2612
2613 cr4 = ctxt->ops->get_cr(ctxt, 4);
2614 if (cr4 & X86_CR4_PCIDE)
2615 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2616
2617
2618 memset(&cs_desc, 0, sizeof(cs_desc));
2619 cs_desc.type = 0xb;
2620 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2621 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2622 }
2623
2624
2625 cr0 = ctxt->ops->get_cr(ctxt, 0);
2626 if (cr0 & X86_CR0_PE)
2627 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2628
2629 if (emulator_has_longmode(ctxt)) {
2630
2631 cr4 = ctxt->ops->get_cr(ctxt, 4);
2632 if (cr4 & X86_CR4_PAE)
2633 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2634
2635
2636 efer = 0;
2637 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2638 }
2639
2640
2641
2642
2643
2644
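/* Let the pre_leave_smm() hook act on the SMM state-save area before the register state is reloaded. */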
2645 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2646 return X86EMUL_UNHANDLEABLE;
2647
2648#ifdef CONFIG_X86_64
2649 if (emulator_has_longmode(ctxt))
2650 ret = rsm_load_state_64(ctxt, buf);
2651 else
2652#endif
2653 ret = rsm_load_state_32(ctxt, buf);
2654
2655 if (ret != X86EMUL_CONTINUE) {
2656
2657 return X86EMUL_UNHANDLEABLE;
2658 }
2659
2660 ctxt->ops->post_leave_smm(ctxt);
2661
2662 return X86EMUL_CONTINUE;
2663}
2664
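/* Build the flat 4 GiB code and data segments that SYSCALL/SYSENTER/SYSEXIT install. */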
2665static void
2666setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2667 struct desc_struct *cs, struct desc_struct *ss)
2668{
2669 cs->l = 0;
2670 set_desc_base(cs, 0);
2671 cs->g = 1;
2672 set_desc_limit(cs, 0xfffff);
2673 cs->type = 0x0b;
2674 cs->s = 1;
2675 cs->dpl = 0;
2676 cs->p = 1;
2677 cs->d = 1;
2678 cs->avl = 0;
2679
2680 set_desc_base(ss, 0);
2681 set_desc_limit(ss, 0xfffff);
2682 ss->g = 1;
2683 ss->s = 1;
2684 ss->type = 0x03;
2685 ss->d = 1;
2686 ss->dpl = 0;
2687 ss->p = 1;
2688 ss->l = 0;
2689 ss->avl = 0;
2690}
2691
2692static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2693{
2694 u32 eax, ebx, ecx, edx;
2695
2696 eax = ecx = 0;
2697 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2698 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2699 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2700 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2701}
2702
2703static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2704{
2705 const struct x86_emulate_ops *ops = ctxt->ops;
2706 u32 eax, ebx, ecx, edx;
2707
2708
2709
2710
2711
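/* SYSCALL is architecturally available in 64-bit mode; vendor checks only matter for legacy/compat modes. */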
2712 if (ctxt->mode == X86EMUL_MODE_PROT64)
2713 return true;
2714
2715 eax = 0x00000000;
2716 ecx = 0x00000000;
2717 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2718
2719
2720
2721
2722
2723
2724
2725
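/* Intel CPUs support SYSCALL only in 64-bit mode and raise #UD otherwise, so mirror that behaviour. */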
2726 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2727 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2728 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2729 return false;
2730
2731
2732 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2733 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2734 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2735 return true;
2736
2737
2738 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2739 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2740 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2741 return true;
2742
2743
2744 if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
2745 ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
2746 edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
2747 return true;
2748
2749
2750
2751
2752
2753 return false;
2754}
2755
2756static int em_syscall(struct x86_emulate_ctxt *ctxt)
2757{
2758 const struct x86_emulate_ops *ops = ctxt->ops;
2759 struct desc_struct cs, ss;
2760 u64 msr_data;
2761 u16 cs_sel, ss_sel;
2762 u64 efer = 0;
2763
2764
2765 if (ctxt->mode == X86EMUL_MODE_REAL ||
2766 ctxt->mode == X86EMUL_MODE_VM86)
2767 return emulate_ud(ctxt);
2768
2769 if (!(em_syscall_is_enabled(ctxt)))
2770 return emulate_ud(ctxt);
2771
2772 ops->get_msr(ctxt, MSR_EFER, &efer);
2773 setup_syscalls_segments(ctxt, &cs, &ss);
2774
2775 if (!(efer & EFER_SCE))
2776 return emulate_ud(ctxt);
2777
2778 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2779 msr_data >>= 32;
2780 cs_sel = (u16)(msr_data & 0xfffc);
2781 ss_sel = (u16)(msr_data + 8);
2782
2783 if (efer & EFER_LMA) {
2784 cs.d = 0;
2785 cs.l = 1;
2786 }
2787 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2788 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2789
2790 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2791 if (efer & EFER_LMA) {
2792#ifdef CONFIG_X86_64
2793 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2794
2795 ops->get_msr(ctxt,
2796 ctxt->mode == X86EMUL_MODE_PROT64 ?
2797 MSR_LSTAR : MSR_CSTAR, &msr_data);
2798 ctxt->_eip = msr_data;
2799
2800 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2801 ctxt->eflags &= ~msr_data;
2802 ctxt->eflags |= X86_EFLAGS_FIXED;
2803#endif
2804 } else {
2805
2806 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2807 ctxt->_eip = (u32)msr_data;
2808
2809 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2810 }
2811
2812 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2813 return X86EMUL_CONTINUE;
2814}
2815
2816static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2817{
2818 const struct x86_emulate_ops *ops = ctxt->ops;
2819 struct desc_struct cs, ss;
2820 u64 msr_data;
2821 u16 cs_sel, ss_sel;
2822 u64 efer = 0;
2823
2824 ops->get_msr(ctxt, MSR_EFER, &efer);
2825
2826 if (ctxt->mode == X86EMUL_MODE_REAL)
2827 return emulate_gp(ctxt, 0);
2828
2829
2830
2831
2832
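/* Outside 64-bit mode with EFER.LMA set, SYSENTER is Intel-specific; other vendors raise #UD. */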
2833 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2834 && !vendor_intel(ctxt))
2835 return emulate_ud(ctxt);
2836
2837
2838 if (ctxt->mode == X86EMUL_MODE_PROT64)
2839 return X86EMUL_UNHANDLEABLE;
2840
2841 setup_syscalls_segments(ctxt, &cs, &ss);
2842
2843 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2844 if ((msr_data & 0xfffc) == 0x0)
2845 return emulate_gp(ctxt, 0);
2846
2847 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2848 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2849 ss_sel = cs_sel + 8;
2850 if (efer & EFER_LMA) {
2851 cs.d = 0;
2852 cs.l = 1;
2853 }
2854
2855 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2856 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2857
2858 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2859 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2860
2861 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2862 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2863 (u32)msr_data;
2864
2865 return X86EMUL_CONTINUE;
2866}
2867
2868static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2869{
2870 const struct x86_emulate_ops *ops = ctxt->ops;
2871 struct desc_struct cs, ss;
2872 u64 msr_data, rcx, rdx;
2873 int usermode;
2874 u16 cs_sel = 0, ss_sel = 0;
2875
2876
2877 if (ctxt->mode == X86EMUL_MODE_REAL ||
2878 ctxt->mode == X86EMUL_MODE_VM86)
2879 return emulate_gp(ctxt, 0);
2880
2881 setup_syscalls_segments(ctxt, &cs, &ss);
2882
2883 if ((ctxt->rex_prefix & 0x8) != 0x0)
2884 usermode = X86EMUL_MODE_PROT64;
2885 else
2886 usermode = X86EMUL_MODE_PROT32;
2887
2888 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2889 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2890
2891 cs.dpl = 3;
2892 ss.dpl = 3;
2893 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2894 switch (usermode) {
2895 case X86EMUL_MODE_PROT32:
2896 cs_sel = (u16)(msr_data + 16);
2897 if ((msr_data & 0xfffc) == 0x0)
2898 return emulate_gp(ctxt, 0);
2899 ss_sel = (u16)(msr_data + 24);
2900 rcx = (u32)rcx;
2901 rdx = (u32)rdx;
2902 break;
2903 case X86EMUL_MODE_PROT64:
2904 cs_sel = (u16)(msr_data + 32);
2905 if (msr_data == 0x0)
2906 return emulate_gp(ctxt, 0);
2907 ss_sel = cs_sel + 8;
2908 cs.d = 0;
2909 cs.l = 1;
2910 if (emul_is_noncanonical_address(rcx, ctxt) ||
2911 emul_is_noncanonical_address(rdx, ctxt))
2912 return emulate_gp(ctxt, 0);
2913 break;
2914 }
2915 cs_sel |= SEGMENT_RPL_MASK;
2916 ss_sel |= SEGMENT_RPL_MASK;
2917
2918 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2919 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2920
2921 ctxt->_eip = rdx;
2922 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2923
2924 return X86EMUL_CONTINUE;
2925}
2926
2927static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2928{
2929 int iopl;
2930 if (ctxt->mode == X86EMUL_MODE_REAL)
2931 return false;
2932 if (ctxt->mode == X86EMUL_MODE_VM86)
2933 return true;
2934 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2935 return ctxt->ops->cpl(ctxt) > iopl;
2936}
2937
2938#define VMWARE_PORT_VMPORT (0x5658)
2939#define VMWARE_PORT_VMRPC (0x5659)
2940
2941static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2942 u16 port, u16 len)
2943{
2944 const struct x86_emulate_ops *ops = ctxt->ops;
2945 struct desc_struct tr_seg;
2946 u32 base3;
2947 int r;
2948 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2949 unsigned mask = (1 << len) - 1;
2950 unsigned long base;
2951
2952
2953
2954
2955
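/* The VMware backdoor ports are always allowed when the backdoor is enabled, bypassing the TSS I/O bitmap. */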
2956 if (enable_vmware_backdoor &&
2957 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2958 return true;
2959
2960 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2961 if (!tr_seg.p)
2962 return false;
2963 if (desc_limit_scaled(&tr_seg) < 103)
2964 return false;
2965 base = get_desc_base(&tr_seg);
2966#ifdef CONFIG_X86_64
2967 base |= ((u64)base3) << 32;
2968#endif
2969 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2970 if (r != X86EMUL_CONTINUE)
2971 return false;
2972 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2973 return false;
2974 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2975 if (r != X86EMUL_CONTINUE)
2976 return false;
2977 if ((perm >> bit_idx) & mask)
2978 return false;
2979 return true;
2980}
2981
2982static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2983 u16 port, u16 len)
2984{
2985 if (ctxt->perm_ok)
2986 return true;
2987
2988 if (emulator_bad_iopl(ctxt))
2989 if (!emulator_io_port_access_allowed(ctxt, port, len))
2990 return false;
2991
2992 ctxt->perm_ok = true;
2993
2994 return true;
2995}
2996
2997static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2998{
2999
3000
3001
3002
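/* Quirk: with a 32-bit address size, Intel CPUs clear RCX and truncate the string pointers to 32 bits for REP string ops; mimic that here. */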
3003#ifdef CONFIG_X86_64
3004 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
3005 return;
3006
3007 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
3008
3009 switch (ctxt->b) {
3010 case 0xa4:
3011 case 0xa5:
3012 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
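/* fall through: MOVS uses both RSI and RDI, so also truncate RDI below. */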
3013
3014 case 0xaa:
3015 case 0xab:
3016 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
3017 }
3018#endif
3019}
3020
3021static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
3022 struct tss_segment_16 *tss)
3023{
3024 tss->ip = ctxt->_eip;
3025 tss->flag = ctxt->eflags;
3026 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3027 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3028 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3029 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3030 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3031 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3032 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3033 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3034
3035 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3036 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3037 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3038 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3039 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3040}
3041
3042static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3043 struct tss_segment_16 *tss)
3044{
3045 int ret;
3046 u8 cpl;
3047
3048 ctxt->_eip = tss->ip;
3049 ctxt->eflags = tss->flag | 2;
3050 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3051 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3052 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3053 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3054 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3055 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3056 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3057 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3058
3059
3060
3061
3062
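/* Per the SDM, segment selectors are loaded before the descriptors during a task switch. */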
3063 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3064 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3065 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3066 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3067 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3068
3069 cpl = tss->cs & 3;
3070
3071
3072
3073
3074
3075 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3076 X86_TRANSFER_TASK_SWITCH, NULL);
3077 if (ret != X86EMUL_CONTINUE)
3078 return ret;
3079 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3080 X86_TRANSFER_TASK_SWITCH, NULL);
3081 if (ret != X86EMUL_CONTINUE)
3082 return ret;
3083 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3084 X86_TRANSFER_TASK_SWITCH, NULL);
3085 if (ret != X86EMUL_CONTINUE)
3086 return ret;
3087 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3088 X86_TRANSFER_TASK_SWITCH, NULL);
3089 if (ret != X86EMUL_CONTINUE)
3090 return ret;
3091 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3092 X86_TRANSFER_TASK_SWITCH, NULL);
3093 if (ret != X86EMUL_CONTINUE)
3094 return ret;
3095
3096 return X86EMUL_CONTINUE;
3097}
3098
3099static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3100 u16 tss_selector, u16 old_tss_sel,
3101 ulong old_tss_base, struct desc_struct *new_desc)
3102{
3103 struct tss_segment_16 tss_seg;
3104 int ret;
3105 u32 new_tss_base = get_desc_base(new_desc);
3106
3107 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3108 if (ret != X86EMUL_CONTINUE)
3109 return ret;
3110
3111 save_state_to_tss16(ctxt, &tss_seg);
3112
3113 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3114 if (ret != X86EMUL_CONTINUE)
3115 return ret;
3116
3117 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3118 if (ret != X86EMUL_CONTINUE)
3119 return ret;
3120
3121 if (old_tss_sel != 0xffff) {
3122 tss_seg.prev_task_link = old_tss_sel;
3123
3124 ret = linear_write_system(ctxt, new_tss_base,
3125 &tss_seg.prev_task_link,
3126 sizeof(tss_seg.prev_task_link));
3127 if (ret != X86EMUL_CONTINUE)
3128 return ret;
3129 }
3130
3131 return load_state_from_tss16(ctxt, &tss_seg);
3132}
3133
3134static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3135 struct tss_segment_32 *tss)
3136{
3137
3138 tss->eip = ctxt->_eip;
3139 tss->eflags = ctxt->eflags;
3140 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3141 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3142 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3143 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3144 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3145 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3146 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3147 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3148
3149 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3150 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3151 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3152 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3153 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3154 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3155}
3156
3157static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3158 struct tss_segment_32 *tss)
3159{
3160 int ret;
3161 u8 cpl;
3162
3163 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3164 return emulate_gp(ctxt, 0);
3165 ctxt->_eip = tss->eip;
3166 ctxt->eflags = tss->eflags | 2;
3167
3168
3169 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3170 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3171 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3172 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3173 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3174 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3175 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3176 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3177
3178
3179
3180
3181
3182
3183 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3184 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3185 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3186 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3187 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3188 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3189 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3190
3191
3192
3193
3194
3195
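/* Switch the emulation mode (VM86 vs. protected) first so the selectors below are interpreted correctly. */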
3196 if (ctxt->eflags & X86_EFLAGS_VM) {
3197 ctxt->mode = X86EMUL_MODE_VM86;
3198 cpl = 3;
3199 } else {
3200 ctxt->mode = X86EMUL_MODE_PROT32;
3201 cpl = tss->cs & 3;
3202 }
3203
3204
3205
3206
3207
3208 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3209 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3210 if (ret != X86EMUL_CONTINUE)
3211 return ret;
3212 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3213 X86_TRANSFER_TASK_SWITCH, NULL);
3214 if (ret != X86EMUL_CONTINUE)
3215 return ret;
3216 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3217 X86_TRANSFER_TASK_SWITCH, NULL);
3218 if (ret != X86EMUL_CONTINUE)
3219 return ret;
3220 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3221 X86_TRANSFER_TASK_SWITCH, NULL);
3222 if (ret != X86EMUL_CONTINUE)
3223 return ret;
3224 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3225 X86_TRANSFER_TASK_SWITCH, NULL);
3226 if (ret != X86EMUL_CONTINUE)
3227 return ret;
3228 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3229 X86_TRANSFER_TASK_SWITCH, NULL);
3230 if (ret != X86EMUL_CONTINUE)
3231 return ret;
3232 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3233 X86_TRANSFER_TASK_SWITCH, NULL);
3234
3235 return ret;
3236}
3237
3238static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3239 u16 tss_selector, u16 old_tss_sel,
3240 ulong old_tss_base, struct desc_struct *new_desc)
3241{
3242 struct tss_segment_32 tss_seg;
3243 int ret;
3244 u32 new_tss_base = get_desc_base(new_desc);
3245 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3246 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3247
3248 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3249 if (ret != X86EMUL_CONTINUE)
3250 return ret;
3251
3252 save_state_to_tss32(ctxt, &tss_seg);
3253
3254
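/* Only the dynamic fields (EIP through the segment selectors) are written back to the old TSS. */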
3255 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3256 ldt_sel_offset - eip_offset);
3257 if (ret != X86EMUL_CONTINUE)
3258 return ret;
3259
3260 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3261 if (ret != X86EMUL_CONTINUE)
3262 return ret;
3263
3264 if (old_tss_sel != 0xffff) {
3265 tss_seg.prev_task_link = old_tss_sel;
3266
3267 ret = linear_write_system(ctxt, new_tss_base,
3268 &tss_seg.prev_task_link,
3269 sizeof(tss_seg.prev_task_link));
3270 if (ret != X86EMUL_CONTINUE)
3271 return ret;
3272 }
3273
3274 return load_state_from_tss32(ctxt, &tss_seg);
3275}
3276
3277static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3278 u16 tss_selector, int idt_index, int reason,
3279 bool has_error_code, u32 error_code)
3280{
3281 const struct x86_emulate_ops *ops = ctxt->ops;
3282 struct desc_struct curr_tss_desc, next_tss_desc;
3283 int ret;
3284 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3285 ulong old_tss_base =
3286 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3287 u32 desc_limit;
3288 ulong desc_addr, dr7;
3289
3290
3291
3292 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3293 if (ret != X86EMUL_CONTINUE)
3294 return ret;
3295 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3296 if (ret != X86EMUL_CONTINUE)
3297 return ret;
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
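/* For a switch through a task gate, privilege is checked against the gate DPL rather than the TSS descriptor. */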
3309 if (reason == TASK_SWITCH_GATE) {
3310 if (idt_index != -1) {
3311
3312 struct desc_struct task_gate_desc;
3313 int dpl;
3314
3315 ret = read_interrupt_descriptor(ctxt, idt_index,
3316 &task_gate_desc);
3317 if (ret != X86EMUL_CONTINUE)
3318 return ret;
3319
3320 dpl = task_gate_desc.dpl;
3321 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3322 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3323 }
3324 }
3325
3326 desc_limit = desc_limit_scaled(&next_tss_desc);
3327 if (!next_tss_desc.p ||
3328 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3329 desc_limit < 0x2b)) {
3330 return emulate_ts(ctxt, tss_selector & 0xfffc);
3331 }
3332
3333 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3334 curr_tss_desc.type &= ~(1 << 1);
3335 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3336 }
3337
3338 if (reason == TASK_SWITCH_IRET)
3339 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3340
3341
3342
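/* Only CALL and gate-initiated switches record a back link to the previous task in the new TSS. */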
3343 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3344 old_tss_sel = 0xffff;
3345
3346 if (next_tss_desc.type & 8)
3347 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3348 old_tss_base, &next_tss_desc);
3349 else
3350 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3351 old_tss_base, &next_tss_desc);
3352 if (ret != X86EMUL_CONTINUE)
3353 return ret;
3354
3355 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3356 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3357
3358 if (reason != TASK_SWITCH_IRET) {
3359 next_tss_desc.type |= (1 << 1);
3360 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3361 }
3362
3363 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3364 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3365
3366 if (has_error_code) {
3367 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3368 ctxt->lock_prefix = 0;
3369 ctxt->src.val = (unsigned long) error_code;
3370 ret = em_push(ctxt);
3371 }
3372
3373 ops->get_dr(ctxt, 7, &dr7);
3374 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3375
3376 return ret;
3377}
3378
3379int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3380 u16 tss_selector, int idt_index, int reason,
3381 bool has_error_code, u32 error_code)
3382{
3383 int rc;
3384
3385 invalidate_registers(ctxt);
3386 ctxt->_eip = ctxt->eip;
3387 ctxt->dst.type = OP_NONE;
3388
3389 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3390 has_error_code, error_code);
3391
3392 if (rc == X86EMUL_CONTINUE) {
3393 ctxt->eip = ctxt->_eip;
3394 writeback_registers(ctxt);
3395 }
3396
3397 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3398}
3399
3400static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3401 struct operand *op)
3402{
3403 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3404
3405 register_address_increment(ctxt, reg, df * op->bytes);
3406 op->addr.mem.ea = register_address(ctxt, reg);
3407}
3408
3409static int em_das(struct x86_emulate_ctxt *ctxt)
3410{
3411 u8 al, old_al;
3412 bool af, cf, old_cf;
3413
3414 cf = ctxt->eflags & X86_EFLAGS_CF;
3415 al = ctxt->dst.val;
3416
3417 old_al = al;
3418 old_cf = cf;
3419 cf = false;
3420 af = ctxt->eflags & X86_EFLAGS_AF;
3421 if ((al & 0x0f) > 9 || af) {
3422 al -= 6;
3423 cf = old_cf | (al >= 250);
3424 af = true;
3425 } else {
3426 af = false;
3427 }
3428 if (old_al > 0x99 || old_cf) {
3429 al -= 0x60;
3430 cf = true;
3431 }
3432
3433 ctxt->dst.val = al;
3434
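/* OR AL with zero via the fastop purely to recompute PF, ZF and SF for the result. */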
3435 ctxt->src.type = OP_IMM;
3436 ctxt->src.val = 0;
3437 ctxt->src.bytes = 1;
3438 fastop(ctxt, em_or);
3439 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3440 if (cf)
3441 ctxt->eflags |= X86_EFLAGS_CF;
3442 if (af)
3443 ctxt->eflags |= X86_EFLAGS_AF;
3444 return X86EMUL_CONTINUE;
3445}
3446
3447static int em_aam(struct x86_emulate_ctxt *ctxt)
3448{
3449 u8 al, ah;
3450
3451 if (ctxt->src.val == 0)
3452 return emulate_de(ctxt);
3453
3454 al = ctxt->dst.val & 0xff;
3455 ah = al / ctxt->src.val;
3456 al %= ctxt->src.val;
3457
3458 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3459
3460
3461 ctxt->src.type = OP_IMM;
3462 ctxt->src.val = 0;
3463 ctxt->src.bytes = 1;
3464 fastop(ctxt, em_or);
3465
3466 return X86EMUL_CONTINUE;
3467}
3468
3469static int em_aad(struct x86_emulate_ctxt *ctxt)
3470{
3471 u8 al = ctxt->dst.val & 0xff;
3472 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3473
3474 al = (al + (ah * ctxt->src.val)) & 0xff;
3475
3476 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3477
3478
3479 ctxt->src.type = OP_IMM;
3480 ctxt->src.val = 0;
3481 ctxt->src.bytes = 1;
3482 fastop(ctxt, em_or);
3483
3484 return X86EMUL_CONTINUE;
3485}
3486
3487static int em_call(struct x86_emulate_ctxt *ctxt)
3488{
3489 int rc;
3490 long rel = ctxt->src.val;
3491
3492 ctxt->src.val = (unsigned long)ctxt->_eip;
3493 rc = jmp_rel(ctxt, rel);
3494 if (rc != X86EMUL_CONTINUE)
3495 return rc;
3496 return em_push(ctxt);
3497}
3498
3499static int em_call_far(struct x86_emulate_ctxt *ctxt)
3500{
3501 u16 sel, old_cs;
3502 ulong old_eip;
3503 int rc;
3504 struct desc_struct old_desc, new_desc;
3505 const struct x86_emulate_ops *ops = ctxt->ops;
3506 int cpl = ctxt->ops->cpl(ctxt);
3507 enum x86emul_mode prev_mode = ctxt->mode;
3508
3509 old_eip = ctxt->_eip;
3510 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3511
3512 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3513 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3514 X86_TRANSFER_CALL_JMP, &new_desc);
3515 if (rc != X86EMUL_CONTINUE)
3516 return rc;
3517
3518 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3519 if (rc != X86EMUL_CONTINUE)
3520 goto fail;
3521
3522 ctxt->src.val = old_cs;
3523 rc = em_push(ctxt);
3524 if (rc != X86EMUL_CONTINUE)
3525 goto fail;
3526
3527 ctxt->src.val = old_eip;
3528 rc = em_push(ctxt);
3529
3530
3531 if (rc != X86EMUL_CONTINUE) {
3532 pr_warn_once("faulting far call emulation tainted memory\n");
3533 goto fail;
3534 }
3535 return rc;
3536fail:
3537 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3538 ctxt->mode = prev_mode;
3539 return rc;
3540
3541}
3542
3543static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3544{
3545 int rc;
3546 unsigned long eip;
3547
3548 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3549 if (rc != X86EMUL_CONTINUE)
3550 return rc;
3551 rc = assign_eip_near(ctxt, eip);
3552 if (rc != X86EMUL_CONTINUE)
3553 return rc;
3554 rsp_increment(ctxt, ctxt->src.val);
3555 return X86EMUL_CONTINUE;
3556}
3557
3558static int em_xchg(struct x86_emulate_ctxt *ctxt)
3559{
3560
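/* Write the destination value into the register source operand right away; the memory side goes through the normal writeback path with an implied LOCK. */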
3561 ctxt->src.val = ctxt->dst.val;
3562 write_register_operand(&ctxt->src);
3563
3564
3565 ctxt->dst.val = ctxt->src.orig_val;
3566 ctxt->lock_prefix = 1;
3567 return X86EMUL_CONTINUE;
3568}
3569
3570static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3571{
3572 ctxt->dst.val = ctxt->src2.val;
3573 return fastop(ctxt, em_imul);
3574}
3575
3576static int em_cwd(struct x86_emulate_ctxt *ctxt)
3577{
3578 ctxt->dst.type = OP_REG;
3579 ctxt->dst.bytes = ctxt->src.bytes;
3580 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3581 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3582
3583 return X86EMUL_CONTINUE;
3584}
3585
3586static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3587{
3588 u64 tsc_aux = 0;
3589
3590 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3591 return emulate_gp(ctxt, 0);
3592 ctxt->dst.val = tsc_aux;
3593 return X86EMUL_CONTINUE;
3594}
3595
3596static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3597{
3598 u64 tsc = 0;
3599
3600 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3601 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3602 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3603 return X86EMUL_CONTINUE;
3604}
3605
3606static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3607{
3608 u64 pmc;
3609
3610 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3611 return emulate_gp(ctxt, 0);
3612 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3613 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3614 return X86EMUL_CONTINUE;
3615}
3616
3617static int em_mov(struct x86_emulate_ctxt *ctxt)
3618{
3619 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3620 return X86EMUL_CONTINUE;
3621}
3622
3623#define FFL(x) bit(X86_FEATURE_##x)
3624
3625static int em_movbe(struct x86_emulate_ctxt *ctxt)
3626{
3627 u32 ebx, ecx, edx, eax = 1;
3628 u16 tmp;
3629
3630
3631
3632
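/* MOVBE raises #UD unless the guest-visible CPUID (leaf 1, ECX) advertises it. */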
3633 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3634 if (!(ecx & FFL(MOVBE)))
3635 return emulate_ud(ctxt);
3636
3637 switch (ctxt->op_bytes) {
3638 case 2:
3639
3640
3641
3642
3643
3644
3645
3646
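/* 16-bit MOVBE swaps only the low word and must leave the upper bytes of the destination intact. */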
3647 tmp = (u16)ctxt->src.val;
3648 ctxt->dst.val &= ~0xffffUL;
3649 ctxt->dst.val |= (unsigned long)swab16(tmp);
3650 break;
3651 case 4:
3652 ctxt->dst.val = swab32((u32)ctxt->src.val);
3653 break;
3654 case 8:
3655 ctxt->dst.val = swab64(ctxt->src.val);
3656 break;
3657 default:
3658 BUG();
3659 }
3660 return X86EMUL_CONTINUE;
3661}
3662
3663static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3664{
3665 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3666 return emulate_gp(ctxt, 0);
3667
3668
3669 ctxt->dst.type = OP_NONE;
3670 return X86EMUL_CONTINUE;
3671}
3672
3673static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3674{
3675 unsigned long val;
3676
3677 if (ctxt->mode == X86EMUL_MODE_PROT64)
3678 val = ctxt->src.val & ~0ULL;
3679 else
3680 val = ctxt->src.val & ~0U;
3681
3682
3683 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3684 return emulate_gp(ctxt, 0);
3685
3686
3687 ctxt->dst.type = OP_NONE;
3688 return X86EMUL_CONTINUE;
3689}
3690
3691static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3692{
3693 u64 msr_data;
3694
3695 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3696 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3697 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3698 return emulate_gp(ctxt, 0);
3699
3700 return X86EMUL_CONTINUE;
3701}
3702
3703static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3704{
3705 u64 msr_data;
3706
3707 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3708 return emulate_gp(ctxt, 0);
3709
3710 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3711 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3712 return X86EMUL_CONTINUE;
3713}
3714
3715static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3716{
3717 if (segment > VCPU_SREG_GS &&
3718 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3719 ctxt->ops->cpl(ctxt) > 0)
3720 return emulate_gp(ctxt, 0);
3721
3722 ctxt->dst.val = get_segment_selector(ctxt, segment);
3723 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3724 ctxt->dst.bytes = 2;
3725 return X86EMUL_CONTINUE;
3726}
3727
3728static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3729{
3730 if (ctxt->modrm_reg > VCPU_SREG_GS)
3731 return emulate_ud(ctxt);
3732
3733 return em_store_sreg(ctxt, ctxt->modrm_reg);
3734}
3735
3736static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3737{
3738 u16 sel = ctxt->src.val;
3739
3740 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3741 return emulate_ud(ctxt);
3742
3743 if (ctxt->modrm_reg == VCPU_SREG_SS)
3744 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3745
3746
3747 ctxt->dst.type = OP_NONE;
3748 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3749}
3750
3751static int em_sldt(struct x86_emulate_ctxt *ctxt)
3752{
3753 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3754}
3755
3756static int em_lldt(struct x86_emulate_ctxt *ctxt)
3757{
3758 u16 sel = ctxt->src.val;
3759
3760
3761 ctxt->dst.type = OP_NONE;
3762 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3763}
3764
3765static int em_str(struct x86_emulate_ctxt *ctxt)
3766{
3767 return em_store_sreg(ctxt, VCPU_SREG_TR);
3768}
3769
3770static int em_ltr(struct x86_emulate_ctxt *ctxt)
3771{
3772 u16 sel = ctxt->src.val;
3773
3774
3775 ctxt->dst.type = OP_NONE;
3776 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3777}
3778
3779static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3780{
3781 int rc;
3782 ulong linear;
3783
3784 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3785 if (rc == X86EMUL_CONTINUE)
3786 ctxt->ops->invlpg(ctxt, linear);
3787
3788 ctxt->dst.type = OP_NONE;
3789 return X86EMUL_CONTINUE;
3790}
3791
3792static int em_clts(struct x86_emulate_ctxt *ctxt)
3793{
3794 ulong cr0;
3795
3796 cr0 = ctxt->ops->get_cr(ctxt, 0);
3797 cr0 &= ~X86_CR0_TS;
3798 ctxt->ops->set_cr(ctxt, 0, cr0);
3799 return X86EMUL_CONTINUE;
3800}
3801
3802static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3803{
3804 int rc = ctxt->ops->fix_hypercall(ctxt);
3805
3806 if (rc != X86EMUL_CONTINUE)
3807 return rc;
3808
3809
3810 ctxt->_eip = ctxt->eip;
3811
3812 ctxt->dst.type = OP_NONE;
3813 return X86EMUL_CONTINUE;
3814}
3815
3816static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3817 void (*get)(struct x86_emulate_ctxt *ctxt,
3818 struct desc_ptr *ptr))
3819{
3820 struct desc_ptr desc_ptr;
3821
3822 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3823 ctxt->ops->cpl(ctxt) > 0)
3824 return emulate_gp(ctxt, 0);
3825
3826 if (ctxt->mode == X86EMUL_MODE_PROT64)
3827 ctxt->op_bytes = 8;
3828 get(ctxt, &desc_ptr);
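/* With a 16-bit operand size only 24 bits of the base are stored; widen op_bytes so the write below covers them. */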
3829 if (ctxt->op_bytes == 2) {
3830 ctxt->op_bytes = 4;
3831 desc_ptr.address &= 0x00ffffff;
3832 }
3833
3834 ctxt->dst.type = OP_NONE;
3835 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3836 &desc_ptr, 2 + ctxt->op_bytes);
3837}
3838
3839static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3840{
3841 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3842}
3843
3844static int em_sidt(struct x86_emulate_ctxt *ctxt)
3845{
3846 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3847}
3848
3849static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3850{
3851 struct desc_ptr desc_ptr;
3852 int rc;
3853
3854 if (ctxt->mode == X86EMUL_MODE_PROT64)
3855 ctxt->op_bytes = 8;
3856 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3857 &desc_ptr.size, &desc_ptr.address,
3858 ctxt->op_bytes);
3859 if (rc != X86EMUL_CONTINUE)
3860 return rc;
3861 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3862 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3863 return emulate_gp(ctxt, 0);
3864 if (lgdt)
3865 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3866 else
3867 ctxt->ops->set_idt(ctxt, &desc_ptr);
3868
3869 ctxt->dst.type = OP_NONE;
3870 return X86EMUL_CONTINUE;
3871}
3872
3873static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3874{
3875 return em_lgdt_lidt(ctxt, true);
3876}
3877
3878static int em_lidt(struct x86_emulate_ctxt *ctxt)
3879{
3880 return em_lgdt_lidt(ctxt, false);
3881}
3882
3883static int em_smsw(struct x86_emulate_ctxt *ctxt)
3884{
3885 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3886 ctxt->ops->cpl(ctxt) > 0)
3887 return emulate_gp(ctxt, 0);
3888
3889 if (ctxt->dst.type == OP_MEM)
3890 ctxt->dst.bytes = 2;
3891 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3892 return X86EMUL_CONTINUE;
3893}
3894
3895static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3896{
3897 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3898 | (ctxt->src.val & 0x0f));
3899 ctxt->dst.type = OP_NONE;
3900 return X86EMUL_CONTINUE;
3901}
3902
3903static int em_loop(struct x86_emulate_ctxt *ctxt)
3904{
3905 int rc = X86EMUL_CONTINUE;
3906
3907 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
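/* LOOP (0xe2) branches whenever the masked count is non-zero; LOOPE/LOOPNE additionally test ZF via the condition-code helper. */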
3908 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3909 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3910 rc = jmp_rel(ctxt, ctxt->src.val);
3911
3912 return rc;
3913}
3914
3915static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3916{
3917 int rc = X86EMUL_CONTINUE;
3918
3919 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3920 rc = jmp_rel(ctxt, ctxt->src.val);
3921
3922 return rc;
3923}
3924
3925static int em_in(struct x86_emulate_ctxt *ctxt)
3926{
3927 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3928 &ctxt->dst.val))
3929 return X86EMUL_IO_NEEDED;
3930
3931 return X86EMUL_CONTINUE;
3932}
3933
3934static int em_out(struct x86_emulate_ctxt *ctxt)
3935{
3936 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3937 &ctxt->src.val, 1);
3938
3939 ctxt->dst.type = OP_NONE;
3940 return X86EMUL_CONTINUE;
3941}
3942
3943static int em_cli(struct x86_emulate_ctxt *ctxt)
3944{
3945 if (emulator_bad_iopl(ctxt))
3946 return emulate_gp(ctxt, 0);
3947
3948 ctxt->eflags &= ~X86_EFLAGS_IF;
3949 return X86EMUL_CONTINUE;
3950}
3951
3952static int em_sti(struct x86_emulate_ctxt *ctxt)
3953{
3954 if (emulator_bad_iopl(ctxt))
3955 return emulate_gp(ctxt, 0);
3956
3957 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3958 ctxt->eflags |= X86_EFLAGS_IF;
3959 return X86EMUL_CONTINUE;
3960}
3961
3962static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3963{
3964 u32 eax, ebx, ecx, edx;
3965 u64 msr = 0;
3966
3967 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3968 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3969 ctxt->ops->cpl(ctxt)) {
3970 return emulate_gp(ctxt, 0);
3971 }
3972
3973 eax = reg_read(ctxt, VCPU_REGS_RAX);
3974 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3975 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3976 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3977 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3978 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3979 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3980 return X86EMUL_CONTINUE;
3981}
3982
3983static int em_sahf(struct x86_emulate_ctxt *ctxt)
3984{
3985 u32 flags;
3986
3987 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3988 X86_EFLAGS_SF;
3989 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3990
3991 ctxt->eflags &= ~0xffUL;
3992 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3993 return X86EMUL_CONTINUE;
3994}
3995
3996static int em_lahf(struct x86_emulate_ctxt *ctxt)
3997{
3998 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3999 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
4000 return X86EMUL_CONTINUE;
4001}
4002
4003static int em_bswap(struct x86_emulate_ctxt *ctxt)
4004{
4005 switch (ctxt->op_bytes) {
4006#ifdef CONFIG_X86_64
4007 case 8:
4008 asm("bswap %0" : "+r"(ctxt->dst.val));
4009 break;
4010#endif
4011 default:
4012 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4013 break;
4014 }
4015 return X86EMUL_CONTINUE;
4016}
4017
4018static int em_clflush(struct x86_emulate_ctxt *ctxt)
4019{
4020
4021 return X86EMUL_CONTINUE;
4022}
4023
4024static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4025{
4026 ctxt->dst.val = (s32) ctxt->src.val;
4027 return X86EMUL_CONTINUE;
4028}
4029
4030static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4031{
4032 u32 eax = 1, ebx, ecx = 0, edx;
4033
4034 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4035 if (!(edx & FFL(FXSR)))
4036 return emulate_ud(ctxt);
4037
4038 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4039 return emulate_nm(ctxt);
4040
4041
4042
4043
4044
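/* FXSAVE/FXRSTOR emulation is not supported from 64-bit mode. */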
4045 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4046 return X86EMUL_UNHANDLEABLE;
4047
4048 return X86EMUL_CONTINUE;
4049}
4050
4051
4052
4053
4054
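/* Size of the FXSAVE image exchanged with guest memory: the legacy area plus the XMM registers the guest can use (16 in 64-bit mode, 8 with CR4.OSFXSR, otherwise none). */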
4055static size_t __fxstate_size(int nregs)
4056{
4057 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4058}
4059
4060static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4061{
4062 bool cr4_osfxsr;
4063 if (ctxt->mode == X86EMUL_MODE_PROT64)
4064 return __fxstate_size(16);
4065
4066 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4067 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4068}
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
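/* Emulate FXSAVE by executing a host fxsave into a local buffer and copying only the guest-visible portion out to guest memory. */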
4088static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4089{
4090 struct fxregs_state fx_state;
4091 int rc;
4092
4093 rc = check_fxsr(ctxt);
4094 if (rc != X86EMUL_CONTINUE)
4095 return rc;
4096
4097 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4098
4099 if (rc != X86EMUL_CONTINUE)
4100 return rc;
4101
4102 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4103 fxstate_size(ctxt));
4104}
4105
4106
4107
4108
4109
4110
4111
4112
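/* The guest supplied only part of the FXSAVE image; fill the remainder from the current hardware state so the later fxrstor does not load stale values. */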
4113static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4114 const size_t used_size)
4115{
4116 struct fxregs_state fx_tmp;
4117 int rc;
4118
4119 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4120 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4121 __fxstate_size(16) - used_size);
4122
4123 return rc;
4124}
4125
4126static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4127{
4128 struct fxregs_state fx_state;
4129 int rc;
4130 size_t size;
4131
4132 rc = check_fxsr(ctxt);
4133 if (rc != X86EMUL_CONTINUE)
4134 return rc;
4135
4136 size = fxstate_size(ctxt);
4137 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4138 if (rc != X86EMUL_CONTINUE)
4139 return rc;
4140
4141 if (size < __fxstate_size(16)) {
4142 rc = fxregs_fixup(&fx_state, size);
4143 if (rc != X86EMUL_CONTINUE)
4144 goto out;
4145 }
4146
4147 if (fx_state.mxcsr >> 16) {
4148 rc = emulate_gp(ctxt, 0);
4149 goto out;
4150 }
4151
4152 if (rc == X86EMUL_CONTINUE)
4153 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4154
4155out:
4156 return rc;
4157}
4158
4159static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4160{
4161 u32 eax, ecx, edx;
4162
4163 eax = reg_read(ctxt, VCPU_REGS_RAX);
4164 edx = reg_read(ctxt, VCPU_REGS_RDX);
4165 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4166
4167 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4168 return emulate_gp(ctxt, 0);
4169
4170 return X86EMUL_CONTINUE;
4171}
4172
4173static bool valid_cr(int nr)
4174{
4175 switch (nr) {
4176 case 0:
4177 case 2 ... 4:
4178 case 8:
4179 return true;
4180 default:
4181 return false;
4182 }
4183}
4184
4185static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4186{
4187 if (!valid_cr(ctxt->modrm_reg))
4188 return emulate_ud(ctxt);
4189
4190 return X86EMUL_CONTINUE;
4191}
4192
4193static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4194{
4195 u64 new_val = ctxt->src.val64;
4196 int cr = ctxt->modrm_reg;
4197 u64 efer = 0;
4198
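/* Reserved-bit masks indexed by CR number; invalid CRs are rejected by valid_cr() first. */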
4199 static u64 cr_reserved_bits[] = {
4200 0xffffffff00000000ULL,
4201 0, 0, 0,
4202 CR4_RESERVED_BITS,
4203 0, 0, 0,
4204 CR8_RESERVED_BITS,
4205 };
4206
4207 if (!valid_cr(cr))
4208 return emulate_ud(ctxt);
4209
4210 if (new_val & cr_reserved_bits[cr])
4211 return emulate_gp(ctxt, 0);
4212
4213 switch (cr) {
4214 case 0: {
4215 u64 cr4;
4216 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4217 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4218 return emulate_gp(ctxt, 0);
4219
4220 cr4 = ctxt->ops->get_cr(ctxt, 4);
4221 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4222
4223 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4224 !(cr4 & X86_CR4_PAE))
4225 return emulate_gp(ctxt, 0);
4226
4227 break;
4228 }
4229 case 3: {
4230 u64 rsvd = 0;
4231
4232 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4233 if (efer & EFER_LMA) {
4234 u64 maxphyaddr;
4235 u32 eax, ebx, ecx, edx;
4236
4237 eax = 0x80000008;
4238 ecx = 0;
4239 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4240 &edx, false))
4241 maxphyaddr = eax & 0xff;
4242 else
4243 maxphyaddr = 36;
4244 rsvd = rsvd_bits(maxphyaddr, 63);
4245 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4246 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4247 }
4248
4249 if (new_val & rsvd)
4250 return emulate_gp(ctxt, 0);
4251
4252 break;
4253 }
4254 case 4: {
4255 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4256
4257 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4258 return emulate_gp(ctxt, 0);
4259
4260 break;
4261 }
4262 }
4263
4264 return X86EMUL_CONTINUE;
4265}
4266
4267static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4268{
4269 unsigned long dr7;
4270
4271 ctxt->ops->get_dr(ctxt, 7, &dr7);
4272
4273
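/* DR7.GD (bit 13): accesses to the debug registers raise #DB. */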
4274 return dr7 & (1 << 13);
4275}
4276
4277static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4278{
4279 int dr = ctxt->modrm_reg;
4280 u64 cr4;
4281
4282 if (dr > 7)
4283 return emulate_ud(ctxt);
4284
4285 cr4 = ctxt->ops->get_cr(ctxt, 4);
4286 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4287 return emulate_ud(ctxt);
4288
4289 if (check_dr7_gd(ctxt)) {
4290 ulong dr6;
4291
4292 ctxt->ops->get_dr(ctxt, 6, &dr6);
4293 dr6 &= ~DR_TRAP_BITS;
4294 dr6 |= DR6_BD | DR6_RTM;
4295 ctxt->ops->set_dr(ctxt, 6, dr6);
4296 return emulate_db(ctxt);
4297 }
4298
4299 return X86EMUL_CONTINUE;
4300}
4301
4302static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4303{
4304 u64 new_val = ctxt->src.val64;
4305 int dr = ctxt->modrm_reg;
4306
4307 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4308 return emulate_gp(ctxt, 0);
4309
4310 return check_dr_read(ctxt);
4311}
4312
4313static int check_svme(struct x86_emulate_ctxt *ctxt)
4314{
4315 u64 efer = 0;
4316
4317 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4318
4319 if (!(efer & EFER_SVME))
4320 return emulate_ud(ctxt);
4321
4322 return X86EMUL_CONTINUE;
4323}
4324
4325static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4326{
4327 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4328
4329
4330 if (rax & 0xffff000000000000ULL)
4331 return emulate_gp(ctxt, 0);
4332
4333 return check_svme(ctxt);
4334}
4335
4336static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4337{
4338 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4339
4340 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4341 return emulate_ud(ctxt);
4342
4343 return X86EMUL_CONTINUE;
4344}
4345
4346static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4347{
4348 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4349 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4350
4351
4352
4353
4354
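/* The VMware backdoor pseudo-PMCs may be read regardless of CR4.PCE and CPL. */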
4355 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4356 return X86EMUL_CONTINUE;
4357
4358 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4359 ctxt->ops->check_pmc(ctxt, rcx))
4360 return emulate_gp(ctxt, 0);
4361
4362 return X86EMUL_CONTINUE;
4363}
4364
4365static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4366{
4367 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4368 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4369 return emulate_gp(ctxt, 0);
4370
4371 return X86EMUL_CONTINUE;
4372}
4373
4374static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4375{
4376 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4377 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4378 return emulate_gp(ctxt, 0);
4379
4380 return X86EMUL_CONTINUE;
4381}
4382
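/*
 * Opcode table constructors: D() is decode flags only, I() adds an execute
 * callback, F() a fastop, N marks a not-implemented slot; DI/II add an
 * intercept and the *P variants a permission check, while G/GD/EXT/E/ID/MD/GP
 * point at group, group-dual, RM-extension, escape, instruction-dual,
 * mode-dual and prefix tables respectively.
 */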
4383#define D(_y) { .flags = (_y) }
4384#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4385#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4386 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4387#define N D(NotImpl)
4388#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4389#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4390#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4391#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4392#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4393#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4394#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4395#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4396#define II(_f, _e, _i) \
4397 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4398#define IIP(_f, _e, _i, _p) \
4399 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4400 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4401#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4402
4403#define D2bv(_f) D((_f) | ByteOp), D(_f)
4404#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4405#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4406#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4407#define I2bvIP(_f, _e, _i, _p) \
4408 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4409
4410#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4411 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4412 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4413
4414static const struct opcode group7_rm0[] = {
4415 N,
4416 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4417 N, N, N, N, N, N,
4418};
4419
4420static const struct opcode group7_rm1[] = {
4421 DI(SrcNone | Priv, monitor),
4422 DI(SrcNone | Priv, mwait),
4423 N, N, N, N, N, N,
4424};
4425
4426static const struct opcode group7_rm2[] = {
4427 N,
4428 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4429 N, N, N, N, N, N,
4430};
4431
4432static const struct opcode group7_rm3[] = {
4433 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4434 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4435 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4436 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4437 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4438 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4439 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4440 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4441};
4442
4443static const struct opcode group7_rm7[] = {
4444 N,
4445 DIP(SrcNone, rdtscp, check_rdtsc),
4446 N, N, N, N, N, N,
4447};
4448
4449static const struct opcode group1[] = {
4450 F(Lock, em_add),
4451 F(Lock | PageTable, em_or),
4452 F(Lock, em_adc),
4453 F(Lock, em_sbb),
4454 F(Lock | PageTable, em_and),
4455 F(Lock, em_sub),
4456 F(Lock, em_xor),
4457 F(NoWrite, em_cmp),
4458};
4459
4460static const struct opcode group1A[] = {
4461 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4462};
4463
4464static const struct opcode group2[] = {
4465 F(DstMem | ModRM, em_rol),
4466 F(DstMem | ModRM, em_ror),
4467 F(DstMem | ModRM, em_rcl),
4468 F(DstMem | ModRM, em_rcr),
4469 F(DstMem | ModRM, em_shl),
4470 F(DstMem | ModRM, em_shr),
4471	F(DstMem | ModRM, em_shl),	/* /6 is SAL, an alias of SHL */
4472 F(DstMem | ModRM, em_sar),
4473};
4474
4475static const struct opcode group3[] = {
4476 F(DstMem | SrcImm | NoWrite, em_test),
4477	F(DstMem | SrcImm | NoWrite, em_test),	/* /1 is an undocumented alias of TEST */
4478 F(DstMem | SrcNone | Lock, em_not),
4479 F(DstMem | SrcNone | Lock, em_neg),
4480 F(DstXacc | Src2Mem, em_mul_ex),
4481 F(DstXacc | Src2Mem, em_imul_ex),
4482 F(DstXacc | Src2Mem, em_div_ex),
4483 F(DstXacc | Src2Mem, em_idiv_ex),
4484};
4485
4486static const struct opcode group4[] = {
4487 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4488 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4489 N, N, N, N, N, N,
4490};
4491
4492static const struct opcode group5[] = {
4493 F(DstMem | SrcNone | Lock, em_inc),
4494 F(DstMem | SrcNone | Lock, em_dec),
4495 I(SrcMem | NearBranch, em_call_near_abs),
4496 I(SrcMemFAddr | ImplicitOps, em_call_far),
4497 I(SrcMem | NearBranch, em_jmp_abs),
4498 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4499 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4500};
4501
4502static const struct opcode group6[] = {
4503 II(Prot | DstMem, em_sldt, sldt),
4504 II(Prot | DstMem, em_str, str),
4505 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4506 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4507 N, N, N, N,
4508};
4509
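/*
 * Group 7 (0F 01): the first half decodes the memory forms (mod != 3), the
 * second half the register forms (mod == 3), which are further split by the
 * ModRM rm field via the group7_rm* tables.
 */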
4510static const struct group_dual group7 = { {
4511 II(Mov | DstMem, em_sgdt, sgdt),
4512 II(Mov | DstMem, em_sidt, sidt),
4513 II(SrcMem | Priv, em_lgdt, lgdt),
4514 II(SrcMem | Priv, em_lidt, lidt),
4515 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4516 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4517 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4518}, {
4519 EXT(0, group7_rm0),
4520 EXT(0, group7_rm1),
4521 EXT(0, group7_rm2),
4522 EXT(0, group7_rm3),
4523 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4524 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4525 EXT(0, group7_rm7),
4526} };
4527
4528static const struct opcode group8[] = {
4529 N, N, N, N,
4530 F(DstMem | SrcImmByte | NoWrite, em_bt),
4531 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4532 F(DstMem | SrcImmByte | Lock, em_btr),
4533 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4534};
4535
4536
4537
4538
4539
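/* 0F C7 /7, register form: an F3 prefix selects RDPID; other encodings are undefined. */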
4540static const struct gprefix pfx_0f_c7_7 = {
4541 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4542};
4543
4544
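/* Group 9 (0F C7): CMPXCHG8B/CMPXCHG16B for the memory form of /1, the prefix table above for the register form of /7. */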
4545static const struct group_dual group9 = { {
4546 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4547}, {
4548 N, N, N, N, N, N, N,
4549 GP(0, &pfx_0f_c7_7),
4550} };
4551
4552static const struct opcode group11[] = {
4553 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4554 X7(D(Undefined)),
4555};
4556
4557static const struct gprefix pfx_0f_ae_7 = {
4558 I(SrcMem | ByteOp, em_clflush), N, N, N,
4559};
4560
4561static const struct group_dual group15 = { {
4562 I(ModRM | Aligned16, em_fxsave),
4563 I(ModRM | Aligned16, em_fxrstor),
4564 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4565}, {
4566 N, N, N, N, N, N, N, N,
4567} };
4568
4569static const struct gprefix pfx_0f_6f_0f_7f = {
4570 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4571};
4572
4573static const struct instr_dual instr_dual_0f_2b = {
4574 I(0, em_mov), N
4575};
4576
4577static const struct gprefix pfx_0f_2b = {
4578 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4579};
4580
4581static const struct gprefix pfx_0f_10_0f_11 = {
4582 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4583};
4584
4585static const struct gprefix pfx_0f_28_0f_29 = {
4586 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4587};
4588
4589static const struct gprefix pfx_0f_e7 = {
4590 N, I(Sse, em_mov), N, N,
4591};
4592
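/*
 * x87 escape tables: .op[] is indexed by the ModRM reg field for memory
 * operands, .high[] by (modrm - 0xc0) for register operands.  Of the D9
 * space only FNSTCW m16 (/7) is emulated.
 */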
4593static const struct escape escape_d9 = { {
4594 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4595}, {
4596
4597 N, N, N, N, N, N, N, N,
4598
4599 N, N, N, N, N, N, N, N,
4600
4601 N, N, N, N, N, N, N, N,
4602
4603 N, N, N, N, N, N, N, N,
4604
4605 N, N, N, N, N, N, N, N,
4606
4607 N, N, N, N, N, N, N, N,
4608
4609 N, N, N, N, N, N, N, N,
4610
4611 N, N, N, N, N, N, N, N,
4612} };
4613
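/* DB escape: only FNINIT (DB E3) is emulated. */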
4614static const struct escape escape_db = { {
4615 N, N, N, N, N, N, N, N,
4616}, {
4617
4618 N, N, N, N, N, N, N, N,
4619
4620 N, N, N, N, N, N, N, N,
4621
4622 N, N, N, N, N, N, N, N,
4623
4624 N, N, N, N, N, N, N, N,
4625
4626 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4627
4628 N, N, N, N, N, N, N, N,
4629
4630 N, N, N, N, N, N, N, N,
4631
4632 N, N, N, N, N, N, N, N,
4633} };
4634
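/* DD escape: only FNSTSW m16 (/7, memory form) is emulated. */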
4635static const struct escape escape_dd = { {
4636 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4637}, {
4638
4639 N, N, N, N, N, N, N, N,
4640
4641 N, N, N, N, N, N, N, N,
4642
4643 N, N, N, N, N, N, N, N,
4644
4645 N, N, N, N, N, N, N, N,
4646
4647 N, N, N, N, N, N, N, N,
4648
4649 N, N, N, N, N, N, N, N,
4650
4651 N, N, N, N, N, N, N, N,
4652
4653 N, N, N, N, N, N, N, N,
4654} };
4655
4656static const struct instr_dual instr_dual_0f_c3 = {
4657 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4658};
4659
4660static const struct mode_dual mode_dual_63 = {
4661 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4662};
4663
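/* One-byte opcode table, indexed by the primary opcode byte. */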
4664static const struct opcode opcode_table[256] = {
4665
4666 F6ALU(Lock, em_add),
4667 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4668 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4669
4670 F6ALU(Lock | PageTable, em_or),
4671 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4672 N,
4673
4674 F6ALU(Lock, em_adc),
4675 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4676 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4677
4678 F6ALU(Lock, em_sbb),
4679 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4680 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4681
4682 F6ALU(Lock | PageTable, em_and), N, N,
4683
4684 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4685
4686 F6ALU(Lock, em_xor), N, N,
4687
4688 F6ALU(NoWrite, em_cmp), N, N,
4689
4690 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4691
4692 X8(I(SrcReg | Stack, em_push)),
4693
4694 X8(I(DstReg | Stack, em_pop)),
4695
4696 I(ImplicitOps | Stack | No64, em_pusha),
4697 I(ImplicitOps | Stack | No64, em_popa),
4698 N, MD(ModRM, &mode_dual_63),
4699 N, N, N, N,
4700
4701 I(SrcImm | Mov | Stack, em_push),
4702 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4703 I(SrcImmByte | Mov | Stack, em_push),
4704 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4705 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4706 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
4707
4708 X16(D(SrcImmByte | NearBranch)),
4709
4710 G(ByteOp | DstMem | SrcImm, group1),
4711 G(DstMem | SrcImm, group1),
4712 G(ByteOp | DstMem | SrcImm | No64, group1),
4713 G(DstMem | SrcImmByte, group1),
4714 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4715 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4716
4717 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4718 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4719 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4720 D(ModRM | SrcMem | NoAccess | DstReg),
4721 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4722 G(0, group1A),
4723
4724 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4725
4726 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4727 I(SrcImmFAddr | No64, em_call_far), N,
4728 II(ImplicitOps | Stack, em_pushf, pushf),
4729 II(ImplicitOps | Stack, em_popf, popf),
4730 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4731
4732 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4733 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4734 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4735 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4736
4737 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4738 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4739 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4740 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4741
4742 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4743
4744 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4745
4746 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4747 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4748 I(ImplicitOps | NearBranch, em_ret),
4749 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4750 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4751 G(ByteOp, group11), G(0, group11),
4752
4753 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4754 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4755 I(ImplicitOps, em_ret_far),
4756 D(ImplicitOps), DI(SrcImmByte, intn),
4757 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4758
4759 G(Src2One | ByteOp, group2), G(Src2One, group2),
4760 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4761 I(DstAcc | SrcImmUByte | No64, em_aam),
4762 I(DstAcc | SrcImmUByte | No64, em_aad),
4763 F(DstAcc | ByteOp | No64, em_salc),
4764 I(DstAcc | SrcXLat | ByteOp, em_mov),
4765
4766 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4767
4768 X3(I(SrcImmByte | NearBranch, em_loop)),
4769 I(SrcImmByte | NearBranch, em_jcxz),
4770 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4771 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4772
4773 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4774 I(SrcImmFAddr | No64, em_jmp_far),
4775 D(SrcImmByte | ImplicitOps | NearBranch),
4776 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4777 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4778
4779 N, DI(ImplicitOps, icebp), N, N,
4780 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4781 G(ByteOp, group3), G(0, group3),
4782
4783 D(ImplicitOps), D(ImplicitOps),
4784 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4785 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4786};
4787
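/* Two-byte opcode table for 0F-prefixed instructions, indexed by the second opcode byte. */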
4788static const struct opcode twobyte_table[256] = {
4789
4790 G(0, group6), GD(0, &group7), N, N,
4791 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4792 II(ImplicitOps | Priv, em_clts, clts), N,
4793 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4794 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4795
4796 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4797 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4798 N, N, N, N, N, N,
4799 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4800 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4801
4802 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4803 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4804 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4805 check_cr_write),
4806 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4807 check_dr_write),
4808 N, N, N, N,
4809 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4810 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4811 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4812 N, N, N, N,
4813
4814 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4815 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4816 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4817 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4818 I(ImplicitOps | EmulateOnUD, em_sysenter),
4819 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4820 N, N,
4821 N, N, N, N, N, N, N, N,
4822
4823 X16(D(DstReg | SrcMem | ModRM)),
4824
4825 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4826
4827 N, N, N, N,
4828 N, N, N, N,
4829 N, N, N, N,
4830 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4831
4832 N, N, N, N,
4833 N, N, N, N,
4834 N, N, N, N,
4835 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4836
4837 X16(D(SrcImm | NearBranch)),
4838
4839 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4840
4841 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4842 II(ImplicitOps, em_cpuid, cpuid),
4843 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4844 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4845 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4846
4847 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4848 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4849 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4850 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4851 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4852 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4853
4854 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4855 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4856 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4857 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4858 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4859 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4860
4861 N, N,
4862 G(BitOp, group8),
4863 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4864 I(DstReg | SrcMem | ModRM, em_bsf_c),
4865 I(DstReg | SrcMem | ModRM, em_bsr_c),
4866 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4867
4868 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4869 N, ID(0, &instr_dual_0f_c3),
4870 N, N, N, GD(0, &group9),
4871
4872 X8(I(DstReg, em_bswap)),
4873
4874 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4875
4876 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4877 N, N, N, N, N, N, N, N,
4878
4879 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4880};
4881
4882static const struct instr_dual instr_dual_0f_38_f0 = {
4883 I(DstReg | SrcMem | Mov, em_movbe), N
4884};
4885
4886static const struct instr_dual instr_dual_0f_38_f1 = {
4887 I(DstMem | SrcReg | Mov, em_movbe), N
4888};
4889
4890static const struct gprefix three_byte_0f_38_f0 = {
4891 ID(0, &instr_dual_0f_38_f0), N, N, N
4892};
4893
4894static const struct gprefix three_byte_0f_38_f1 = {
4895 ID(0, &instr_dual_0f_38_f1), N, N, N
4896};
4897
4898
4899
4900
4901
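/* Three-byte 0F 38 opcode map, indexed by the third opcode byte; only MOVBE (0F 38 F0/F1) is emulated. */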
4902static const struct opcode opcode_map_0f_38[256] = {
4903
4904 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4905
4906 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4907
4908 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4909 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4910
4911 N, N, X4(N), X8(N)
4912};
4913
4914#undef D
4915#undef N
4916#undef G
4917#undef GD
4918#undef I
4919#undef GP
4920#undef EXT
4921#undef MD
4922#undef ID
4923
4924#undef D2bv
4925#undef D2bvIP
4926#undef I2bv
4927#undef I2bvIP
4928#undef F6ALU
4929
4930static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4931{
4932 unsigned size;
4933
4934 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4935 if (size == 8)
4936 size = 4;
4937 return size;
4938}
4939
4940static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4941 unsigned size, bool sign_extension)
4942{
4943 int rc = X86EMUL_CONTINUE;
4944
4945 op->type = OP_IMM;
4946 op->bytes = size;
4947 op->addr.mem.ea = ctxt->_eip;
4948
4949 switch (op->bytes) {
4950 case 1:
4951 op->val = insn_fetch(s8, ctxt);
4952 break;
4953 case 2:
4954 op->val = insn_fetch(s16, ctxt);
4955 break;
4956 case 4:
4957 op->val = insn_fetch(s32, ctxt);
4958 break;
4959 case 8:
4960 op->val = insn_fetch(s64, ctxt);
4961 break;
4962 }
4963 if (!sign_extension) {
4964 switch (op->bytes) {
4965 case 1:
4966 op->val &= 0xff;
4967 break;
4968 case 2:
4969 op->val &= 0xffff;
4970 break;
4971 case 4:
4972 op->val &= 0xffffffff;
4973 break;
4974 }
4975 }
4976done:
4977 return rc;
4978}
4979
4980static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4981 unsigned d)
4982{
4983 int rc = X86EMUL_CONTINUE;
4984
4985 switch (d) {
4986 case OpReg:
4987 decode_register_operand(ctxt, op);
4988 break;
4989 case OpImmUByte:
4990 rc = decode_imm(ctxt, op, 1, false);
4991 break;
4992 case OpMem:
4993 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4994 mem_common:
4995 *op = ctxt->memop;
4996 ctxt->memopp = op;
4997 if (ctxt->d & BitOp)
4998 fetch_bit_operand(ctxt);
4999 op->orig_val = op->val;
5000 break;
5001 case OpMem64:
5002 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
5003 goto mem_common;
5004 case OpAcc:
5005 op->type = OP_REG;
5006 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5007 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5008 fetch_register_operand(op);
5009 op->orig_val = op->val;
5010 break;
5011 case OpAccLo:
5012 op->type = OP_REG;
5013 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
5014 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5015 fetch_register_operand(op);
5016 op->orig_val = op->val;
5017 break;
5018 case OpAccHi:
5019 if (ctxt->d & ByteOp) {
5020 op->type = OP_NONE;
5021 break;
5022 }
5023 op->type = OP_REG;
5024 op->bytes = ctxt->op_bytes;
5025 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5026 fetch_register_operand(op);
5027 op->orig_val = op->val;
5028 break;
5029 case OpDI:
5030 op->type = OP_MEM;
5031 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5032 op->addr.mem.ea =
5033 register_address(ctxt, VCPU_REGS_RDI);
5034 op->addr.mem.seg = VCPU_SREG_ES;
5035 op->val = 0;
5036 op->count = 1;
5037 break;
5038 case OpDX:
5039 op->type = OP_REG;
5040 op->bytes = 2;
5041 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5042 fetch_register_operand(op);
5043 break;
5044 case OpCL:
5045 op->type = OP_IMM;
5046 op->bytes = 1;
5047 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5048 break;
5049 case OpImmByte:
5050 rc = decode_imm(ctxt, op, 1, true);
5051 break;
5052 case OpOne:
5053 op->type = OP_IMM;
5054 op->bytes = 1;
5055 op->val = 1;
5056 break;
5057 case OpImm:
5058 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5059 break;
5060 case OpImm64:
5061 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5062 break;
5063 case OpMem8:
5064 ctxt->memop.bytes = 1;
5065 if (ctxt->memop.type == OP_REG) {
5066 ctxt->memop.addr.reg = decode_register(ctxt,
5067 ctxt->modrm_rm, true);
5068 fetch_register_operand(&ctxt->memop);
5069 }
5070 goto mem_common;
5071 case OpMem16:
5072 ctxt->memop.bytes = 2;
5073 goto mem_common;
5074 case OpMem32:
5075 ctxt->memop.bytes = 4;
5076 goto mem_common;
5077 case OpImmU16:
5078 rc = decode_imm(ctxt, op, 2, false);
5079 break;
5080 case OpImmU:
5081 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5082 break;
5083 case OpSI:
5084 op->type = OP_MEM;
5085 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5086 op->addr.mem.ea =
5087 register_address(ctxt, VCPU_REGS_RSI);
5088 op->addr.mem.seg = ctxt->seg_override;
5089 op->val = 0;
5090 op->count = 1;
5091 break;
5092 case OpXLat:
5093 op->type = OP_MEM;
5094 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5095 op->addr.mem.ea =
5096 address_mask(ctxt,
5097 reg_read(ctxt, VCPU_REGS_RBX) +
5098 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5099 op->addr.mem.seg = ctxt->seg_override;
5100 op->val = 0;
5101 break;
5102 case OpImmFAddr:
5103 op->type = OP_IMM;
5104 op->addr.mem.ea = ctxt->_eip;
5105 op->bytes = ctxt->op_bytes + 2;
5106 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5107 break;
5108 case OpMemFAddr:
5109 ctxt->memop.bytes = ctxt->op_bytes + 2;
5110 goto mem_common;
5111 case OpES:
5112 op->type = OP_IMM;
5113 op->val = VCPU_SREG_ES;
5114 break;
5115 case OpCS:
5116 op->type = OP_IMM;
5117 op->val = VCPU_SREG_CS;
5118 break;
5119 case OpSS:
5120 op->type = OP_IMM;
5121 op->val = VCPU_SREG_SS;
5122 break;
5123 case OpDS:
5124 op->type = OP_IMM;
5125 op->val = VCPU_SREG_DS;
5126 break;
5127 case OpFS:
5128 op->type = OP_IMM;
5129 op->val = VCPU_SREG_FS;
5130 break;
5131 case OpGS:
5132 op->type = OP_IMM;
5133 op->val = VCPU_SREG_GS;
5134 break;
5135 case OpImplicit:
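		/* Special instructions do their own operand decoding. */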
5136
5137 default:
5138		op->type = OP_NONE; /* Disable writeback. */
5139 break;
5140 }
5141
5142done:
5143 return rc;
5144}
5145
5146int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5147{
5148 int rc = X86EMUL_CONTINUE;
5149 int mode = ctxt->mode;
5150 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5151 bool op_prefix = false;
5152 bool has_seg_override = false;
5153 struct opcode opcode;
5154 u16 dummy;
5155 struct desc_struct desc;
5156
5157 ctxt->memop.type = OP_NONE;
5158 ctxt->memopp = NULL;
5159 ctxt->_eip = ctxt->eip;
5160 ctxt->fetch.ptr = ctxt->fetch.data;
5161 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5162 ctxt->opcode_len = 1;
5163 if (insn_len > 0)
5164 memcpy(ctxt->fetch.data, insn, insn_len);
5165 else {
5166 rc = __do_insn_fetch_bytes(ctxt, 1);
5167 if (rc != X86EMUL_CONTINUE)
5168 goto done;
5169 }
5170
5171 switch (mode) {
5172 case X86EMUL_MODE_REAL:
5173 case X86EMUL_MODE_VM86:
5174 def_op_bytes = def_ad_bytes = 2;
5175 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5176 if (desc.d)
5177 def_op_bytes = def_ad_bytes = 4;
5178 break;
5179 case X86EMUL_MODE_PROT16:
5180 def_op_bytes = def_ad_bytes = 2;
5181 break;
5182 case X86EMUL_MODE_PROT32:
5183 def_op_bytes = def_ad_bytes = 4;
5184 break;
5185#ifdef CONFIG_X86_64
5186 case X86EMUL_MODE_PROT64:
5187 def_op_bytes = 4;
5188 def_ad_bytes = 8;
5189 break;
5190#endif
5191 default:
5192 return EMULATION_FAILED;
5193 }
5194
5195 ctxt->op_bytes = def_op_bytes;
5196 ctxt->ad_bytes = def_ad_bytes;
5197
5198
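	/* Legacy prefixes. */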
5199 for (;;) {
5200 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5201		case 0x66:	/* operand-size override */
5202 op_prefix = true;
5203
5204 ctxt->op_bytes = def_op_bytes ^ 6;
5205 break;
5206		case 0x67:	/* address-size override */
5207 if (mode == X86EMUL_MODE_PROT64)
5208
5209 ctxt->ad_bytes = def_ad_bytes ^ 12;
5210 else
5211
5212 ctxt->ad_bytes = def_ad_bytes ^ 6;
5213 break;
5214		case 0x26:	/* ES override */
5215		case 0x2e:	/* CS override */
5216		case 0x36:	/* SS override */
5217		case 0x3e:	/* DS override */
5218 has_seg_override = true;
5219 ctxt->seg_override = (ctxt->b >> 3) & 3;
5220 break;
5221		case 0x64:	/* FS override */
5222		case 0x65:	/* GS override */
5223 has_seg_override = true;
5224 ctxt->seg_override = ctxt->b & 7;
5225 break;
5226		case 0x40 ... 0x4f: /* REX prefix */
5227 if (mode != X86EMUL_MODE_PROT64)
5228 goto done_prefixes;
5229 ctxt->rex_prefix = ctxt->b;
5230 continue;
5231		case 0xf0:	/* LOCK */
5232 ctxt->lock_prefix = 1;
5233 break;
5234		case 0xf2:	/* REPNE/REPNZ */
5235		case 0xf3:	/* REP/REPE/REPZ */
5236 ctxt->rep_prefix = ctxt->b;
5237 break;
5238 default:
5239 goto done_prefixes;
5240 }
5241
5242
5243
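		/* Any legacy prefix after a REX prefix nullifies its effect. */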
5244 ctxt->rex_prefix = 0;
5245 }
5246
5247done_prefixes:
5248
5249
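	/* REX prefix: REX.W selects a 64-bit operand size. */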
5250 if (ctxt->rex_prefix & 8)
5251 ctxt->op_bytes = 8;
5252
5253
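	/* Opcode byte(s). */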
5254 opcode = opcode_table[ctxt->b];
5255
5256 if (ctxt->b == 0x0f) {
5257 ctxt->opcode_len = 2;
5258 ctxt->b = insn_fetch(u8, ctxt);
5259 opcode = twobyte_table[ctxt->b];
5260
5261
5262 if (ctxt->b == 0x38) {
5263 ctxt->opcode_len = 3;
5264 ctxt->b = insn_fetch(u8, ctxt);
5265 opcode = opcode_map_0f_38[ctxt->b];
5266 }
5267 }
5268 ctxt->d = opcode.flags;
5269
5270 if (ctxt->d & ModRM)
5271 ctxt->modrm = insn_fetch(u8, ctxt);
5272
5273
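	/* VEX-prefixed (C4/C5) instructions are not implemented. */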
5274 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5275 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5276 ctxt->d = NotImpl;
5277 }
5278
5279 while (ctxt->d & GroupMask) {
5280 switch (ctxt->d & GroupMask) {
5281 case Group:
5282 goffset = (ctxt->modrm >> 3) & 7;
5283 opcode = opcode.u.group[goffset];
5284 break;
5285 case GroupDual:
5286 goffset = (ctxt->modrm >> 3) & 7;
5287 if ((ctxt->modrm >> 6) == 3)
5288 opcode = opcode.u.gdual->mod3[goffset];
5289 else
5290 opcode = opcode.u.gdual->mod012[goffset];
5291 break;
5292 case RMExt:
5293 goffset = ctxt->modrm & 7;
5294 opcode = opcode.u.group[goffset];
5295 break;
5296 case Prefix:
5297 if (ctxt->rep_prefix && op_prefix)
5298 return EMULATION_FAILED;
5299 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5300 switch (simd_prefix) {
5301 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5302 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5303 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5304 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5305 }
5306 break;
5307 case Escape:
5308 if (ctxt->modrm > 0xbf)
5309 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5310 else
5311 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5312 break;
5313 case InstrDual:
5314 if ((ctxt->modrm >> 6) == 3)
5315 opcode = opcode.u.idual->mod3;
5316 else
5317 opcode = opcode.u.idual->mod012;
5318 break;
5319 case ModeDual:
5320 if (ctxt->mode == X86EMUL_MODE_PROT64)
5321 opcode = opcode.u.mdual->mode64;
5322 else
5323 opcode = opcode.u.mdual->mode32;
5324 break;
5325 default:
5326 return EMULATION_FAILED;
5327 }
5328
5329 ctxt->d &= ~(u64)GroupMask;
5330 ctxt->d |= opcode.flags;
5331 }
5332
5333
5334 if (ctxt->d == 0)
5335 return EMULATION_FAILED;
5336
5337 ctxt->execute = opcode.u.execute;
5338
5339 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5340 return EMULATION_FAILED;
5341
5342 if (unlikely(ctxt->d &
5343 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5344 No16))) {
5345
5346
5347
5348
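		/*
		 * check_perm and intercept are copied here unconditionally and
		 * consulted again unconditionally in x86_emulate_insn.
		 */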
5349 ctxt->check_perm = opcode.check_perm;
5350 ctxt->intercept = opcode.intercept;
5351
5352 if (ctxt->d & NotImpl)
5353 return EMULATION_FAILED;
5354
5355 if (mode == X86EMUL_MODE_PROT64) {
5356 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5357 ctxt->op_bytes = 8;
5358 else if (ctxt->d & NearBranch)
5359 ctxt->op_bytes = 8;
5360 }
5361
5362 if (ctxt->d & Op3264) {
5363 if (mode == X86EMUL_MODE_PROT64)
5364 ctxt->op_bytes = 8;
5365 else
5366 ctxt->op_bytes = 4;
5367 }
5368
5369 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5370 ctxt->op_bytes = 4;
5371
5372 if (ctxt->d & Sse)
5373 ctxt->op_bytes = 16;
5374 else if (ctxt->d & Mmx)
5375 ctxt->op_bytes = 8;
5376 }
5377
5378
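	/* ModRM and SIB bytes. */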
5379 if (ctxt->d & ModRM) {
5380 rc = decode_modrm(ctxt, &ctxt->memop);
5381 if (!has_seg_override) {
5382 has_seg_override = true;
5383 ctxt->seg_override = ctxt->modrm_seg;
5384 }
5385 } else if (ctxt->d & MemAbs)
5386 rc = decode_abs(ctxt, &ctxt->memop);
5387 if (rc != X86EMUL_CONTINUE)
5388 goto done;
5389
5390 if (!has_seg_override)
5391 ctxt->seg_override = VCPU_SREG_DS;
5392
5393 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5394
5395
5396
5397
5398
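	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */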
5399 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5400 if (rc != X86EMUL_CONTINUE)
5401 goto done;
5402
5403
5404
5405
5406
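	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */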
5407 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5408 if (rc != X86EMUL_CONTINUE)
5409 goto done;
5410
5411
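	/* Decode and fetch the destination operand: register or memory. */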
5412 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5413
5414 if (ctxt->rip_relative && likely(ctxt->memopp))
5415 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5416 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5417
5418done:
5419 if (rc == X86EMUL_PROPAGATE_FAULT)
5420 ctxt->have_exception = true;
5421 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5422}
5423
5424bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5425{
5426 return ctxt->d & PageTable;
5427}
5428
5429static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5430{
5431
5432
5433
5434
5435
5436
5437
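	/*
	 * CMPS (0xa6/0xa7) and SCAS (0xae/0xaf) have a second termination
	 * condition that depends on the repeat prefix:
	 *   REPE/REPZ stops when ZF is clear,
	 *   REPNE/REPNZ stops when ZF is set.
	 */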
5438 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5439 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5440 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5441 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5442 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5443 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5444 return true;
5445
5446 return false;
5447}
5448
5449static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5450{
5451 int rc;
5452
5453 rc = asm_safe("fwait");
5454
5455 if (unlikely(rc != X86EMUL_CONTINUE))
5456 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5457
5458 return X86EMUL_CONTINUE;
5459}
5460
5461static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5462 struct operand *op)
5463{
5464 if (op->type == OP_MM)
5465 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5466}
5467
5468static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5469{
5470 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5471
5472 if (!(ctxt->d & ByteOp))
5473 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5474
5475 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5476 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5477 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5478 : "c"(ctxt->src2.val));
5479
5480 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
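	/* The fastop stub reports a fault (e.g. #DE) by zeroing 'fop'. */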
5481 if (!fop)
5482 return emulate_de(ctxt);
5483 return X86EMUL_CONTINUE;
5484}
5485
5486void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5487{
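	/* Clear fields that are set conditionally but read without a guard. */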
5488 memset(&ctxt->rip_relative, 0,
5489 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5490
5491 ctxt->io_read.pos = 0;
5492 ctxt->io_read.end = 0;
5493 ctxt->mem_read.end = 0;
5494}
5495
5496int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5497{
5498 const struct x86_emulate_ops *ops = ctxt->ops;
5499 int rc = X86EMUL_CONTINUE;
5500 int saved_dst_type = ctxt->dst.type;
5501 unsigned emul_flags;
5502
5503 ctxt->mem_read.pos = 0;
5504
5505
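	/* The LOCK prefix is allowed only with a subset of instructions, and only with a memory destination. */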
5506 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5507 rc = emulate_ud(ctxt);
5508 goto done;
5509 }
5510
5511 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5512 rc = emulate_ud(ctxt);
5513 goto done;
5514 }
5515
5516 emul_flags = ctxt->ops->get_hflags(ctxt);
5517 if (unlikely(ctxt->d &
5518 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5519 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5520 (ctxt->d & Undefined)) {
5521 rc = emulate_ud(ctxt);
5522 goto done;
5523 }
5524
5525 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5526 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5527 rc = emulate_ud(ctxt);
5528 goto done;
5529 }
5530
5531 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5532 rc = emulate_nm(ctxt);
5533 goto done;
5534 }
5535
5536 if (ctxt->d & Mmx) {
5537 rc = flush_pending_x87_faults(ctxt);
5538 if (rc != X86EMUL_CONTINUE)
5539 goto done;
5540
5541
5542
5543
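			/*
			 * Now that the FPU is known to be exception free, the
			 * MMX operands can be fetched from it.
			 */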
5544 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5545 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5546 if (!(ctxt->d & Mov))
5547 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5548 }
5549
5550 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5551 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5552 X86_ICPT_PRE_EXCEPT);
5553 if (rc != X86EMUL_CONTINUE)
5554 goto done;
5555 }
5556
5557
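		/* Some instructions can only be executed in protected mode. */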
5558 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5559 rc = emulate_ud(ctxt);
5560 goto done;
5561 }
5562
5563
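		/* Privileged instructions require CPL 0. */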
5564 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5565 if (ctxt->d & PrivUD)
5566 rc = emulate_ud(ctxt);
5567 else
5568 rc = emulate_gp(ctxt, 0);
5569 goto done;
5570 }
5571
5572
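		/* Do instruction-specific permission checks. */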
5573 if (ctxt->d & CheckPerm) {
5574 rc = ctxt->check_perm(ctxt);
5575 if (rc != X86EMUL_CONTINUE)
5576 goto done;
5577 }
5578
5579 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5580 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5581 X86_ICPT_POST_EXCEPT);
5582 if (rc != X86EMUL_CONTINUE)
5583 goto done;
5584 }
5585
5586 if (ctxt->rep_prefix && (ctxt->d & String)) {
5587
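			/* All REP prefixes share the same first termination condition: (address-size masked) RCX == 0. */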
5588 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5589 string_registers_quirk(ctxt);
5590 ctxt->eip = ctxt->_eip;
5591 ctxt->eflags &= ~X86_EFLAGS_RF;
5592 goto done;
5593 }
5594 }
5595 }
5596
5597 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5598 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5599 ctxt->src.valptr, ctxt->src.bytes);
5600 if (rc != X86EMUL_CONTINUE)
5601 goto done;
5602 ctxt->src.orig_val64 = ctxt->src.val64;
5603 }
5604
5605 if (ctxt->src2.type == OP_MEM) {
5606 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5607 &ctxt->src2.val, ctxt->src2.bytes);
5608 if (rc != X86EMUL_CONTINUE)
5609 goto done;
5610 }
5611
5612 if ((ctxt->d & DstMask) == ImplicitOps)
5613 goto special_insn;
5614
5615
5616 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5617
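		/* optimisation: avoid the slow emulated read when the insn is a pure store (Mov) */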
5618 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5619 &ctxt->dst.val, ctxt->dst.bytes);
5620 if (rc != X86EMUL_CONTINUE) {
5621 if (!(ctxt->d & NoWrite) &&
5622 rc == X86EMUL_PROPAGATE_FAULT &&
5623 ctxt->exception.vector == PF_VECTOR)
5624 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5625 goto done;
5626 }
5627 }
5628
5629 ctxt->dst.orig_val64 = ctxt->dst.val64;
5630
5631special_insn:
5632
5633 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5634 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5635 X86_ICPT_POST_MEMACCESS);
5636 if (rc != X86EMUL_CONTINUE)
5637 goto done;
5638 }
5639
5640 if (ctxt->rep_prefix && (ctxt->d & String))
5641 ctxt->eflags |= X86_EFLAGS_RF;
5642 else
5643 ctxt->eflags &= ~X86_EFLAGS_RF;
5644
5645 if (ctxt->execute) {
5646 if (ctxt->d & Fastop) {
5647 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5648 rc = fastop(ctxt, fop);
5649 if (rc != X86EMUL_CONTINUE)
5650 goto done;
5651 goto writeback;
5652 }
5653 rc = ctxt->execute(ctxt);
5654 if (rc != X86EMUL_CONTINUE)
5655 goto done;
5656 goto writeback;
5657 }
5658
5659 if (ctxt->opcode_len == 2)
5660 goto twobyte_insn;
5661 else if (ctxt->opcode_len == 3)
5662 goto threebyte_insn;
5663
5664 switch (ctxt->b) {
5665	case 0x70 ... 0x7f: /* jcc (short) */
5666 if (test_cc(ctxt->b, ctxt->eflags))
5667 rc = jmp_rel(ctxt, ctxt->src.val);
5668 break;
5669	case 0x8d: /* lea r16/r32, m */
5670 ctxt->dst.val = ctxt->src.addr.mem.ea;
5671 break;
5672	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5673		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5674			ctxt->dst.type = OP_NONE;  /* nop */
5675 else
5676 rc = em_xchg(ctxt);
5677 break;
5678	case 0x98: /* cbw/cwde/cdqe */
5679 switch (ctxt->op_bytes) {
5680 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5681 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5682 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5683 }
5684 break;
5685	case 0xcc: /* int3 */
5686 rc = emulate_int(ctxt, 3);
5687 break;
5688	case 0xcd: /* int n */
5689 rc = emulate_int(ctxt, ctxt->src.val);
5690 break;
5691	case 0xce: /* into */
5692 if (ctxt->eflags & X86_EFLAGS_OF)
5693 rc = emulate_int(ctxt, 4);
5694 break;
5695	case 0xe9: /* jmp rel */
5696	case 0xeb: /* jmp rel short */
5697 rc = jmp_rel(ctxt, ctxt->src.val);
5698 ctxt->dst.type = OP_NONE;
5699 break;
5700	case 0xf4:	/* hlt */
5701		ctxt->ops->halt(ctxt);
5702		break;
5703	case 0xf5:	/* cmc */
5704		/* complement carry flag from eflags reg */
5705		ctxt->eflags ^= X86_EFLAGS_CF;
5706		break;
5707	case 0xf8:	/* clc */
5708		ctxt->eflags &= ~X86_EFLAGS_CF;
5709		break;
5710	case 0xf9:	/* stc */
5711		ctxt->eflags |= X86_EFLAGS_CF;
5712		break;
5713	case 0xfc:	/* cld */
5714		ctxt->eflags &= ~X86_EFLAGS_DF;
5715		break;
5716	case 0xfd:	/* std */
5717		ctxt->eflags |= X86_EFLAGS_DF;
5718		break;
5719 default:
5720 goto cannot_emulate;
5721 }
5722
5723 if (rc != X86EMUL_CONTINUE)
5724 goto done;
5725
5726writeback:
5727 if (ctxt->d & SrcWrite) {
5728 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5729 rc = writeback(ctxt, &ctxt->src);
5730 if (rc != X86EMUL_CONTINUE)
5731 goto done;
5732 }
5733 if (!(ctxt->d & NoWrite)) {
5734 rc = writeback(ctxt, &ctxt->dst);
5735 if (rc != X86EMUL_CONTINUE)
5736 goto done;
5737 }
5738
5739
5740
5741
5742
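	/*
	 * Restore the destination operand type in case the decode is reused,
	 * which happens for string instructions.
	 */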
5743 ctxt->dst.type = saved_dst_type;
5744
5745 if ((ctxt->d & SrcMask) == SrcSI)
5746 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5747
5748 if ((ctxt->d & DstMask) == DstDI)
5749 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5750
5751 if (ctxt->rep_prefix && (ctxt->d & String)) {
5752 unsigned int count;
5753 struct read_cache *r = &ctxt->io_read;
5754 if ((ctxt->d & SrcMask) == SrcSI)
5755 count = ctxt->src.count;
5756 else
5757 count = ctxt->dst.count;
5758 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5759
5760 if (!string_insn_completed(ctxt)) {
5761
5762
5763
5764
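			/*
			 * Re-enter the guest when the pio read-ahead buffer is
			 * empty or, if it is not used, after every 1024
			 * iterations.
			 */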
5765 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5766 (r->end == 0 || r->end != r->pos)) {
5767
5768
5769
5770
5771
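				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * only restarted once the pio read-ahead buffer
				 * is empty, it has to be done here as well.
				 */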
5772 ctxt->mem_read.end = 0;
5773 writeback_registers(ctxt);
5774 return EMULATION_RESTART;
5775 }
5776			goto done; /* skip rip writeback */
5777 }
5778 ctxt->eflags &= ~X86_EFLAGS_RF;
5779 }
5780
5781 ctxt->eip = ctxt->_eip;
5782
5783done:
5784 if (rc == X86EMUL_PROPAGATE_FAULT) {
5785 WARN_ON(ctxt->exception.vector > 0x1f);
5786 ctxt->have_exception = true;
5787 }
5788 if (rc == X86EMUL_INTERCEPTED)
5789 return EMULATION_INTERCEPTED;
5790
5791 if (rc == X86EMUL_CONTINUE)
5792 writeback_registers(ctxt);
5793
5794 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5795
5796twobyte_insn:
5797 switch (ctxt->b) {
5798	case 0x09:		/* wbinvd */
5799 (ctxt->ops->wbinvd)(ctxt);
5800 break;
5801	case 0x08:		/* invd */
5802	case 0x0d:		/* GrpP (prefetch) */
5803	case 0x18:		/* Grp16 (prefetch/nop) */
5804	case 0x1f:		/* nop */
5805 break;
5806	case 0x20: /* mov cr, reg */
5807 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5808 break;
5809	case 0x21: /* mov from dr to reg */
5810 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5811 break;
5812	case 0x40 ... 0x4f:	/* cmov */
5813		if (test_cc(ctxt->b, ctxt->eflags))
5814			ctxt->dst.val = ctxt->src.val;
5815		else if (ctxt->op_bytes != 4)
5816			ctxt->dst.type = OP_NONE; /* no writeback */
5817 break;
5818	case 0x80 ... 0x8f: /* jcc rel */
5819 if (test_cc(ctxt->b, ctxt->eflags))
5820 rc = jmp_rel(ctxt, ctxt->src.val);
5821 break;
5822	case 0x90 ... 0x9f: /* setcc r/m8 */
5823 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5824 break;
5825	case 0xb6 ... 0xb7: /* movzx */
5826 ctxt->dst.bytes = ctxt->op_bytes;
5827 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5828 : (u16) ctxt->src.val;
5829 break;
5830	case 0xbe ... 0xbf: /* movsx */
5831 ctxt->dst.bytes = ctxt->op_bytes;
5832 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5833 (s16) ctxt->src.val;
5834 break;
5835 default:
5836 goto cannot_emulate;
5837 }
5838
5839threebyte_insn:
5840
5841 if (rc != X86EMUL_CONTINUE)
5842 goto done;
5843
5844 goto writeback;
5845
5846cannot_emulate:
5847 return EMULATION_FAILED;
5848}
5849
5850void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5851{
5852 invalidate_registers(ctxt);
5853}
5854
5855void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5856{
5857 writeback_registers(ctxt);
5858}
5859
5860bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5861{
5862 if (ctxt->rep_prefix && (ctxt->d & String))
5863 return false;
5864
5865 if (ctxt->d & TwoMemOp)
5866 return false;
5867
5868 return true;
5869}
5870