23#include <linux/kvm_host.h>
24#include "kvm_cache_regs.h"
25#include <linux/module.h>
26#include <asm/kvm_emulate.h>
27
28#include "x86.h"
29#include "tss.h"
30
31
32
33
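/*
 * Operand types.  These values are packed into each opcode's decode flags
 * (via the Dst/Src/Src2 shifts below) to describe how the instruction's
 * operands are fetched and written back.
 */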
34#define OpNone 0ull
35#define OpImplicit 1ull
36#define OpReg 2ull
37#define OpMem 3ull
38#define OpAcc 4ull
39#define OpDI 5ull
40#define OpMem64 6ull
41#define OpImmUByte 7ull
42#define OpDX 8ull
43#define OpCL 9ull
44#define OpImmByte 10ull
45#define OpOne 11ull
46#define OpImm 12ull
47#define OpMem16 13ull
48#define OpMem32 14ull
49#define OpImmU 15ull
50#define OpSI 16ull
51#define OpImmFAddr 17ull
52#define OpMemFAddr 18ull
53#define OpImmU16 19ull
54#define OpES 20ull
55#define OpCS 21ull
56#define OpSS 22ull
57#define OpDS 23ull
58#define OpFS 24ull
59#define OpGS 25ull
60#define OpMem8 26ull
61
62#define OpBits 5
63#define OpMask ((1ull << OpBits) - 1)
64
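/*
 * Opcode decode flags.  Bit 0 (ByteOp) selects byte-sized operands; the
 * destination, source and second-source operand types occupy the bit fields
 * defined by DstShift, SrcShift and Src2Shift.
 */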
75#define ByteOp (1<<0)
76
77#define DstShift 1
78#define ImplicitOps (OpImplicit << DstShift)
79#define DstReg (OpReg << DstShift)
80#define DstMem (OpMem << DstShift)
81#define DstAcc (OpAcc << DstShift)
82#define DstDI (OpDI << DstShift)
83#define DstMem64 (OpMem64 << DstShift)
84#define DstImmUByte (OpImmUByte << DstShift)
85#define DstDX (OpDX << DstShift)
86#define DstMask (OpMask << DstShift)
87
88#define SrcShift 6
89#define SrcNone (OpNone << SrcShift)
90#define SrcReg (OpReg << SrcShift)
91#define SrcMem (OpMem << SrcShift)
92#define SrcMem16 (OpMem16 << SrcShift)
93#define SrcMem32 (OpMem32 << SrcShift)
94#define SrcImm (OpImm << SrcShift)
95#define SrcImmByte (OpImmByte << SrcShift)
96#define SrcOne (OpOne << SrcShift)
97#define SrcImmUByte (OpImmUByte << SrcShift)
98#define SrcImmU (OpImmU << SrcShift)
99#define SrcSI (OpSI << SrcShift)
100#define SrcImmFAddr (OpImmFAddr << SrcShift)
101#define SrcMemFAddr (OpMemFAddr << SrcShift)
102#define SrcAcc (OpAcc << SrcShift)
103#define SrcImmU16 (OpImmU16 << SrcShift)
104#define SrcDX (OpDX << SrcShift)
105#define SrcMem8 (OpMem8 << SrcShift)
106#define SrcMask (OpMask << SrcShift)
107#define BitOp (1<<11)
108#define MemAbs (1<<12)
109#define String (1<<13)
110#define Stack (1<<14)
111#define GroupMask (7<<15)
112#define Group (1<<15)
113#define GroupDual (2<<15)
114#define Prefix (3<<15)
115#define RMExt (4<<15)
116#define Sse (1<<18)
117
118#define ModRM (1<<19)
119
120#define Mov (1<<20)
121
122#define Prot (1<<21)
123#define VendorSpecific (1<<22)
124#define NoAccess (1<<23)
125#define Op3264 (1<<24)
126#define Undefined (1<<25)
127#define Lock (1<<26)
128#define Priv (1<<27)
129#define No64 (1<<28)
130#define PageTable (1 << 29)
131
132#define Src2Shift (30)
133#define Src2None (OpNone << Src2Shift)
134#define Src2CL (OpCL << Src2Shift)
135#define Src2ImmByte (OpImmByte << Src2Shift)
136#define Src2One (OpOne << Src2Shift)
137#define Src2Imm (OpImm << Src2Shift)
138#define Src2ES (OpES << Src2Shift)
139#define Src2CS (OpCS << Src2Shift)
140#define Src2SS (OpSS << Src2Shift)
141#define Src2DS (OpDS << Src2Shift)
142#define Src2FS (OpFS << Src2Shift)
143#define Src2GS (OpGS << Src2Shift)
144#define Src2Mask (OpMask << Src2Shift)
145#define Mmx ((u64)1 << 40)
146#define Aligned ((u64)1 << 41)
147#define Unaligned ((u64)1 << 42)
148#define Avx ((u64)1 << 43)
149
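/* Replicate an opcode-table entry 2..16 times. */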
150#define X2(x...) x, x
151#define X3(x...) X2(x), x
152#define X4(x...) X2(x), X2(x)
153#define X5(x...) X4(x), x
154#define X6(x...) X4(x), X2(x)
155#define X7(x...) X4(x), X3(x)
156#define X8(x...) X4(x), X4(x)
157#define X16(x...) X8(x), X8(x)
158
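/*
 * One decode-table entry: 56 bits of decode flags, an intercept code, and
 * either an execute callback or a pointer to a group/dual/prefix sub-table,
 * plus an optional permission-check callback.
 */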
159struct opcode {
160 u64 flags : 56;
161 u64 intercept : 8;
162 union {
163 int (*execute)(struct x86_emulate_ctxt *ctxt);
164 const struct opcode *group;
165 const struct group_dual *gdual;
166 const struct gprefix *gprefix;
167 } u;
168 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
169};
170
171struct group_dual {
172 struct opcode mod012[8];
173 struct opcode mod3[8];
174};
175
176struct gprefix {
177 struct opcode pfx_no;
178 struct opcode pfx_66;
179 struct opcode pfx_f2;
180 struct opcode pfx_f3;
181};
182
183
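/* EFLAGS bit definitions. */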
184#define EFLG_ID (1<<21)
185#define EFLG_VIP (1<<20)
186#define EFLG_VIF (1<<19)
187#define EFLG_AC (1<<18)
188#define EFLG_VM (1<<17)
189#define EFLG_RF (1<<16)
190#define EFLG_IOPL (3<<12)
191#define EFLG_NT (1<<14)
192#define EFLG_OF (1<<11)
193#define EFLG_DF (1<<10)
194#define EFLG_IF (1<<9)
195#define EFLG_TF (1<<8)
196#define EFLG_SF (1<<7)
197#define EFLG_ZF (1<<6)
198#define EFLG_AF (1<<4)
199#define EFLG_PF (1<<2)
200#define EFLG_CF (1<<0)
201
202#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
203#define EFLG_RESERVED_ONE_MASK 2
204
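/*
 * Guest GPRs are cached in ctxt->_regs: reg_read() lazily fetches a register
 * on first use, reg_write() marks it dirty so that writeback_registers()
 * flushes it back to the vcpu, and reg_rmw() does both.
 */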
205static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
206{
207 if (!(ctxt->regs_valid & (1 << nr))) {
208 ctxt->regs_valid |= 1 << nr;
209 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
210 }
211 return ctxt->_regs[nr];
212}
213
214static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
215{
216 ctxt->regs_valid |= 1 << nr;
217 ctxt->regs_dirty |= 1 << nr;
218 return &ctxt->_regs[nr];
219}
220
221static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
222{
223 reg_read(ctxt, nr);
224 return reg_write(ctxt, nr);
225}
226
227static void writeback_registers(struct x86_emulate_ctxt *ctxt)
228{
229 unsigned reg;
230
231 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
232 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
233}
234
235static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
236{
237 ctxt->regs_dirty = 0;
238 ctxt->regs_valid = 0;
239}
240
248#if defined(CONFIG_X86_64)
249#define _LO32 "k"
250#define _STK "%%rsp"
251#elif defined(__i386__)
252#define _LO32 ""
253#define _STK "%%esp"
254#endif
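/*
 * The _PRE_EFLAGS/_POST_EFLAGS fragments bracket an arithmetic instruction
 * run on the host: they load the guest's saved flags (restricted to
 * EFLAGS_MASK) into the host EFLAGS beforehand and copy the resulting
 * arithmetic flags back out afterwards.
 */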
260#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
261
262
263#define _PRE_EFLAGS(_sav, _msk, _tmp) \
264 \
265 "movl %"_sav",%"_LO32 _tmp"; " \
266 "push %"_tmp"; " \
267 "push %"_tmp"; " \
268 "movl %"_msk",%"_LO32 _tmp"; " \
269 "andl %"_LO32 _tmp",("_STK"); " \
270 "pushf; " \
271 "notl %"_LO32 _tmp"; " \
272 "andl %"_LO32 _tmp",("_STK"); " \
273 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
274 "pop %"_tmp"; " \
275 "orl %"_LO32 _tmp",("_STK"); " \
276 "popf; " \
277 "pop %"_sav"; "
278
279
280#define _POST_EFLAGS(_sav, _msk, _tmp) \
281 \
282 "pushf; " \
283 "pop %"_tmp"; " \
284 "andl %"_msk",%"_LO32 _tmp"; " \
285 "orl %"_LO32 _tmp",%"_sav"; "
286
287#ifdef CONFIG_X86_64
288#define ON64(x) x
289#else
290#define ON64(x)
291#endif
292
293#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
294 do { \
295 __asm__ __volatile__ ( \
296 _PRE_EFLAGS("0", "4", "2") \
297 _op _suffix " %"_x"3,%1; " \
298 _POST_EFLAGS("0", "4", "2") \
299 : "=m" ((ctxt)->eflags), \
300 "+q" (*(_dsttype*)&(ctxt)->dst.val), \
301 "=&r" (_tmp) \
302 : _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \
303 } while (0)
304
305
306
307#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
308 do { \
309 unsigned long _tmp; \
310 \
311 switch ((ctxt)->dst.bytes) { \
312 case 2: \
313 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
314 break; \
315 case 4: \
316 ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
317 break; \
318 case 8: \
319 ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
320 break; \
321 } \
322 } while (0)
323
324#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
325 do { \
326 unsigned long _tmp; \
327 switch ((ctxt)->dst.bytes) { \
328 case 1: \
329 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
330 break; \
331 default: \
332 __emulate_2op_nobyte(ctxt, _op, \
333 _wx, _wy, _lx, _ly, _qx, _qy); \
334 break; \
335 } \
336 } while (0)
337
338
339#define emulate_2op_SrcB(ctxt, _op) \
340 __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")
341
342
343#define emulate_2op_SrcV(ctxt, _op) \
344 __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")
345
346
347#define emulate_2op_SrcV_nobyte(ctxt, _op) \
348 __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
349
350
351#define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
352 do { \
353 unsigned long _tmp; \
354 _type _clv = (ctxt)->src2.val; \
355 _type _srcv = (ctxt)->src.val; \
356 _type _dstv = (ctxt)->dst.val; \
357 \
358 __asm__ __volatile__ ( \
359 _PRE_EFLAGS("0", "5", "2") \
360 _op _suffix " %4,%1 \n" \
361 _POST_EFLAGS("0", "5", "2") \
362 : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
363 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
364 ); \
365 \
		(ctxt)->src2.val = (unsigned long) _clv;		\
		(ctxt)->src.val  = (unsigned long) _srcv;		\
		(ctxt)->dst.val  = (unsigned long) _dstv;		\
369 } while (0)
370
371#define emulate_2op_cl(ctxt, _op) \
372 do { \
373 switch ((ctxt)->dst.bytes) { \
374 case 2: \
375 __emulate_2op_cl(ctxt, _op, "w", u16); \
376 break; \
377 case 4: \
378 __emulate_2op_cl(ctxt, _op, "l", u32); \
379 break; \
380 case 8: \
381 ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \
382 break; \
383 } \
384 } while (0)
385
386#define __emulate_1op(ctxt, _op, _suffix) \
387 do { \
388 unsigned long _tmp; \
389 \
390 __asm__ __volatile__ ( \
391 _PRE_EFLAGS("0", "3", "2") \
392 _op _suffix " %1; " \
393 _POST_EFLAGS("0", "3", "2") \
394 : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
395 "=&r" (_tmp) \
396 : "i" (EFLAGS_MASK)); \
397 } while (0)
398
399
400#define emulate_1op(ctxt, _op) \
401 do { \
402 switch ((ctxt)->dst.bytes) { \
403 case 1: __emulate_1op(ctxt, _op, "b"); break; \
404 case 2: __emulate_1op(ctxt, _op, "w"); break; \
405 case 4: __emulate_1op(ctxt, _op, "l"); break; \
406 case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \
407 } \
408 } while (0)
409
410#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
411 do { \
412 unsigned long _tmp; \
413 ulong *rax = reg_rmw((ctxt), VCPU_REGS_RAX); \
414 ulong *rdx = reg_rmw((ctxt), VCPU_REGS_RDX); \
415 \
416 __asm__ __volatile__ ( \
417 _PRE_EFLAGS("0", "5", "1") \
418 "1: \n\t" \
419 _op _suffix " %6; " \
420 "2: \n\t" \
421 _POST_EFLAGS("0", "5", "1") \
422 ".pushsection .fixup,\"ax\" \n\t" \
423 "3: movb $1, %4 \n\t" \
424 "jmp 2b \n\t" \
425 ".popsection \n\t" \
426 _ASM_EXTABLE(1b, 3b) \
427 : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
428 "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
429 : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val)); \
430 } while (0)
431
432
433#define emulate_1op_rax_rdx(ctxt, _op, _ex) \
434 do { \
435 switch((ctxt)->src.bytes) { \
436 case 1: \
437 __emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
438 break; \
439 case 2: \
440 __emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
441 break; \
442 case 4: \
443 __emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
444 break; \
445 case 8: ON64( \
446 __emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
447 break; \
448 } \
449 } while (0)
450
451static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
452 enum x86_intercept intercept,
453 enum x86_intercept_stage stage)
454{
455 struct x86_instruction_info info = {
456 .intercept = intercept,
457 .rep_prefix = ctxt->rep_prefix,
458 .modrm_mod = ctxt->modrm_mod,
459 .modrm_reg = ctxt->modrm_reg,
460 .modrm_rm = ctxt->modrm_rm,
461 .src_val = ctxt->src.val64,
462 .src_bytes = ctxt->src.bytes,
463 .dst_bytes = ctxt->dst.bytes,
464 .ad_bytes = ctxt->ad_bytes,
465 .next_rip = ctxt->eip,
466 };
467
468 return ctxt->ops->intercept(ctxt, &info, stage);
469}
470
471static void assign_masked(ulong *dest, ulong src, ulong mask)
472{
473 *dest = (*dest & ~mask) | (src & mask);
474}
475
476static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
477{
478 return (1UL << (ctxt->ad_bytes << 3)) - 1;
479}
480
481static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
482{
483 u16 sel;
484 struct desc_struct ss;
485
486 if (ctxt->mode == X86EMUL_MODE_PROT64)
487 return ~0UL;
488 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
489 return ~0U >> ((ss.d ^ 1) * 16);
490}
491
492static int stack_size(struct x86_emulate_ctxt *ctxt)
493{
494 return (__fls(stack_mask(ctxt)) + 1) >> 3;
495}
496
497
498static inline unsigned long
499address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
500{
501 if (ctxt->ad_bytes == sizeof(unsigned long))
502 return reg;
503 else
504 return reg & ad_mask(ctxt);
505}
506
507static inline unsigned long
508register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
509{
510 return address_mask(ctxt, reg);
511}
512
513static void masked_increment(ulong *reg, ulong mask, int inc)
514{
515 assign_masked(reg, *reg + inc, mask);
516}
517
518static inline void
519register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
520{
521 ulong mask;
522
523 if (ctxt->ad_bytes == sizeof(unsigned long))
524 mask = ~0UL;
525 else
526 mask = ad_mask(ctxt);
527 masked_increment(reg, mask, inc);
528}
529
530static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
531{
532 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
533}
534
535static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
536{
537 register_address_increment(ctxt, &ctxt->_eip, rel);
538}
539
540static u32 desc_limit_scaled(struct desc_struct *desc)
541{
542 u32 limit = get_desc_limit(desc);
543
544 return desc->g ? (limit << 12) | 0xfff : limit;
545}
546
547static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
548{
549 ctxt->has_seg_override = true;
550 ctxt->seg_override = seg;
551}
552
553static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
554{
555 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
556 return 0;
557
558 return ctxt->ops->get_cached_segment_base(ctxt, seg);
559}
560
561static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
562{
563 if (!ctxt->has_seg_override)
564 return 0;
565
566 return ctxt->seg_override;
567}
568
569static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
570 u32 error, bool valid)
571{
572 ctxt->exception.vector = vec;
573 ctxt->exception.error_code = error;
574 ctxt->exception.error_code_valid = valid;
575 return X86EMUL_PROPAGATE_FAULT;
576}
577
578static int emulate_db(struct x86_emulate_ctxt *ctxt)
579{
580 return emulate_exception(ctxt, DB_VECTOR, 0, false);
581}
582
583static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
584{
585 return emulate_exception(ctxt, GP_VECTOR, err, true);
586}
587
588static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
589{
590 return emulate_exception(ctxt, SS_VECTOR, err, true);
591}
592
593static int emulate_ud(struct x86_emulate_ctxt *ctxt)
594{
595 return emulate_exception(ctxt, UD_VECTOR, 0, false);
596}
597
598static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
599{
600 return emulate_exception(ctxt, TS_VECTOR, err, true);
601}
602
603static int emulate_de(struct x86_emulate_ctxt *ctxt)
604{
605 return emulate_exception(ctxt, DE_VECTOR, 0, false);
606}
607
608static int emulate_nm(struct x86_emulate_ctxt *ctxt)
609{
610 return emulate_exception(ctxt, NM_VECTOR, 0, false);
611}
612
613static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
614{
615 u16 selector;
616 struct desc_struct desc;
617
618 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
619 return selector;
620}
621
622static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
623 unsigned seg)
624{
625 u16 dummy;
626 u32 base3;
627 struct desc_struct desc;
628
629 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
630 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
631}
632
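/*
 * Decide whether a vector memory operand must be naturally aligned:
 * operands of 16 bytes or more are checked unless the opcode is flagged
 * Unaligned or Avx.
 */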
641static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
642{
643 if (likely(size < 16))
644 return false;
645
646 if (ctxt->d & Aligned)
647 return true;
648 else if (ctxt->d & Unaligned)
649 return false;
650 else if (ctxt->d & Avx)
651 return false;
652 else
653 return true;
654}
655
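/*
 * Translate a segment:offset address to a linear address, applying the
 * segment base, limit, type and privilege checks (or just the canonical
 * address check in 64-bit mode) plus the vector alignment check.
 */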
656static int __linearize(struct x86_emulate_ctxt *ctxt,
657 struct segmented_address addr,
658 unsigned size, bool write, bool fetch,
659 ulong *linear)
660{
661 struct desc_struct desc;
662 bool usable;
663 ulong la;
664 u32 lim;
665 u16 sel;
666 unsigned cpl, rpl;
667
668 la = seg_base(ctxt, addr.seg) + addr.ea;
669 switch (ctxt->mode) {
670 case X86EMUL_MODE_PROT64:
671 if (((signed long)la << 16) >> 16 != la)
672 return emulate_gp(ctxt, 0);
673 break;
674 default:
675 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
676 addr.seg);
677 if (!usable)
678 goto bad;
679
680 if (((desc.type & 8) || !(desc.type & 2)) && write)
681 goto bad;
682
683 if (!fetch && (desc.type & 8) && !(desc.type & 2))
684 goto bad;
685 lim = desc_limit_scaled(&desc);
686 if ((desc.type & 8) || !(desc.type & 4)) {
687
688 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
689 goto bad;
690 } else {
691
692 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
693 goto bad;
694 lim = desc.d ? 0xffffffff : 0xffff;
695 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
696 goto bad;
697 }
698 cpl = ctxt->ops->cpl(ctxt);
699 if (ctxt->mode == X86EMUL_MODE_REAL)
700 rpl = 0;
701 else
702 rpl = sel & 3;
703 cpl = max(cpl, rpl);
704 if (!(desc.type & 8)) {
705
706 if (cpl > desc.dpl)
707 goto bad;
708 } else if ((desc.type & 8) && !(desc.type & 4)) {
709
710 if (cpl != desc.dpl)
711 goto bad;
712 } else if ((desc.type & 8) && (desc.type & 4)) {
713
714 if (cpl < desc.dpl)
715 goto bad;
716 }
717 break;
718 }
719 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
720 la &= (u32)-1;
721 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
722 return emulate_gp(ctxt, 0);
723 *linear = la;
724 return X86EMUL_CONTINUE;
725bad:
726 if (addr.seg == VCPU_SREG_SS)
727 return emulate_ss(ctxt, sel);
728 else
729 return emulate_gp(ctxt, sel);
730}
731
732static int linearize(struct x86_emulate_ctxt *ctxt,
733 struct segmented_address addr,
734 unsigned size, bool write,
735 ulong *linear)
736{
737 return __linearize(ctxt, addr, size, write, false, linear);
738}
739
740
741static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
742 struct segmented_address addr,
743 void *data,
744 unsigned size)
745{
746 int rc;
747 ulong linear;
748
749 rc = linearize(ctxt, addr, size, false, &linear);
750 if (rc != X86EMUL_CONTINUE)
751 return rc;
752 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
753}
754
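/*
 * Instruction bytes are fetched through a small prefetch cache
 * (ctxt->fetch).  Fetches are linearized against CS, never cross a page
 * boundary in one access, and an instruction is limited to 15 bytes.
 */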
762static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
763{
764 struct fetch_cache *fc = &ctxt->fetch;
765 int rc;
766 int size, cur_size;
767
768 if (ctxt->_eip == fc->end) {
769 unsigned long linear;
770 struct segmented_address addr = { .seg = VCPU_SREG_CS,
771 .ea = ctxt->_eip };
772 cur_size = fc->end - fc->start;
773 size = min(15UL - cur_size,
774 PAGE_SIZE - offset_in_page(ctxt->_eip));
775 rc = __linearize(ctxt, addr, size, false, true, &linear);
776 if (unlikely(rc != X86EMUL_CONTINUE))
777 return rc;
778 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
779 size, &ctxt->exception);
780 if (unlikely(rc != X86EMUL_CONTINUE))
781 return rc;
782 fc->end += size;
783 }
784 *dest = fc->data[ctxt->_eip - fc->start];
785 ctxt->_eip++;
786 return X86EMUL_CONTINUE;
787}
788
789static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
790 void *dest, unsigned size)
791{
792 int rc;
793
794
795 if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
796 return X86EMUL_UNHANDLEABLE;
797 while (size--) {
798 rc = do_insn_fetch_byte(ctxt, dest++);
799 if (rc != X86EMUL_CONTINUE)
800 return rc;
801 }
802 return X86EMUL_CONTINUE;
803}
804
805
806#define insn_fetch(_type, _ctxt) \
807({ unsigned long _x; \
808 rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
809 if (rc != X86EMUL_CONTINUE) \
810 goto done; \
811 (_type)_x; \
812})
813
814#define insn_fetch_arr(_arr, _size, _ctxt) \
815({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \
816 if (rc != X86EMUL_CONTINUE) \
817 goto done; \
818})
819
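/*
 * Map a register number from the instruction encoding to a pointer into the
 * GPR cache.  With highbyte_regs set, numbers 4-7 select AH/CH/DH/BH.
 */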
825static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
826 int highbyte_regs)
827{
828 void *p;
829
830 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
831 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
832 else
833 p = reg_rmw(ctxt, modrm_reg);
834 return p;
835}
836
837static int read_descriptor(struct x86_emulate_ctxt *ctxt,
838 struct segmented_address addr,
839 u16 *size, unsigned long *address, int op_bytes)
840{
841 int rc;
842
843 if (op_bytes == 2)
844 op_bytes = 3;
845 *address = 0;
846 rc = segmented_read_std(ctxt, addr, size, 2);
847 if (rc != X86EMUL_CONTINUE)
848 return rc;
849 addr.ea += 2;
850 rc = segmented_read_std(ctxt, addr, address, op_bytes);
851 return rc;
852}
853
854static int test_cc(unsigned int condition, unsigned int flags)
855{
856 int rc = 0;
857
858 switch ((condition & 15) >> 1) {
859 case 0:
860 rc |= (flags & EFLG_OF);
861 break;
862 case 1:
863 rc |= (flags & EFLG_CF);
864 break;
865 case 2:
866 rc |= (flags & EFLG_ZF);
867 break;
868 case 3:
869 rc |= (flags & (EFLG_CF|EFLG_ZF));
870 break;
871 case 4:
872 rc |= (flags & EFLG_SF);
873 break;
874 case 5:
875 rc |= (flags & EFLG_PF);
876 break;
877 case 7:
878 rc |= (flags & EFLG_ZF);
		/* fall through */
880 case 6:
881 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
882 break;
883 }
884
885
886 return (!!rc ^ (condition & 1));
887}
888
889static void fetch_register_operand(struct operand *op)
890{
891 switch (op->bytes) {
892 case 1:
893 op->val = *(u8 *)op->addr.reg;
894 break;
895 case 2:
896 op->val = *(u16 *)op->addr.reg;
897 break;
898 case 4:
899 op->val = *(u32 *)op->addr.reg;
900 break;
901 case 8:
902 op->val = *(u64 *)op->addr.reg;
903 break;
904 }
905}
906
907static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
908{
909 ctxt->ops->get_fpu(ctxt);
910 switch (reg) {
911 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
912 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
913 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
914 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
915 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
916 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
917 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
918 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
919#ifdef CONFIG_X86_64
920 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
921 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
922 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
923 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
924 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
925 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
926 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
927 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
928#endif
929 default: BUG();
930 }
931 ctxt->ops->put_fpu(ctxt);
932}
933
934static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
935 int reg)
936{
937 ctxt->ops->get_fpu(ctxt);
938 switch (reg) {
939 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
940 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
941 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
942 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
943 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
944 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
945 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
946 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
947#ifdef CONFIG_X86_64
948 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
949 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
950 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
951 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
952 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
953 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
954 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
955 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
956#endif
957 default: BUG();
958 }
959 ctxt->ops->put_fpu(ctxt);
960}
961
962static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
963{
964 ctxt->ops->get_fpu(ctxt);
965 switch (reg) {
966 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
967 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
968 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
969 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
970 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
971 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
972 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
973 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
974 default: BUG();
975 }
976 ctxt->ops->put_fpu(ctxt);
977}
978
979static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
980{
981 ctxt->ops->get_fpu(ctxt);
982 switch (reg) {
983 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
984 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
985 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
986 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
987 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
988 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
989 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
990 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
991 default: BUG();
992 }
993 ctxt->ops->put_fpu(ctxt);
994}
995
996static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
997 struct operand *op)
998{
999 unsigned reg = ctxt->modrm_reg;
1000 int highbyte_regs = ctxt->rex_prefix == 0;
1001
1002 if (!(ctxt->d & ModRM))
1003 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1004
1005 if (ctxt->d & Sse) {
1006 op->type = OP_XMM;
1007 op->bytes = 16;
1008 op->addr.xmm = reg;
1009 read_sse_reg(ctxt, &op->vec_val, reg);
1010 return;
1011 }
1012 if (ctxt->d & Mmx) {
1013 reg &= 7;
1014 op->type = OP_MM;
1015 op->bytes = 8;
1016 op->addr.mm = reg;
1017 return;
1018 }
1019
1020 op->type = OP_REG;
1021 if (ctxt->d & ByteOp) {
1022 op->addr.reg = decode_register(ctxt, reg, highbyte_regs);
1023 op->bytes = 1;
1024 } else {
1025 op->addr.reg = decode_register(ctxt, reg, 0);
1026 op->bytes = ctxt->op_bytes;
1027 }
1028 fetch_register_operand(op);
1029 op->orig_val = op->val;
1030}
1031
1032static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1033{
1034 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1035 ctxt->modrm_seg = VCPU_SREG_SS;
1036}
1037
1038static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1039 struct operand *op)
1040{
1041 u8 sib;
1042 int index_reg = 0, base_reg = 0, scale;
1043 int rc = X86EMUL_CONTINUE;
1044 ulong modrm_ea = 0;
1045
1046 if (ctxt->rex_prefix) {
1047 ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;
1048 index_reg = (ctxt->rex_prefix & 2) << 2;
1049 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3;
1050 }
1051
1052 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
1053 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1054 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
1055 ctxt->modrm_seg = VCPU_SREG_DS;
1056
1057 if (ctxt->modrm_mod == 3) {
1058 op->type = OP_REG;
1059 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1060 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp);
1061 if (ctxt->d & Sse) {
1062 op->type = OP_XMM;
1063 op->bytes = 16;
1064 op->addr.xmm = ctxt->modrm_rm;
1065 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1066 return rc;
1067 }
1068 if (ctxt->d & Mmx) {
1069 op->type = OP_MM;
1070 op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
1072 return rc;
1073 }
1074 fetch_register_operand(op);
1075 return rc;
1076 }
1077
1078 op->type = OP_MEM;
1079
1080 if (ctxt->ad_bytes == 2) {
1081 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1082 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1083 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1084 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1085
1086
1087 switch (ctxt->modrm_mod) {
1088 case 0:
1089 if (ctxt->modrm_rm == 6)
1090 modrm_ea += insn_fetch(u16, ctxt);
1091 break;
1092 case 1:
1093 modrm_ea += insn_fetch(s8, ctxt);
1094 break;
1095 case 2:
1096 modrm_ea += insn_fetch(u16, ctxt);
1097 break;
1098 }
1099 switch (ctxt->modrm_rm) {
1100 case 0:
1101 modrm_ea += bx + si;
1102 break;
1103 case 1:
1104 modrm_ea += bx + di;
1105 break;
1106 case 2:
1107 modrm_ea += bp + si;
1108 break;
1109 case 3:
1110 modrm_ea += bp + di;
1111 break;
1112 case 4:
1113 modrm_ea += si;
1114 break;
1115 case 5:
1116 modrm_ea += di;
1117 break;
1118 case 6:
1119 if (ctxt->modrm_mod != 0)
1120 modrm_ea += bp;
1121 break;
1122 case 7:
1123 modrm_ea += bx;
1124 break;
1125 }
1126 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1127 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1128 ctxt->modrm_seg = VCPU_SREG_SS;
1129 modrm_ea = (u16)modrm_ea;
1130 } else {
1131
1132 if ((ctxt->modrm_rm & 7) == 4) {
1133 sib = insn_fetch(u8, ctxt);
1134 index_reg |= (sib >> 3) & 7;
1135 base_reg |= sib & 7;
1136 scale = sib >> 6;
1137
1138 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1139 modrm_ea += insn_fetch(s32, ctxt);
1140 else {
1141 modrm_ea += reg_read(ctxt, base_reg);
1142 adjust_modrm_seg(ctxt, base_reg);
1143 }
1144 if (index_reg != 4)
1145 modrm_ea += reg_read(ctxt, index_reg) << scale;
1146 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1147 if (ctxt->mode == X86EMUL_MODE_PROT64)
1148 ctxt->rip_relative = 1;
1149 } else {
1150 base_reg = ctxt->modrm_rm;
1151 modrm_ea += reg_read(ctxt, base_reg);
1152 adjust_modrm_seg(ctxt, base_reg);
1153 }
1154 switch (ctxt->modrm_mod) {
1155 case 0:
1156 if (ctxt->modrm_rm == 5)
1157 modrm_ea += insn_fetch(s32, ctxt);
1158 break;
1159 case 1:
1160 modrm_ea += insn_fetch(s8, ctxt);
1161 break;
1162 case 2:
1163 modrm_ea += insn_fetch(s32, ctxt);
1164 break;
1165 }
1166 }
1167 op->addr.mem.ea = modrm_ea;
1168done:
1169 return rc;
1170}
1171
1172static int decode_abs(struct x86_emulate_ctxt *ctxt,
1173 struct operand *op)
1174{
1175 int rc = X86EMUL_CONTINUE;
1176
1177 op->type = OP_MEM;
1178 switch (ctxt->ad_bytes) {
1179 case 2:
1180 op->addr.mem.ea = insn_fetch(u16, ctxt);
1181 break;
1182 case 4:
1183 op->addr.mem.ea = insn_fetch(u32, ctxt);
1184 break;
1185 case 8:
1186 op->addr.mem.ea = insn_fetch(u64, ctxt);
1187 break;
1188 }
1189done:
1190 return rc;
1191}
1192
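/*
 * For bit-test instructions (bt/bts/btr/btc) with a memory destination and a
 * register bit index, fold the whole-word part of the bit offset into the
 * effective address and keep only the sub-word offset in src.val.
 */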
1193static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1194{
1195 long sv = 0, mask;
1196
1197 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1198 mask = ~(ctxt->dst.bytes * 8 - 1);
1199
1200 if (ctxt->src.bytes == 2)
1201 sv = (s16)ctxt->src.val & (s16)mask;
1202 else if (ctxt->src.bytes == 4)
1203 sv = (s32)ctxt->src.val & (s32)mask;
1204
1205 ctxt->dst.addr.mem.ea += (sv >> 3);
1206 }
1207
1208
1209 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1210}
1211
1212static int read_emulated(struct x86_emulate_ctxt *ctxt,
1213 unsigned long addr, void *dest, unsigned size)
1214{
1215 int rc;
1216 struct read_cache *mc = &ctxt->mem_read;
1217
1218 if (mc->pos < mc->end)
1219 goto read_cached;
1220
1221 WARN_ON((mc->end + size) >= sizeof(mc->data));
1222
1223 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1224 &ctxt->exception);
1225 if (rc != X86EMUL_CONTINUE)
1226 return rc;
1227
1228 mc->end += size;
1229
1230read_cached:
1231 memcpy(dest, mc->data + mc->pos, size);
1232 mc->pos += size;
1233 return X86EMUL_CONTINUE;
1234}
1235
1236static int segmented_read(struct x86_emulate_ctxt *ctxt,
1237 struct segmented_address addr,
1238 void *data,
1239 unsigned size)
1240{
1241 int rc;
1242 ulong linear;
1243
1244 rc = linearize(ctxt, addr, size, false, &linear);
1245 if (rc != X86EMUL_CONTINUE)
1246 return rc;
1247 return read_emulated(ctxt, linear, data, size);
1248}
1249
1250static int segmented_write(struct x86_emulate_ctxt *ctxt,
1251 struct segmented_address addr,
1252 const void *data,
1253 unsigned size)
1254{
1255 int rc;
1256 ulong linear;
1257
1258 rc = linearize(ctxt, addr, size, true, &linear);
1259 if (rc != X86EMUL_CONTINUE)
1260 return rc;
1261 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1262 &ctxt->exception);
1263}
1264
1265static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1266 struct segmented_address addr,
1267 const void *orig_data, const void *data,
1268 unsigned size)
1269{
1270 int rc;
1271 ulong linear;
1272
1273 rc = linearize(ctxt, addr, size, true, &linear);
1274 if (rc != X86EMUL_CONTINUE)
1275 return rc;
1276 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1277 size, &ctxt->exception);
1278}
1279
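/*
 * IN/INS: port reads are batched through ctxt->io_read, so a REP INS can
 * pull several elements from the device model at once; forward string reads
 * are handed back directly as an OP_MEM_STR destination.
 */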
1280static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1281 unsigned int size, unsigned short port,
1282 void *dest)
1283{
1284 struct read_cache *rc = &ctxt->io_read;
1285
1286 if (rc->pos == rc->end) {
1287 unsigned int in_page, n;
1288 unsigned int count = ctxt->rep_prefix ?
1289 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1290 in_page = (ctxt->eflags & EFLG_DF) ?
1291 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1292 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1293 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1294 count);
1295 if (n == 0)
1296 n = 1;
1297 rc->pos = rc->end = 0;
1298 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1299 return 0;
1300 rc->end = n * size;
1301 }
1302
1303 if (ctxt->rep_prefix && !(ctxt->eflags & EFLG_DF)) {
1304 ctxt->dst.data = rc->data + rc->pos;
1305 ctxt->dst.type = OP_MEM_STR;
1306 ctxt->dst.count = (rc->end - rc->pos) / size;
1307 rc->pos = rc->end;
1308 } else {
1309 memcpy(dest, rc->data + rc->pos, size);
1310 rc->pos += size;
1311 }
1312 return 1;
1313}
1314
1315static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1316 u16 index, struct desc_struct *desc)
1317{
1318 struct desc_ptr dt;
1319 ulong addr;
1320
1321 ctxt->ops->get_idt(ctxt, &dt);
1322
1323 if (dt.size < index * 8 + 7)
1324 return emulate_gp(ctxt, index << 3 | 0x2);
1325
1326 addr = dt.address + index * 8;
1327 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1328 &ctxt->exception);
1329}
1330
1331static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1332 u16 selector, struct desc_ptr *dt)
1333{
1334 const struct x86_emulate_ops *ops = ctxt->ops;
1335
1336 if (selector & 1 << 2) {
1337 struct desc_struct desc;
1338 u16 sel;
1339
1340 memset (dt, 0, sizeof *dt);
1341 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1342 return;
1343
1344 dt->size = desc_limit_scaled(&desc);
1345 dt->address = get_desc_base(&desc);
1346 } else
1347 ops->get_gdt(ctxt, dt);
1348}
1349
1350
1351static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1352 u16 selector, struct desc_struct *desc,
1353 ulong *desc_addr_p)
1354{
1355 struct desc_ptr dt;
1356 u16 index = selector >> 3;
1357 ulong addr;
1358
1359 get_descriptor_table_ptr(ctxt, selector, &dt);
1360
1361 if (dt.size < index * 8 + 7)
1362 return emulate_gp(ctxt, selector & 0xfffc);
1363
1364 *desc_addr_p = addr = dt.address + index * 8;
1365 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1366 &ctxt->exception);
1367}
1368
1369
1370static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1371 u16 selector, struct desc_struct *desc)
1372{
1373 struct desc_ptr dt;
1374 u16 index = selector >> 3;
1375 ulong addr;
1376
1377 get_descriptor_table_ptr(ctxt, selector, &dt);
1378
1379 if (dt.size < index * 8 + 7)
1380 return emulate_gp(ctxt, selector & 0xfffc);
1381
1382 addr = dt.address + index * 8;
1383 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1384 &ctxt->exception);
1385}
1386
1387
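/*
 * Load a segment register: handle the real-mode/VM86 shortcut, validate the
 * selector and descriptor (type, present bit and privilege rules per segment
 * register), update the TSS busy / descriptor accessed bits, then load.
 */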
1388static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1389 u16 selector, int seg)
1390{
1391 struct desc_struct seg_desc, old_desc;
1392 u8 dpl, rpl, cpl;
1393 unsigned err_vec = GP_VECTOR;
1394 u32 err_code = 0;
1395 bool null_selector = !(selector & ~0x3);
1396 ulong desc_addr;
1397 int ret;
1398 u16 dummy;
1399
1400 memset(&seg_desc, 0, sizeof seg_desc);
1401
1402 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1403 || ctxt->mode == X86EMUL_MODE_REAL) {
1404
1405 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1406 set_desc_base(&seg_desc, selector << 4);
1407 goto load;
1408 }
1409
1410 rpl = selector & 3;
1411 cpl = ctxt->ops->cpl(ctxt);
1412
1413
1414 if ((seg == VCPU_SREG_CS
1415 || (seg == VCPU_SREG_SS
1416 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1417 || seg == VCPU_SREG_TR)
1418 && null_selector)
1419 goto exception;
1420
1421
1422 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1423 goto exception;
1424
1425 if (null_selector)
1426 goto load;
1427
1428 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1429 if (ret != X86EMUL_CONTINUE)
1430 return ret;
1431
1432 err_code = selector & 0xfffc;
1433 err_vec = GP_VECTOR;
1434
1435
1436 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1437 goto exception;
1438
1439 if (!seg_desc.p) {
1440 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1441 goto exception;
1442 }
1443
1444 dpl = seg_desc.dpl;
1445
1446 switch (seg) {
1447 case VCPU_SREG_SS:
1448
1449
1450
1451
1452 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1453 goto exception;
1454 break;
1455 case VCPU_SREG_CS:
1456 if (!(seg_desc.type & 8))
1457 goto exception;
1458
1459 if (seg_desc.type & 4) {
1460
1461 if (dpl > cpl)
1462 goto exception;
1463 } else {
1464
1465 if (rpl > cpl || dpl != cpl)
1466 goto exception;
1467 }
1468
1469 selector = (selector & 0xfffc) | cpl;
1470 break;
1471 case VCPU_SREG_TR:
1472 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1473 goto exception;
1474 old_desc = seg_desc;
1475 seg_desc.type |= 2;
1476 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1477 sizeof(seg_desc), &ctxt->exception);
1478 if (ret != X86EMUL_CONTINUE)
1479 return ret;
1480 break;
1481 case VCPU_SREG_LDTR:
1482 if (seg_desc.s || seg_desc.type != 2)
1483 goto exception;
1484 break;
1485 default:
1486
1487
1488
1489
1490
1491 if ((seg_desc.type & 0xa) == 0x8 ||
1492 (((seg_desc.type & 0xc) != 0xc) &&
1493 (rpl > dpl && cpl > dpl)))
1494 goto exception;
1495 break;
1496 }
1497
1498 if (seg_desc.s) {
1499
1500 seg_desc.type |= 1;
1501 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1502 if (ret != X86EMUL_CONTINUE)
1503 return ret;
1504 }
1505load:
1506 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1507 return X86EMUL_CONTINUE;
1508exception:
1509 emulate_exception(ctxt, err_vec, err_code, true);
1510 return X86EMUL_PROPAGATE_FAULT;
1511}
1512
1513static void write_register_operand(struct operand *op)
1514{
1515
1516 switch (op->bytes) {
1517 case 1:
1518 *(u8 *)op->addr.reg = (u8)op->val;
1519 break;
1520 case 2:
1521 *(u16 *)op->addr.reg = (u16)op->val;
1522 break;
1523 case 4:
1524 *op->addr.reg = (u32)op->val;
1525 break;
1526 case 8:
1527 *op->addr.reg = op->val;
1528 break;
1529 }
1530}
1531
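/*
 * Write the destination operand back to its home: a GPR, guest memory
 * (via cmpxchg when the instruction carried a LOCK prefix), a string
 * destination, or an XMM/MMX register.
 */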
1532static int writeback(struct x86_emulate_ctxt *ctxt)
1533{
1534 int rc;
1535
1536 switch (ctxt->dst.type) {
1537 case OP_REG:
1538 write_register_operand(&ctxt->dst);
1539 break;
1540 case OP_MEM:
1541 if (ctxt->lock_prefix)
1542 rc = segmented_cmpxchg(ctxt,
1543 ctxt->dst.addr.mem,
1544 &ctxt->dst.orig_val,
1545 &ctxt->dst.val,
1546 ctxt->dst.bytes);
1547 else
1548 rc = segmented_write(ctxt,
1549 ctxt->dst.addr.mem,
1550 &ctxt->dst.val,
1551 ctxt->dst.bytes);
1552 if (rc != X86EMUL_CONTINUE)
1553 return rc;
1554 break;
1555 case OP_MEM_STR:
1556 rc = segmented_write(ctxt,
1557 ctxt->dst.addr.mem,
1558 ctxt->dst.data,
1559 ctxt->dst.bytes * ctxt->dst.count);
1560 if (rc != X86EMUL_CONTINUE)
1561 return rc;
1562 break;
1563 case OP_XMM:
1564 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1565 break;
1566 case OP_MM:
1567 write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);
1568 break;
1569 case OP_NONE:
1570
1571 break;
1572 default:
1573 break;
1574 }
1575 return X86EMUL_CONTINUE;
1576}
1577
1578static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1579{
1580 struct segmented_address addr;
1581
1582 rsp_increment(ctxt, -bytes);
1583 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1584 addr.seg = VCPU_SREG_SS;
1585
1586 return segmented_write(ctxt, addr, data, bytes);
1587}
1588
1589static int em_push(struct x86_emulate_ctxt *ctxt)
1590{
1591
1592 ctxt->dst.type = OP_NONE;
1593 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1594}
1595
1596static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1597 void *dest, int len)
1598{
1599 int rc;
1600 struct segmented_address addr;
1601
1602 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1603 addr.seg = VCPU_SREG_SS;
1604 rc = segmented_read(ctxt, addr, dest, len);
1605 if (rc != X86EMUL_CONTINUE)
1606 return rc;
1607
1608 rsp_increment(ctxt, len);
1609 return rc;
1610}
1611
1612static int em_pop(struct x86_emulate_ctxt *ctxt)
1613{
1614 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1615}
1616
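/*
 * POPF: which EFLAGS bits may actually change depends on mode and privilege:
 * IOPL only at CPL 0, IF only when CPL <= IOPL, and VM86 with IOPL < 3
 * faults with #GP.
 */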
1617static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1618 void *dest, int len)
1619{
1620 int rc;
1621 unsigned long val, change_mask;
1622 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1623 int cpl = ctxt->ops->cpl(ctxt);
1624
1625 rc = emulate_pop(ctxt, &val, len);
1626 if (rc != X86EMUL_CONTINUE)
1627 return rc;
1628
1629 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1630 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1631
1632 switch(ctxt->mode) {
1633 case X86EMUL_MODE_PROT64:
1634 case X86EMUL_MODE_PROT32:
1635 case X86EMUL_MODE_PROT16:
1636 if (cpl == 0)
1637 change_mask |= EFLG_IOPL;
1638 if (cpl <= iopl)
1639 change_mask |= EFLG_IF;
1640 break;
1641 case X86EMUL_MODE_VM86:
1642 if (iopl < 3)
1643 return emulate_gp(ctxt, 0);
1644 change_mask |= EFLG_IF;
1645 break;
1646 default:
1647 change_mask |= (EFLG_IOPL | EFLG_IF);
1648 break;
1649 }
1650
1651 *(unsigned long *)dest =
1652 (ctxt->eflags & ~change_mask) | (val & change_mask);
1653
1654 return rc;
1655}
1656
1657static int em_popf(struct x86_emulate_ctxt *ctxt)
1658{
1659 ctxt->dst.type = OP_REG;
1660 ctxt->dst.addr.reg = &ctxt->eflags;
1661 ctxt->dst.bytes = ctxt->op_bytes;
1662 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1663}
1664
1665static int em_enter(struct x86_emulate_ctxt *ctxt)
1666{
1667 int rc;
1668 unsigned frame_size = ctxt->src.val;
1669 unsigned nesting_level = ctxt->src2.val & 31;
1670 ulong rbp;
1671
1672 if (nesting_level)
1673 return X86EMUL_UNHANDLEABLE;
1674
1675 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1676 rc = push(ctxt, &rbp, stack_size(ctxt));
1677 if (rc != X86EMUL_CONTINUE)
1678 return rc;
1679 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1680 stack_mask(ctxt));
1681 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1682 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1683 stack_mask(ctxt));
1684 return X86EMUL_CONTINUE;
1685}
1686
1687static int em_leave(struct x86_emulate_ctxt *ctxt)
1688{
1689 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1690 stack_mask(ctxt));
1691 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1692}
1693
1694static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1695{
1696 int seg = ctxt->src2.val;
1697
1698 ctxt->src.val = get_segment_selector(ctxt, seg);
1699
1700 return em_push(ctxt);
1701}
1702
1703static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1704{
1705 int seg = ctxt->src2.val;
1706 unsigned long selector;
1707 int rc;
1708
1709 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1710 if (rc != X86EMUL_CONTINUE)
1711 return rc;
1712
1713 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1714 return rc;
1715}
1716
1717static int em_pusha(struct x86_emulate_ctxt *ctxt)
1718{
1719 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1720 int rc = X86EMUL_CONTINUE;
1721 int reg = VCPU_REGS_RAX;
1722
1723 while (reg <= VCPU_REGS_RDI) {
1724 (reg == VCPU_REGS_RSP) ?
1725 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1726
1727 rc = em_push(ctxt);
1728 if (rc != X86EMUL_CONTINUE)
1729 return rc;
1730
1731 ++reg;
1732 }
1733
1734 return rc;
1735}
1736
1737static int em_pushf(struct x86_emulate_ctxt *ctxt)
1738{
1739 ctxt->src.val = (unsigned long)ctxt->eflags;
1740 return em_push(ctxt);
1741}
1742
1743static int em_popa(struct x86_emulate_ctxt *ctxt)
1744{
1745 int rc = X86EMUL_CONTINUE;
1746 int reg = VCPU_REGS_RDI;
1747
1748 while (reg >= VCPU_REGS_RAX) {
1749 if (reg == VCPU_REGS_RSP) {
1750 rsp_increment(ctxt, ctxt->op_bytes);
1751 --reg;
1752 }
1753
1754 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1755 if (rc != X86EMUL_CONTINUE)
1756 break;
1757 --reg;
1758 }
1759 return rc;
1760}
1761
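/*
 * Real-mode interrupt dispatch: push FLAGS, CS and IP, clear IF/TF/AC, then
 * fetch the new CS:IP from the vector's IVT entry.
 */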
1762static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1763{
1764 const struct x86_emulate_ops *ops = ctxt->ops;
1765 int rc;
1766 struct desc_ptr dt;
1767 gva_t cs_addr;
1768 gva_t eip_addr;
1769 u16 cs, eip;
1770
1771
1772 ctxt->src.val = ctxt->eflags;
1773 rc = em_push(ctxt);
1774 if (rc != X86EMUL_CONTINUE)
1775 return rc;
1776
1777 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1778
1779 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1780 rc = em_push(ctxt);
1781 if (rc != X86EMUL_CONTINUE)
1782 return rc;
1783
1784 ctxt->src.val = ctxt->_eip;
1785 rc = em_push(ctxt);
1786 if (rc != X86EMUL_CONTINUE)
1787 return rc;
1788
1789 ops->get_idt(ctxt, &dt);
1790
1791 eip_addr = dt.address + (irq << 2);
1792 cs_addr = dt.address + (irq << 2) + 2;
1793
1794 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1795 if (rc != X86EMUL_CONTINUE)
1796 return rc;
1797
1798 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1799 if (rc != X86EMUL_CONTINUE)
1800 return rc;
1801
1802 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1803 if (rc != X86EMUL_CONTINUE)
1804 return rc;
1805
1806 ctxt->_eip = eip;
1807
1808 return rc;
1809}
1810
1811int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1812{
1813 int rc;
1814
1815 invalidate_registers(ctxt);
1816 rc = __emulate_int_real(ctxt, irq);
1817 if (rc == X86EMUL_CONTINUE)
1818 writeback_registers(ctxt);
1819 return rc;
1820}
1821
1822static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1823{
1824 switch(ctxt->mode) {
1825 case X86EMUL_MODE_REAL:
1826 return __emulate_int_real(ctxt, irq);
1827 case X86EMUL_MODE_VM86:
1828 case X86EMUL_MODE_PROT16:
1829 case X86EMUL_MODE_PROT32:
1830 case X86EMUL_MODE_PROT64:
1831 default:
1832
1833 return X86EMUL_UNHANDLEABLE;
1834 }
1835}
1836
1837static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1838{
1839 int rc = X86EMUL_CONTINUE;
1840 unsigned long temp_eip = 0;
1841 unsigned long temp_eflags = 0;
1842 unsigned long cs = 0;
1843 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1844 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1845 EFLG_AC | EFLG_ID | (1 << 1);
1846 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1847
1848
1849
1850 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1851
1852 if (rc != X86EMUL_CONTINUE)
1853 return rc;
1854
1855 if (temp_eip & ~0xffff)
1856 return emulate_gp(ctxt, 0);
1857
1858 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1859
1860 if (rc != X86EMUL_CONTINUE)
1861 return rc;
1862
1863 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1864
1865 if (rc != X86EMUL_CONTINUE)
1866 return rc;
1867
1868 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1869
1870 if (rc != X86EMUL_CONTINUE)
1871 return rc;
1872
1873 ctxt->_eip = temp_eip;
1874
1875
1876 if (ctxt->op_bytes == 4)
1877 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1878 else if (ctxt->op_bytes == 2) {
1879 ctxt->eflags &= ~0xffff;
1880 ctxt->eflags |= temp_eflags;
1881 }
1882
1883 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
1884 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1885
1886 return rc;
1887}
1888
1889static int em_iret(struct x86_emulate_ctxt *ctxt)
1890{
1891 switch(ctxt->mode) {
1892 case X86EMUL_MODE_REAL:
1893 return emulate_iret_real(ctxt);
1894 case X86EMUL_MODE_VM86:
1895 case X86EMUL_MODE_PROT16:
1896 case X86EMUL_MODE_PROT32:
1897 case X86EMUL_MODE_PROT64:
1898 default:
1899
1900 return X86EMUL_UNHANDLEABLE;
1901 }
1902}
1903
1904static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1905{
1906 int rc;
1907 unsigned short sel;
1908
1909 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1910
1911 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1912 if (rc != X86EMUL_CONTINUE)
1913 return rc;
1914
1915 ctxt->_eip = 0;
1916 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1917 return X86EMUL_CONTINUE;
1918}
1919
1920static int em_grp2(struct x86_emulate_ctxt *ctxt)
1921{
1922 switch (ctxt->modrm_reg) {
1923 case 0:
1924 emulate_2op_SrcB(ctxt, "rol");
1925 break;
1926 case 1:
1927 emulate_2op_SrcB(ctxt, "ror");
1928 break;
1929 case 2:
1930 emulate_2op_SrcB(ctxt, "rcl");
1931 break;
1932 case 3:
1933 emulate_2op_SrcB(ctxt, "rcr");
1934 break;
1935 case 4:
1936 case 6:
1937 emulate_2op_SrcB(ctxt, "sal");
1938 break;
1939 case 5:
1940 emulate_2op_SrcB(ctxt, "shr");
1941 break;
1942 case 7:
1943 emulate_2op_SrcB(ctxt, "sar");
1944 break;
1945 }
1946 return X86EMUL_CONTINUE;
1947}
1948
1949static int em_not(struct x86_emulate_ctxt *ctxt)
1950{
1951 ctxt->dst.val = ~ctxt->dst.val;
1952 return X86EMUL_CONTINUE;
1953}
1954
1955static int em_neg(struct x86_emulate_ctxt *ctxt)
1956{
1957 emulate_1op(ctxt, "neg");
1958 return X86EMUL_CONTINUE;
1959}
1960
1961static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
1962{
1963 u8 ex = 0;
1964
1965 emulate_1op_rax_rdx(ctxt, "mul", ex);
1966 return X86EMUL_CONTINUE;
1967}
1968
1969static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
1970{
1971 u8 ex = 0;
1972
1973 emulate_1op_rax_rdx(ctxt, "imul", ex);
1974 return X86EMUL_CONTINUE;
1975}
1976
1977static int em_div_ex(struct x86_emulate_ctxt *ctxt)
1978{
1979 u8 de = 0;
1980
1981 emulate_1op_rax_rdx(ctxt, "div", de);
1982 if (de)
1983 return emulate_de(ctxt);
1984 return X86EMUL_CONTINUE;
1985}
1986
1987static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
1988{
1989 u8 de = 0;
1990
1991 emulate_1op_rax_rdx(ctxt, "idiv", de);
1992 if (de)
1993 return emulate_de(ctxt);
1994 return X86EMUL_CONTINUE;
1995}
1996
1997static int em_grp45(struct x86_emulate_ctxt *ctxt)
1998{
1999 int rc = X86EMUL_CONTINUE;
2000
2001 switch (ctxt->modrm_reg) {
2002 case 0:
2003 emulate_1op(ctxt, "inc");
2004 break;
2005 case 1:
2006 emulate_1op(ctxt, "dec");
2007 break;
2008 case 2: {
2009 long int old_eip;
2010 old_eip = ctxt->_eip;
2011 ctxt->_eip = ctxt->src.val;
2012 ctxt->src.val = old_eip;
2013 rc = em_push(ctxt);
2014 break;
2015 }
2016 case 4:
2017 ctxt->_eip = ctxt->src.val;
2018 break;
2019 case 5:
2020 rc = em_jmp_far(ctxt);
2021 break;
2022 case 6:
2023 rc = em_push(ctxt);
2024 break;
2025 }
2026 return rc;
2027}
2028
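/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
 * store ECX:EBX and set ZF, otherwise load the old value into EDX:EAX and
 * clear ZF.
 */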
2029static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2030{
2031 u64 old = ctxt->dst.orig_val64;
2032
2033 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2034 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2035 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2036 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2037 ctxt->eflags &= ~EFLG_ZF;
2038 } else {
2039 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2040 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2041
2042 ctxt->eflags |= EFLG_ZF;
2043 }
2044 return X86EMUL_CONTINUE;
2045}
2046
2047static int em_ret(struct x86_emulate_ctxt *ctxt)
2048{
2049 ctxt->dst.type = OP_REG;
2050 ctxt->dst.addr.reg = &ctxt->_eip;
2051 ctxt->dst.bytes = ctxt->op_bytes;
2052 return em_pop(ctxt);
2053}
2054
2055static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2056{
2057 int rc;
2058 unsigned long cs;
2059
2060 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
2061 if (rc != X86EMUL_CONTINUE)
2062 return rc;
2063 if (ctxt->op_bytes == 4)
2064 ctxt->_eip = (u32)ctxt->_eip;
2065 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2066 if (rc != X86EMUL_CONTINUE)
2067 return rc;
2068 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2069 return rc;
2070}
2071
2072static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2073{
2074
2075 ctxt->src.orig_val = ctxt->src.val;
2076 ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
2077 emulate_2op_SrcV(ctxt, "cmp");
2078
2079 if (ctxt->eflags & EFLG_ZF) {
2080
2081 ctxt->dst.val = ctxt->src.orig_val;
2082 } else {
2083
2084 ctxt->dst.type = OP_REG;
2085 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2086 }
2087 return X86EMUL_CONTINUE;
2088}
2089
2090static int em_lseg(struct x86_emulate_ctxt *ctxt)
2091{
2092 int seg = ctxt->src2.val;
2093 unsigned short sel;
2094 int rc;
2095
2096 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2097
2098 rc = load_segment_descriptor(ctxt, sel, seg);
2099 if (rc != X86EMUL_CONTINUE)
2100 return rc;
2101
2102 ctxt->dst.val = ctxt->src.val;
2103 return rc;
2104}
2105
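/* Build the flat, 4GB, DPL-0 code and stack segments used by SYSCALL and SYSENTER. */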
2106static void
2107setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2108 struct desc_struct *cs, struct desc_struct *ss)
2109{
2110 cs->l = 0;
2111 set_desc_base(cs, 0);
2112 cs->g = 1;
2113 set_desc_limit(cs, 0xfffff);
2114 cs->type = 0x0b;
2115 cs->s = 1;
2116 cs->dpl = 0;
2117 cs->p = 1;
2118 cs->d = 1;
2119 cs->avl = 0;
2120
2121 set_desc_base(ss, 0);
2122 set_desc_limit(ss, 0xfffff);
2123 ss->g = 1;
2124 ss->s = 1;
2125 ss->type = 0x03;
2126 ss->d = 1;
2127 ss->dpl = 0;
2128 ss->p = 1;
2129 ss->l = 0;
2130 ss->avl = 0;
2131}
2132
2133static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2134{
2135 u32 eax, ebx, ecx, edx;
2136
2137 eax = ecx = 0;
2138 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2139 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2140 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2141 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2142}
2143
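/*
 * SYSCALL is always available in 64-bit mode.  In legacy/compat mode the
 * emulator follows the guest's CPUID vendor string: Intel is treated as #UD
 * even with EFER.SCE set, AMD as supported.
 */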
2144static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2145{
2146 const struct x86_emulate_ops *ops = ctxt->ops;
2147 u32 eax, ebx, ecx, edx;
2148
2149
2150
2151
2152
2153 if (ctxt->mode == X86EMUL_MODE_PROT64)
2154 return true;
2155
2156 eax = 0x00000000;
2157 ecx = 0x00000000;
2158 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2159
2160
2161
2162
2163
2164
2165
2166
2167 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2168 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2169 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2170 return false;
2171
2172
2173 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2174 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2175 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2176 return true;
2177
2178
2179 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2180 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2181 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2182 return true;
2183
2184
2185 return false;
2186}
2187
2188static int em_syscall(struct x86_emulate_ctxt *ctxt)
2189{
2190 const struct x86_emulate_ops *ops = ctxt->ops;
2191 struct desc_struct cs, ss;
2192 u64 msr_data;
2193 u16 cs_sel, ss_sel;
2194 u64 efer = 0;
2195
2196
2197 if (ctxt->mode == X86EMUL_MODE_REAL ||
2198 ctxt->mode == X86EMUL_MODE_VM86)
2199 return emulate_ud(ctxt);
2200
2201 if (!(em_syscall_is_enabled(ctxt)))
2202 return emulate_ud(ctxt);
2203
2204 ops->get_msr(ctxt, MSR_EFER, &efer);
2205 setup_syscalls_segments(ctxt, &cs, &ss);
2206
2207 if (!(efer & EFER_SCE))
2208 return emulate_ud(ctxt);
2209
2210 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2211 msr_data >>= 32;
2212 cs_sel = (u16)(msr_data & 0xfffc);
2213 ss_sel = (u16)(msr_data + 8);
2214
2215 if (efer & EFER_LMA) {
2216 cs.d = 0;
2217 cs.l = 1;
2218 }
2219 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2220 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2221
2222 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2223 if (efer & EFER_LMA) {
2224#ifdef CONFIG_X86_64
2225 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;
2226
2227 ops->get_msr(ctxt,
2228 ctxt->mode == X86EMUL_MODE_PROT64 ?
2229 MSR_LSTAR : MSR_CSTAR, &msr_data);
2230 ctxt->_eip = msr_data;
2231
2232 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2233 ctxt->eflags &= ~(msr_data | EFLG_RF);
2234#endif
2235 } else {
2236
2237 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2238 ctxt->_eip = (u32)msr_data;
2239
2240 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2241 }
2242
2243 return X86EMUL_CONTINUE;
2244}
2245
2246static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2247{
2248 const struct x86_emulate_ops *ops = ctxt->ops;
2249 struct desc_struct cs, ss;
2250 u64 msr_data;
2251 u16 cs_sel, ss_sel;
2252 u64 efer = 0;
2253
2254 ops->get_msr(ctxt, MSR_EFER, &efer);
2255
2256 if (ctxt->mode == X86EMUL_MODE_REAL)
2257 return emulate_gp(ctxt, 0);
2258
2259
2260
2261
2262
2263 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2264 && !vendor_intel(ctxt))
2265 return emulate_ud(ctxt);
2266
2267
2268
2269
2270 if (ctxt->mode == X86EMUL_MODE_PROT64)
2271 return emulate_ud(ctxt);
2272
2273 setup_syscalls_segments(ctxt, &cs, &ss);
2274
2275 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2276 switch (ctxt->mode) {
2277 case X86EMUL_MODE_PROT32:
2278 if ((msr_data & 0xfffc) == 0x0)
2279 return emulate_gp(ctxt, 0);
2280 break;
2281 case X86EMUL_MODE_PROT64:
2282 if (msr_data == 0x0)
2283 return emulate_gp(ctxt, 0);
2284 break;
2285 default:
2286 break;
2287 }
2288
2289 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
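	/* SYSENTER_CS also determines SS (CS + 8); both are forced to RPL 0. */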
2290 cs_sel = (u16)msr_data;
2291 cs_sel &= ~SELECTOR_RPL_MASK;
2292 ss_sel = cs_sel + 8;
2293 ss_sel &= ~SELECTOR_RPL_MASK;
2294 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2295 cs.d = 0;
2296 cs.l = 1;
2297 }
2298
2299 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2300 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2301
2302 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2303 ctxt->_eip = msr_data;
2304
2305 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2306 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2307
2308 return X86EMUL_CONTINUE;
2309}
2310
2311static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2312{
2313 const struct x86_emulate_ops *ops = ctxt->ops;
2314 struct desc_struct cs, ss;
2315 u64 msr_data;
2316 int usermode;
2317 u16 cs_sel = 0, ss_sel = 0;
2318
	/* SYSEXIT from real mode or VM86 mode raises #GP(0). */
2320 if (ctxt->mode == X86EMUL_MODE_REAL ||
2321 ctxt->mode == X86EMUL_MODE_VM86)
2322 return emulate_gp(ctxt, 0);
2323
2324 setup_syscalls_segments(ctxt, &cs, &ss);
2325
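	/* REX.W on SYSEXIT selects a return to 64-bit user mode. */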
2326 if ((ctxt->rex_prefix & 0x8) != 0x0)
2327 usermode = X86EMUL_MODE_PROT64;
2328 else
2329 usermode = X86EMUL_MODE_PROT32;
2330
2331 cs.dpl = 3;
2332 ss.dpl = 3;
2333 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2334 switch (usermode) {
2335 case X86EMUL_MODE_PROT32:
2336 cs_sel = (u16)(msr_data + 16);
2337 if ((msr_data & 0xfffc) == 0x0)
2338 return emulate_gp(ctxt, 0);
2339 ss_sel = (u16)(msr_data + 24);
2340 break;
2341 case X86EMUL_MODE_PROT64:
2342 cs_sel = (u16)(msr_data + 32);
2343 if (msr_data == 0x0)
2344 return emulate_gp(ctxt, 0);
2345 ss_sel = cs_sel + 8;
2346 cs.d = 0;
2347 cs.l = 1;
2348 break;
2349 }
2350 cs_sel |= SELECTOR_RPL_MASK;
2351 ss_sel |= SELECTOR_RPL_MASK;
2352
2353 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2354 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2355
2356 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
2357 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
2358
2359 return X86EMUL_CONTINUE;
2360}
2361
2362static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2363{
2364 int iopl;
2365 if (ctxt->mode == X86EMUL_MODE_REAL)
2366 return false;
2367 if (ctxt->mode == X86EMUL_MODE_VM86)
2368 return true;
2369 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2370 return ctxt->ops->cpl(ctxt) > iopl;
2371}
2372
2373static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2374 u16 port, u16 len)
2375{
2376 const struct x86_emulate_ops *ops = ctxt->ops;
2377 struct desc_struct tr_seg;
2378 u32 base3;
2379 int r;
2380 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2381 unsigned mask = (1 << len) - 1;
2382 unsigned long base;
2383
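	/* Consult the I/O permission bitmap in the TSS (bitmap offset at byte 102, one bit per port). */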
2384 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2385 if (!tr_seg.p)
2386 return false;
2387 if (desc_limit_scaled(&tr_seg) < 103)
2388 return false;
2389 base = get_desc_base(&tr_seg);
2390#ifdef CONFIG_X86_64
2391 base |= ((u64)base3) << 32;
2392#endif
2393 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2394 if (r != X86EMUL_CONTINUE)
2395 return false;
2396 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2397 return false;
2398 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2399 if (r != X86EMUL_CONTINUE)
2400 return false;
2401 if ((perm >> bit_idx) & mask)
2402 return false;
2403 return true;
2404}
2405
2406static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2407 u16 port, u16 len)
2408{
2409 if (ctxt->perm_ok)
2410 return true;
2411
2412 if (emulator_bad_iopl(ctxt))
2413 if (!emulator_io_port_access_allowed(ctxt, port, len))
2414 return false;
2415
2416 ctxt->perm_ok = true;
2417
2418 return true;
2419}
2420
2421static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2422 struct tss_segment_16 *tss)
2423{
2424 tss->ip = ctxt->_eip;
2425 tss->flag = ctxt->eflags;
2426 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2427 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2428 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2429 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2430 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2431 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2432 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2433 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2434
2435 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2436 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2437 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2438 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2439 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2440}
2441
2442static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2443 struct tss_segment_16 *tss)
2444{
2445 int ret;
2446
2447 ctxt->_eip = tss->ip;
2448 ctxt->eflags = tss->flag | 2;
2449 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2450 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2451 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2452 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2453 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2454 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2455 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2456 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2457
	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
2462 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2463 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2464 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2465 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2466 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2467
	/*
	 * Now load the segment descriptors; if a fault happens at this
	 * stage it is handled in the context of the new task.
	 */
2472 ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2473 if (ret != X86EMUL_CONTINUE)
2474 return ret;
2475 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2476 if (ret != X86EMUL_CONTINUE)
2477 return ret;
2478 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2479 if (ret != X86EMUL_CONTINUE)
2480 return ret;
2481 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2482 if (ret != X86EMUL_CONTINUE)
2483 return ret;
2484 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2485 if (ret != X86EMUL_CONTINUE)
2486 return ret;
2487
2488 return X86EMUL_CONTINUE;
2489}
2490
2491static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2492 u16 tss_selector, u16 old_tss_sel,
2493 ulong old_tss_base, struct desc_struct *new_desc)
2494{
2495 const struct x86_emulate_ops *ops = ctxt->ops;
2496 struct tss_segment_16 tss_seg;
2497 int ret;
2498 u32 new_tss_base = get_desc_base(new_desc);
2499
2500 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2501 &ctxt->exception);
2502 if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide the precise fault address */
2504 return ret;
2505
2506 save_state_to_tss16(ctxt, &tss_seg);
2507
2508 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2509 &ctxt->exception);
2510 if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide the precise fault address */
2512 return ret;
2513
2514 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2515 &ctxt->exception);
2516 if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide the precise fault address */
2518 return ret;
2519
2520 if (old_tss_sel != 0xffff) {
2521 tss_seg.prev_task_link = old_tss_sel;
2522
2523 ret = ops->write_std(ctxt, new_tss_base,
2524 &tss_seg.prev_task_link,
2525 sizeof tss_seg.prev_task_link,
2526 &ctxt->exception);
2527 if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide the precise fault address */
2529 return ret;
2530 }
2531
2532 return load_state_from_tss16(ctxt, &tss_seg);
2533}
2534
2535static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2536 struct tss_segment_32 *tss)
2537{
2538 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2539 tss->eip = ctxt->_eip;
2540 tss->eflags = ctxt->eflags;
2541 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2542 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2543 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2544 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2545 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2546 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2547 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2548 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2549
2550 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2551 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2552 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2553 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2554 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2555 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2556 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2557}
2558
2559static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2560 struct tss_segment_32 *tss)
2561{
2562 int ret;
2563
2564 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2565 return emulate_gp(ctxt, 0);
2566 ctxt->_eip = tss->eip;
2567 ctxt->eflags = tss->eflags | 2;
2568
	/* General-purpose registers */
2570 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2571 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2572 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2573 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2574 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2575 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2576 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2577 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2578
	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
2583 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2584 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2585 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2586 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2587 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2588 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2589 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2590
	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 *
	 * Need to get rflags to the vcpu struct immediately because it
	 * influences the CPL, which is checked at least during
	 * load_segment_descriptor().
	 */
2602 if (ctxt->eflags & X86_EFLAGS_VM)
2603 ctxt->mode = X86EMUL_MODE_VM86;
2604 else
2605 ctxt->mode = X86EMUL_MODE_PROT32;
2606
2607 ctxt->ops->set_rflags(ctxt, ctxt->eflags);
2608
	/*
	 * Now load the segment descriptors; if a fault happens at this
	 * stage it is handled in the context of the new task.
	 */
2613 ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2614 if (ret != X86EMUL_CONTINUE)
2615 return ret;
2616 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2617 if (ret != X86EMUL_CONTINUE)
2618 return ret;
2619 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2620 if (ret != X86EMUL_CONTINUE)
2621 return ret;
2622 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2623 if (ret != X86EMUL_CONTINUE)
2624 return ret;
2625 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2626 if (ret != X86EMUL_CONTINUE)
2627 return ret;
2628 ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2629 if (ret != X86EMUL_CONTINUE)
2630 return ret;
2631 ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2632 if (ret != X86EMUL_CONTINUE)
2633 return ret;
2634
2635 return X86EMUL_CONTINUE;
2636}
2637
2638static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2639 u16 tss_selector, u16 old_tss_sel,
2640 ulong old_tss_base, struct desc_struct *new_desc)
2641{
2642 const struct x86_emulate_ops *ops = ctxt->ops;
2643 struct tss_segment_32 tss_seg;
2644 int ret;
2645 u32 new_tss_base = get_desc_base(new_desc);
2646
2647 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2648 &ctxt->exception);
2649 if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide the precise fault address */
2651 return ret;
2652
2653 save_state_to_tss32(ctxt, &tss_seg);
2654
2655 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2656 &ctxt->exception);
2657 if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide the precise fault address */
2659 return ret;
2660
2661 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2662 &ctxt->exception);
2663 if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide the precise fault address */
2665 return ret;
2666
2667 if (old_tss_sel != 0xffff) {
2668 tss_seg.prev_task_link = old_tss_sel;
2669
2670 ret = ops->write_std(ctxt, new_tss_base,
2671 &tss_seg.prev_task_link,
2672 sizeof tss_seg.prev_task_link,
2673 &ctxt->exception);
2674 if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide the precise fault address */
2676 return ret;
2677 }
2678
2679 return load_state_from_tss32(ctxt, &tss_seg);
2680}
2681
2682static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2683 u16 tss_selector, int idt_index, int reason,
2684 bool has_error_code, u32 error_code)
2685{
2686 const struct x86_emulate_ops *ops = ctxt->ops;
2687 struct desc_struct curr_tss_desc, next_tss_desc;
2688 int ret;
2689 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2690 ulong old_tss_base =
2691 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2692 u32 desc_limit;
2693 ulong desc_addr;
2694

	/* Fetch the descriptors of the new and the current TSS. */
2697 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2698 if (ret != X86EMUL_CONTINUE)
2699 return ret;
2700 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2701 if (ret != X86EMUL_CONTINUE)
2702 return ret;
2703
	/* FIXME: check that next_tss_desc is really a TSS descriptor */

	/*
	 * Check privileges. The three cases of task switch requiring
	 * privilege checks are:
	 *
	 *   1. jmp/call/int to task gate: check against DPL of the task gate
	 *   2. Exception/IRQ/iret: no check is performed
	 *   3. jmp/call to TSS: check against DPL of the TSS
	 */
2713 if (reason == TASK_SWITCH_GATE) {
2714 if (idt_index != -1) {
			/* Software interrupts: check against the task gate's DPL. */
2716 struct desc_struct task_gate_desc;
2717 int dpl;
2718
2719 ret = read_interrupt_descriptor(ctxt, idt_index,
2720 &task_gate_desc);
2721 if (ret != X86EMUL_CONTINUE)
2722 return ret;
2723
2724 dpl = task_gate_desc.dpl;
2725 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2726 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2727 }
2728 } else if (reason != TASK_SWITCH_IRET) {
2729 int dpl = next_tss_desc.dpl;
2730 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2731 return emulate_gp(ctxt, tss_selector);
2732 }
2733
2734
2735 desc_limit = desc_limit_scaled(&next_tss_desc);
2736 if (!next_tss_desc.p ||
2737 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2738 desc_limit < 0x2b)) {
2739 emulate_ts(ctxt, tss_selector & 0xfffc);
2740 return X86EMUL_PROPAGATE_FAULT;
2741 }
2742
2743 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
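		/* Clear the Busy bit (type bit 1) in the old TSS descriptor. */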
2744 curr_tss_desc.type &= ~(1 << 1);
2745 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2746 }
2747
2748 if (reason == TASK_SWITCH_IRET)
2749 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2750
	/*
	 * Set the back link to the previous task only for CALL and gate
	 * switches; note that old_tss_sel is not used after this point.
	 */
2753 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2754 old_tss_sel = 0xffff;
2755
2756 if (next_tss_desc.type & 8)
2757 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2758 old_tss_base, &next_tss_desc);
2759 else
2760 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2761 old_tss_base, &next_tss_desc);
2762 if (ret != X86EMUL_CONTINUE)
2763 return ret;
2764
2765 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2766 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2767
2768 if (reason != TASK_SWITCH_IRET) {
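		/* Mark the new TSS descriptor as busy (set type bit 1). */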
2769 next_tss_desc.type |= (1 << 1);
2770 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2771 }
2772
2773 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2774 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2775
2776 if (has_error_code) {
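		/* Push the error code onto the new task's stack. */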
2777 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2778 ctxt->lock_prefix = 0;
2779 ctxt->src.val = (unsigned long) error_code;
2780 ret = em_push(ctxt);
2781 }
2782
2783 return ret;
2784}
2785
2786int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2787 u16 tss_selector, int idt_index, int reason,
2788 bool has_error_code, u32 error_code)
2789{
2790 int rc;
2791
2792 invalidate_registers(ctxt);
2793 ctxt->_eip = ctxt->eip;
2794 ctxt->dst.type = OP_NONE;
2795
2796 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2797 has_error_code, error_code);
2798
2799 if (rc == X86EMUL_CONTINUE) {
2800 ctxt->eip = ctxt->_eip;
2801 writeback_registers(ctxt);
2802 }
2803
2804 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2805}
2806
2807static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2808 struct operand *op)
2809{
2810 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2811
2812 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2813 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2814}
2815
2816static int em_das(struct x86_emulate_ctxt *ctxt)
2817{
2818 u8 al, old_al;
2819 bool af, cf, old_cf;
2820
2821 cf = ctxt->eflags & X86_EFLAGS_CF;
2822 al = ctxt->dst.val;
2823
2824 old_al = al;
2825 old_cf = cf;
2826 cf = false;
2827 af = ctxt->eflags & X86_EFLAGS_AF;
2828 if ((al & 0x0f) > 9 || af) {
2829 al -= 6;
2830 cf = old_cf | (al >= 250);
2831 af = true;
2832 } else {
2833 af = false;
2834 }
2835 if (old_al > 0x99 || old_cf) {
2836 al -= 0x60;
2837 cf = true;
2838 }
2839
2840 ctxt->dst.val = al;
2841
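	/* Set PF, ZF and SF by ORing AL with zero. */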
2842 ctxt->src.type = OP_IMM;
2843 ctxt->src.val = 0;
2844 ctxt->src.bytes = 1;
2845 emulate_2op_SrcV(ctxt, "or");
2846 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2847 if (cf)
2848 ctxt->eflags |= X86_EFLAGS_CF;
2849 if (af)
2850 ctxt->eflags |= X86_EFLAGS_AF;
2851 return X86EMUL_CONTINUE;
2852}
2853
2854static int em_call(struct x86_emulate_ctxt *ctxt)
2855{
2856 long rel = ctxt->src.val;
2857
2858 ctxt->src.val = (unsigned long)ctxt->_eip;
2859 jmp_rel(ctxt, rel);
2860 return em_push(ctxt);
2861}
2862
2863static int em_call_far(struct x86_emulate_ctxt *ctxt)
2864{
2865 u16 sel, old_cs;
2866 ulong old_eip;
2867 int rc;
2868
2869 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2870 old_eip = ctxt->_eip;
2871
2872 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2873 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2874 return X86EMUL_CONTINUE;
2875
2876 ctxt->_eip = 0;
2877 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2878
2879 ctxt->src.val = old_cs;
2880 rc = em_push(ctxt);
2881 if (rc != X86EMUL_CONTINUE)
2882 return rc;
2883
2884 ctxt->src.val = old_eip;
2885 return em_push(ctxt);
2886}
2887
2888static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2889{
2890 int rc;
2891
2892 ctxt->dst.type = OP_REG;
2893 ctxt->dst.addr.reg = &ctxt->_eip;
2894 ctxt->dst.bytes = ctxt->op_bytes;
2895 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2896 if (rc != X86EMUL_CONTINUE)
2897 return rc;
2898 rsp_increment(ctxt, ctxt->src.val);
2899 return X86EMUL_CONTINUE;
2900}
2901
2902static int em_add(struct x86_emulate_ctxt *ctxt)
2903{
2904 emulate_2op_SrcV(ctxt, "add");
2905 return X86EMUL_CONTINUE;
2906}
2907
2908static int em_or(struct x86_emulate_ctxt *ctxt)
2909{
2910 emulate_2op_SrcV(ctxt, "or");
2911 return X86EMUL_CONTINUE;
2912}
2913
2914static int em_adc(struct x86_emulate_ctxt *ctxt)
2915{
2916 emulate_2op_SrcV(ctxt, "adc");
2917 return X86EMUL_CONTINUE;
2918}
2919
2920static int em_sbb(struct x86_emulate_ctxt *ctxt)
2921{
2922 emulate_2op_SrcV(ctxt, "sbb");
2923 return X86EMUL_CONTINUE;
2924}
2925
2926static int em_and(struct x86_emulate_ctxt *ctxt)
2927{
2928 emulate_2op_SrcV(ctxt, "and");
2929 return X86EMUL_CONTINUE;
2930}
2931
2932static int em_sub(struct x86_emulate_ctxt *ctxt)
2933{
2934 emulate_2op_SrcV(ctxt, "sub");
2935 return X86EMUL_CONTINUE;
2936}
2937
2938static int em_xor(struct x86_emulate_ctxt *ctxt)
2939{
2940 emulate_2op_SrcV(ctxt, "xor");
2941 return X86EMUL_CONTINUE;
2942}
2943
2944static int em_cmp(struct x86_emulate_ctxt *ctxt)
2945{
2946 emulate_2op_SrcV(ctxt, "cmp");
2947
2948 ctxt->dst.type = OP_NONE;
2949 return X86EMUL_CONTINUE;
2950}
2951
2952static int em_test(struct x86_emulate_ctxt *ctxt)
2953{
2954 emulate_2op_SrcV(ctxt, "test");
2955
2956 ctxt->dst.type = OP_NONE;
2957 return X86EMUL_CONTINUE;
2958}
2959
2960static int em_xchg(struct x86_emulate_ctxt *ctxt)
2961{
	/* Write back the register source. */
2963 ctxt->src.val = ctxt->dst.val;
2964 write_register_operand(&ctxt->src);
2965
	/* Write back the memory destination with the implied LOCK prefix. */
2967 ctxt->dst.val = ctxt->src.orig_val;
2968 ctxt->lock_prefix = 1;
2969 return X86EMUL_CONTINUE;
2970}
2971
2972static int em_imul(struct x86_emulate_ctxt *ctxt)
2973{
2974 emulate_2op_SrcV_nobyte(ctxt, "imul");
2975 return X86EMUL_CONTINUE;
2976}
2977
2978static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2979{
2980 ctxt->dst.val = ctxt->src2.val;
2981 return em_imul(ctxt);
2982}
2983
2984static int em_cwd(struct x86_emulate_ctxt *ctxt)
2985{
2986 ctxt->dst.type = OP_REG;
2987 ctxt->dst.bytes = ctxt->src.bytes;
2988 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
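	/* Replicate the sign bit of the source into every bit of (r/e)DX. */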
2989 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2990
2991 return X86EMUL_CONTINUE;
2992}
2993
2994static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2995{
2996 u64 tsc = 0;
2997
2998 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2999 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3000 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3001 return X86EMUL_CONTINUE;
3002}
3003
3004static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3005{
3006 u64 pmc;
3007
3008 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3009 return emulate_gp(ctxt, 0);
3010 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3011 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3012 return X86EMUL_CONTINUE;
3013}
3014
3015static int em_mov(struct x86_emulate_ctxt *ctxt)
3016{
3017 memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
3018 return X86EMUL_CONTINUE;
3019}
3020
3021static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3022{
3023 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3024 return emulate_gp(ctxt, 0);
3025
	/* Disable writeback. */
3027 ctxt->dst.type = OP_NONE;
3028 return X86EMUL_CONTINUE;
3029}
3030
3031static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3032{
3033 unsigned long val;
3034
3035 if (ctxt->mode == X86EMUL_MODE_PROT64)
3036 val = ctxt->src.val & ~0ULL;
3037 else
3038 val = ctxt->src.val & ~0U;
3039
	/* #UD conditions are handled by check_dr_write() before we get here. */
3041 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3042 return emulate_gp(ctxt, 0);
3043
	/* Disable writeback. */
3045 ctxt->dst.type = OP_NONE;
3046 return X86EMUL_CONTINUE;
3047}
3048
3049static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3050{
3051 u64 msr_data;
3052
3053 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3054 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3055 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3056 return emulate_gp(ctxt, 0);
3057
3058 return X86EMUL_CONTINUE;
3059}
3060
3061static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3062{
3063 u64 msr_data;
3064
3065 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3066 return emulate_gp(ctxt, 0);
3067
3068 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3069 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3070 return X86EMUL_CONTINUE;
3071}
3072
3073static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3074{
3075 if (ctxt->modrm_reg > VCPU_SREG_GS)
3076 return emulate_ud(ctxt);
3077
3078 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3079 return X86EMUL_CONTINUE;
3080}
3081
3082static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3083{
3084 u16 sel = ctxt->src.val;
3085
3086 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3087 return emulate_ud(ctxt);
3088
3089 if (ctxt->modrm_reg == VCPU_SREG_SS)
3090 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3091
	/* Disable writeback. */
3093 ctxt->dst.type = OP_NONE;
3094 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3095}
3096
3097static int em_lldt(struct x86_emulate_ctxt *ctxt)
3098{
3099 u16 sel = ctxt->src.val;
3100
	/* Disable writeback. */
3102 ctxt->dst.type = OP_NONE;
3103 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3104}
3105
3106static int em_ltr(struct x86_emulate_ctxt *ctxt)
3107{
3108 u16 sel = ctxt->src.val;
3109
	/* Disable writeback. */
3111 ctxt->dst.type = OP_NONE;
3112 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3113}
3114
3115static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3116{
3117 int rc;
3118 ulong linear;
3119
3120 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3121 if (rc == X86EMUL_CONTINUE)
3122 ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
3124 ctxt->dst.type = OP_NONE;
3125 return X86EMUL_CONTINUE;
3126}
3127
3128static int em_clts(struct x86_emulate_ctxt *ctxt)
3129{
3130 ulong cr0;
3131
3132 cr0 = ctxt->ops->get_cr(ctxt, 0);
3133 cr0 &= ~X86_CR0_TS;
3134 ctxt->ops->set_cr(ctxt, 0, cr0);
3135 return X86EMUL_CONTINUE;
3136}
3137
3138static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3139{
3140 int rc;
3141
3142 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
3143 return X86EMUL_UNHANDLEABLE;
3144
3145 rc = ctxt->ops->fix_hypercall(ctxt);
3146 if (rc != X86EMUL_CONTINUE)
3147 return rc;
3148
	/* Let the processor re-execute the fixed hypercall. */
3150 ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
3152 ctxt->dst.type = OP_NONE;
3153 return X86EMUL_CONTINUE;
3154}
3155
3156static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3157 void (*get)(struct x86_emulate_ctxt *ctxt,
3158 struct desc_ptr *ptr))
3159{
3160 struct desc_ptr desc_ptr;
3161
3162 if (ctxt->mode == X86EMUL_MODE_PROT64)
3163 ctxt->op_bytes = 8;
3164 get(ctxt, &desc_ptr);
3165 if (ctxt->op_bytes == 2) {
3166 ctxt->op_bytes = 4;
3167 desc_ptr.address &= 0x00ffffff;
3168 }
	/* Disable writeback. */
3170 ctxt->dst.type = OP_NONE;
3171 return segmented_write(ctxt, ctxt->dst.addr.mem,
3172 &desc_ptr, 2 + ctxt->op_bytes);
3173}
3174
3175static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3176{
3177 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3178}
3179
3180static int em_sidt(struct x86_emulate_ctxt *ctxt)
3181{
3182 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3183}
3184
3185static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3186{
3187 struct desc_ptr desc_ptr;
3188 int rc;
3189
3190 if (ctxt->mode == X86EMUL_MODE_PROT64)
3191 ctxt->op_bytes = 8;
3192 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3193 &desc_ptr.size, &desc_ptr.address,
3194 ctxt->op_bytes);
3195 if (rc != X86EMUL_CONTINUE)
3196 return rc;
3197 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3198
	/* Disable writeback. */
3200 return X86EMUL_CONTINUE;
3201}
3202
3203static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3204{
3205 int rc;
3206
3207 rc = ctxt->ops->fix_hypercall(ctxt);
3208
	/* Disable writeback. */
3210 ctxt->dst.type = OP_NONE;
3211 return rc;
3212}
3213
3214static int em_lidt(struct x86_emulate_ctxt *ctxt)
3215{
3216 struct desc_ptr desc_ptr;
3217 int rc;
3218
3219 if (ctxt->mode == X86EMUL_MODE_PROT64)
3220 ctxt->op_bytes = 8;
3221 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3222 &desc_ptr.size, &desc_ptr.address,
3223 ctxt->op_bytes);
3224 if (rc != X86EMUL_CONTINUE)
3225 return rc;
3226 ctxt->ops->set_idt(ctxt, &desc_ptr);
3227
	/* Disable writeback. */
3229 return X86EMUL_CONTINUE;
3230}
3231
3232static int em_smsw(struct x86_emulate_ctxt *ctxt)
3233{
3234 ctxt->dst.bytes = 2;
3235 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3236 return X86EMUL_CONTINUE;
3237}
3238
3239static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3240{
3241 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3242 | (ctxt->src.val & 0x0f));
3243 ctxt->dst.type = OP_NONE;
3244 return X86EMUL_CONTINUE;
3245}
3246
3247static int em_loop(struct x86_emulate_ctxt *ctxt)
3248{
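	/* LOOP/LOOPE/LOOPNE: decrement (r/e)CX and branch while it is non-zero (and, for LOOPE/LOOPNE, the ZF condition holds). */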
3249 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3250 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3251 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3252 jmp_rel(ctxt, ctxt->src.val);
3253
3254 return X86EMUL_CONTINUE;
3255}
3256
3257static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3258{
3259 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3260 jmp_rel(ctxt, ctxt->src.val);
3261
3262 return X86EMUL_CONTINUE;
3263}
3264
3265static int em_in(struct x86_emulate_ctxt *ctxt)
3266{
3267 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3268 &ctxt->dst.val))
3269 return X86EMUL_IO_NEEDED;
3270
3271 return X86EMUL_CONTINUE;
3272}
3273
3274static int em_out(struct x86_emulate_ctxt *ctxt)
3275{
3276 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3277 &ctxt->src.val, 1);
3278
	/* Disable writeback. */
3280 return X86EMUL_CONTINUE;
3281}
3282
3283static int em_cli(struct x86_emulate_ctxt *ctxt)
3284{
3285 if (emulator_bad_iopl(ctxt))
3286 return emulate_gp(ctxt, 0);
3287
3288 ctxt->eflags &= ~X86_EFLAGS_IF;
3289 return X86EMUL_CONTINUE;
3290}
3291
3292static int em_sti(struct x86_emulate_ctxt *ctxt)
3293{
3294 if (emulator_bad_iopl(ctxt))
3295 return emulate_gp(ctxt, 0);
3296
3297 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3298 ctxt->eflags |= X86_EFLAGS_IF;
3299 return X86EMUL_CONTINUE;
3300}
3301
3302static int em_bt(struct x86_emulate_ctxt *ctxt)
3303{
	/* Disable writeback. */
3305 ctxt->dst.type = OP_NONE;
	/* Only the sub-word bit offset is used here. */
3307 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
3308
3309 emulate_2op_SrcV_nobyte(ctxt, "bt");
3310 return X86EMUL_CONTINUE;
3311}
3312
3313static int em_bts(struct x86_emulate_ctxt *ctxt)
3314{
3315 emulate_2op_SrcV_nobyte(ctxt, "bts");
3316 return X86EMUL_CONTINUE;
3317}
3318
3319static int em_btr(struct x86_emulate_ctxt *ctxt)
3320{
3321 emulate_2op_SrcV_nobyte(ctxt, "btr");
3322 return X86EMUL_CONTINUE;
3323}
3324
3325static int em_btc(struct x86_emulate_ctxt *ctxt)
3326{
3327 emulate_2op_SrcV_nobyte(ctxt, "btc");
3328 return X86EMUL_CONTINUE;
3329}
3330
3331static int em_bsf(struct x86_emulate_ctxt *ctxt)
3332{
3333 emulate_2op_SrcV_nobyte(ctxt, "bsf");
3334 return X86EMUL_CONTINUE;
3335}
3336
3337static int em_bsr(struct x86_emulate_ctxt *ctxt)
3338{
3339 emulate_2op_SrcV_nobyte(ctxt, "bsr");
3340 return X86EMUL_CONTINUE;
3341}
3342
3343static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3344{
3345 u32 eax, ebx, ecx, edx;
3346
3347 eax = reg_read(ctxt, VCPU_REGS_RAX);
3348 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3349 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3350 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3351 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3352 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3353 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3354 return X86EMUL_CONTINUE;
3355}
3356
3357static int em_lahf(struct x86_emulate_ctxt *ctxt)
3358{
3359 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3360 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3361 return X86EMUL_CONTINUE;
3362}
3363
3364static int em_bswap(struct x86_emulate_ctxt *ctxt)
3365{
3366 switch (ctxt->op_bytes) {
3367#ifdef CONFIG_X86_64
3368 case 8:
3369 asm("bswap %0" : "+r"(ctxt->dst.val));
3370 break;
3371#endif
3372 default:
3373 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3374 break;
3375 }
3376 return X86EMUL_CONTINUE;
3377}
3378
3379static bool valid_cr(int nr)
3380{
3381 switch (nr) {
3382 case 0:
3383 case 2 ... 4:
3384 case 8:
3385 return true;
3386 default:
3387 return false;
3388 }
3389}
3390
3391static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3392{
3393 if (!valid_cr(ctxt->modrm_reg))
3394 return emulate_ud(ctxt);
3395
3396 return X86EMUL_CONTINUE;
3397}
3398
3399static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3400{
3401 u64 new_val = ctxt->src.val64;
3402 int cr = ctxt->modrm_reg;
3403 u64 efer = 0;
3404
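	/* Generic reserved-bit masks, indexed by CR number. */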
3405 static u64 cr_reserved_bits[] = {
3406 0xffffffff00000000ULL,
3407 0, 0, 0,
3408 CR4_RESERVED_BITS,
3409 0, 0, 0,
3410 CR8_RESERVED_BITS,
3411 };
3412
3413 if (!valid_cr(cr))
3414 return emulate_ud(ctxt);
3415
3416 if (new_val & cr_reserved_bits[cr])
3417 return emulate_gp(ctxt, 0);
3418
3419 switch (cr) {
3420 case 0: {
3421 u64 cr4;
3422 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3423 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3424 return emulate_gp(ctxt, 0);
3425
3426 cr4 = ctxt->ops->get_cr(ctxt, 4);
3427 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3428
3429 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3430 !(cr4 & X86_CR4_PAE))
3431 return emulate_gp(ctxt, 0);
3432
3433 break;
3434 }
3435 case 3: {
3436 u64 rsvd = 0;
3437
3438 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3439 if (efer & EFER_LMA)
3440 rsvd = CR3_L_MODE_RESERVED_BITS;
3441 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3442 rsvd = CR3_PAE_RESERVED_BITS;
3443 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3444 rsvd = CR3_NONPAE_RESERVED_BITS;
3445
3446 if (new_val & rsvd)
3447 return emulate_gp(ctxt, 0);
3448
3449 break;
3450 }
3451 case 4: {
3452 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3453
3454 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3455 return emulate_gp(ctxt, 0);
3456
3457 break;
3458 }
3459 }
3460
3461 return X86EMUL_CONTINUE;
3462}
3463
3464static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3465{
3466 unsigned long dr7;
3467
3468 ctxt->ops->get_dr(ctxt, 7, &dr7);
3469
	/* Check whether DR7.GD (general detect enable, bit 13) is set. */
3471 return dr7 & (1 << 13);
3472}
3473
3474static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3475{
3476 int dr = ctxt->modrm_reg;
3477 u64 cr4;
3478
3479 if (dr > 7)
3480 return emulate_ud(ctxt);
3481
3482 cr4 = ctxt->ops->get_cr(ctxt, 4);
3483 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3484 return emulate_ud(ctxt);
3485
3486 if (check_dr7_gd(ctxt))
3487 return emulate_db(ctxt);
3488
3489 return X86EMUL_CONTINUE;
3490}
3491
3492static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3493{
3494 u64 new_val = ctxt->src.val64;
3495 int dr = ctxt->modrm_reg;
3496
3497 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3498 return emulate_gp(ctxt, 0);
3499
3500 return check_dr_read(ctxt);
3501}
3502
3503static int check_svme(struct x86_emulate_ctxt *ctxt)
3504{
3505 u64 efer;
3506
3507 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3508
3509 if (!(efer & EFER_SVME))
3510 return emulate_ud(ctxt);
3511
3512 return X86EMUL_CONTINUE;
3513}
3514
3515static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3516{
3517 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3518
	/* Valid physical address? (bits 63:48 must be clear) */
3520 if (rax & 0xffff000000000000ULL)
3521 return emulate_gp(ctxt, 0);
3522
3523 return check_svme(ctxt);
3524}
3525
3526static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3527{
3528 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3529
3530 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3531 return emulate_ud(ctxt);
3532
3533 return X86EMUL_CONTINUE;
3534}
3535
3536static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3537{
3538 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3539 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3540
3541 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3542 (rcx > 3))
3543 return emulate_gp(ctxt, 0);
3544
3545 return X86EMUL_CONTINUE;
3546}
3547
3548static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3549{
3550 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3551 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3552 return emulate_gp(ctxt, 0);
3553
3554 return X86EMUL_CONTINUE;
3555}
3556
3557static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3558{
3559 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3560 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3561 return emulate_gp(ctxt, 0);
3562
3563 return X86EMUL_CONTINUE;
3564}
3565
3566#define D(_y) { .flags = (_y) }
3567#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3568#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3569 .check_perm = (_p) }
3570#define N D(0)
3571#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3572#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3573#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3574#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3575#define II(_f, _e, _i) \
3576 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3577#define IIP(_f, _e, _i, _p) \
3578 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3579 .check_perm = (_p) }
3580#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3581
3582#define D2bv(_f) D((_f) | ByteOp), D(_f)
3583#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3584#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3585#define I2bvIP(_f, _e, _i, _p) \
3586 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3587
3588#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3589 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3590 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3591
3592static const struct opcode group7_rm1[] = {
3593 DI(SrcNone | Priv, monitor),
3594 DI(SrcNone | Priv, mwait),
3595 N, N, N, N, N, N,
3596};
3597
3598static const struct opcode group7_rm3[] = {
3599 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3600 II(SrcNone | Prot | VendorSpecific, em_vmmcall, vmmcall),
3601 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3602 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3603 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3604 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3605 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3606 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3607};
3608
3609static const struct opcode group7_rm7[] = {
3610 N,
3611 DIP(SrcNone, rdtscp, check_rdtsc),
3612 N, N, N, N, N, N,
3613};
3614
3615static const struct opcode group1[] = {
3616 I(Lock, em_add),
3617 I(Lock | PageTable, em_or),
3618 I(Lock, em_adc),
3619 I(Lock, em_sbb),
3620 I(Lock | PageTable, em_and),
3621 I(Lock, em_sub),
3622 I(Lock, em_xor),
3623 I(0, em_cmp),
3624};
3625
3626static const struct opcode group1A[] = {
3627 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3628};
3629
3630static const struct opcode group3[] = {
3631 I(DstMem | SrcImm, em_test),
3632 I(DstMem | SrcImm, em_test),
3633 I(DstMem | SrcNone | Lock, em_not),
3634 I(DstMem | SrcNone | Lock, em_neg),
3635 I(SrcMem, em_mul_ex),
3636 I(SrcMem, em_imul_ex),
3637 I(SrcMem, em_div_ex),
3638 I(SrcMem, em_idiv_ex),
3639};
3640
3641static const struct opcode group4[] = {
3642 I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
3643 I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
3644 N, N, N, N, N, N,
3645};
3646
3647static const struct opcode group5[] = {
3648 I(DstMem | SrcNone | Lock, em_grp45),
3649 I(DstMem | SrcNone | Lock, em_grp45),
3650 I(SrcMem | Stack, em_grp45),
3651 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3652 I(SrcMem | Stack, em_grp45),
3653 I(SrcMemFAddr | ImplicitOps, em_grp45),
3654 I(SrcMem | Stack, em_grp45), N,
3655};
3656
3657static const struct opcode group6[] = {
3658 DI(Prot, sldt),
3659 DI(Prot, str),
3660 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3661 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3662 N, N, N, N,
3663};
3664
3665static const struct group_dual group7 = { {
3666 II(Mov | DstMem | Priv, em_sgdt, sgdt),
3667 II(Mov | DstMem | Priv, em_sidt, sidt),
3668 II(SrcMem | Priv, em_lgdt, lgdt),
3669 II(SrcMem | Priv, em_lidt, lidt),
3670 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3671 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3672 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3673}, {
3674 I(SrcNone | Priv | VendorSpecific, em_vmcall),
3675 EXT(0, group7_rm1),
3676 N, EXT(0, group7_rm3),
3677 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3678 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3679 EXT(0, group7_rm7),
3680} };
3681
3682static const struct opcode group8[] = {
3683 N, N, N, N,
3684 I(DstMem | SrcImmByte, em_bt),
3685 I(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3686 I(DstMem | SrcImmByte | Lock, em_btr),
3687 I(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3688};
3689
3690static const struct group_dual group9 = { {
3691 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3692}, {
3693 N, N, N, N, N, N, N, N,
3694} };
3695
3696static const struct opcode group11[] = {
3697 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3698 X7(D(Undefined)),
3699};
3700
3701static const struct gprefix pfx_0f_6f_0f_7f = {
3702 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3703};
3704
3705static const struct gprefix pfx_vmovntpx = {
3706 I(0, em_mov), N, N, N,
3707};
3708
3709static const struct opcode opcode_table[256] = {
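	/* Entries are in opcode order; blank lines separate groups of eight or sixteen opcodes (0x00 - 0x07, 0x08 - 0x0F, ...). */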
3710
3711 I6ALU(Lock, em_add),
3712 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3713 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3714
3715 I6ALU(Lock | PageTable, em_or),
3716 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3717 N,
3718
3719 I6ALU(Lock, em_adc),
3720 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3721 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3722
3723 I6ALU(Lock, em_sbb),
3724 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3725 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3726
3727 I6ALU(Lock | PageTable, em_and), N, N,
3728
3729 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3730
3731 I6ALU(Lock, em_xor), N, N,
3732
3733 I6ALU(0, em_cmp), N, N,
3734
3735 X16(D(DstReg)),
3736
3737 X8(I(SrcReg | Stack, em_push)),
3738
3739 X8(I(DstReg | Stack, em_pop)),
3740
3741 I(ImplicitOps | Stack | No64, em_pusha),
3742 I(ImplicitOps | Stack | No64, em_popa),
3743 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3744 N, N, N, N,
3745
3746 I(SrcImm | Mov | Stack, em_push),
3747 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3748 I(SrcImmByte | Mov | Stack, em_push),
3749 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3750 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
3751 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
3752
3753 X16(D(SrcImmByte)),
3754
3755 G(ByteOp | DstMem | SrcImm, group1),
3756 G(DstMem | SrcImm, group1),
3757 G(ByteOp | DstMem | SrcImm | No64, group1),
3758 G(DstMem | SrcImmByte, group1),
3759 I2bv(DstMem | SrcReg | ModRM, em_test),
3760 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3761
3762 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3763 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3764 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3765 D(ModRM | SrcMem | NoAccess | DstReg),
3766 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3767 G(0, group1A),
3768
3769 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3770
3771 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3772 I(SrcImmFAddr | No64, em_call_far), N,
3773 II(ImplicitOps | Stack, em_pushf, pushf),
3774 II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf),
3775
3776 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3777 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3778 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3779 I2bv(SrcSI | DstDI | String, em_cmp),
3780
3781 I2bv(DstAcc | SrcImm, em_test),
3782 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3783 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3784 I2bv(SrcAcc | DstDI | String, em_cmp),
3785
3786 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3787
3788 X8(I(DstReg | SrcImm | Mov, em_mov)),
3789
3790 D2bv(DstMem | SrcImmByte | ModRM),
3791 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3792 I(ImplicitOps | Stack, em_ret),
3793 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3794 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3795 G(ByteOp, group11), G(0, group11),
3796
3797 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
3798 N, I(ImplicitOps | Stack, em_ret_far),
3799 D(ImplicitOps), DI(SrcImmByte, intn),
3800 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3801
3802 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3803 N, N, N, N,
3804
3805 N, N, N, N, N, N, N, N,
3806
3807 X3(I(SrcImmByte, em_loop)),
3808 I(SrcImmByte, em_jcxz),
3809 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
3810 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
3811
3812 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3813 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3814 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
3815 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
3816
3817 N, DI(ImplicitOps, icebp), N, N,
3818 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3819 G(ByteOp, group3), G(0, group3),
3820
3821 D(ImplicitOps), D(ImplicitOps),
3822 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3823 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3824};
3825
3826static const struct opcode twobyte_table[256] = {
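	/* 0F-prefixed opcodes, in opcode order; blank lines separate groups of eight or sixteen. */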
3827
3828 G(0, group6), GD(0, &group7), N, N,
3829 N, I(ImplicitOps | VendorSpecific, em_syscall),
3830 II(ImplicitOps | Priv, em_clts, clts), N,
3831 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3832 N, D(ImplicitOps | ModRM), N, N,
3833
3834 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3835
3836 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3837 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3838 IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
3839 IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
3840 N, N, N, N,
3841 N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
3842 N, N, N, N,
3843
3844 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
3845 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3846 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
3847 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
3848 I(ImplicitOps | VendorSpecific, em_sysenter),
3849 I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3850 N, N,
3851 N, N, N, N, N, N, N, N,
3852
3853 X16(D(DstReg | SrcMem | ModRM | Mov)),
3854
3855 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3856
3857 N, N, N, N,
3858 N, N, N, N,
3859 N, N, N, N,
3860 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3861
3862 N, N, N, N,
3863 N, N, N, N,
3864 N, N, N, N,
3865 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3866
3867 X16(D(SrcImm)),
3868
3869 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3870
3871 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3872 II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
3873 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3874 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3875
3876 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3877 DI(ImplicitOps, rsm),
3878 I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
3879 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3880 D(DstMem | SrcReg | Src2CL | ModRM),
3881 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3882
3883 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
3884 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3885 I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
3886 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3887 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3888 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3889
3890 N, N,
3891 G(BitOp, group8),
3892 I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
3893 I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
3894 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3895
3896 D2bv(DstMem | SrcReg | ModRM | Lock),
3897 N, D(DstMem | SrcReg | ModRM | Mov),
3898 N, N, N, GD(0, &group9),
3899
3900 X8(I(DstReg, em_bswap)),
3901
3902 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3903
3904 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3905
3906 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3907};
3908
3909#undef D
3910#undef N
3911#undef G
3912#undef GD
3913#undef I
3914#undef GP
3915#undef EXT
3916
3917#undef D2bv
3918#undef D2bvIP
3919#undef I2bv
3920#undef I2bvIP
3921#undef I6ALU
3922
3923static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3924{
3925 unsigned size;
3926
3927 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
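	/* Immediates are at most 4 bytes even with a 64-bit operand size. */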
3928 if (size == 8)
3929 size = 4;
3930 return size;
3931}
3932
3933static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3934 unsigned size, bool sign_extension)
3935{
3936 int rc = X86EMUL_CONTINUE;
3937
3938 op->type = OP_IMM;
3939 op->bytes = size;
3940 op->addr.mem.ea = ctxt->_eip;
3941
3942 switch (op->bytes) {
3943 case 1:
3944 op->val = insn_fetch(s8, ctxt);
3945 break;
3946 case 2:
3947 op->val = insn_fetch(s16, ctxt);
3948 break;
3949 case 4:
3950 op->val = insn_fetch(s32, ctxt);
3951 break;
3952 }
3953 if (!sign_extension) {
3954 switch (op->bytes) {
3955 case 1:
3956 op->val &= 0xff;
3957 break;
3958 case 2:
3959 op->val &= 0xffff;
3960 break;
3961 case 4:
3962 op->val &= 0xffffffff;
3963 break;
3964 }
3965 }
3966done:
3967 return rc;
3968}
3969
3970static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3971 unsigned d)
3972{
3973 int rc = X86EMUL_CONTINUE;
3974
3975 switch (d) {
3976 case OpReg:
3977 decode_register_operand(ctxt, op);
3978 break;
3979 case OpImmUByte:
3980 rc = decode_imm(ctxt, op, 1, false);
3981 break;
3982 case OpMem:
3983 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3984 mem_common:
3985 *op = ctxt->memop;
3986 ctxt->memopp = op;
3987 if ((ctxt->d & BitOp) && op == &ctxt->dst)
3988 fetch_bit_operand(ctxt);
3989 op->orig_val = op->val;
3990 break;
3991 case OpMem64:
3992 ctxt->memop.bytes = 8;
3993 goto mem_common;
3994 case OpAcc:
3995 op->type = OP_REG;
3996 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3997 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
3998 fetch_register_operand(op);
3999 op->orig_val = op->val;
4000 break;
4001 case OpDI:
4002 op->type = OP_MEM;
4003 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4004 op->addr.mem.ea =
4005 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4006 op->addr.mem.seg = VCPU_SREG_ES;
4007 op->val = 0;
4008 op->count = 1;
4009 break;
4010 case OpDX:
4011 op->type = OP_REG;
4012 op->bytes = 2;
4013 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4014 fetch_register_operand(op);
4015 break;
4016 case OpCL:
4017 op->bytes = 1;
4018 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4019 break;
4020 case OpImmByte:
4021 rc = decode_imm(ctxt, op, 1, true);
4022 break;
4023 case OpOne:
4024 op->bytes = 1;
4025 op->val = 1;
4026 break;
4027 case OpImm:
4028 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4029 break;
4030 case OpMem8:
4031 ctxt->memop.bytes = 1;
4032 goto mem_common;
4033 case OpMem16:
4034 ctxt->memop.bytes = 2;
4035 goto mem_common;
4036 case OpMem32:
4037 ctxt->memop.bytes = 4;
4038 goto mem_common;
4039 case OpImmU16:
4040 rc = decode_imm(ctxt, op, 2, false);
4041 break;
4042 case OpImmU:
4043 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4044 break;
4045 case OpSI:
4046 op->type = OP_MEM;
4047 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4048 op->addr.mem.ea =
4049 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4050 op->addr.mem.seg = seg_override(ctxt);
4051 op->val = 0;
4052 op->count = 1;
4053 break;
4054 case OpImmFAddr:
4055 op->type = OP_IMM;
4056 op->addr.mem.ea = ctxt->_eip;
4057 op->bytes = ctxt->op_bytes + 2;
4058 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4059 break;
4060 case OpMemFAddr:
4061 ctxt->memop.bytes = ctxt->op_bytes + 2;
4062 goto mem_common;
4063 case OpES:
4064 op->val = VCPU_SREG_ES;
4065 break;
4066 case OpCS:
4067 op->val = VCPU_SREG_CS;
4068 break;
4069 case OpSS:
4070 op->val = VCPU_SREG_SS;
4071 break;
4072 case OpDS:
4073 op->val = VCPU_SREG_DS;
4074 break;
4075 case OpFS:
4076 op->val = VCPU_SREG_FS;
4077 break;
4078 case OpGS:
4079 op->val = VCPU_SREG_GS;
4080 break;
4081 case OpImplicit:
		/* Special instructions do their own operand decoding. */
4083 default:
4084 op->type = OP_NONE;
4085 break;
4086 }
4087
4088done:
4089 return rc;
4090}
4091
4092int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4093{
4094 int rc = X86EMUL_CONTINUE;
4095 int mode = ctxt->mode;
4096 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4097 bool op_prefix = false;
4098 struct opcode opcode;
4099
4100 ctxt->memop.type = OP_NONE;
4101 ctxt->memopp = NULL;
4102 ctxt->_eip = ctxt->eip;
4103 ctxt->fetch.start = ctxt->_eip;
4104 ctxt->fetch.end = ctxt->fetch.start + insn_len;
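	/* Pre-fill the fetch cache with any instruction bytes the caller already has. */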
4105 if (insn_len > 0)
4106 memcpy(ctxt->fetch.data, insn, insn_len);
4107
4108 switch (mode) {
4109 case X86EMUL_MODE_REAL:
4110 case X86EMUL_MODE_VM86:
4111 case X86EMUL_MODE_PROT16:
4112 def_op_bytes = def_ad_bytes = 2;
4113 break;
4114 case X86EMUL_MODE_PROT32:
4115 def_op_bytes = def_ad_bytes = 4;
4116 break;
4117#ifdef CONFIG_X86_64
4118 case X86EMUL_MODE_PROT64:
4119 def_op_bytes = 4;
4120 def_ad_bytes = 8;
4121 break;
4122#endif
4123 default:
4124 return EMULATION_FAILED;
4125 }
4126
4127 ctxt->op_bytes = def_op_bytes;
4128 ctxt->ad_bytes = def_ad_bytes;
4129
	/* Legacy prefixes. */
4131 for (;;) {
4132 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4133 case 0x66:
4134 op_prefix = true;
			/* switch between 2/4 byte operand size */
4136 ctxt->op_bytes = def_op_bytes ^ 6;
4137 break;
4138 case 0x67:
4139 if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 byte address size */
4141 ctxt->ad_bytes = def_ad_bytes ^ 12;
4142 else
				/* switch between 2/4 byte address size */
4144 ctxt->ad_bytes = def_ad_bytes ^ 6;
4145 break;
4146 case 0x26:
4147 case 0x2e:
4148 case 0x36:
4149 case 0x3e:
4150 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
4151 break;
4152 case 0x64:
4153 case 0x65:
4154 set_seg_override(ctxt, ctxt->b & 7);
4155 break;
4156 case 0x40 ... 0x4f:
4157 if (mode != X86EMUL_MODE_PROT64)
4158 goto done_prefixes;
4159 ctxt->rex_prefix = ctxt->b;
4160 continue;
4161 case 0xf0:
4162 ctxt->lock_prefix = 1;
4163 break;
4164 case 0xf2:
4165 case 0xf3:
4166 ctxt->rep_prefix = ctxt->b;
4167 break;
4168 default:
4169 goto done_prefixes;
4170 }
4171
		/* Any legacy prefix after a REX prefix nullifies its effect. */
4174 ctxt->rex_prefix = 0;
4175 }
4176
4177done_prefixes:
4178
	/* REX prefix: REX.W selects a 64-bit operand size. */
4180 if (ctxt->rex_prefix & 8)
4181 ctxt->op_bytes = 8;
4182
	/* Opcode byte(s). */
4184 opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
4186 if (ctxt->b == 0x0f) {
4187 ctxt->twobyte = 1;
4188 ctxt->b = insn_fetch(u8, ctxt);
4189 opcode = twobyte_table[ctxt->b];
4190 }
4191 ctxt->d = opcode.flags;
4192
4193 if (ctxt->d & ModRM)
4194 ctxt->modrm = insn_fetch(u8, ctxt);
4195
4196 while (ctxt->d & GroupMask) {
4197 switch (ctxt->d & GroupMask) {
4198 case Group:
4199 goffset = (ctxt->modrm >> 3) & 7;
4200 opcode = opcode.u.group[goffset];
4201 break;
4202 case GroupDual:
4203 goffset = (ctxt->modrm >> 3) & 7;
4204 if ((ctxt->modrm >> 6) == 3)
4205 opcode = opcode.u.gdual->mod3[goffset];
4206 else
4207 opcode = opcode.u.gdual->mod012[goffset];
4208 break;
4209 case RMExt:
4210 goffset = ctxt->modrm & 7;
4211 opcode = opcode.u.group[goffset];
4212 break;
4213 case Prefix:
4214 if (ctxt->rep_prefix && op_prefix)
4215 return EMULATION_FAILED;
4216 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4217 switch (simd_prefix) {
4218 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4219 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4220 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4221 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4222 }
4223 break;
4224 default:
4225 return EMULATION_FAILED;
4226 }
4227
4228 ctxt->d &= ~(u64)GroupMask;
4229 ctxt->d |= opcode.flags;
4230 }
4231
4232 ctxt->execute = opcode.u.execute;
4233 ctxt->check_perm = opcode.check_perm;
4234 ctxt->intercept = opcode.intercept;
4235
	/* Unrecognised or explicitly undefined? */
4237 if (ctxt->d == 0 || (ctxt->d & Undefined))
4238 return EMULATION_FAILED;
4239
4240 if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
4241 return EMULATION_FAILED;
4242
4243 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4244 ctxt->op_bytes = 8;
4245
4246 if (ctxt->d & Op3264) {
4247 if (mode == X86EMUL_MODE_PROT64)
4248 ctxt->op_bytes = 8;
4249 else
4250 ctxt->op_bytes = 4;
4251 }
4252
4253 if (ctxt->d & Sse)
4254 ctxt->op_bytes = 16;
4255 else if (ctxt->d & Mmx)
4256 ctxt->op_bytes = 8;
4257
	/* ModRM and SIB bytes. */
4259 if (ctxt->d & ModRM) {
4260 rc = decode_modrm(ctxt, &ctxt->memop);
4261 if (!ctxt->has_seg_override)
4262 set_seg_override(ctxt, ctxt->modrm_seg);
4263 } else if (ctxt->d & MemAbs)
4264 rc = decode_abs(ctxt, &ctxt->memop);
4265 if (rc != X86EMUL_CONTINUE)
4266 goto done;
4267
4268 if (!ctxt->has_seg_override)
4269 set_seg_override(ctxt, VCPU_SREG_DS);
4270
4271 ctxt->memop.addr.mem.seg = seg_override(ctxt);
4272
4273 if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
4274 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
4275
	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
4280 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4281 if (rc != X86EMUL_CONTINUE)
4282 goto done;
4283
4284
	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
4288 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4289 if (rc != X86EMUL_CONTINUE)
4290 goto done;
4291
	/* Decode and fetch the destination operand: register or memory. */
4293 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4294
4295done:
4296 if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
4297 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4298
4299 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4300}
4301
4302bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4303{
4304 return ctxt->d & PageTable;
4305}
4306
4307static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4308{
	/*
	 * The second termination condition applies only to REPE and REPNE
	 * prefixed string compares (cmps/scas):
	 *   - if REPE/REPZ and ZF = 0, the instruction is done
	 *   - if REPNE/REPNZ and ZF = 1, the instruction is done
	 */
4316 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4317 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4318 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4319 ((ctxt->eflags & EFLG_ZF) == 0))
4320 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4321 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4322 return true;
4323
4324 return false;
4325}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	/*
	 * fwait forces any pending unmasked x87 exception to be raised; the
	 * fixup entry below catches the resulting fault and records it in
	 * 'fault' so that it can be reflected to the guest as #MF.
	 */
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* Instructions flagged No64 are invalid in 64-bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

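	/* LOCK prefix is allowed only with some instructions */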
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Far pointer sources must come from memory, not a register. */
	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* #UD if CR0.EM is set, or for SSE if CR4.OSFXSR is clear. */
	if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
	    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* SSE/MMX with CR0.TS set raises #NM (device not available). */
	if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	if (ctxt->d & Mmx) {
		rc = flush_pending_x87_faults(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
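		/*
		 * Now that we know the fpu is exception safe, we can fetch
		 * operands from it.
		 */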
		fetch_possible_mmx_operand(ctxt, &ctxt->src);
		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
		if (!(ctxt->d & Mov))
			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

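	/* Privileged instruction can be executed only in CPL=0 */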
	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

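	/* Instruction can only be executed in protected mode */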
	if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
		rc = emulate_ud(ctxt);
		goto done;
	}

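	/* Do instruction specific permission checks */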
	if (ctxt->check_perm) {
		rc = ctxt->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String)) {
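		/* All REP prefixes have the same first termination condition */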
		if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
			/* Count is zero: skip the instruction entirely. */
			ctxt->eip = ctxt->_eip;
			goto done;
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

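	/*
	 * Fetch the original destination value unless the instruction is a
	 * plain move (Mov), in which case the old value is never needed and
	 * the (slow) emulated read can be skipped.
	 */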
	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Opcodes with an execute callback are dispatched through it; the
	 * switches below handle the remaining one- and two-byte opcodes.
	 */
	if (ctxt->execute) {
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->twobyte)
		goto twobyte_insn;

	switch (ctxt->b) {
	case 0x40 ... 0x47:	/* inc r16/r32 */
		emulate_1op(ctxt, "inc");
		break;
	case 0x48 ... 0x4f:	/* dec r16/r32 */
		emulate_1op(ctxt, "dec");
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f:	/* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d:		/* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97:	/* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			break;
		rc = em_xchg(ctxt);
		break;
	case 0x98:		/* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xc0 ... 0xc1:	/* Grp2: shift/rotate by imm8 */
		rc = em_grp2(ctxt);
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xd0 ... 0xd1:	/* Grp2: shift/rotate by 1 */
		rc = em_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2: shift/rotate by CL */
		ctxt->src.val = reg_read(ctxt, VCPU_REGS_RCX);
		rc = em_grp2(ctxt);
		break;
	case 0xe9:		/* jmp rel */
	case 0xeb:		/* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	rc = writeback(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto done;

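	/*
	 * Restore the destination type in case the decode is reused
	 * (this happens for repeated string instructions).
	 */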
	ctxt->dst.type = saved_dst_type;

	/* Step RSI/RDI by the element size (direction given by EFLAGS.DF). */
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		/* Decrement RCX by the number of iterations just emulated. */
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);

		if (!string_insn_completed(ctxt)) {
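			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */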
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
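				/*
				 * Clear the memory read cache so the next
				 * iteration re-reads its operands rather than
				 * reusing stale cached values, and flush the
				 * register cache before leaving the emulator.
				 */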
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done;
		}
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20:		/* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21:		/* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
		if (!test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f:	/* jcc (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xa4:		/* shld imm8, r, r/m */
	case 0xa5:		/* shld cl, r, r/m */
		emulate_2op_cl(ctxt, "shld");
		break;
	case 0xac:		/* shrd imm8, r, r/m */
	case 0xad:		/* shrd cl, r, r/m */
		emulate_2op_cl(ctxt, "shrd");
		break;
	case 0xae:		/* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV(ctxt, "add");
		/* Write back the register source. */
		ctxt->src.val = ctxt->dst.orig_val;
		write_register_operand(&ctxt->src);
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
							(u64) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}