1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/kvm_host.h>
24#include "kvm_cache_regs.h"
25#include <linux/module.h>
26#include <asm/kvm_emulate.h>
27
28#include "x86.h"
29#include "tss.h"
30
31
32
33
34
35
36
37
38
39
40
/*
 * Opcode effects and characteristics, packed into the 32-bit "flags"
 * field of struct opcode (see the decode tables elsewhere in this file).
 */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstDX       (8<<1)	/* Destination is in DX register */
#define DstMask     (0xf<<1)
/* Source operand type. */
#define SrcNone     (0<<5)	/* No source operand. */
#define SrcReg      (1<<5)	/* Register operand. */
#define SrcMem      (2<<5)	/* Memory operand. */
#define SrcMem16    (3<<5)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<5)	/* Memory operand (32-bit). */
#define SrcImm      (5<<5)	/* Immediate operand. */
#define SrcImmByte  (6<<5)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<5)	/* Implied '1' */
#define SrcImmUByte (8<<5)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<5)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<5)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<5)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<5)	/* Source is far address in memory */
#define SrcAcc      (0xd<<5)	/* Source Accumulator */
#define SrcImmU16   (0xe<<5)	/* Immediate operand, unsigned, 16 bits */
#define SrcDX       (0xf<<5)	/* Source is in DX register */
#define SrcMask     (0xf<<5)
/* Generic ModRM decode. */
#define ModRM       (1<<9)
/* Destination is only written; never read. */
#define Mov         (1<<10)
#define BitOp       (1<<11)	/* Bit-test family; see fetch_bit_operand() */
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Which sub-table mechanism the opcode uses */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Misc flags */
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22)	/* Vendor specific instruction */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)	/* instruction is invalid in 64-bit mode */
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)

/* Repeat a decode-table entry N times, used to fill opcode tables. */
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
109
/*
 * One entry in the opcode decode tables: decode flags plus either the
 * emulation callback or a pointer to a sub-table, selected by the
 * GroupMask bits in @flags.
 */
struct opcode {
	u32 flags;			/* ByteOp/DstMask/SrcMask/... decode flags */
	u8 intercept;			/* intercept id handed to ->intercept() */
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;		/* flags & GroupMask == Group */
		struct group_dual *gdual;	/* flags & GroupMask == GroupDual */
		struct gprefix *gprefix;	/* flags & GroupMask == Prefix */
	} u;
	/* Optional extra permission check run before execution. */
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

/* Sub-table indexed by ModRM.reg, with separate rows for mod != 3 / mod == 3. */
struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

/* Sub-table selected by the mandatory operand prefix (none/66/F2/F3). */
struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};
133
134
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)	/* two-bit I/O privilege level field */
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

/* Architecturally reserved EFLAGS bits: must-be-zero mask and must-be-one mask. */
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
155
156
157
158
159
160
161
162
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from the saved guest value before the
 * emulated instruction and written back to it afterwards.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp) \
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; " \
	"push %"_tmp"; " \
	"push %"_tmp"; " \
	"movl %"_msk",%"_LO32 _tmp"; " \
	"andl %"_LO32 _tmp",("_STK"); " \
	"pushf; " \
	"notl %"_LO32 _tmp"; " \
	"andl %"_LO32 _tmp",("_STK"); " \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
	"pop %"_tmp"; " \
	"orl %"_LO32 _tmp",("_STK"); " \
	"popf; " \
	"pop %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */ \
	"pushf; " \
	"pop %"_tmp"; " \
	"andl %"_msk",%"_LO32 _tmp"; " \
	"orl %"_LO32 _tmp",%"_sav"; "

/* Emit x only when building a 64-bit kernel. */
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif
207
/*
 * Core two-operand emulation fragment: runs the guest instruction on
 * host registers with EFLAGS swapped in/out via _PRE/_POST_EFLAGS.
 * Operand order in the asm template: %0=eflags, %1=dst, %2=tmp,
 * %3=src, %4=EFLAGS_MASK.
 */
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
	do { \
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "4", "2") \
			_op _suffix " %"_x"3,%1; " \
			_POST_EFLAGS("0", "4", "2") \
			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
			  "=&r" (_tmp) \
			: _y ((_src).val), "i" (EFLAGS_MASK)); \
	} while (0)


/* Raw emulation: instruction has two explicit operands (16/32/64-bit dst). */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do { \
		unsigned long _tmp; \
		\
		switch ((_dst).bytes) { \
		case 2: \
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
			break; \
		case 4: \
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
			break; \
		case 8: \
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
			break; \
		} \
	} while (0)

/* As above, but also handles a byte-sized destination. */
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do { \
		unsigned long _tmp; \
		switch ((_dst).bytes) { \
		case 1: \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
			break; \
		default: \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags, \
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break; \
		} \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
	__emulate_2op(_op, _src, _dst, _eflags, \
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
	__emulate_2op(_op, _src, _dst, _eflags, \
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
	__emulate_2op_nobyte(_op, _src, _dst, _eflags, \
			     "w", "r", _LO32, "r", "", "r")
266
267
/*
 * Three-operand emulation where the third operand is the shift count in
 * %cl (bound by the "c" constraint).  Asm operands: %0=eflags, %1=dst,
 * %2=tmp, %3=cl, %4=src, %5=EFLAGS_MASK.
 */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
	do { \
		unsigned long _tmp; \
		_type _clv = (_cl).val; \
		_type _srcv = (_src).val; \
		_type _dstv = (_dst).val; \
		\
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "5", "2") \
			_op _suffix " %4,%1 \n" \
			_POST_EFLAGS("0", "5", "2") \
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
			); \
		\
		(_cl).val = (unsigned long) _clv; \
		(_src).val = (unsigned long) _srcv; \
		(_dst).val = (unsigned long) _dstv; \
	} while (0)

/* Size-dispatch wrapper for __emulate_2op_cl (byte dst not supported). */
#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
	do { \
		switch ((_dst).bytes) { \
		case 2: \
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					 "w", unsigned short); \
			break; \
		case 4: \
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					 "l", unsigned int); \
			break; \
		case 8: \
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long)); \
			break; \
		} \
	} while (0)

/*
 * Instruction has only one explicit operand (no source operand), acting
 * on (_dst).val in memory.  Asm operands: %0=eflags, %1=dst, %2=tmp,
 * %3=EFLAGS_MASK.
 */
#define __emulate_1op(_op, _dst, _eflags, _suffix) \
	do { \
		unsigned long _tmp; \
		\
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "3", "2") \
			_op _suffix " %1; " \
			_POST_EFLAGS("0", "3", "2") \
			: "=m" (_eflags), "+m" ((_dst).val), \
			  "=&r" (_tmp) \
			: "i" (EFLAGS_MASK)); \
	} while (0)

/* Size-dispatch wrapper for __emulate_1op. */
#define emulate_1op(_op, _dst, _eflags) \
	do { \
		switch ((_dst).bytes) { \
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break; \
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break; \
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break; \
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		} \
	} while (0)
329
/*
 * One explicit source operand with the implicit RAX:RDX register pair
 * as destination (the "+a"/"+d" constraints) - the mul/imul/div/idiv
 * family.  Asm operands: %0=eflags, %1=tmp, %2=rax, %3=rdx,
 * %4=EFLAGS_MASK, %5=src.
 */
#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
	do { \
		unsigned long _tmp; \
		\
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "4", "1") \
			_op _suffix " %5; " \
			_POST_EFLAGS("0", "4", "1") \
			: "=m" (_eflags), "=&r" (_tmp), \
			  "+a" (_rax), "+d" (_rdx) \
			: "i" (EFLAGS_MASK), "m" ((_src).val), \
			  "a" (_rax), "d" (_rdx)); \
	} while (0)

/*
 * As __emulate_1op_rax_rdx, but with an exception-table fixup: if the
 * instruction faults (e.g. divide error), _ex is set to 1 and execution
 * resumes after the instruction instead of oopsing.
 */
#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
	do { \
		unsigned long _tmp; \
		\
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "5", "1") \
			"1: \n\t" \
			_op _suffix " %6; " \
			"2: \n\t" \
			_POST_EFLAGS("0", "5", "1") \
			".pushsection .fixup,\"ax\" \n\t" \
			"3: movb $1, %4 \n\t" \
			"jmp 2b \n\t" \
			".popsection \n\t" \
			_ASM_EXTABLE(1b, 3b) \
			: "=m" (_eflags), "=&r" (_tmp), \
			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
			: "i" (EFLAGS_MASK), "m" ((_src).val), \
			  "a" (_rax), "d" (_rdx)); \
	} while (0)

/* Size-dispatch wrapper for __emulate_1op_rax_rdx. */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
	do { \
		switch((_src).bytes) { \
		case 1: \
			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
					      _eflags, "b"); \
			break; \
		case 2: \
			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
					      _eflags, "w"); \
			break; \
		case 4: \
			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
					      _eflags, "l"); \
			break; \
		case 8: \
			ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
						   _eflags, "q")); \
			break; \
		} \
	} while (0)

/* Size-dispatch wrapper for the fault-catching variant. */
#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
	do { \
		switch((_src).bytes) { \
		case 1: \
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "b", _ex); \
			break; \
		case 2: \
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "w", _ex); \
			break; \
		case 4: \
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "l", _ex); \
			break; \
		case 8: ON64( \
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "q", _ex)); \
			break; \
		} \
	} while (0)
409
410
/*
 * Fetch and consume the next _size bytes of the instruction stream at
 * _eip, yielding the value as _type.  NOTE: relies on an `rc` variable
 * and a `done:` label being in scope in the calling function - on fetch
 * failure it stores the error in rc and jumps to done.
 */
#define insn_fetch(_type, _size, _eip) \
({	unsigned long _x; \
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	(_eip) += (_size); \
	(_type)_x; \
})

/* As insn_fetch, but copies the bytes into the caller-supplied array. */
#define insn_fetch_arr(_arr, _size, _eip) \
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	(_eip) += (_size); \
})
426
/*
 * Package the currently-decoded instruction state into an
 * x86_instruction_info and ask the backend whether a (nested) guest
 * intercepts this instruction at the given @stage.  Returns whatever
 * the ->intercept() callback returns.
 */
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->decode.rep_prefix,
		.modrm_mod  = ctxt->decode.modrm_mod,
		.modrm_reg  = ctxt->decode.modrm_reg,
		.modrm_rm   = ctxt->decode.modrm_rm,
		.src_val    = ctxt->decode.src.val64,
		.src_bytes  = ctxt->decode.src.bytes,
		.dst_bytes  = ctxt->decode.dst.bytes,
		.ad_bytes   = ctxt->decode.ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
446
447static inline unsigned long ad_mask(struct decode_cache *c)
448{
449 return (1UL << (c->ad_bytes << 3)) - 1;
450}
451
452
453static inline unsigned long
454address_mask(struct decode_cache *c, unsigned long reg)
455{
456 if (c->ad_bytes == sizeof(unsigned long))
457 return reg;
458 else
459 return reg & ad_mask(c);
460}
461
/* Compute the address-size-truncated value of a register used as an address. */
static inline unsigned long
register_address(struct decode_cache *c, unsigned long reg)
{
	return address_mask(c, reg);
}
467
468static inline void
469register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
470{
471 if (c->ad_bytes == sizeof(unsigned long))
472 *reg += inc;
473 else
474 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
475}
476
/* Apply a relative jump displacement to the decoded eip, address-size wrapped. */
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}
481
482static u32 desc_limit_scaled(struct desc_struct *desc)
483{
484 u32 limit = get_desc_limit(desc);
485
486 return desc->g ? (limit << 12) | 0xfff : limit;
487}
488
/* Record a segment-override prefix in the decode cache. */
static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}
494
495static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
496 struct x86_emulate_ops *ops, int seg)
497{
498 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
499 return 0;
500
501 return ops->get_cached_segment_base(ctxt, seg);
502}
503
504static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
505 struct decode_cache *c)
506{
507 if (!c->has_seg_override)
508 return 0;
509
510 return c->seg_override;
511}
512
/*
 * Record a pending exception in the emulation context.
 * @vec:   exception vector
 * @error: error code (meaningful only when @valid is true)
 * Always returns X86EMUL_PROPAGATE_FAULT so callers can "return emulate_*()".
 */
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}
521
/* Convenience wrappers: queue a specific exception and propagate the fault. */

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	/* #DB - debug exception, no error code */
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	/* #GP - general protection fault, with error code */
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	/* #SS - stack fault, with error code */
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	/* #UD - invalid opcode, no error code */
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	/* #TS - invalid TSS, with error code */
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	/* #DE - divide error, no error code */
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	/* #NM - device not available, no error code */
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
556
/* Read just the selector of segment register @seg (descriptor discarded). */
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}
565
/*
 * Replace only the selector of segment register @seg, keeping the
 * currently cached descriptor (and 64-bit base3) unchanged.
 */
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
576
577static int __linearize(struct x86_emulate_ctxt *ctxt,
578 struct segmented_address addr,
579 unsigned size, bool write, bool fetch,
580 ulong *linear)
581{
582 struct decode_cache *c = &ctxt->decode;
583 struct desc_struct desc;
584 bool usable;
585 ulong la;
586 u32 lim;
587 u16 sel;
588 unsigned cpl, rpl;
589
590 la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
591 switch (ctxt->mode) {
592 case X86EMUL_MODE_REAL:
593 break;
594 case X86EMUL_MODE_PROT64:
595 if (((signed long)la << 16) >> 16 != la)
596 return emulate_gp(ctxt, 0);
597 break;
598 default:
599 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
600 addr.seg);
601 if (!usable)
602 goto bad;
603
604 if (((desc.type & 8) || !(desc.type & 2)) && write)
605 goto bad;
606
607 if (!fetch && (desc.type & 8) && !(desc.type & 2))
608 goto bad;
609 lim = desc_limit_scaled(&desc);
610 if ((desc.type & 8) || !(desc.type & 4)) {
611
612 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
613 goto bad;
614 } else {
615
616 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
617 goto bad;
618 lim = desc.d ? 0xffffffff : 0xffff;
619 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
620 goto bad;
621 }
622 cpl = ctxt->ops->cpl(ctxt);
623 rpl = sel & 3;
624 cpl = max(cpl, rpl);
625 if (!(desc.type & 8)) {
626
627 if (cpl > desc.dpl)
628 goto bad;
629 } else if ((desc.type & 8) && !(desc.type & 4)) {
630
631 if (cpl != desc.dpl)
632 goto bad;
633 } else if ((desc.type & 8) && (desc.type & 4)) {
634
635 if (cpl < desc.dpl)
636 goto bad;
637 }
638 break;
639 }
640 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
641 la &= (u32)-1;
642 *linear = la;
643 return X86EMUL_CONTINUE;
644bad:
645 if (addr.seg == VCPU_SREG_SS)
646 return emulate_ss(ctxt, addr.seg);
647 else
648 return emulate_gp(ctxt, addr.seg);
649}
650
/* Linearize a data access (never an instruction fetch). */
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}
658
659
/*
 * Linearize @addr and read @size bytes with the "standard" (uncached,
 * non-MMIO) read callback; faults are recorded in ctxt->exception.
 */
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
673
/*
 * Fetch one instruction byte at @eip into *@dest, refilling the fetch
 * cache when it runs dry.  A refill never exceeds the 15-byte maximum
 * instruction length and never crosses a page boundary.
 */
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		/* Cache miss: fetch the next chunk through CS. */
		unsigned long linear;
		struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		rc = ops->fetch(ctxt, linear, fc->data + cur_size,
				size, &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}
699
700static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
701 struct x86_emulate_ops *ops,
702 unsigned long eip, void *dest, unsigned size)
703{
704 int rc;
705
706
707 if (eip + size - ctxt->eip > 15)
708 return X86EMUL_UNHANDLEABLE;
709 while (size--) {
710 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
711 if (rc != X86EMUL_CONTINUE)
712 return rc;
713 }
714 return X86EMUL_CONTINUE;
715}
716
717
718
719
720
721
722static void *decode_register(u8 modrm_reg, unsigned long *regs,
723 int highbyte_regs)
724{
725 void *p;
726
727 p = ®s[modrm_reg];
728 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
729 p = (unsigned char *)®s[modrm_reg & 3] + 1;
730 return p;
731}
732
/*
 * Read a pseudo-descriptor (16-bit limit followed by a base address)
 * from guest memory, as used by lgdt/lidt-style operands.
 * NOTE(review): op_bytes == 2 is widened to 3 - presumably because the
 * 16-bit form still carries a 24-bit base; confirm against the callers.
 */
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;	/* base follows the 2-byte limit */
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
749
/*
 * test_cc - evaluate a Jcc/SETcc condition code against EFLAGS.
 * @condition: the 4-bit cc field; bit 0 inverts the test selected by
 *             bits 3:1 (conditions come in complementary pairs)
 * @flags:     guest EFLAGS
 * Returns non-zero when the condition holds.
 */
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0:	/* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1:	/* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2:	/* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3:	/* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4:	/* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5:	/* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7:	/* le/ng: ZF or (SF != OF) */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6:	/* l/nge: SF != OF */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
784
/* Load op->val from the register the operand points at, width-exact. */
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
802
/*
 * Copy host XMM register @reg into *@data.  The guest's FPU state is
 * loaded on the CPU between get_fpu/put_fpu, so the host registers hold
 * the guest values.  The switch is needed because the register number
 * must be an immediate in the movdqu encoding.
 */
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
829
/* Counterpart of read_sse_reg(): store *@data into host XMM register @reg. */
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
857
/*
 * Decode a register operand: from the ModRM reg field when the opcode
 * has a ModRM byte, otherwise from the low 3 opcode bits extended by
 * REX.B.  Sse-flagged opcodes yield an XMM operand instead of a GPR.
 * @inhibit_bytereg: force the full operand size even if ByteOp is set.
 */
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	/* High-byte registers (AH..BH) are only addressable without REX. */
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);

	if (c->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}
888
/*
 * Decode the ModRM byte (and any SIB byte / displacement that follows)
 * into @op: either a register operand (mod == 3) or a memory operand
 * whose effective address is left in op->addr.mem.ea.  Consumes bytes
 * from the instruction stream via insn_fetch (which jumps to done: on
 * fetch failure).
 */
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;

	if (c->modrm_mod == 3) {
		/* Register operand. */
		op->type = OP_REG;
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		if (c->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = c->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		/* BP-based addressing defaults to the stack segment. */
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			/* SIB byte follows. */
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				modrm_ea += c->regs[base_reg];
			if (index_reg != 4)	/* index 4 means "no index" */
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			/* disp32, RIP-relative in 64-bit mode. */
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
1015
/*
 * Decode a memory operand given as an absolute displacement (MemAbs),
 * sized by the instruction's address size.  Uses insn_fetch, which
 * jumps to done: on fetch failure.
 */
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
		      struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (c->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
1038
/*
 * Adjust a BitOp operand pair: when the bit index comes from a register
 * and the destination is memory, the index may address any bit, so
 * displace the effective address by the (sign-extended, operand-aligned)
 * byte offset, then reduce the bit index to an offset within the operand.
 */
static void fetch_bit_operand(struct decode_cache *c)
{
	long sv = 0, mask;

	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);	/* clear sub-operand bits */

		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;

		c->dst.addr.mem.ea += (sv >> 3);	/* bits -> bytes */
	}

	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
}
1057
/*
 * Read guest memory in chunks of up to 8 bytes through the per-
 * instruction mem_read cache: bytes already cached (mc->pos < mc->end)
 * are replayed rather than re-read, so re-executing the same decoded
 * instruction sees consistent data.
 */
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
					&ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}
1085
/* Linearize @addr and read @size bytes through the cached emulated-read path. */
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, ctxt->ops, linear, data, size);
}
1099
/* Linearize @addr (with write permission checks) and write @size bytes. */
static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}
1114
/*
 * Linearize @addr and perform an emulated compare-and-exchange:
 * @orig_data is the expected old value, @data the replacement.
 */
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
1129
/*
 * Emulated IN: hand back one @size-byte element per call from the
 * io_read cache; when the cache is empty, issue one batched port read
 * sized by the rep count and the space left before the destination page
 * boundary.  Returns 1 on success, 0 if the backend refused the read.
 */
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) {
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		/* Bytes until RDI hits a page boundary, direction-dependent. */
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
1159
/*
 * Return the base/limit of the descriptor table a selector refers to:
 * the LDT (read from the cached LDTR descriptor) when the selector's TI
 * bit (bit 2) is set, the GDT otherwise.  An unusable LDTR yields a
 * zeroed table pointer.
 */
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc);
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(ctxt, dt);
}
1177
1178
/*
 * Read the 8-byte descriptor that @selector indexes in the GDT/LDT.
 * Returns #GP(selector) when the index lies outside the table limit.
 */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;	/* strip TI and RPL bits */
	int ret;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
	addr = dt.address + index * 8;
	ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);

	return ret;
}
1197
1198
/*
 * Write back the 8-byte descriptor that @selector indexes in the
 * GDT/LDT (e.g. to set the accessed bit).  Returns #GP(selector) when
 * the index lies outside the table limit.
 */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;	/* strip TI and RPL bits */
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);

	return ret;
}
1218
1219
/*
 * Load @selector into segment register @seg, performing the
 * architectural descriptor and privilege checks for protected mode.
 * Real mode and vm86 mode skip the checks and build a flat 64K segment.
 * Returns X86EMUL_CONTINUE on success or propagates #GP/#SS/#NP/#TS.
 */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* Set a real-mode style segment: base = selector << 4. */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		/* Segment not present: #SS for stack loads, #NP otherwise. */
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * SS must be a writable data segment (type bits: not code,
		 * writable) with RPL == CPL and DPL == CPL.
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default:
		/*
		 * Data segments (DS/ES/FS/GS): reject execute-only code
		 * segments, and reject non-conforming segments when both
		 * RPL and CPL exceed DPL.
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
1336
1337static void write_register_operand(struct operand *op)
1338{
1339
1340 switch (op->bytes) {
1341 case 1:
1342 *(u8 *)op->addr.reg = (u8)op->val;
1343 break;
1344 case 2:
1345 *(u16 *)op->addr.reg = (u16)op->val;
1346 break;
1347 case 4:
1348 *op->addr.reg = (u32)op->val;
1349 break;
1350 case 8:
1351 *op->addr.reg = op->val;
1352 break;
1353 }
1354}
1355
1356static int writeback(struct x86_emulate_ctxt *ctxt)
1357{
1358 int rc;
1359 struct decode_cache *c = &ctxt->decode;
1360
1361 switch (c->dst.type) {
1362 case OP_REG:
1363 write_register_operand(&c->dst);
1364 break;
1365 case OP_MEM:
1366 if (c->lock_prefix)
1367 rc = segmented_cmpxchg(ctxt,
1368 c->dst.addr.mem,
1369 &c->dst.orig_val,
1370 &c->dst.val,
1371 c->dst.bytes);
1372 else
1373 rc = segmented_write(ctxt,
1374 c->dst.addr.mem,
1375 &c->dst.val,
1376 c->dst.bytes);
1377 if (rc != X86EMUL_CONTINUE)
1378 return rc;
1379 break;
1380 case OP_XMM:
1381 write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
1382 break;
1383 case OP_NONE:
1384
1385 break;
1386 default:
1387 break;
1388 }
1389 return X86EMUL_CONTINUE;
1390}
1391
1392static int em_push(struct x86_emulate_ctxt *ctxt)
1393{
1394 struct decode_cache *c = &ctxt->decode;
1395 struct segmented_address addr;
1396
1397 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1398 addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1399 addr.seg = VCPU_SREG_SS;
1400
1401
1402 c->dst.type = OP_NONE;
1403 return segmented_write(ctxt, addr, &c->src.val, c->op_bytes);
1404}
1405
1406static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1407 void *dest, int len)
1408{
1409 struct decode_cache *c = &ctxt->decode;
1410 int rc;
1411 struct segmented_address addr;
1412
1413 addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1414 addr.seg = VCPU_SREG_SS;
1415 rc = segmented_read(ctxt, addr, dest, len);
1416 if (rc != X86EMUL_CONTINUE)
1417 return rc;
1418
1419 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1420 return rc;
1421}
1422
1423static int em_pop(struct x86_emulate_ctxt *ctxt)
1424{
1425 struct decode_cache *c = &ctxt->decode;
1426
1427 return emulate_pop(ctxt, &c->dst.val, c->op_bytes);
1428}
1429
/*
 * POPF: pop @len bytes from the stack and merge them into *dest, but only
 * the EFLAGS bits the current privilege level may change.  IOPL is
 * writable only at CPL 0; IF only when CPL <= IOPL.  In VM86 with
 * IOPL < 3 the instruction faults with #GP(0).
 */
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Arithmetic/status bits are always writable. */
	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		/* POPF in VM86 requires IOPL == 3. */
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode: everything is writable */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	/* Keep protected bits from the old flags, take the rest from val. */
	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
1470
1471static int em_popf(struct x86_emulate_ctxt *ctxt)
1472{
1473 struct decode_cache *c = &ctxt->decode;
1474
1475 c->dst.type = OP_REG;
1476 c->dst.addr.reg = &ctxt->eflags;
1477 c->dst.bytes = c->op_bytes;
1478 return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
1479}
1480
1481static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
1482 struct x86_emulate_ops *ops, int seg)
1483{
1484 struct decode_cache *c = &ctxt->decode;
1485
1486 c->src.val = get_segment_selector(ctxt, seg);
1487
1488 return em_push(ctxt);
1489}
1490
1491static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1492 struct x86_emulate_ops *ops, int seg)
1493{
1494 struct decode_cache *c = &ctxt->decode;
1495 unsigned long selector;
1496 int rc;
1497
1498 rc = emulate_pop(ctxt, &selector, c->op_bytes);
1499 if (rc != X86EMUL_CONTINUE)
1500 return rc;
1501
1502 rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
1503 return rc;
1504}
1505
1506static int em_pusha(struct x86_emulate_ctxt *ctxt)
1507{
1508 struct decode_cache *c = &ctxt->decode;
1509 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1510 int rc = X86EMUL_CONTINUE;
1511 int reg = VCPU_REGS_RAX;
1512
1513 while (reg <= VCPU_REGS_RDI) {
1514 (reg == VCPU_REGS_RSP) ?
1515 (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
1516
1517 rc = em_push(ctxt);
1518 if (rc != X86EMUL_CONTINUE)
1519 return rc;
1520
1521 ++reg;
1522 }
1523
1524 return rc;
1525}
1526
1527static int em_pushf(struct x86_emulate_ctxt *ctxt)
1528{
1529 struct decode_cache *c = &ctxt->decode;
1530
1531 c->src.val = (unsigned long)ctxt->eflags;
1532 return em_push(ctxt);
1533}
1534
1535static int em_popa(struct x86_emulate_ctxt *ctxt)
1536{
1537 struct decode_cache *c = &ctxt->decode;
1538 int rc = X86EMUL_CONTINUE;
1539 int reg = VCPU_REGS_RDI;
1540
1541 while (reg >= VCPU_REGS_RAX) {
1542 if (reg == VCPU_REGS_RSP) {
1543 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1544 c->op_bytes);
1545 --reg;
1546 }
1547
1548 rc = emulate_pop(ctxt, &c->regs[reg], c->op_bytes);
1549 if (rc != X86EMUL_CONTINUE)
1550 break;
1551 --reg;
1552 }
1553 return rc;
1554}
1555
/*
 * Deliver software interrupt @irq in real mode: push FLAGS, CS and IP,
 * clear IF/TF/AC, then load CS:IP from the 4-byte real-mode IVT entry
 * (offset at vector*4, segment at vector*4 + 2).
 */
int emulate_int_real(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops, int irq)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* Push current FLAGS. */
	c->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	/* Push return CS. */
	c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Push return IP. */
	c->src.val = c->eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* In real mode the IDT register points at the IVT. */
	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = eip;

	return rc;
}
1605
1606static int emulate_int(struct x86_emulate_ctxt *ctxt,
1607 struct x86_emulate_ops *ops, int irq)
1608{
1609 switch(ctxt->mode) {
1610 case X86EMUL_MODE_REAL:
1611 return emulate_int_real(ctxt, ops, irq);
1612 case X86EMUL_MODE_VM86:
1613 case X86EMUL_MODE_PROT16:
1614 case X86EMUL_MODE_PROT32:
1615 case X86EMUL_MODE_PROT64:
1616 default:
1617
1618 return X86EMUL_UNHANDLEABLE;
1619 }
1620}
1621
/*
 * Real-mode IRET: pop IP, CS and FLAGS (in that order) and resume at
 * CS:IP.  With a 32-bit operand size the popped EFLAGS replaces all
 * writable bits; with 16 bits only the low word is replaced.  The
 * VM/VIF/VIP bits are always preserved from the current flags.
 */
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	/* Bits of the popped value that may be applied (incl. fixed bit 1). */
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1);
	/* Bits always kept from the current flags. */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* Pop the return IP first. */
	rc = emulate_pop(ctxt, &temp_eip, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Real-mode IP must fit in 16 bits. */
	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		/* 16-bit IRET replaces only the low flags word. */
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	/* Normalize the architecturally fixed flag bits. */
	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
1675
1676static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
1677 struct x86_emulate_ops* ops)
1678{
1679 switch(ctxt->mode) {
1680 case X86EMUL_MODE_REAL:
1681 return emulate_iret_real(ctxt, ops);
1682 case X86EMUL_MODE_VM86:
1683 case X86EMUL_MODE_PROT16:
1684 case X86EMUL_MODE_PROT32:
1685 case X86EMUL_MODE_PROT64:
1686 default:
1687
1688 return X86EMUL_UNHANDLEABLE;
1689 }
1690}
1691
1692static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1693{
1694 struct decode_cache *c = &ctxt->decode;
1695 int rc;
1696 unsigned short sel;
1697
1698 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1699
1700 rc = load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS);
1701 if (rc != X86EMUL_CONTINUE)
1702 return rc;
1703
1704 c->eip = 0;
1705 memcpy(&c->eip, c->src.valptr, c->op_bytes);
1706 return X86EMUL_CONTINUE;
1707}
1708
1709static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1710{
1711 struct decode_cache *c = &ctxt->decode;
1712
1713 return emulate_pop(ctxt, &c->dst.val, c->dst.bytes);
1714}
1715
/*
 * Opcode group 2: rotate/shift dst by the count in src.  The ModRM reg
 * field selects the operation; the asm helper performs the arithmetic
 * and updates the emulated flags.
 */
static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* undocumented alias of sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
	return X86EMUL_CONTINUE;
}
1745
/*
 * Opcode group 3: TEST/NOT/NEG/MUL/IMUL/DIV/IDIV on r/m, selected by the
 * ModRM reg field.  The divide helpers set @de on a divide fault, which
 * is turned into an injected #DE.
 */
static int em_grp3(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
	u8 de = 0;	/* set by the div/idiv helpers on divide error */

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	case 4:	/* mul: rdx:rax = rax * src */
		emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 5:	/* imul */
		emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 6:	/* div: rax = rdx:rax / src, rdx = remainder */
		emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	case 7:	/* idiv */
		emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	default:
		return X86EMUL_UNHANDLEABLE;
	}
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
1784
/*
 * Opcode groups 4/5: INC/DEC/CALL/JMP/PUSH on r/m, selected by the ModRM
 * reg field.  NOTE(review): reg values 3 (CALL far mem) and 7 fall
 * through with rc == X86EMUL_CONTINUE — presumably rejected earlier
 * during decode; verify against the opcode tables.
 */
static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: { /* call near: push return address, jump to target */
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4:	/* jmp near */
		c->eip = c->src.val;
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}
1817
1818static int em_grp9(struct x86_emulate_ctxt *ctxt)
1819{
1820 struct decode_cache *c = &ctxt->decode;
1821 u64 old = c->dst.orig_val64;
1822
1823 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1824 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1825 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1826 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1827 ctxt->eflags &= ~EFLG_ZF;
1828 } else {
1829 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1830 (u32) c->regs[VCPU_REGS_RBX];
1831
1832 ctxt->eflags |= EFLG_ZF;
1833 }
1834 return X86EMUL_CONTINUE;
1835}
1836
1837static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1838 struct x86_emulate_ops *ops)
1839{
1840 struct decode_cache *c = &ctxt->decode;
1841 int rc;
1842 unsigned long cs;
1843
1844 rc = emulate_pop(ctxt, &c->eip, c->op_bytes);
1845 if (rc != X86EMUL_CONTINUE)
1846 return rc;
1847 if (c->op_bytes == 4)
1848 c->eip = (u32)c->eip;
1849 rc = emulate_pop(ctxt, &cs, c->op_bytes);
1850 if (rc != X86EMUL_CONTINUE)
1851 return rc;
1852 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1853 return rc;
1854}
1855
1856static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
1857 struct x86_emulate_ops *ops, int seg)
1858{
1859 struct decode_cache *c = &ctxt->decode;
1860 unsigned short sel;
1861 int rc;
1862
1863 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1864
1865 rc = load_segment_descriptor(ctxt, ops, sel, seg);
1866 if (rc != X86EMUL_CONTINUE)
1867 return rc;
1868
1869 c->dst.val = c->src.val;
1870 return rc;
1871}
1872
/*
 * Initialize *cs and *ss with the flat 4G, DPL-0 code and data
 * descriptors installed by the fast system-call instructions.  The
 * current CS is read first only to seed *cs before its fields are
 * overwritten below.
 */
static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	u16 selector;

	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));

	/* Flat 32-bit code segment: base 0, 4G limit, type 0x0b (exec/read). */
	cs->l = 0;		/* will be set for long mode by callers */
	set_desc_base(cs, 0);
	cs->g = 1;
	set_desc_limit(cs, 0xfffff);
	cs->type = 0x0b;
	cs->s = 1;
	cs->dpl = 0;
	cs->p = 1;
	cs->d = 1;

	/* Flat data segment: base 0, 4G limit, type 0x03 (read/write). */
	set_desc_base(ss, 0);
	set_desc_limit(ss, 0xfffff);
	ss->g = 1;
	ss->s = 1;
	ss->type = 0x03;
	ss->d = 1;
	ss->dpl = 0;
	ss->p = 1;
}
1903
/*
 * Emulate SYSCALL.  #UD in real and VM86 mode.  The new CS selector
 * comes from MSR_STAR bits 47:32; SS is that value + 8.  In long mode
 * (EFER.LMA) the target RIP comes from LSTAR/CSTAR and the flags are
 * masked by MSR_SYSCALL_MASK; in legacy mode the low 32 bits of
 * MSR_STAR give the target EIP and VM/IF/RF are cleared.
 */
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* SYSCALL is not valid in real mode or VM86. */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	/* STAR[47:32] holds the SYSCALL CS; SS is implicitly CS + 8. */
	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		/* Long mode: 64-bit code segment. */
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	/* Return address goes in RCX. */
	c->regs[VCPU_REGS_RCX] = c->eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		/* Saved flags (minus RF) go in R11. */
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		/* LSTAR for 64-bit callers, CSTAR for compat mode. */
		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* Legacy mode: target EIP is STAR[31:0]. */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
1956
/*
 * Emulate SYSENTER.  #GP in real mode, #UD in 64-bit mode.  The kernel
 * CS comes from MSR_IA32_SYSENTER_CS (must be non-null), SS is CS + 8,
 * and EIP/ESP are taken from the SYSENTER_EIP/ESP MSRs.
 */
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	/* SYSENTER faults with #GP in real mode. */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/* Not emulated from 64-bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	/* A null SYSENTER_CS is a #GP. */
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;	/* force RPL 0 */
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		/* Long-mode kernel entry uses a 64-bit code segment. */
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
2012
/*
 * Emulate SYSEXIT: return to user mode at RDX (IP) with stack RCX.
 * #GP in real and VM86 mode.  The user CS/SS selectors are derived
 * from MSR_IA32_SYSENTER_CS at fixed offsets (+16/+24 for 32-bit,
 * +32/+40 for 64-bit returns); RPL is forced to 3.
 */
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* SYSEXIT is invalid outside protected mode. */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	/* REX.W (bit 3 of the REX prefix) selects a 64-bit return. */
	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)	/* null SYSENTER_CS: #GP */
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	/* Returning to CPL 3: force RPL 3 on both selectors. */
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
2064
2065static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
2066 struct x86_emulate_ops *ops)
2067{
2068 int iopl;
2069 if (ctxt->mode == X86EMUL_MODE_REAL)
2070 return false;
2071 if (ctxt->mode == X86EMUL_MODE_VM86)
2072 return true;
2073 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2074 return ops->cpl(ctxt) > iopl;
2075}
2076
/*
 * Consult the TSS I/O permission bitmap for @port/@len.  The 16-bit
 * bitmap base lives at TSS offset 102, which is why the TR limit must
 * be at least 103; a clear bit for each byte of the access permits it.
 * Any failure to read the bitmap denies access.
 */
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;	/* one bit per byte of the access */
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	/* 64-bit TSS descriptors carry base bits 63:32 separately. */
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	/* The byte holding this port's bits must be inside the limit. */
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	/* Any set bit in the range denies the access. */
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
2109
2110static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2111 struct x86_emulate_ops *ops,
2112 u16 port, u16 len)
2113{
2114 if (ctxt->perm_ok)
2115 return true;
2116
2117 if (emulator_bad_iopl(ctxt, ops))
2118 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
2119 return false;
2120
2121 ctxt->perm_ok = true;
2122
2123 return true;
2124}
2125
/* Snapshot the current CPU state into a 16-bit TSS image. */
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	/* Segment selectors; a 16-bit TSS has no FS/GS slots. */
	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
2149
/*
 * Restore CPU state from a 16-bit TSS image: general registers and
 * flags first, then segment selectors, then the full descriptor loads
 * with privilege checks.
 */
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;	/* bit 1 of EFLAGS is always set */
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * Install the raw selectors first (LDTR before the others, since
	 * the descriptor fetches below may go through the LDT).
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	/*
	 * Now load the descriptors and perform the usual checks; any
	 * failure propagates the injected fault.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
2200
/*
 * Perform a task switch using 16-bit TSS segments: save the outgoing
 * state into the old TSS, read the new TSS, optionally record the old
 * TSS selector as the back link (for nested tasks), and load the new
 * state.  old_tss_sel == 0xffff means "no back link".
 */
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		/* Record the back link for a nested task switch. */
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
2244
/* Snapshot the current CPU state (including CR3) into a 32-bit TSS image. */
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(ctxt, 3);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
2271
/*
 * Restore CPU state from a 32-bit TSS image: CR3 first (a rejected CR3
 * is #GP(0)), then registers and flags, then segment selectors, then
 * the full descriptor loads with privilege checks.
 */
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;	/* bit 1 of EFLAGS is always set */
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * Install the raw selectors first (LDTR before the others, since
	 * the descriptor fetches below may go through the LDT).
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * Now load the descriptors and perform the usual checks; any
	 * failure propagates the injected fault.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
2332
/*
 * Perform a task switch using 32-bit TSS segments: save the outgoing
 * state into the old TSS, read the new TSS, optionally record the old
 * TSS selector as the back link (for nested tasks), and load the new
 * state.  old_tss_sel == 0xffff means "no back link".
 */
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		/* Record the back link for a nested task switch. */
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
2376
/*
 * Core of a hardware task switch triggered for @reason (IRET, JMP,
 * CALL, interrupt/exception gate) to the TSS named by @tss_selector.
 * Performs the privilege and TSS-limit checks, manages the busy bit and
 * NT flag, dispatches to the 16- or 32-bit variant, and finally pushes
 * @error_code onto the new task's stack when requested.
 */
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;

	/* Fetch descriptors for both the target and the current TSS. */
	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/*
	 * Privilege check: except for IRET, both the selector's RPL and
	 * the current CPL must not exceed the target TSS DPL.
	 */
	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt) > next_tss_desc.dpl)
			return emulate_gp(ctxt, 0);
	}

	/* The target TSS must be present and large enough (0x67/0x2b). */
	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		/* Clear the busy bit of the outgoing TSS descriptor. */
		curr_tss_desc.type &= ~(1 << 1);
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Only CALL and gate-initiated switches record a back link; pass
	 * 0xffff otherwise to suppress it.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	/* Descriptor type bit 3 distinguishes 32-bit from 16-bit TSS. */
	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		/* Mark the incoming TSS busy. */
		next_tss_desc.type |= (1 << 1);
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		/* Push the error code with the new task's operand size. */
		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->lock_prefix = 0;
		c->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}
2460
2461int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2462 u16 tss_selector, int reason,
2463 bool has_error_code, u32 error_code)
2464{
2465 struct x86_emulate_ops *ops = ctxt->ops;
2466 struct decode_cache *c = &ctxt->decode;
2467 int rc;
2468
2469 c->eip = ctxt->eip;
2470 c->dst.type = OP_NONE;
2471
2472 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2473 has_error_code, error_code);
2474
2475 if (rc == X86EMUL_CONTINUE)
2476 ctxt->eip = c->eip;
2477
2478 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2479}
2480
/*
 * Advance a string-instruction index register (e.g. RSI/RDI) by one
 * element in the direction selected by EFLAGS.DF, then refresh the
 * operand's effective address from the updated register value.
 */
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;	/* DF set: count down */

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(c, c->regs[reg]);
	op->addr.mem.seg = seg;
}
2491
/*
 * DAS - decimal adjust AL after subtraction.  Two-step BCD fixup:
 * subtract 6 when the low nibble is out of range (or AF was set),
 * then subtract 0x60 when the high digit overflowed (or CF was set).
 * ZF/SF/PF are recomputed via a dummy OR and AF/CF patched by hand.
 */
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = c->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		/* borrow out of the low-nibble correction (al wrapped) */
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	c->dst.val = al;

	/* Recompute ZF/SF/PF with a no-op OR against zero. */
	c->src.type = OP_IMM;
	c->src.val = 0;
	c->src.bytes = 1;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
2530
/*
 * CALL FAR ptr16:16/32 - load the new CS, then push the old CS and
 * return offset before transferring to the target.
 *
 * NOTE(review): a failing load_segment_descriptor() is reported as
 * X86EMUL_CONTINUE, which looks like a swallowed fault rather than a
 * propagated one - confirm this matches the callers' expectations.
 */
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = c->eip;

	/* selector lives after the op_bytes-sized offset in the operand */
	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	c->eip = 0;
	memcpy(&c->eip, c->src.valptr, c->op_bytes);

	c->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = old_eip;
	return em_push(ctxt);
}
2556
/*
 * RET imm16 - pop the return address into EIP, then release an extra
 * c->src.val bytes of stack (callee-cleaned arguments).
 */
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->dst.type = OP_REG;
	c->dst.addr.reg = &c->eip;
	c->dst.bytes = c->op_bytes;
	rc = emulate_pop(ctxt, &c->dst.val, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
	return X86EMUL_CONTINUE;
}
2571
/* ADD - arithmetic and flag updates done by the emulate_2op_SrcV helper. */
static int em_add(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
2579
/* OR - arithmetic and flag updates done by the emulate_2op_SrcV helper. */
static int em_or(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
2587
/* ADC - add with carry; flags handled by the emulate_2op_SrcV helper. */
static int em_adc(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
2595
/* SBB - subtract with borrow; flags handled by the emulate_2op_SrcV helper. */
static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
2603
/* AND - logic and flag updates done by the emulate_2op_SrcV helper. */
static int em_and(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
2611
/* SUB - arithmetic and flag updates done by the emulate_2op_SrcV helper. */
static int em_sub(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
2619
/* XOR - logic and flag updates done by the emulate_2op_SrcV helper. */
static int em_xor(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
2627
/* CMP - set flags via the helper but discard the computed result. */
static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
	/* CMP writes no destination; only EFLAGS change. */
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
2637
/* IMUL (two-operand form) - no byte variant; helper updates flags. */
static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
2645
/*
 * IMUL r, r/m, imm - seed the destination with the immediate (src2)
 * and reuse the two-operand IMUL path (dst = src2 * src).
 */
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.val = c->src2.val;
	return em_imul(ctxt);
}
2653
/*
 * CWD/CDQ/CQO - replicate the accumulator's sign bit through rDX.
 */
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type = OP_REG;
	c->dst.bytes = c->src.bytes;
	c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
	/* all-ones if the source's top bit is set, else all-zeroes */
	c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}
2665
/* RDTSC - read the TSC MSR and split it across EDX:EAX. */
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	c->regs[VCPU_REGS_RAX] = (u32)tsc;
	c->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}
2676
/* Generic MOV: copy the decoded source value into the destination. */
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	c->dst.val = c->src.val;
	return X86EMUL_CONTINUE;
}
2683
/* MOVDQU - copy op_bytes (16 for Sse ops) between vector operands. */
static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
	return X86EMUL_CONTINUE;
}
2690
/*
 * INVLPG - invalidate the TLB entry for the linear address of the
 * source operand.  If linearization faults, nothing is invalidated
 * but the instruction still completes.
 */
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	ulong linear;

	rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* no memory write-back */
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
2704
2705static int em_clts(struct x86_emulate_ctxt *ctxt)
2706{
2707 ulong cr0;
2708
2709 cr0 = ctxt->ops->get_cr(ctxt, 0);
2710 cr0 &= ~X86_CR0_TS;
2711 ctxt->ops->set_cr(ctxt, 0, cr0);
2712 return X86EMUL_CONTINUE;
2713}
2714
/*
 * VMCALL (0F 01 C1) - require the exact register encoding, let the
 * host patch the hypercall instruction, then re-execute it by leaving
 * EIP at the instruction start.
 */
static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	if (c->modrm_mod != 3 || c->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the code we just patched be executed. */
	c->eip = ctxt->eip;
	/* Disable writeback. */
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
2733
/* LGDT - read the limit:base pseudo-descriptor and load GDTR. */
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, c->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
2750
/* VMMCALL - let the host patch the hypercall instruction in place. */
static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	c->dst.type = OP_NONE;
	return rc;
}
2762
/* LIDT - read the limit:base pseudo-descriptor and load IDTR. */
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, c->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
2779
/* SMSW - store the machine status word (low 16 bits of CR0). */
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.bytes = 2;
	c->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}
2788
/*
 * LMSW - load the low four CR0 bits from the source; the old PE bit
 * is preserved by the ~0x0e mask, so PE can be set but never cleared.
 */
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (c->src.val & 0x0f));
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
2797
/*
 * Control registers addressable by MOV to/from CRn: CR0, CR2-CR4 and
 * CR8.  Any other register number raises #UD.
 */
static bool valid_cr(int nr)
{
	return nr == 0 || (nr >= 2 && nr <= 4) || nr == 8;
}
2809
/* Pre-execute check for MOV from CRn: #UD on invalid register numbers. */
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	if (!valid_cr(c->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
2819
/*
 * Pre-execute check for MOV to CRn.  Rejects invalid register numbers
 * (#UD), writes to reserved bits, and the architecturally illegal
 * CR0/CR3/CR4 combinations (#GP(0)).
 */
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 new_val = c->src.val64;
	int cr = c->modrm_reg;
	u64 efer = 0;

	/* Reserved-bit masks, indexed by CR number. */
	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0,
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		/* PG requires PE; NW requires CD. */
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		/* Enabling paging with EFER.LME set requires CR4.PAE. */
		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
	}
	case 3: {
		u64 rsvd = 0;

		/* CR3 reserved bits depend on the active paging mode. */
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
	}
	case 4: {
		u64 cr4;

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		/* CR4.PAE may not be cleared while in long mode. */
		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
	}
	}

	return X86EMUL_CONTINUE;
}
2888
/* Return non-zero if DR7.GD (general detect, bit 13) is set. */
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}
2898
/*
 * Pre-execute check for MOV from DRn: #UD on out-of-range register,
 * #UD on DR4/DR5 when CR4.DE is set, #DB when DR7.GD is armed.
 */
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int dr = c->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	/* with CR4.DE, DR4/DR5 no longer alias DR6/DR7 */
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}
2917
/*
 * Pre-execute check for MOV to DRn: #GP(0) when writing set upper-32
 * bits to DR6/DR7, then the same checks as a debug-register read.
 */
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 new_val = c->src.val64;
	int dr = c->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}
2929
/* SVM instructions require EFER.SVME; otherwise raise #UD. */
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
2941
/*
 * SVM instructions that take a physical address in RAX: reject a
 * non-canonical/unsupported address before the generic SVME check.
 */
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = ctxt->decode.regs[VCPU_REGS_RAX];

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}
2952
/*
 * RDTSC is privileged when CR4.TSD is set and CPL > 0.
 * NOTE(review): the SDM specifies #GP(0) for this case; this code
 * raises #UD - confirm whether that is intentional.
 */
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
2962
/*
 * RDPMC: #GP(0) when CPL > 0 without CR4.PCE, or when the counter
 * index in RCX exceeds the supported range (fixed at 4 counters here).
 */
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX];

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
2974
/*
 * IN/INS: cap the access width at 4 bytes and consult the TSS I/O
 * permission bitmap; #GP(0) when the port range is not permitted.
 */
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.bytes = min(c->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
2985
/*
 * OUT/OUTS: cap the access width at 4 bytes and consult the TSS I/O
 * permission bitmap; #GP(0) when the port range is not permitted.
 */
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.bytes = min(c->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
2996
2997#define D(_y) { .flags = (_y) }
2998#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
2999#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3000 .check_perm = (_p) }
3001#define N D(0)
3002#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3003#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
3004#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
3005#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3006#define II(_f, _e, _i) \
3007 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3008#define IIP(_f, _e, _i, _p) \
3009 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3010 .check_perm = (_p) }
3011#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3012
3013#define D2bv(_f) D((_f) | ByteOp), D(_f)
3014#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3015#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3016
3017#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3018 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3019 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3020
/* 0F 01, mod=3, /1: MONITOR (rm=0) and MWAIT (rm=1); others undefined. */
static struct opcode group7_rm1[] = {
	DI(SrcNone | ModRM | Priv, monitor),
	DI(SrcNone | ModRM | Priv, mwait),
	N, N, N, N, N, N,
};
3026
/* 0F 01, mod=3, /3: the AMD SVM instruction set, indexed by rm. */
static struct opcode group7_rm3[] = {
	DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
	DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
};
3037
/* 0F 01, mod=3, /7: RDTSCP at rm=1; everything else undefined. */
static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};
3043
/*
 * Group 1 (opcodes 80-83): ALU operation selected by the ModRM reg
 * field; all but CMP honour a LOCK prefix.
 */
static struct opcode group1[] = {
	I(Lock, em_add),
	I(Lock, em_or),
	I(Lock, em_adc),
	I(Lock, em_sbb),
	I(Lock, em_and),
	I(Lock, em_sub),
	I(Lock, em_xor),
	I(0, em_cmp),
};
3054
/* Group 1A (opcode 8F): only reg=0 (pop r/m) is defined. */
static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};
3058
/* Group 3 (opcodes F6/F7): operation selected by the ModRM reg field. */
static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};
3064
/* Group 4 (opcode FE): only reg=0/1 (byte-sized locked RMW) defined. */
static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};
3069
/*
 * Group 5 (opcode FF), by ModRM reg field; reg=3 (far call through
 * memory) is routed to em_call_far, reg=7 is undefined.
 */
static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};
3077
/* Group 6 (0F 00): sldt/str/lldt/ltr; reg=4..7 undefined. */
static struct opcode group6[] = {
	DI(ModRM | Prot, sldt),
	DI(ModRM | Prot, str),
	DI(ModRM | Prot | Priv, lldt),
	DI(ModRM | Prot | Priv, ltr),
	N, N, N, N,
};
3085
/*
 * Group 7 (0F 01).  First array: memory (mod != 3) forms - sgdt/sidt/
 * lgdt/lidt/smsw/lmsw/invlpg.  Second array: register (mod == 3)
 * forms, several of which fan out to the rm-extension tables above.
 */
static struct group_dual group7 = { {
	DI(ModRM | Mov | DstMem | Priv, sgdt),
	DI(ModRM | Mov | DstMem | Priv, sidt),
	II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
	II(ModRM | SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
} };
3101
/* Group 8 (0F BA): bit tests with immediate operand; reg=0..3 undefined. */
static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};
3107
/* Group 9 (0F C7): only the memory form of reg=1 (64-bit locked RMW). */
static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };
3113
/* Group 11 (C6/C7): reg=0 is mov r/m, imm; other encodings undefined. */
static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
};
3117
/* 0F 6F / 0F 7F: only the F3-prefixed form (movdqu) is implemented. */
static struct gprefix pfx_0f_6f_0f_7f = {
	N, N, N, I(Sse, em_movdqu),
};
3121
/* One-byte opcode dispatch table, indexed by opcode byte. */
static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	I6ALU(Lock, em_add),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	I6ALU(Lock, em_or),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	I6ALU(Lock, em_adc),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	I6ALU(Lock, em_sbb),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	I6ALU(Lock, em_and), N, N,
	/* 0x28 - 0x2F */
	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	I6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	I6ALU(0, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstDI | String, em_cmp),
	/* 0xA8 - 0xAF */
	D2bv(DstAcc | SrcImm),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	I2bv(SrcAcc | DstDI | String, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	D(ImplicitOps | Stack),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), DI(ImplicitOps, iret),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X4(D(SrcImmByte)),
	D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D2bvIP(SrcDX | DstAcc, in, check_perm_in),
	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
3227
/* Two-byte (0F-prefixed) opcode dispatch table, indexed by second byte. */
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
	DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	DI(ImplicitOps | Priv, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	DI(ImplicitOps | Priv, rdmsr),
	DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
	D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
3302
3303#undef D
3304#undef N
3305#undef G
3306#undef GD
3307#undef I
3308#undef GP
3309#undef EXT
3310
3311#undef D2bv
3312#undef D2bvIP
3313#undef I2bv
3314#undef I6ALU
3315
3316static unsigned imm_size(struct decode_cache *c)
3317{
3318 unsigned size;
3319
3320 size = (c->d & ByteOp) ? 1 : c->op_bytes;
3321 if (size == 8)
3322 size = 4;
3323 return size;
3324}
3325
/*
 * Fetch an immediate operand of @size bytes from the instruction
 * stream at c->eip into @op.  The insn_fetch() macro jumps to the
 * "done" label on a fetch fault.  Values are fetched sign-extended;
 * when @sign_extension is false the excess high bits are masked off.
 */
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	struct decode_cache *c = &ctxt->decode;
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = c->eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, 1, c->eip);
		break;
	case 2:
		op->val = insn_fetch(s16, 2, c->eip);
		break;
	case 4:
		op->val = insn_fetch(s32, 4, c->eip);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
3364
/*
 * x86_decode_insn() - decode one guest instruction into ctxt->decode.
 *
 * Consumes legacy/REX prefixes, the opcode byte(s), any group/prefix
 * table indirections, ModRM/SIB and immediates from the stream at
 * ctxt->eip (optionally pre-seeded from @insn/@insn_len), filling in
 * c->src, c->src2 and c->dst.  Returns EMULATION_OK on success,
 * EMULATION_FAILED when the fetch/decode cannot be handled; -1 marks
 * undefined or unsupported opcodes.
 */
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode;
	struct operand memop = { .type = OP_NONE }, *memopp = NULL;

	c->eip = ctxt->eip;
	c->fetch.start = c->eip;
	c->fetch.end = c->fetch.start + insn_len;
	if (insn_len > 0)
		memcpy(c->fetch.data, insn, insn_len);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = c->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	/* Two-byte opcode? */
	if (c->b == 0x0f) {
		c->twobyte = 1;
		c->b = insn_fetch(u8, 1, c->eip);
		opcode = twobyte_table[c->b];
	}
	c->d = opcode.flags;

	/* Resolve group/dual/prefix table indirections. */
	while (c->d & GroupMask) {
		switch (c->d & GroupMask) {
		case Group:
			c->modrm = insn_fetch(u8, 1, c->eip);
			--c->eip;	/* ModRM is re-fetched by decode_modrm() */
			goffset = (c->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			c->modrm = insn_fetch(u8, 1, c->eip);
			--c->eip;
			goffset = (c->modrm >> 3) & 7;
			if ((c->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = c->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (c->rep_prefix && op_prefix)
				return X86EMUL_UNHANDLEABLE;
			simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		default:
			return X86EMUL_UNHANDLEABLE;
		}

		c->d &= ~GroupMask;
		c->d |= opcode.flags;
	}

	c->execute = opcode.u.execute;
	c->check_perm = opcode.check_perm;
	c->intercept = opcode.intercept;

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined))
		return -1;

	if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return -1;

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	if (c->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			c->op_bytes = 8;
		else
			c->op_bytes = 4;
	}

	if (c->d & Sse)
		c->op_bytes = 16;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM) {
		rc = decode_modrm(ctxt, ops, &memop);
		if (!c->has_seg_override)
			set_seg_override(c, c->modrm_seg);
	} else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	memop.addr.mem.seg = seg_override(ctxt, c);

	if (memop.type == OP_MEM && c->ad_bytes != 8)
		memop.addr.mem.ea = (u32)memop.addr.mem.ea;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(ctxt, &c->src, c, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (c->d & ByteOp) ? 1 :
							   c->op_bytes;
	srcmem_common:
		c->src = memop;
		memopp = &c->src;
		break;
	case SrcImmU16:
		rc = decode_imm(ctxt, &c->src, 2, false);
		break;
	case SrcImm:
		rc = decode_imm(ctxt, &c->src, imm_size(c), true);
		break;
	case SrcImmU:
		rc = decode_imm(ctxt, &c->src, imm_size(c), false);
		break;
	case SrcImmByte:
		rc = decode_imm(ctxt, &c->src, 1, true);
		break;
	case SrcImmUByte:
		rc = decode_imm(ctxt, &c->src, 1, false);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->src);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RSI]);
		c->src.addr.mem.seg = seg_override(ctxt, c);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.addr.mem.ea = c->eip;
		c->src.bytes = c->op_bytes + 2;	/* offset + 16-bit selector */
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
		break;
	case SrcDX:
		c->src.type = OP_REG;
		c->src.bytes = 2;
		c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
		fetch_register_operand(&c->src);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
		break;
	case Src2ImmByte:
		rc = decode_imm(ctxt, &c->src2, 1, true);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Imm:
		rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case DstReg:
		decode_register_operand(ctxt, &c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstImmUByte:
		c->dst.type = OP_IMM;
		c->dst.addr.mem.ea = c->eip;
		c->dst.bytes = 1;
		c->dst.val = insn_fetch(u8, 1, c->eip);
		break;
	case DstMem:
	case DstMem64:
		c->dst = memop;
		memopp = &c->dst;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->d & BitOp)
			fetch_bit_operand(c);
		c->dst.orig_val = c->dst.val;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->dst);
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RDI]);
		c->dst.addr.mem.seg = VCPU_SREG_ES;
		c->dst.val = 0;
		break;
	case DstDX:
		c->dst.type = OP_REG;
		c->dst.bytes = 2;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
		fetch_register_operand(&c->dst);
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	/* Fix up rIP-relative addressing now that insn length is known. */
	if (memopp && memopp->type == OP_MEM && c->rip_relative)
		memopp->addr.mem.ea += c->eip;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
3711
3712static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3713{
3714 struct decode_cache *c = &ctxt->decode;
3715
3716
3717
3718
3719
3720
3721
3722
3723 if (((c->b == 0xa6) || (c->b == 0xa7) ||
3724 (c->b == 0xae) || (c->b == 0xaf))
3725 && (((c->rep_prefix == REPE_PREFIX) &&
3726 ((ctxt->eflags & EFLG_ZF) == 0))
3727 || ((c->rep_prefix == REPNE_PREFIX) &&
3728 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3729 return true;
3730
3731 return false;
3732}
3733
/*
 * x86_emulate_insn() - execute the instruction previously decoded into
 * ctxt->decode.
 *
 * Performs mode/privilege/intercept checks, fetches memory operands,
 * dispatches on the opcode (or the table-driven ->execute callback) and
 * writes the result back.  Returns EMULATION_OK, EMULATION_RESTART (a
 * string instruction must be re-entered), EMULATION_INTERCEPTED (nested
 * guest intercepted the instruction) or EMULATION_FAILED.
 */
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;
	int irq; /* interrupt vector for int3/int n/into */

	ctxt->decode.mem_read.pos = 0;

	/* Instructions flagged No64 raise #UD in 64-bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions, and only
	 * with a memory destination. */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Far-pointer sources (ljmp/lcall m16:xx) must come from memory. */
	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* SSE insns #UD when SSE is disabled (CR0.EM set or CR4.OSFXSR
	 * clear)... */
	if ((c->d & Sse)
	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* ...and #NM when CR0.TS is set. */
	if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	/* Nested guest: intercept check before any exception checks. */
	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instruction can be executed only in CPL=0. */
	if ((c->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Instruction can be executed only in protected mode. */
	if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction-specific permission checks, if any. */
	if (c->check_perm) {
		rc = c->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Nested guest: intercept check after exception checks. */
	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		/* All REP prefixes share the same first termination
		 * condition: (e/r)cx == 0 means nothing left to do. */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = c->eip;
			goto done;
		}
	}

	/* Fetch the source operand from memory (unless NoAccess, e.g. lea). */
	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
		rc = segmented_read(ctxt, c->src.addr.mem,
				    c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, c->src2.addr.mem,
				    &c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, c->dst.addr.mem,
				    &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;

special_insn:

	/* Nested guest: intercept check after operand fetch. */
	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Table-driven execution takes precedence over the switches below. */
	if (c->execute) {
		rc = c->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x06:		/* push es */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x0e:		/* push cs */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
		break;
	case 0x16:		/* push ss */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x1e:		/* push ds */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x40 ... 0x47:	/* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f:	/* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;	/* legacy ARPL not handled */
		c->dst.val = (s32) c->src.val;
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->src.val = c->regs[VCPU_REGS_RDX];	/* port number */
		goto do_io_in;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->dst.val = c->regs[VCPU_REGS_RDX];	/* port number */
		goto do_io_out;
		break;
	case 0x70 ... 0x7f:	/* jcc (short displacement) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x84 ... 0x85:	/* test */
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		c->src.val = c->dst.val;
		write_register_operand(&c->src);
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.orig_val;
		c->lock_prefix = 1;
		break;
	case 0x8c:		/* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			rc = emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = get_segment_selector(ctxt, c->modrm_reg);
		break;
	case 0x8d:		/* lea r16/r32, m */
		c->dst.val = c->src.addr.mem.ea;
		break;
	case 0x8e: {		/* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		/* CS cannot be loaded with mov; invalid sreg index #UDs. */
		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* mov ss inhibits interrupts for one instruction. */
		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop r/m (Grp1a) */
		rc = em_grp1a(ctxt);
		break;
	case 0x90 ... 0x97:	/* nop / xchg reg, rax */
		/* xchg rax, rax is a nop - skip the register swap. */
		if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
			break;
		goto xchg;
	case 0x98:		/* cbw/cwde/cdqe - sign-extend into ax/eax/rax */
		switch (c->op_bytes) {
		case 2: c->dst.val = (s8)c->dst.val; break;
		case 4: c->dst.val = (s16)c->dst.val; break;
		case 8: c->dst.val = (s32)c->dst.val; break;
		}
		break;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xc0 ... 0xc1:	/* Grp2: shift/rotate r/m, imm8 */
		rc = em_grp2(ctxt);
		break;
	case 0xc3:		/* ret - pop the new eip */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &c->eip;
		c->dst.bytes = c->op_bytes;
		rc = em_pop(ctxt);
		break;
	case 0xc4:		/* les */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0xc5:		/* lds */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		break;
	case 0xcc:		/* int3 */
		irq = 3;
		goto do_interrupt;
	case 0xcd:		/* int n */
		irq = c->src.val;
	do_interrupt:
		rc = emulate_int(ctxt, ops, irq);
		break;
	case 0xce:		/* into - int 4 only if OF is set */
		if (ctxt->eflags & EFLG_OF) {
			irq = 4;
			goto do_interrupt;
		}
		break;
	case 0xcf:		/* iret */
		rc = emulate_iret(ctxt, ops);
		break;
	case 0xd0 ... 0xd1:	/* Grp2: shift/rotate r/m, 1 */
		rc = em_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2: shift/rotate r/m, cl */
		c->src.val = c->regs[VCPU_REGS_RCX];
		rc = em_grp2(ctxt);
		break;
	case 0xe0 ... 0xe2:	/* loopnz/loopz/loop */
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
		    (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
			jmp_rel(c, c->src.val);
		break;
	case 0xe3:		/* jcxz/jecxz/jrcxz */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
			jmp_rel(c, c->src.val);
		break;
	case 0xe4:		/* inb al, imm8 */
	case 0xe5:		/* in (e)ax, imm8 */
		goto do_io_in;
	case 0xe6:		/* outb imm8, al */
	case 0xe7:		/* out imm8, (e)ax */
		goto do_io_out;
	case 0xe8: {		/* call (near) */
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		rc = em_push(ctxt);	/* push the return address */
		break;
	}
	case 0xe9:		/* jmp rel */
		goto jmp;
	case 0xea:		/* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 0xeb:		/* jmp rel short */
	jmp:
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xec:		/* in al, dx */
	case 0xed:		/* in (e)ax, dx */
	do_io_in:
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done;	/* IO is needed */
		break;
	case 0xee:		/* out dx, al */
	case 0xef:		/* out dx, (e)ax */
	do_io_out:
		ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
				      &c->src.val, 1);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		rc = em_grp3(ctxt);
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa:		/* cli - IOPL-checked */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb:		/* sti - IOPL-checked */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else {
			/* sti shadows interrupts until the next insn. */
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
		}
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe:		/* Grp4 */
		rc = em_grp45(ctxt);
		break;
	case 0xff:		/* Grp5 */
		rc = em_grp45(ctxt);
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	rc = writeback(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instruction)
	 */
	c->dst.type = saved_dst_type;

	/* Advance rsi/rdi for string operands (direction-flag aware). */
	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *r = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
			if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->decode.mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done;	/* skip rip writeback */
		}
	}

	ctxt->eip = c->eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (c->b) {
	case 0x05: 		/* syscall */
		rc = emulate_syscall(ctxt, ops);
		break;
	case 0x06:		/* clts */
		rc = em_clts(ctxt);
		break;
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd - treated as a nop */
	case 0x0d:		/* GrpP (prefetch) - nop */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20:		/* mov reg, cr */
		c->dst.val = ops->get_cr(ctxt, c->modrm_reg);
		break;
	case 0x21:		/* mov reg, dr */
		ops->get_dr(ctxt, c->modrm_reg, &c->dst.val);
		break;
	case 0x22:		/* mov cr, reg */
		if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x23:		/* mov dr, reg */
		/* In 32-bit modes only the low 32 value bits are loaded. */
		if (ops->set_dr(ctxt, c->modrm_reg, c->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U)) < 0) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr: value is edx:eax, index in ecx */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x32:
		/* rdmsr: result returned in edx:eax */
		if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x80 ... 0x8f:	/* jcc rel (near) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
	case 0xa0:		/* push fs */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:		/* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa3:		/* bt */
	bt:
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4:		/* shld (count in src2) */
	case 0xa5:
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:		/* push gs */
		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:		/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xab:		/* bts */
	bts:
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac:		/* shrd (count in src2) */
	case 0xad:
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:		/* clflush - treated as a nop */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb2:		/* lss */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0xb3:		/* btr */
	btr:
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb4:		/* lfs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xb5:		/* lgs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8: bt/bts/btr/btc r/m, imm8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:		/* btc */
	btc:
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbc: {		/* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			/* Source was zero: set ZF, leave dst unchanged. */
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbd: {		/* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			/* Source was zero: set ZF, leave dst unchanged. */
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
					       (s16) c->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		/* Write back the register source. */
		c->src.val = c->dst.orig_val;
		write_register_operand(&c->src);
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
						  (u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = em_grp9(ctxt);
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
4382