1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/kvm_host.h>
24#include "kvm_cache_regs.h"
25#include <linux/module.h>
26#include <asm/kvm_emulate.h>
27#include <linux/stringify.h>
28
29#include "x86.h"
30#include "tss.h"
31
32
33
34
35#define OpNone 0ull
36#define OpImplicit 1ull
37#define OpReg 2ull
38#define OpMem 3ull
39#define OpAcc 4ull
40#define OpDI 5ull
41#define OpMem64 6ull
42#define OpImmUByte 7ull
43#define OpDX 8ull
44#define OpCL 9ull
45#define OpImmByte 10ull
46#define OpOne 11ull
47#define OpImm 12ull
48#define OpMem16 13ull
49#define OpMem32 14ull
50#define OpImmU 15ull
51#define OpSI 16ull
52#define OpImmFAddr 17ull
53#define OpMemFAddr 18ull
54#define OpImmU16 19ull
55#define OpES 20ull
56#define OpCS 21ull
57#define OpSS 22ull
58#define OpDS 23ull
59#define OpFS 24ull
60#define OpGS 25ull
61#define OpMem8 26ull
62#define OpImm64 27ull
63#define OpXLat 28ull
64#define OpAccLo 29ull
65#define OpAccHi 30ull
66
67#define OpBits 5
68#define OpMask ((1ull << OpBits) - 1)
69
70
71
72
73
74
75
76
77
78
79
80#define ByteOp (1<<0)
81
82#define DstShift 1
83#define ImplicitOps (OpImplicit << DstShift)
84#define DstReg (OpReg << DstShift)
85#define DstMem (OpMem << DstShift)
86#define DstAcc (OpAcc << DstShift)
87#define DstDI (OpDI << DstShift)
88#define DstMem64 (OpMem64 << DstShift)
89#define DstImmUByte (OpImmUByte << DstShift)
90#define DstDX (OpDX << DstShift)
91#define DstAccLo (OpAccLo << DstShift)
92#define DstMask (OpMask << DstShift)
93
94#define SrcShift 6
95#define SrcNone (OpNone << SrcShift)
96#define SrcReg (OpReg << SrcShift)
97#define SrcMem (OpMem << SrcShift)
98#define SrcMem16 (OpMem16 << SrcShift)
99#define SrcMem32 (OpMem32 << SrcShift)
100#define SrcImm (OpImm << SrcShift)
101#define SrcImmByte (OpImmByte << SrcShift)
102#define SrcOne (OpOne << SrcShift)
103#define SrcImmUByte (OpImmUByte << SrcShift)
104#define SrcImmU (OpImmU << SrcShift)
105#define SrcSI (OpSI << SrcShift)
106#define SrcXLat (OpXLat << SrcShift)
107#define SrcImmFAddr (OpImmFAddr << SrcShift)
108#define SrcMemFAddr (OpMemFAddr << SrcShift)
109#define SrcAcc (OpAcc << SrcShift)
110#define SrcImmU16 (OpImmU16 << SrcShift)
111#define SrcImm64 (OpImm64 << SrcShift)
112#define SrcDX (OpDX << SrcShift)
113#define SrcMem8 (OpMem8 << SrcShift)
114#define SrcAccHi (OpAccHi << SrcShift)
115#define SrcMask (OpMask << SrcShift)
116#define BitOp (1<<11)
117#define MemAbs (1<<12)
118#define String (1<<13)
119#define Stack (1<<14)
120#define GroupMask (7<<15)
121#define Group (1<<15)
122#define GroupDual (2<<15)
123#define Prefix (3<<15)
124#define RMExt (4<<15)
125#define Escape (5<<15)
126#define Sse (1<<18)
127
128#define ModRM (1<<19)
129
130#define Mov (1<<20)
131
132#define Prot (1<<21)
133#define EmulateOnUD (1<<22)
134#define NoAccess (1<<23)
135#define Op3264 (1<<24)
136#define Undefined (1<<25)
137#define Lock (1<<26)
138#define Priv (1<<27)
139#define No64 (1<<28)
140#define PageTable (1 << 29)
141#define NotImpl (1 << 30)
142
143#define Src2Shift (31)
144#define Src2None (OpNone << Src2Shift)
145#define Src2Mem (OpMem << Src2Shift)
146#define Src2CL (OpCL << Src2Shift)
147#define Src2ImmByte (OpImmByte << Src2Shift)
148#define Src2One (OpOne << Src2Shift)
149#define Src2Imm (OpImm << Src2Shift)
150#define Src2ES (OpES << Src2Shift)
151#define Src2CS (OpCS << Src2Shift)
152#define Src2SS (OpSS << Src2Shift)
153#define Src2DS (OpDS << Src2Shift)
154#define Src2FS (OpFS << Src2Shift)
155#define Src2GS (OpGS << Src2Shift)
156#define Src2Mask (OpMask << Src2Shift)
157#define Mmx ((u64)1 << 40)
158#define Aligned ((u64)1 << 41)
159#define Unaligned ((u64)1 << 42)
160#define Avx ((u64)1 << 43)
161#define Fastop ((u64)1 << 44)
162#define NoWrite ((u64)1 << 45)
163#define SrcWrite ((u64)1 << 46)
164
165#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
166
167#define X2(x...) x, x
168#define X3(x...) X2(x), x
169#define X4(x...) X2(x), X2(x)
170#define X5(x...) X4(x), x
171#define X6(x...) X4(x), X2(x)
172#define X7(x...) X4(x), X3(x)
173#define X8(x...) X4(x), X4(x)
174#define X16(x...) X8(x), X8(x)
175
176#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
177#define FASTOP_SIZE 8
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196struct fastop;
197
198struct opcode {
199 u64 flags : 56;
200 u64 intercept : 8;
201 union {
202 int (*execute)(struct x86_emulate_ctxt *ctxt);
203 const struct opcode *group;
204 const struct group_dual *gdual;
205 const struct gprefix *gprefix;
206 const struct escape *esc;
207 void (*fastop)(struct fastop *fake);
208 } u;
209 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
210};
211
212struct group_dual {
213 struct opcode mod012[8];
214 struct opcode mod3[8];
215};
216
217struct gprefix {
218 struct opcode pfx_no;
219 struct opcode pfx_66;
220 struct opcode pfx_f2;
221 struct opcode pfx_f3;
222};
223
224struct escape {
225 struct opcode op[8];
226 struct opcode high[64];
227};
228
229
230#define EFLG_ID (1<<21)
231#define EFLG_VIP (1<<20)
232#define EFLG_VIF (1<<19)
233#define EFLG_AC (1<<18)
234#define EFLG_VM (1<<17)
235#define EFLG_RF (1<<16)
236#define EFLG_IOPL (3<<12)
237#define EFLG_NT (1<<14)
238#define EFLG_OF (1<<11)
239#define EFLG_DF (1<<10)
240#define EFLG_IF (1<<9)
241#define EFLG_TF (1<<8)
242#define EFLG_SF (1<<7)
243#define EFLG_ZF (1<<6)
244#define EFLG_AF (1<<4)
245#define EFLG_PF (1<<2)
246#define EFLG_CF (1<<0)
247
248#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
249#define EFLG_RESERVED_ONE_MASK 2
250
251static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
252{
253 if (!(ctxt->regs_valid & (1 << nr))) {
254 ctxt->regs_valid |= 1 << nr;
255 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
256 }
257 return ctxt->_regs[nr];
258}
259
260static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
261{
262 ctxt->regs_valid |= 1 << nr;
263 ctxt->regs_dirty |= 1 << nr;
264 return &ctxt->_regs[nr];
265}
266
267static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
268{
269 reg_read(ctxt, nr);
270 return reg_write(ctxt, nr);
271}
272
273static void writeback_registers(struct x86_emulate_ctxt *ctxt)
274{
275 unsigned reg;
276
277 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
278 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
279}
280
281static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
282{
283 ctxt->regs_dirty = 0;
284 ctxt->regs_valid = 0;
285}
286
287
288
289
290
291#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
292
293#ifdef CONFIG_X86_64
294#define ON64(x) x
295#else
296#define ON64(x)
297#endif
298
299static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
300
301#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
302#define FOP_RET "ret \n\t"
303
304#define FOP_START(op) \
305 extern void em_##op(struct fastop *fake); \
306 asm(".pushsection .text, \"ax\" \n\t" \
307 ".global em_" #op " \n\t" \
308 FOP_ALIGN \
309 "em_" #op ": \n\t"
310
311#define FOP_END \
312 ".popsection")
313
314#define FOPNOP() FOP_ALIGN FOP_RET
315
316#define FOP1E(op, dst) \
317 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
318
319#define FOP1EEX(op, dst) \
320 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
321
322#define FASTOP1(op) \
323 FOP_START(op) \
324 FOP1E(op##b, al) \
325 FOP1E(op##w, ax) \
326 FOP1E(op##l, eax) \
327 ON64(FOP1E(op##q, rax)) \
328 FOP_END
329
330
331#define FASTOP1SRC2(op, name) \
332 FOP_START(name) \
333 FOP1E(op, cl) \
334 FOP1E(op, cx) \
335 FOP1E(op, ecx) \
336 ON64(FOP1E(op, rcx)) \
337 FOP_END
338
339
340#define FASTOP1SRC2EX(op, name) \
341 FOP_START(name) \
342 FOP1EEX(op, cl) \
343 FOP1EEX(op, cx) \
344 FOP1EEX(op, ecx) \
345 ON64(FOP1EEX(op, rcx)) \
346 FOP_END
347
348#define FOP2E(op, dst, src) \
349 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
350
351#define FASTOP2(op) \
352 FOP_START(op) \
353 FOP2E(op##b, al, dl) \
354 FOP2E(op##w, ax, dx) \
355 FOP2E(op##l, eax, edx) \
356 ON64(FOP2E(op##q, rax, rdx)) \
357 FOP_END
358
359
360#define FASTOP2W(op) \
361 FOP_START(op) \
362 FOPNOP() \
363 FOP2E(op##w, ax, dx) \
364 FOP2E(op##l, eax, edx) \
365 ON64(FOP2E(op##q, rax, rdx)) \
366 FOP_END
367
368
369#define FASTOP2CL(op) \
370 FOP_START(op) \
371 FOP2E(op##b, al, cl) \
372 FOP2E(op##w, ax, cl) \
373 FOP2E(op##l, eax, cl) \
374 ON64(FOP2E(op##q, rax, cl)) \
375 FOP_END
376
377#define FOP3E(op, dst, src, src2) \
378 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
379
380
381#define FASTOP3WCL(op) \
382 FOP_START(op) \
383 FOPNOP() \
384 FOP3E(op##w, ax, dx, cl) \
385 FOP3E(op##l, eax, edx, cl) \
386 ON64(FOP3E(op##q, rax, rdx, cl)) \
387 FOP_END
388
389
390#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
391
392asm(".global kvm_fastop_exception \n"
393 "kvm_fastop_exception: xor %esi, %esi; ret");
394
395FOP_START(setcc)
396FOP_SETCC(seto)
397FOP_SETCC(setno)
398FOP_SETCC(setc)
399FOP_SETCC(setnc)
400FOP_SETCC(setz)
401FOP_SETCC(setnz)
402FOP_SETCC(setbe)
403FOP_SETCC(setnbe)
404FOP_SETCC(sets)
405FOP_SETCC(setns)
406FOP_SETCC(setp)
407FOP_SETCC(setnp)
408FOP_SETCC(setl)
409FOP_SETCC(setnl)
410FOP_SETCC(setle)
411FOP_SETCC(setnle)
412FOP_END;
413
414FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
415FOP_END;
416
417static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
418 enum x86_intercept intercept,
419 enum x86_intercept_stage stage)
420{
421 struct x86_instruction_info info = {
422 .intercept = intercept,
423 .rep_prefix = ctxt->rep_prefix,
424 .modrm_mod = ctxt->modrm_mod,
425 .modrm_reg = ctxt->modrm_reg,
426 .modrm_rm = ctxt->modrm_rm,
427 .src_val = ctxt->src.val64,
428 .src_bytes = ctxt->src.bytes,
429 .dst_bytes = ctxt->dst.bytes,
430 .ad_bytes = ctxt->ad_bytes,
431 .next_rip = ctxt->eip,
432 };
433
434 return ctxt->ops->intercept(ctxt, &info, stage);
435}
436
437static void assign_masked(ulong *dest, ulong src, ulong mask)
438{
439 *dest = (*dest & ~mask) | (src & mask);
440}
441
442static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
443{
444 return (1UL << (ctxt->ad_bytes << 3)) - 1;
445}
446
447static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
448{
449 u16 sel;
450 struct desc_struct ss;
451
452 if (ctxt->mode == X86EMUL_MODE_PROT64)
453 return ~0UL;
454 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
455 return ~0U >> ((ss.d ^ 1) * 16);
456}
457
458static int stack_size(struct x86_emulate_ctxt *ctxt)
459{
460 return (__fls(stack_mask(ctxt)) + 1) >> 3;
461}
462
463
464static inline unsigned long
465address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
466{
467 if (ctxt->ad_bytes == sizeof(unsigned long))
468 return reg;
469 else
470 return reg & ad_mask(ctxt);
471}
472
473static inline unsigned long
474register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
475{
476 return address_mask(ctxt, reg);
477}
478
479static void masked_increment(ulong *reg, ulong mask, int inc)
480{
481 assign_masked(reg, *reg + inc, mask);
482}
483
484static inline void
485register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
486{
487 ulong mask;
488
489 if (ctxt->ad_bytes == sizeof(unsigned long))
490 mask = ~0UL;
491 else
492 mask = ad_mask(ctxt);
493 masked_increment(reg, mask, inc);
494}
495
496static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
497{
498 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
499}
500
501static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
502{
503 register_address_increment(ctxt, &ctxt->_eip, rel);
504}
505
506static u32 desc_limit_scaled(struct desc_struct *desc)
507{
508 u32 limit = get_desc_limit(desc);
509
510 return desc->g ? (limit << 12) | 0xfff : limit;
511}
512
513static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
514{
515 ctxt->has_seg_override = true;
516 ctxt->seg_override = seg;
517}
518
519static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
520{
521 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
522 return 0;
523
524 return ctxt->ops->get_cached_segment_base(ctxt, seg);
525}
526
527static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
528{
529 if (!ctxt->has_seg_override)
530 return 0;
531
532 return ctxt->seg_override;
533}
534
535static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
536 u32 error, bool valid)
537{
538 ctxt->exception.vector = vec;
539 ctxt->exception.error_code = error;
540 ctxt->exception.error_code_valid = valid;
541 return X86EMUL_PROPAGATE_FAULT;
542}
543
544static int emulate_db(struct x86_emulate_ctxt *ctxt)
545{
546 return emulate_exception(ctxt, DB_VECTOR, 0, false);
547}
548
549static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
550{
551 return emulate_exception(ctxt, GP_VECTOR, err, true);
552}
553
554static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
555{
556 return emulate_exception(ctxt, SS_VECTOR, err, true);
557}
558
559static int emulate_ud(struct x86_emulate_ctxt *ctxt)
560{
561 return emulate_exception(ctxt, UD_VECTOR, 0, false);
562}
563
564static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
565{
566 return emulate_exception(ctxt, TS_VECTOR, err, true);
567}
568
569static int emulate_de(struct x86_emulate_ctxt *ctxt)
570{
571 return emulate_exception(ctxt, DE_VECTOR, 0, false);
572}
573
574static int emulate_nm(struct x86_emulate_ctxt *ctxt)
575{
576 return emulate_exception(ctxt, NM_VECTOR, 0, false);
577}
578
579static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
580{
581 u16 selector;
582 struct desc_struct desc;
583
584 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
585 return selector;
586}
587
588static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
589 unsigned seg)
590{
591 u16 dummy;
592 u32 base3;
593 struct desc_struct desc;
594
595 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
596 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
597}
598
599
600
601
602
603
604
605
606
607static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
608{
609 if (likely(size < 16))
610 return false;
611
612 if (ctxt->d & Aligned)
613 return true;
614 else if (ctxt->d & Unaligned)
615 return false;
616 else if (ctxt->d & Avx)
617 return false;
618 else
619 return true;
620}
621
622static int __linearize(struct x86_emulate_ctxt *ctxt,
623 struct segmented_address addr,
624 unsigned size, bool write, bool fetch,
625 ulong *linear)
626{
627 struct desc_struct desc;
628 bool usable;
629 ulong la;
630 u32 lim;
631 u16 sel;
632 unsigned cpl;
633
634 la = seg_base(ctxt, addr.seg) + addr.ea;
635 switch (ctxt->mode) {
636 case X86EMUL_MODE_PROT64:
637 if (((signed long)la << 16) >> 16 != la)
638 return emulate_gp(ctxt, 0);
639 break;
640 default:
641 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
642 addr.seg);
643 if (!usable)
644 goto bad;
645
646 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
647 || !(desc.type & 2)) && write)
648 goto bad;
649
650 if (!fetch && (desc.type & 8) && !(desc.type & 2))
651 goto bad;
652 lim = desc_limit_scaled(&desc);
653 if ((desc.type & 8) || !(desc.type & 4)) {
654
655 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
656 goto bad;
657 } else {
658
659 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
660 goto bad;
661 lim = desc.d ? 0xffffffff : 0xffff;
662 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
663 goto bad;
664 }
665 cpl = ctxt->ops->cpl(ctxt);
666 if (!(desc.type & 8)) {
667
668 if (cpl > desc.dpl)
669 goto bad;
670 } else if ((desc.type & 8) && !(desc.type & 4)) {
671
672 if (cpl != desc.dpl)
673 goto bad;
674 } else if ((desc.type & 8) && (desc.type & 4)) {
675
676 if (cpl < desc.dpl)
677 goto bad;
678 }
679 break;
680 }
681 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
682 la &= (u32)-1;
683 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
684 return emulate_gp(ctxt, 0);
685 *linear = la;
686 return X86EMUL_CONTINUE;
687bad:
688 if (addr.seg == VCPU_SREG_SS)
689 return emulate_ss(ctxt, sel);
690 else
691 return emulate_gp(ctxt, sel);
692}
693
694static int linearize(struct x86_emulate_ctxt *ctxt,
695 struct segmented_address addr,
696 unsigned size, bool write,
697 ulong *linear)
698{
699 return __linearize(ctxt, addr, size, write, false, linear);
700}
701
702
703static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
704 struct segmented_address addr,
705 void *data,
706 unsigned size)
707{
708 int rc;
709 ulong linear;
710
711 rc = linearize(ctxt, addr, size, false, &linear);
712 if (rc != X86EMUL_CONTINUE)
713 return rc;
714 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
715}
716
717
718
719
720
721
722
723
724static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
725{
726 struct fetch_cache *fc = &ctxt->fetch;
727 int rc;
728 int size, cur_size;
729
730 if (ctxt->_eip == fc->end) {
731 unsigned long linear;
732 struct segmented_address addr = { .seg = VCPU_SREG_CS,
733 .ea = ctxt->_eip };
734 cur_size = fc->end - fc->start;
735 size = min(15UL - cur_size,
736 PAGE_SIZE - offset_in_page(ctxt->_eip));
737 rc = __linearize(ctxt, addr, size, false, true, &linear);
738 if (unlikely(rc != X86EMUL_CONTINUE))
739 return rc;
740 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
741 size, &ctxt->exception);
742 if (unlikely(rc != X86EMUL_CONTINUE))
743 return rc;
744 fc->end += size;
745 }
746 *dest = fc->data[ctxt->_eip - fc->start];
747 ctxt->_eip++;
748 return X86EMUL_CONTINUE;
749}
750
751static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
752 void *dest, unsigned size)
753{
754 int rc;
755
756
757 if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
758 return X86EMUL_UNHANDLEABLE;
759 while (size--) {
760 rc = do_insn_fetch_byte(ctxt, dest++);
761 if (rc != X86EMUL_CONTINUE)
762 return rc;
763 }
764 return X86EMUL_CONTINUE;
765}
766
767
768#define insn_fetch(_type, _ctxt) \
769({ unsigned long _x; \
770 rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
771 if (rc != X86EMUL_CONTINUE) \
772 goto done; \
773 (_type)_x; \
774})
775
776#define insn_fetch_arr(_arr, _size, _ctxt) \
777({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \
778 if (rc != X86EMUL_CONTINUE) \
779 goto done; \
780})
781
782
783
784
785
786
787static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
788 int byteop)
789{
790 void *p;
791 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
792
793 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
794 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
795 else
796 p = reg_rmw(ctxt, modrm_reg);
797 return p;
798}
799
800static int read_descriptor(struct x86_emulate_ctxt *ctxt,
801 struct segmented_address addr,
802 u16 *size, unsigned long *address, int op_bytes)
803{
804 int rc;
805
806 if (op_bytes == 2)
807 op_bytes = 3;
808 *address = 0;
809 rc = segmented_read_std(ctxt, addr, size, 2);
810 if (rc != X86EMUL_CONTINUE)
811 return rc;
812 addr.ea += 2;
813 rc = segmented_read_std(ctxt, addr, address, op_bytes);
814 return rc;
815}
816
817FASTOP2(add);
818FASTOP2(or);
819FASTOP2(adc);
820FASTOP2(sbb);
821FASTOP2(and);
822FASTOP2(sub);
823FASTOP2(xor);
824FASTOP2(cmp);
825FASTOP2(test);
826
827FASTOP1SRC2(mul, mul_ex);
828FASTOP1SRC2(imul, imul_ex);
829FASTOP1SRC2EX(div, div_ex);
830FASTOP1SRC2EX(idiv, idiv_ex);
831
832FASTOP3WCL(shld);
833FASTOP3WCL(shrd);
834
835FASTOP2W(imul);
836
837FASTOP1(not);
838FASTOP1(neg);
839FASTOP1(inc);
840FASTOP1(dec);
841
842FASTOP2CL(rol);
843FASTOP2CL(ror);
844FASTOP2CL(rcl);
845FASTOP2CL(rcr);
846FASTOP2CL(shl);
847FASTOP2CL(shr);
848FASTOP2CL(sar);
849
850FASTOP2W(bsf);
851FASTOP2W(bsr);
852FASTOP2W(bt);
853FASTOP2W(bts);
854FASTOP2W(btr);
855FASTOP2W(btc);
856
857FASTOP2(xadd);
858
859static u8 test_cc(unsigned int condition, unsigned long flags)
860{
861 u8 rc;
862 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
863
864 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
865 asm("push %[flags]; popf; call *%[fastop]"
866 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
867 return rc;
868}
869
870static void fetch_register_operand(struct operand *op)
871{
872 switch (op->bytes) {
873 case 1:
874 op->val = *(u8 *)op->addr.reg;
875 break;
876 case 2:
877 op->val = *(u16 *)op->addr.reg;
878 break;
879 case 4:
880 op->val = *(u32 *)op->addr.reg;
881 break;
882 case 8:
883 op->val = *(u64 *)op->addr.reg;
884 break;
885 }
886}
887
888static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
889{
890 ctxt->ops->get_fpu(ctxt);
891 switch (reg) {
892 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
893 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
894 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
895 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
896 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
897 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
898 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
899 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
900#ifdef CONFIG_X86_64
901 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
902 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
903 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
904 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
905 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
906 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
907 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
908 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
909#endif
910 default: BUG();
911 }
912 ctxt->ops->put_fpu(ctxt);
913}
914
915static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
916 int reg)
917{
918 ctxt->ops->get_fpu(ctxt);
919 switch (reg) {
920 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
921 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
922 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
923 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
924 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
925 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
926 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
927 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
928#ifdef CONFIG_X86_64
929 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
930 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
931 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
932 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
933 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
934 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
935 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
936 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
937#endif
938 default: BUG();
939 }
940 ctxt->ops->put_fpu(ctxt);
941}
942
943static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
944{
945 ctxt->ops->get_fpu(ctxt);
946 switch (reg) {
947 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
948 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
949 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
950 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
951 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
952 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
953 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
954 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
955 default: BUG();
956 }
957 ctxt->ops->put_fpu(ctxt);
958}
959
960static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
961{
962 ctxt->ops->get_fpu(ctxt);
963 switch (reg) {
964 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
965 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
966 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
967 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
968 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
969 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
970 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
971 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
972 default: BUG();
973 }
974 ctxt->ops->put_fpu(ctxt);
975}
976
977static int em_fninit(struct x86_emulate_ctxt *ctxt)
978{
979 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
980 return emulate_nm(ctxt);
981
982 ctxt->ops->get_fpu(ctxt);
983 asm volatile("fninit");
984 ctxt->ops->put_fpu(ctxt);
985 return X86EMUL_CONTINUE;
986}
987
988static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
989{
990 u16 fcw;
991
992 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
993 return emulate_nm(ctxt);
994
995 ctxt->ops->get_fpu(ctxt);
996 asm volatile("fnstcw %0": "+m"(fcw));
997 ctxt->ops->put_fpu(ctxt);
998
999
1000 ctxt->dst.bytes = 2;
1001 ctxt->dst.val = fcw;
1002
1003 return X86EMUL_CONTINUE;
1004}
1005
1006static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1007{
1008 u16 fsw;
1009
1010 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1011 return emulate_nm(ctxt);
1012
1013 ctxt->ops->get_fpu(ctxt);
1014 asm volatile("fnstsw %0": "+m"(fsw));
1015 ctxt->ops->put_fpu(ctxt);
1016
1017
1018 ctxt->dst.bytes = 2;
1019 ctxt->dst.val = fsw;
1020
1021 return X86EMUL_CONTINUE;
1022}
1023
1024static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1025 struct operand *op)
1026{
1027 unsigned reg = ctxt->modrm_reg;
1028
1029 if (!(ctxt->d & ModRM))
1030 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1031
1032 if (ctxt->d & Sse) {
1033 op->type = OP_XMM;
1034 op->bytes = 16;
1035 op->addr.xmm = reg;
1036 read_sse_reg(ctxt, &op->vec_val, reg);
1037 return;
1038 }
1039 if (ctxt->d & Mmx) {
1040 reg &= 7;
1041 op->type = OP_MM;
1042 op->bytes = 8;
1043 op->addr.mm = reg;
1044 return;
1045 }
1046
1047 op->type = OP_REG;
1048 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1049 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1050
1051 fetch_register_operand(op);
1052 op->orig_val = op->val;
1053}
1054
1055static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1056{
1057 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1058 ctxt->modrm_seg = VCPU_SREG_SS;
1059}
1060
1061static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1062 struct operand *op)
1063{
1064 u8 sib;
1065 int index_reg = 0, base_reg = 0, scale;
1066 int rc = X86EMUL_CONTINUE;
1067 ulong modrm_ea = 0;
1068
1069 if (ctxt->rex_prefix) {
1070 ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;
1071 index_reg = (ctxt->rex_prefix & 2) << 2;
1072 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3;
1073 }
1074
1075 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
1076 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1077 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
1078 ctxt->modrm_seg = VCPU_SREG_DS;
1079
1080 if (ctxt->modrm_mod == 3) {
1081 op->type = OP_REG;
1082 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1083 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1084 ctxt->d & ByteOp);
1085 if (ctxt->d & Sse) {
1086 op->type = OP_XMM;
1087 op->bytes = 16;
1088 op->addr.xmm = ctxt->modrm_rm;
1089 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1090 return rc;
1091 }
1092 if (ctxt->d & Mmx) {
1093 op->type = OP_MM;
1094 op->bytes = 8;
1095 op->addr.xmm = ctxt->modrm_rm & 7;
1096 return rc;
1097 }
1098 fetch_register_operand(op);
1099 return rc;
1100 }
1101
1102 op->type = OP_MEM;
1103
1104 if (ctxt->ad_bytes == 2) {
1105 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1106 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1107 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1108 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1109
1110
1111 switch (ctxt->modrm_mod) {
1112 case 0:
1113 if (ctxt->modrm_rm == 6)
1114 modrm_ea += insn_fetch(u16, ctxt);
1115 break;
1116 case 1:
1117 modrm_ea += insn_fetch(s8, ctxt);
1118 break;
1119 case 2:
1120 modrm_ea += insn_fetch(u16, ctxt);
1121 break;
1122 }
1123 switch (ctxt->modrm_rm) {
1124 case 0:
1125 modrm_ea += bx + si;
1126 break;
1127 case 1:
1128 modrm_ea += bx + di;
1129 break;
1130 case 2:
1131 modrm_ea += bp + si;
1132 break;
1133 case 3:
1134 modrm_ea += bp + di;
1135 break;
1136 case 4:
1137 modrm_ea += si;
1138 break;
1139 case 5:
1140 modrm_ea += di;
1141 break;
1142 case 6:
1143 if (ctxt->modrm_mod != 0)
1144 modrm_ea += bp;
1145 break;
1146 case 7:
1147 modrm_ea += bx;
1148 break;
1149 }
1150 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1151 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1152 ctxt->modrm_seg = VCPU_SREG_SS;
1153 modrm_ea = (u16)modrm_ea;
1154 } else {
1155
1156 if ((ctxt->modrm_rm & 7) == 4) {
1157 sib = insn_fetch(u8, ctxt);
1158 index_reg |= (sib >> 3) & 7;
1159 base_reg |= sib & 7;
1160 scale = sib >> 6;
1161
1162 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1163 modrm_ea += insn_fetch(s32, ctxt);
1164 else {
1165 modrm_ea += reg_read(ctxt, base_reg);
1166 adjust_modrm_seg(ctxt, base_reg);
1167 }
1168 if (index_reg != 4)
1169 modrm_ea += reg_read(ctxt, index_reg) << scale;
1170 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1171 if (ctxt->mode == X86EMUL_MODE_PROT64)
1172 ctxt->rip_relative = 1;
1173 } else {
1174 base_reg = ctxt->modrm_rm;
1175 modrm_ea += reg_read(ctxt, base_reg);
1176 adjust_modrm_seg(ctxt, base_reg);
1177 }
1178 switch (ctxt->modrm_mod) {
1179 case 0:
1180 if (ctxt->modrm_rm == 5)
1181 modrm_ea += insn_fetch(s32, ctxt);
1182 break;
1183 case 1:
1184 modrm_ea += insn_fetch(s8, ctxt);
1185 break;
1186 case 2:
1187 modrm_ea += insn_fetch(s32, ctxt);
1188 break;
1189 }
1190 }
1191 op->addr.mem.ea = modrm_ea;
1192done:
1193 return rc;
1194}
1195
1196static int decode_abs(struct x86_emulate_ctxt *ctxt,
1197 struct operand *op)
1198{
1199 int rc = X86EMUL_CONTINUE;
1200
1201 op->type = OP_MEM;
1202 switch (ctxt->ad_bytes) {
1203 case 2:
1204 op->addr.mem.ea = insn_fetch(u16, ctxt);
1205 break;
1206 case 4:
1207 op->addr.mem.ea = insn_fetch(u32, ctxt);
1208 break;
1209 case 8:
1210 op->addr.mem.ea = insn_fetch(u64, ctxt);
1211 break;
1212 }
1213done:
1214 return rc;
1215}
1216
1217static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1218{
1219 long sv = 0, mask;
1220
1221 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1222 mask = ~(ctxt->dst.bytes * 8 - 1);
1223
1224 if (ctxt->src.bytes == 2)
1225 sv = (s16)ctxt->src.val & (s16)mask;
1226 else if (ctxt->src.bytes == 4)
1227 sv = (s32)ctxt->src.val & (s32)mask;
1228
1229 ctxt->dst.addr.mem.ea += (sv >> 3);
1230 }
1231
1232
1233 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1234}
1235
1236static int read_emulated(struct x86_emulate_ctxt *ctxt,
1237 unsigned long addr, void *dest, unsigned size)
1238{
1239 int rc;
1240 struct read_cache *mc = &ctxt->mem_read;
1241
1242 if (mc->pos < mc->end)
1243 goto read_cached;
1244
1245 WARN_ON((mc->end + size) >= sizeof(mc->data));
1246
1247 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1248 &ctxt->exception);
1249 if (rc != X86EMUL_CONTINUE)
1250 return rc;
1251
1252 mc->end += size;
1253
1254read_cached:
1255 memcpy(dest, mc->data + mc->pos, size);
1256 mc->pos += size;
1257 return X86EMUL_CONTINUE;
1258}
1259
1260static int segmented_read(struct x86_emulate_ctxt *ctxt,
1261 struct segmented_address addr,
1262 void *data,
1263 unsigned size)
1264{
1265 int rc;
1266 ulong linear;
1267
1268 rc = linearize(ctxt, addr, size, false, &linear);
1269 if (rc != X86EMUL_CONTINUE)
1270 return rc;
1271 return read_emulated(ctxt, linear, data, size);
1272}
1273
1274static int segmented_write(struct x86_emulate_ctxt *ctxt,
1275 struct segmented_address addr,
1276 const void *data,
1277 unsigned size)
1278{
1279 int rc;
1280 ulong linear;
1281
1282 rc = linearize(ctxt, addr, size, true, &linear);
1283 if (rc != X86EMUL_CONTINUE)
1284 return rc;
1285 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1286 &ctxt->exception);
1287}
1288
1289static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1290 struct segmented_address addr,
1291 const void *orig_data, const void *data,
1292 unsigned size)
1293{
1294 int rc;
1295 ulong linear;
1296
1297 rc = linearize(ctxt, addr, size, true, &linear);
1298 if (rc != X86EMUL_CONTINUE)
1299 return rc;
1300 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1301 size, &ctxt->exception);
1302}
1303
1304static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1305 unsigned int size, unsigned short port,
1306 void *dest)
1307{
1308 struct read_cache *rc = &ctxt->io_read;
1309
1310 if (rc->pos == rc->end) {
1311 unsigned int in_page, n;
1312 unsigned int count = ctxt->rep_prefix ?
1313 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1314 in_page = (ctxt->eflags & EFLG_DF) ?
1315 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1316 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1317 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1318 count);
1319 if (n == 0)
1320 n = 1;
1321 rc->pos = rc->end = 0;
1322 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1323 return 0;
1324 rc->end = n * size;
1325 }
1326
1327 if (ctxt->rep_prefix && !(ctxt->eflags & EFLG_DF)) {
1328 ctxt->dst.data = rc->data + rc->pos;
1329 ctxt->dst.type = OP_MEM_STR;
1330 ctxt->dst.count = (rc->end - rc->pos) / size;
1331 rc->pos = rc->end;
1332 } else {
1333 memcpy(dest, rc->data + rc->pos, size);
1334 rc->pos += size;
1335 }
1336 return 1;
1337}
1338
1339static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1340 u16 index, struct desc_struct *desc)
1341{
1342 struct desc_ptr dt;
1343 ulong addr;
1344
1345 ctxt->ops->get_idt(ctxt, &dt);
1346
1347 if (dt.size < index * 8 + 7)
1348 return emulate_gp(ctxt, index << 3 | 0x2);
1349
1350 addr = dt.address + index * 8;
1351 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1352 &ctxt->exception);
1353}
1354
1355static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1356 u16 selector, struct desc_ptr *dt)
1357{
1358 const struct x86_emulate_ops *ops = ctxt->ops;
1359
1360 if (selector & 1 << 2) {
1361 struct desc_struct desc;
1362 u16 sel;
1363
1364 memset (dt, 0, sizeof *dt);
1365 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1366 return;
1367
1368 dt->size = desc_limit_scaled(&desc);
1369 dt->address = get_desc_base(&desc);
1370 } else
1371 ops->get_gdt(ctxt, dt);
1372}
1373
1374
1375static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1376 u16 selector, struct desc_struct *desc,
1377 ulong *desc_addr_p)
1378{
1379 struct desc_ptr dt;
1380 u16 index = selector >> 3;
1381 ulong addr;
1382
1383 get_descriptor_table_ptr(ctxt, selector, &dt);
1384
1385 if (dt.size < index * 8 + 7)
1386 return emulate_gp(ctxt, selector & 0xfffc);
1387
1388 *desc_addr_p = addr = dt.address + index * 8;
1389 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1390 &ctxt->exception);
1391}
1392
1393
1394static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1395 u16 selector, struct desc_struct *desc)
1396{
1397 struct desc_ptr dt;
1398 u16 index = selector >> 3;
1399 ulong addr;
1400
1401 get_descriptor_table_ptr(ctxt, selector, &dt);
1402
1403 if (dt.size < index * 8 + 7)
1404 return emulate_gp(ctxt, selector & 0xfffc);
1405
1406 addr = dt.address + index * 8;
1407 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1408 &ctxt->exception);
1409}
1410
1411
1412static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1413 u16 selector, int seg)
1414{
1415 struct desc_struct seg_desc, old_desc;
1416 u8 dpl, rpl, cpl;
1417 unsigned err_vec = GP_VECTOR;
1418 u32 err_code = 0;
1419 bool null_selector = !(selector & ~0x3);
1420 ulong desc_addr;
1421 int ret;
1422 u16 dummy;
1423
1424 memset(&seg_desc, 0, sizeof seg_desc);
1425
1426 if (ctxt->mode == X86EMUL_MODE_REAL) {
1427
1428
1429 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1430 set_desc_base(&seg_desc, selector << 4);
1431 goto load;
1432 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1433
1434 set_desc_base(&seg_desc, selector << 4);
1435 set_desc_limit(&seg_desc, 0xffff);
1436 seg_desc.type = 3;
1437 seg_desc.p = 1;
1438 seg_desc.s = 1;
1439 seg_desc.dpl = 3;
1440 goto load;
1441 }
1442
1443 rpl = selector & 3;
1444 cpl = ctxt->ops->cpl(ctxt);
1445
1446
1447 if ((seg == VCPU_SREG_CS
1448 || (seg == VCPU_SREG_SS
1449 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1450 || seg == VCPU_SREG_TR)
1451 && null_selector)
1452 goto exception;
1453
1454
1455 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1456 goto exception;
1457
1458 if (null_selector)
1459 goto load;
1460
1461 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1462 if (ret != X86EMUL_CONTINUE)
1463 return ret;
1464
1465 err_code = selector & 0xfffc;
1466 err_vec = GP_VECTOR;
1467
1468
1469 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1470 goto exception;
1471
1472 if (!seg_desc.p) {
1473 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1474 goto exception;
1475 }
1476
1477 dpl = seg_desc.dpl;
1478
1479 switch (seg) {
1480 case VCPU_SREG_SS:
1481
1482
1483
1484
1485 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1486 goto exception;
1487 break;
1488 case VCPU_SREG_CS:
1489 if (!(seg_desc.type & 8))
1490 goto exception;
1491
1492 if (seg_desc.type & 4) {
1493
1494 if (dpl > cpl)
1495 goto exception;
1496 } else {
1497
1498 if (rpl > cpl || dpl != cpl)
1499 goto exception;
1500 }
1501
1502 selector = (selector & 0xfffc) | cpl;
1503 break;
1504 case VCPU_SREG_TR:
1505 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1506 goto exception;
1507 old_desc = seg_desc;
1508 seg_desc.type |= 2;
1509 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1510 sizeof(seg_desc), &ctxt->exception);
1511 if (ret != X86EMUL_CONTINUE)
1512 return ret;
1513 break;
1514 case VCPU_SREG_LDTR:
1515 if (seg_desc.s || seg_desc.type != 2)
1516 goto exception;
1517 break;
1518 default:
1519
1520
1521
1522
1523
1524 if ((seg_desc.type & 0xa) == 0x8 ||
1525 (((seg_desc.type & 0xc) != 0xc) &&
1526 (rpl > dpl && cpl > dpl)))
1527 goto exception;
1528 break;
1529 }
1530
1531 if (seg_desc.s) {
1532
1533 seg_desc.type |= 1;
1534 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1535 if (ret != X86EMUL_CONTINUE)
1536 return ret;
1537 }
1538load:
1539 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1540 return X86EMUL_CONTINUE;
1541exception:
1542 emulate_exception(ctxt, err_vec, err_code, true);
1543 return X86EMUL_PROPAGATE_FAULT;
1544}
1545
1546static void write_register_operand(struct operand *op)
1547{
1548
1549 switch (op->bytes) {
1550 case 1:
1551 *(u8 *)op->addr.reg = (u8)op->val;
1552 break;
1553 case 2:
1554 *(u16 *)op->addr.reg = (u16)op->val;
1555 break;
1556 case 4:
1557 *op->addr.reg = (u32)op->val;
1558 break;
1559 case 8:
1560 *op->addr.reg = op->val;
1561 break;
1562 }
1563}
1564
1565static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1566{
1567 int rc;
1568
1569 switch (op->type) {
1570 case OP_REG:
1571 write_register_operand(op);
1572 break;
1573 case OP_MEM:
1574 if (ctxt->lock_prefix)
1575 rc = segmented_cmpxchg(ctxt,
1576 op->addr.mem,
1577 &op->orig_val,
1578 &op->val,
1579 op->bytes);
1580 else
1581 rc = segmented_write(ctxt,
1582 op->addr.mem,
1583 &op->val,
1584 op->bytes);
1585 if (rc != X86EMUL_CONTINUE)
1586 return rc;
1587 break;
1588 case OP_MEM_STR:
1589 rc = segmented_write(ctxt,
1590 op->addr.mem,
1591 op->data,
1592 op->bytes * op->count);
1593 if (rc != X86EMUL_CONTINUE)
1594 return rc;
1595 break;
1596 case OP_XMM:
1597 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1598 break;
1599 case OP_MM:
1600 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1601 break;
1602 case OP_NONE:
1603
1604 break;
1605 default:
1606 break;
1607 }
1608 return X86EMUL_CONTINUE;
1609}
1610
1611static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1612{
1613 struct segmented_address addr;
1614
1615 rsp_increment(ctxt, -bytes);
1616 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1617 addr.seg = VCPU_SREG_SS;
1618
1619 return segmented_write(ctxt, addr, data, bytes);
1620}
1621
1622static int em_push(struct x86_emulate_ctxt *ctxt)
1623{
1624
1625 ctxt->dst.type = OP_NONE;
1626 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1627}
1628
1629static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1630 void *dest, int len)
1631{
1632 int rc;
1633 struct segmented_address addr;
1634
1635 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1636 addr.seg = VCPU_SREG_SS;
1637 rc = segmented_read(ctxt, addr, dest, len);
1638 if (rc != X86EMUL_CONTINUE)
1639 return rc;
1640
1641 rsp_increment(ctxt, len);
1642 return rc;
1643}
1644
1645static int em_pop(struct x86_emulate_ctxt *ctxt)
1646{
1647 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1648}
1649
1650static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1651 void *dest, int len)
1652{
1653 int rc;
1654 unsigned long val, change_mask;
1655 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1656 int cpl = ctxt->ops->cpl(ctxt);
1657
1658 rc = emulate_pop(ctxt, &val, len);
1659 if (rc != X86EMUL_CONTINUE)
1660 return rc;
1661
1662 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1663 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1664
1665 switch(ctxt->mode) {
1666 case X86EMUL_MODE_PROT64:
1667 case X86EMUL_MODE_PROT32:
1668 case X86EMUL_MODE_PROT16:
1669 if (cpl == 0)
1670 change_mask |= EFLG_IOPL;
1671 if (cpl <= iopl)
1672 change_mask |= EFLG_IF;
1673 break;
1674 case X86EMUL_MODE_VM86:
1675 if (iopl < 3)
1676 return emulate_gp(ctxt, 0);
1677 change_mask |= EFLG_IF;
1678 break;
1679 default:
1680 change_mask |= (EFLG_IOPL | EFLG_IF);
1681 break;
1682 }
1683
1684 *(unsigned long *)dest =
1685 (ctxt->eflags & ~change_mask) | (val & change_mask);
1686
1687 return rc;
1688}
1689
1690static int em_popf(struct x86_emulate_ctxt *ctxt)
1691{
1692 ctxt->dst.type = OP_REG;
1693 ctxt->dst.addr.reg = &ctxt->eflags;
1694 ctxt->dst.bytes = ctxt->op_bytes;
1695 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1696}
1697
1698static int em_enter(struct x86_emulate_ctxt *ctxt)
1699{
1700 int rc;
1701 unsigned frame_size = ctxt->src.val;
1702 unsigned nesting_level = ctxt->src2.val & 31;
1703 ulong rbp;
1704
1705 if (nesting_level)
1706 return X86EMUL_UNHANDLEABLE;
1707
1708 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1709 rc = push(ctxt, &rbp, stack_size(ctxt));
1710 if (rc != X86EMUL_CONTINUE)
1711 return rc;
1712 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1713 stack_mask(ctxt));
1714 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1715 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1716 stack_mask(ctxt));
1717 return X86EMUL_CONTINUE;
1718}
1719
1720static int em_leave(struct x86_emulate_ctxt *ctxt)
1721{
1722 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1723 stack_mask(ctxt));
1724 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1725}
1726
1727static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1728{
1729 int seg = ctxt->src2.val;
1730
1731 ctxt->src.val = get_segment_selector(ctxt, seg);
1732
1733 return em_push(ctxt);
1734}
1735
1736static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1737{
1738 int seg = ctxt->src2.val;
1739 unsigned long selector;
1740 int rc;
1741
1742 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1743 if (rc != X86EMUL_CONTINUE)
1744 return rc;
1745
1746 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1747 return rc;
1748}
1749
1750static int em_pusha(struct x86_emulate_ctxt *ctxt)
1751{
1752 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1753 int rc = X86EMUL_CONTINUE;
1754 int reg = VCPU_REGS_RAX;
1755
1756 while (reg <= VCPU_REGS_RDI) {
1757 (reg == VCPU_REGS_RSP) ?
1758 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1759
1760 rc = em_push(ctxt);
1761 if (rc != X86EMUL_CONTINUE)
1762 return rc;
1763
1764 ++reg;
1765 }
1766
1767 return rc;
1768}
1769
1770static int em_pushf(struct x86_emulate_ctxt *ctxt)
1771{
1772 ctxt->src.val = (unsigned long)ctxt->eflags;
1773 return em_push(ctxt);
1774}
1775
1776static int em_popa(struct x86_emulate_ctxt *ctxt)
1777{
1778 int rc = X86EMUL_CONTINUE;
1779 int reg = VCPU_REGS_RDI;
1780
1781 while (reg >= VCPU_REGS_RAX) {
1782 if (reg == VCPU_REGS_RSP) {
1783 rsp_increment(ctxt, ctxt->op_bytes);
1784 --reg;
1785 }
1786
1787 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1788 if (rc != X86EMUL_CONTINUE)
1789 break;
1790 --reg;
1791 }
1792 return rc;
1793}
1794
1795static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1796{
1797 const struct x86_emulate_ops *ops = ctxt->ops;
1798 int rc;
1799 struct desc_ptr dt;
1800 gva_t cs_addr;
1801 gva_t eip_addr;
1802 u16 cs, eip;
1803
1804
1805 ctxt->src.val = ctxt->eflags;
1806 rc = em_push(ctxt);
1807 if (rc != X86EMUL_CONTINUE)
1808 return rc;
1809
1810 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1811
1812 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1813 rc = em_push(ctxt);
1814 if (rc != X86EMUL_CONTINUE)
1815 return rc;
1816
1817 ctxt->src.val = ctxt->_eip;
1818 rc = em_push(ctxt);
1819 if (rc != X86EMUL_CONTINUE)
1820 return rc;
1821
1822 ops->get_idt(ctxt, &dt);
1823
1824 eip_addr = dt.address + (irq << 2);
1825 cs_addr = dt.address + (irq << 2) + 2;
1826
1827 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1828 if (rc != X86EMUL_CONTINUE)
1829 return rc;
1830
1831 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1832 if (rc != X86EMUL_CONTINUE)
1833 return rc;
1834
1835 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1836 if (rc != X86EMUL_CONTINUE)
1837 return rc;
1838
1839 ctxt->_eip = eip;
1840
1841 return rc;
1842}
1843
1844int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1845{
1846 int rc;
1847
1848 invalidate_registers(ctxt);
1849 rc = __emulate_int_real(ctxt, irq);
1850 if (rc == X86EMUL_CONTINUE)
1851 writeback_registers(ctxt);
1852 return rc;
1853}
1854
1855static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1856{
1857 switch(ctxt->mode) {
1858 case X86EMUL_MODE_REAL:
1859 return __emulate_int_real(ctxt, irq);
1860 case X86EMUL_MODE_VM86:
1861 case X86EMUL_MODE_PROT16:
1862 case X86EMUL_MODE_PROT32:
1863 case X86EMUL_MODE_PROT64:
1864 default:
1865
1866 return X86EMUL_UNHANDLEABLE;
1867 }
1868}
1869
1870static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1871{
1872 int rc = X86EMUL_CONTINUE;
1873 unsigned long temp_eip = 0;
1874 unsigned long temp_eflags = 0;
1875 unsigned long cs = 0;
1876 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1877 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1878 EFLG_AC | EFLG_ID | (1 << 1);
1879 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1880
1881
1882
1883 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1884
1885 if (rc != X86EMUL_CONTINUE)
1886 return rc;
1887
1888 if (temp_eip & ~0xffff)
1889 return emulate_gp(ctxt, 0);
1890
1891 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1892
1893 if (rc != X86EMUL_CONTINUE)
1894 return rc;
1895
1896 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1897
1898 if (rc != X86EMUL_CONTINUE)
1899 return rc;
1900
1901 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1902
1903 if (rc != X86EMUL_CONTINUE)
1904 return rc;
1905
1906 ctxt->_eip = temp_eip;
1907
1908
1909 if (ctxt->op_bytes == 4)
1910 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1911 else if (ctxt->op_bytes == 2) {
1912 ctxt->eflags &= ~0xffff;
1913 ctxt->eflags |= temp_eflags;
1914 }
1915
1916 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
1917 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1918
1919 return rc;
1920}
1921
1922static int em_iret(struct x86_emulate_ctxt *ctxt)
1923{
1924 switch(ctxt->mode) {
1925 case X86EMUL_MODE_REAL:
1926 return emulate_iret_real(ctxt);
1927 case X86EMUL_MODE_VM86:
1928 case X86EMUL_MODE_PROT16:
1929 case X86EMUL_MODE_PROT32:
1930 case X86EMUL_MODE_PROT64:
1931 default:
1932
1933 return X86EMUL_UNHANDLEABLE;
1934 }
1935}
1936
1937static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1938{
1939 int rc;
1940 unsigned short sel;
1941
1942 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1943
1944 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1945 if (rc != X86EMUL_CONTINUE)
1946 return rc;
1947
1948 ctxt->_eip = 0;
1949 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1950 return X86EMUL_CONTINUE;
1951}
1952
1953static int em_grp45(struct x86_emulate_ctxt *ctxt)
1954{
1955 int rc = X86EMUL_CONTINUE;
1956
1957 switch (ctxt->modrm_reg) {
1958 case 2: {
1959 long int old_eip;
1960 old_eip = ctxt->_eip;
1961 ctxt->_eip = ctxt->src.val;
1962 ctxt->src.val = old_eip;
1963 rc = em_push(ctxt);
1964 break;
1965 }
1966 case 4:
1967 ctxt->_eip = ctxt->src.val;
1968 break;
1969 case 5:
1970 rc = em_jmp_far(ctxt);
1971 break;
1972 case 6:
1973 rc = em_push(ctxt);
1974 break;
1975 }
1976 return rc;
1977}
1978
1979static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
1980{
1981 u64 old = ctxt->dst.orig_val64;
1982
1983 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
1984 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
1985 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
1986 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
1987 ctxt->eflags &= ~EFLG_ZF;
1988 } else {
1989 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
1990 (u32) reg_read(ctxt, VCPU_REGS_RBX);
1991
1992 ctxt->eflags |= EFLG_ZF;
1993 }
1994 return X86EMUL_CONTINUE;
1995}
1996
1997static int em_ret(struct x86_emulate_ctxt *ctxt)
1998{
1999 ctxt->dst.type = OP_REG;
2000 ctxt->dst.addr.reg = &ctxt->_eip;
2001 ctxt->dst.bytes = ctxt->op_bytes;
2002 return em_pop(ctxt);
2003}
2004
2005static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2006{
2007 int rc;
2008 unsigned long cs;
2009
2010 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
2011 if (rc != X86EMUL_CONTINUE)
2012 return rc;
2013 if (ctxt->op_bytes == 4)
2014 ctxt->_eip = (u32)ctxt->_eip;
2015 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2016 if (rc != X86EMUL_CONTINUE)
2017 return rc;
2018 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2019 return rc;
2020}
2021
2022static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2023{
2024 int rc;
2025
2026 rc = em_ret_far(ctxt);
2027 if (rc != X86EMUL_CONTINUE)
2028 return rc;
2029 rsp_increment(ctxt, ctxt->src.val);
2030 return X86EMUL_CONTINUE;
2031}
2032
2033static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2034{
2035
2036 ctxt->src.orig_val = ctxt->src.val;
2037 ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
2038 fastop(ctxt, em_cmp);
2039
2040 if (ctxt->eflags & EFLG_ZF) {
2041
2042 ctxt->dst.val = ctxt->src.orig_val;
2043 } else {
2044
2045 ctxt->dst.type = OP_REG;
2046 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2047 }
2048 return X86EMUL_CONTINUE;
2049}
2050
2051static int em_lseg(struct x86_emulate_ctxt *ctxt)
2052{
2053 int seg = ctxt->src2.val;
2054 unsigned short sel;
2055 int rc;
2056
2057 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2058
2059 rc = load_segment_descriptor(ctxt, sel, seg);
2060 if (rc != X86EMUL_CONTINUE)
2061 return rc;
2062
2063 ctxt->dst.val = ctxt->src.val;
2064 return rc;
2065}
2066
2067static void
2068setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2069 struct desc_struct *cs, struct desc_struct *ss)
2070{
2071 cs->l = 0;
2072 set_desc_base(cs, 0);
2073 cs->g = 1;
2074 set_desc_limit(cs, 0xfffff);
2075 cs->type = 0x0b;
2076 cs->s = 1;
2077 cs->dpl = 0;
2078 cs->p = 1;
2079 cs->d = 1;
2080 cs->avl = 0;
2081
2082 set_desc_base(ss, 0);
2083 set_desc_limit(ss, 0xfffff);
2084 ss->g = 1;
2085 ss->s = 1;
2086 ss->type = 0x03;
2087 ss->d = 1;
2088 ss->dpl = 0;
2089 ss->p = 1;
2090 ss->l = 0;
2091 ss->avl = 0;
2092}
2093
2094static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2095{
2096 u32 eax, ebx, ecx, edx;
2097
2098 eax = ecx = 0;
2099 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2100 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2101 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2102 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2103}
2104
2105static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2106{
2107 const struct x86_emulate_ops *ops = ctxt->ops;
2108 u32 eax, ebx, ecx, edx;
2109
2110
2111
2112
2113
2114 if (ctxt->mode == X86EMUL_MODE_PROT64)
2115 return true;
2116
2117 eax = 0x00000000;
2118 ecx = 0x00000000;
2119 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2120
2121
2122
2123
2124
2125
2126
2127
2128 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2129 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2130 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2131 return false;
2132
2133
2134 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2135 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2136 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2137 return true;
2138
2139
2140 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2141 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2142 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2143 return true;
2144
2145
2146 return false;
2147}
2148
2149static int em_syscall(struct x86_emulate_ctxt *ctxt)
2150{
2151 const struct x86_emulate_ops *ops = ctxt->ops;
2152 struct desc_struct cs, ss;
2153 u64 msr_data;
2154 u16 cs_sel, ss_sel;
2155 u64 efer = 0;
2156
2157
2158 if (ctxt->mode == X86EMUL_MODE_REAL ||
2159 ctxt->mode == X86EMUL_MODE_VM86)
2160 return emulate_ud(ctxt);
2161
2162 if (!(em_syscall_is_enabled(ctxt)))
2163 return emulate_ud(ctxt);
2164
2165 ops->get_msr(ctxt, MSR_EFER, &efer);
2166 setup_syscalls_segments(ctxt, &cs, &ss);
2167
2168 if (!(efer & EFER_SCE))
2169 return emulate_ud(ctxt);
2170
2171 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2172 msr_data >>= 32;
2173 cs_sel = (u16)(msr_data & 0xfffc);
2174 ss_sel = (u16)(msr_data + 8);
2175
2176 if (efer & EFER_LMA) {
2177 cs.d = 0;
2178 cs.l = 1;
2179 }
2180 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2181 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2182
2183 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2184 if (efer & EFER_LMA) {
2185#ifdef CONFIG_X86_64
2186 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;
2187
2188 ops->get_msr(ctxt,
2189 ctxt->mode == X86EMUL_MODE_PROT64 ?
2190 MSR_LSTAR : MSR_CSTAR, &msr_data);
2191 ctxt->_eip = msr_data;
2192
2193 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2194 ctxt->eflags &= ~(msr_data | EFLG_RF);
2195#endif
2196 } else {
2197
2198 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2199 ctxt->_eip = (u32)msr_data;
2200
2201 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2202 }
2203
2204 return X86EMUL_CONTINUE;
2205}
2206
2207static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2208{
2209 const struct x86_emulate_ops *ops = ctxt->ops;
2210 struct desc_struct cs, ss;
2211 u64 msr_data;
2212 u16 cs_sel, ss_sel;
2213 u64 efer = 0;
2214
2215 ops->get_msr(ctxt, MSR_EFER, &efer);
2216
2217 if (ctxt->mode == X86EMUL_MODE_REAL)
2218 return emulate_gp(ctxt, 0);
2219
2220
2221
2222
2223
2224 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2225 && !vendor_intel(ctxt))
2226 return emulate_ud(ctxt);
2227
2228
2229
2230
2231 if (ctxt->mode == X86EMUL_MODE_PROT64)
2232 return emulate_ud(ctxt);
2233
2234 setup_syscalls_segments(ctxt, &cs, &ss);
2235
2236 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2237 switch (ctxt->mode) {
2238 case X86EMUL_MODE_PROT32:
2239 if ((msr_data & 0xfffc) == 0x0)
2240 return emulate_gp(ctxt, 0);
2241 break;
2242 case X86EMUL_MODE_PROT64:
2243 if (msr_data == 0x0)
2244 return emulate_gp(ctxt, 0);
2245 break;
2246 default:
2247 break;
2248 }
2249
2250 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2251 cs_sel = (u16)msr_data;
2252 cs_sel &= ~SELECTOR_RPL_MASK;
2253 ss_sel = cs_sel + 8;
2254 ss_sel &= ~SELECTOR_RPL_MASK;
2255 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2256 cs.d = 0;
2257 cs.l = 1;
2258 }
2259
2260 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2261 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2262
2263 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2264 ctxt->_eip = msr_data;
2265
2266 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2267 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2268
2269 return X86EMUL_CONTINUE;
2270}
2271
2272static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2273{
2274 const struct x86_emulate_ops *ops = ctxt->ops;
2275 struct desc_struct cs, ss;
2276 u64 msr_data;
2277 int usermode;
2278 u16 cs_sel = 0, ss_sel = 0;
2279
2280
2281 if (ctxt->mode == X86EMUL_MODE_REAL ||
2282 ctxt->mode == X86EMUL_MODE_VM86)
2283 return emulate_gp(ctxt, 0);
2284
2285 setup_syscalls_segments(ctxt, &cs, &ss);
2286
2287 if ((ctxt->rex_prefix & 0x8) != 0x0)
2288 usermode = X86EMUL_MODE_PROT64;
2289 else
2290 usermode = X86EMUL_MODE_PROT32;
2291
2292 cs.dpl = 3;
2293 ss.dpl = 3;
2294 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2295 switch (usermode) {
2296 case X86EMUL_MODE_PROT32:
2297 cs_sel = (u16)(msr_data + 16);
2298 if ((msr_data & 0xfffc) == 0x0)
2299 return emulate_gp(ctxt, 0);
2300 ss_sel = (u16)(msr_data + 24);
2301 break;
2302 case X86EMUL_MODE_PROT64:
2303 cs_sel = (u16)(msr_data + 32);
2304 if (msr_data == 0x0)
2305 return emulate_gp(ctxt, 0);
2306 ss_sel = cs_sel + 8;
2307 cs.d = 0;
2308 cs.l = 1;
2309 break;
2310 }
2311 cs_sel |= SELECTOR_RPL_MASK;
2312 ss_sel |= SELECTOR_RPL_MASK;
2313
2314 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2315 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2316
2317 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
2318 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
2319
2320 return X86EMUL_CONTINUE;
2321}
2322
2323static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2324{
2325 int iopl;
2326 if (ctxt->mode == X86EMUL_MODE_REAL)
2327 return false;
2328 if (ctxt->mode == X86EMUL_MODE_VM86)
2329 return true;
2330 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2331 return ctxt->ops->cpl(ctxt) > iopl;
2332}
2333
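/*
 * Consult the I/O permission bitmap in the TSS: TR must be present and at
 * least 104 bytes long (the bitmap offset lives at byte 102), the bitmap
 * bytes covering the port must fall inside the TSS limit, and every bit for
 * the accessed port range must be clear for the access to be allowed.
 */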
2334static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2335 u16 port, u16 len)
2336{
2337 const struct x86_emulate_ops *ops = ctxt->ops;
2338 struct desc_struct tr_seg;
2339 u32 base3;
2340 int r;
2341 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2342 unsigned mask = (1 << len) - 1;
2343 unsigned long base;
2344
2345 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2346 if (!tr_seg.p)
2347 return false;
2348 if (desc_limit_scaled(&tr_seg) < 103)
2349 return false;
2350 base = get_desc_base(&tr_seg);
2351#ifdef CONFIG_X86_64
2352 base |= ((u64)base3) << 32;
2353#endif
2354 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2355 if (r != X86EMUL_CONTINUE)
2356 return false;
2357 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2358 return false;
2359 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2360 if (r != X86EMUL_CONTINUE)
2361 return false;
2362 if ((perm >> bit_idx) & mask)
2363 return false;
2364 return true;
2365}
2366
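/*
 * Port I/O needs no bitmap check while CPL <= IOPL (or in real mode); in
 * VM86 mode or when CPL > IOPL the TSS permission bitmap decides. A positive
 * result is cached in ctxt->perm_ok so a repeated string I/O instruction
 * only performs the check once.
 */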
2367static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2368 u16 port, u16 len)
2369{
2370 if (ctxt->perm_ok)
2371 return true;
2372
2373 if (emulator_bad_iopl(ctxt))
2374 if (!emulator_io_port_access_allowed(ctxt, port, len))
2375 return false;
2376
2377 ctxt->perm_ok = true;
2378
2379 return true;
2380}
2381
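/* Snapshot the registers and segment selectors into the 16-bit TSS image. */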
2382static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2383 struct tss_segment_16 *tss)
2384{
2385 tss->ip = ctxt->_eip;
2386 tss->flag = ctxt->eflags;
2387 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2388 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2389 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2390 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2391 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2392 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2393 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2394 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2395
2396 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2397 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2398 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2399 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2400 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2401}
2402
2403static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2404 struct tss_segment_16 *tss)
2405{
2406 int ret;
2407
2408 ctxt->_eip = tss->ip;
2409 ctxt->eflags = tss->flag | 2;
2410 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2411 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2412 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2413 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2414 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2415 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2416 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2417 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2418
2419 /*
2420 * SDM says that segment selectors are loaded before segment
2421 * descriptors.
2422 */
2423 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2424 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2425 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2426 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2427 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2428
2429 /*
2430 * Now load the segment descriptors. If a fault happens at this
2431 * stage it is handled in the context of the new task.
2432 */
2433 ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2434 if (ret != X86EMUL_CONTINUE)
2435 return ret;
2436 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2437 if (ret != X86EMUL_CONTINUE)
2438 return ret;
2439 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2440 if (ret != X86EMUL_CONTINUE)
2441 return ret;
2442 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2443 if (ret != X86EMUL_CONTINUE)
2444 return ret;
2445 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2446 if (ret != X86EMUL_CONTINUE)
2447 return ret;
2448
2449 return X86EMUL_CONTINUE;
2450}
2451
2452static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2453 u16 tss_selector, u16 old_tss_sel,
2454 ulong old_tss_base, struct desc_struct *new_desc)
2455{
2456 const struct x86_emulate_ops *ops = ctxt->ops;
2457 struct tss_segment_16 tss_seg;
2458 int ret;
2459 u32 new_tss_base = get_desc_base(new_desc);
2460
2461 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2462 &ctxt->exception);
2463 if (ret != X86EMUL_CONTINUE)
2464 /* FIXME: need to provide precise fault address */
2465 return ret;
2466
2467 save_state_to_tss16(ctxt, &tss_seg);
2468
2469 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2470 &ctxt->exception);
2471 if (ret != X86EMUL_CONTINUE)
2472 /* FIXME: need to provide precise fault address */
2473 return ret;
2474
2475 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2476 &ctxt->exception);
2477 if (ret != X86EMUL_CONTINUE)
2478 /* FIXME: need to provide precise fault address */
2479 return ret;
2480
2481 if (old_tss_sel != 0xffff) {
2482 tss_seg.prev_task_link = old_tss_sel;
2483
2484 ret = ops->write_std(ctxt, new_tss_base,
2485 &tss_seg.prev_task_link,
2486 sizeof tss_seg.prev_task_link,
2487 &ctxt->exception);
2488 if (ret != X86EMUL_CONTINUE)
2489 /* FIXME: need to provide precise fault address */
2490 return ret;
2491 }
2492
2493 return load_state_from_tss16(ctxt, &tss_seg);
2494}
2495
2496static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2497 struct tss_segment_32 *tss)
2498{
2499 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2500 tss->eip = ctxt->_eip;
2501 tss->eflags = ctxt->eflags;
2502 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2503 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2504 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2505 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2506 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2507 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2508 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2509 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2510
2511 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2512 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2513 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2514 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2515 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2516 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2517 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2518}
2519
2520static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2521 struct tss_segment_32 *tss)
2522{
2523 int ret;
2524
2525 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2526 return emulate_gp(ctxt, 0);
2527 ctxt->_eip = tss->eip;
2528 ctxt->eflags = tss->eflags | 2;
2529
2530 /* General purpose registers */
2531 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2532 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2533 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2534 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2535 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2536 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2537 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2538 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2539
2540 /*
2541 * SDM says that segment selectors are loaded before segment
2542 * descriptors.
2543 */
2544 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2545 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2546 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2547 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2548 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2549 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2550 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2551
2552 /*
2553 * If we're switching between Protected Mode and VM86, we need to make
2554 * sure to update the mode before loading the segment descriptors so
2555 * that the selectors are interpreted correctly.
2556 *
2557 * The new flags also have to reach the vcpu before the descriptors are
2558 * loaded, because they influence the CPL that is checked while the
2559 * segment descriptors are loaded during the task switch, and may also
2560 * be used during the transition to VM86 mode of the guest.
2561 */
2562
2563 if (ctxt->eflags & X86_EFLAGS_VM)
2564 ctxt->mode = X86EMUL_MODE_VM86;
2565 else
2566 ctxt->mode = X86EMUL_MODE_PROT32;
2567
2568 ctxt->ops->set_rflags(ctxt, ctxt->eflags);
2569
2570 /*
2571 * Now load the segment descriptors. If a fault happens at this
2572 * stage it is handled in the context of the new task.
2573 */
2574 ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2575 if (ret != X86EMUL_CONTINUE)
2576 return ret;
2577 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2578 if (ret != X86EMUL_CONTINUE)
2579 return ret;
2580 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2581 if (ret != X86EMUL_CONTINUE)
2582 return ret;
2583 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2584 if (ret != X86EMUL_CONTINUE)
2585 return ret;
2586 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2587 if (ret != X86EMUL_CONTINUE)
2588 return ret;
2589 ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2590 if (ret != X86EMUL_CONTINUE)
2591 return ret;
2592 ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2593 if (ret != X86EMUL_CONTINUE)
2594 return ret;
2595
2596 return X86EMUL_CONTINUE;
2597}
2598
2599static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2600 u16 tss_selector, u16 old_tss_sel,
2601 ulong old_tss_base, struct desc_struct *new_desc)
2602{
2603 const struct x86_emulate_ops *ops = ctxt->ops;
2604 struct tss_segment_32 tss_seg;
2605 int ret;
2606 u32 new_tss_base = get_desc_base(new_desc);
2607
2608 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2609 &ctxt->exception);
2610 if (ret != X86EMUL_CONTINUE)
2611 /* FIXME: need to provide precise fault address */
2612 return ret;
2613
2614 save_state_to_tss32(ctxt, &tss_seg);
2615
2616 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2617 &ctxt->exception);
2618 if (ret != X86EMUL_CONTINUE)
2619 /* FIXME: need to provide precise fault address */
2620 return ret;
2621
2622 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2623 &ctxt->exception);
2624 if (ret != X86EMUL_CONTINUE)
2625 /* FIXME: need to provide precise fault address */
2626 return ret;
2627
2628 if (old_tss_sel != 0xffff) {
2629 tss_seg.prev_task_link = old_tss_sel;
2630
2631 ret = ops->write_std(ctxt, new_tss_base,
2632 &tss_seg.prev_task_link,
2633 sizeof tss_seg.prev_task_link,
2634 &ctxt->exception);
2635 if (ret != X86EMUL_CONTINUE)
2636 /* FIXME: need to provide precise fault address */
2637 return ret;
2638 }
2639
2640 return load_state_from_tss32(ctxt, &tss_seg);
2641}
2642
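/*
 * Common task-switch path: read the old and new TSS descriptors, apply the
 * privilege checks appropriate to the switch reason, verify the new TSS is
 * present and large enough, update the busy bits and EFLAGS.NT, save state
 * into the old TSS and reload it from the new one (16- or 32-bit format),
 * then set CR0.TS, load TR and optionally push the error code.
 */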
2643static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2644 u16 tss_selector, int idt_index, int reason,
2645 bool has_error_code, u32 error_code)
2646{
2647 const struct x86_emulate_ops *ops = ctxt->ops;
2648 struct desc_struct curr_tss_desc, next_tss_desc;
2649 int ret;
2650 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2651 ulong old_tss_base =
2652 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2653 u32 desc_limit;
2654 ulong desc_addr;
2655
2656
2657
2658 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2659 if (ret != X86EMUL_CONTINUE)
2660 return ret;
2661 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2662 if (ret != X86EMUL_CONTINUE)
2663 return ret;
2664
2665 /* FIXME: check that next_tss_desc is tss */
2666
2667 /*
2668 * Privilege checks: a task switch through a task gate raised by a
2669 * software interrupt requires the gate DPL to be >= both the new TSS
2670 * selector's RPL and the current CPL; a direct CALL or JMP applies
2671 * the same check to the new TSS descriptor's DPL. IRET and
2672 * hardware-delivered events skip the check.
2673 */
2674 if (reason == TASK_SWITCH_GATE) {
2675 if (idt_index != -1) {
2676 /* Software interrupts */
2677 struct desc_struct task_gate_desc;
2678 int dpl;
2679
2680 ret = read_interrupt_descriptor(ctxt, idt_index,
2681 &task_gate_desc);
2682 if (ret != X86EMUL_CONTINUE)
2683 return ret;
2684
2685 dpl = task_gate_desc.dpl;
2686 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2687 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2688 }
2689 } else if (reason != TASK_SWITCH_IRET) {
2690 int dpl = next_tss_desc.dpl;
2691 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2692 return emulate_gp(ctxt, tss_selector);
2693 }
2694
2695
2696 desc_limit = desc_limit_scaled(&next_tss_desc);
2697 if (!next_tss_desc.p ||
2698 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2699 desc_limit < 0x2b)) {
2700 emulate_ts(ctxt, tss_selector & 0xfffc);
2701 return X86EMUL_PROPAGATE_FAULT;
2702 }
2703
2704 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2705 curr_tss_desc.type &= ~(1 << 1);
2706 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2707 }
2708
2709 if (reason == TASK_SWITCH_IRET)
2710 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2711
2712 /* Set the back link to the previous task only for CALL and task gates;
2713 * note that old_tss_sel stays 0xffff (invalid) otherwise. */
2714 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2715 old_tss_sel = 0xffff;
2716
2717 if (next_tss_desc.type & 8)
2718 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2719 old_tss_base, &next_tss_desc);
2720 else
2721 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2722 old_tss_base, &next_tss_desc);
2723 if (ret != X86EMUL_CONTINUE)
2724 return ret;
2725
2726 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2727 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2728
2729 if (reason != TASK_SWITCH_IRET) {
2730 next_tss_desc.type |= (1 << 1);
2731 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2732 }
2733
2734 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2735 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2736
2737 if (has_error_code) {
2738 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2739 ctxt->lock_prefix = 0;
2740 ctxt->src.val = (unsigned long) error_code;
2741 ret = em_push(ctxt);
2742 }
2743
2744 return ret;
2745}
2746
2747int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2748 u16 tss_selector, int idt_index, int reason,
2749 bool has_error_code, u32 error_code)
2750{
2751 int rc;
2752
2753 invalidate_registers(ctxt);
2754 ctxt->_eip = ctxt->eip;
2755 ctxt->dst.type = OP_NONE;
2756
2757 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2758 has_error_code, error_code);
2759
2760 if (rc == X86EMUL_CONTINUE) {
2761 ctxt->eip = ctxt->_eip;
2762 writeback_registers(ctxt);
2763 }
2764
2765 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2766}
2767
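/*
 * Advance the SI/DI register used by a string instruction by count elements
 * of op->bytes each, moving backwards when EFLAGS.DF is set, and recompute
 * the operand's effective address from the updated register.
 */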
2768static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2769 struct operand *op)
2770{
2771 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2772
2773 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2774 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2775}
2776
2777static int em_das(struct x86_emulate_ctxt *ctxt)
2778{
2779 u8 al, old_al;
2780 bool af, cf, old_cf;
2781
2782 cf = ctxt->eflags & X86_EFLAGS_CF;
2783 al = ctxt->dst.val;
2784
2785 old_al = al;
2786 old_cf = cf;
2787 cf = false;
2788 af = ctxt->eflags & X86_EFLAGS_AF;
2789 if ((al & 0x0f) > 9 || af) {
2790 al -= 6;
2791 cf = old_cf | (al >= 250);
2792 af = true;
2793 } else {
2794 af = false;
2795 }
2796 if (old_al > 0x99 || old_cf) {
2797 al -= 0x60;
2798 cf = true;
2799 }
2800
2801 ctxt->dst.val = al;
2802 /* Set PF, ZF, SF */
2803 ctxt->src.type = OP_IMM;
2804 ctxt->src.val = 0;
2805 ctxt->src.bytes = 1;
2806 fastop(ctxt, em_or);
2807 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2808 if (cf)
2809 ctxt->eflags |= X86_EFLAGS_CF;
2810 if (af)
2811 ctxt->eflags |= X86_EFLAGS_AF;
2812 return X86EMUL_CONTINUE;
2813}
2814
2815static int em_aam(struct x86_emulate_ctxt *ctxt)
2816{
2817 u8 al, ah;
2818
2819 if (ctxt->src.val == 0)
2820 return emulate_de(ctxt);
2821
2822 al = ctxt->dst.val & 0xff;
2823 ah = al / ctxt->src.val;
2824 al %= ctxt->src.val;
2825
2826 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2827
2828 /* Set PF, ZF, SF */
2829 ctxt->src.type = OP_IMM;
2830 ctxt->src.val = 0;
2831 ctxt->src.bytes = 1;
2832 fastop(ctxt, em_or);
2833
2834 return X86EMUL_CONTINUE;
2835}
2836
2837static int em_aad(struct x86_emulate_ctxt *ctxt)
2838{
2839 u8 al = ctxt->dst.val & 0xff;
2840 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2841
2842 al = (al + (ah * ctxt->src.val)) & 0xff;
2843
2844 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2845
2846 /* Set PF, ZF, SF */
2847 ctxt->src.type = OP_IMM;
2848 ctxt->src.val = 0;
2849 ctxt->src.bytes = 1;
2850 fastop(ctxt, em_or);
2851
2852 return X86EMUL_CONTINUE;
2853}
2854
2855static int em_call(struct x86_emulate_ctxt *ctxt)
2856{
2857 long rel = ctxt->src.val;
2858
2859 ctxt->src.val = (unsigned long)ctxt->_eip;
2860 jmp_rel(ctxt, rel);
2861 return em_push(ctxt);
2862}
2863
2864static int em_call_far(struct x86_emulate_ctxt *ctxt)
2865{
2866 u16 sel, old_cs;
2867 ulong old_eip;
2868 int rc;
2869
2870 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2871 old_eip = ctxt->_eip;
2872
2873 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2874 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2875 return X86EMUL_CONTINUE;
2876
2877 ctxt->_eip = 0;
2878 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2879
2880 ctxt->src.val = old_cs;
2881 rc = em_push(ctxt);
2882 if (rc != X86EMUL_CONTINUE)
2883 return rc;
2884
2885 ctxt->src.val = old_eip;
2886 return em_push(ctxt);
2887}
2888
2889static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2890{
2891 int rc;
2892
2893 ctxt->dst.type = OP_REG;
2894 ctxt->dst.addr.reg = &ctxt->_eip;
2895 ctxt->dst.bytes = ctxt->op_bytes;
2896 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2897 if (rc != X86EMUL_CONTINUE)
2898 return rc;
2899 rsp_increment(ctxt, ctxt->src.val);
2900 return X86EMUL_CONTINUE;
2901}
2902
2903static int em_xchg(struct x86_emulate_ctxt *ctxt)
2904{
2905 /* Write back the register source. */
2906 ctxt->src.val = ctxt->dst.val;
2907 write_register_operand(&ctxt->src);
2908
2909 /* Write back the memory destination with implicit LOCK prefix. */
2910 ctxt->dst.val = ctxt->src.orig_val;
2911 ctxt->lock_prefix = 1;
2912 return X86EMUL_CONTINUE;
2913}
2914
2915static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2916{
2917 ctxt->dst.val = ctxt->src2.val;
2918 return fastop(ctxt, em_imul);
2919}
2920
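/* CWD/CDQ/CQO: replicate the sign bit of the source accumulator into rDX. */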
2921static int em_cwd(struct x86_emulate_ctxt *ctxt)
2922{
2923 ctxt->dst.type = OP_REG;
2924 ctxt->dst.bytes = ctxt->src.bytes;
2925 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
2926 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2927
2928 return X86EMUL_CONTINUE;
2929}
2930
2931static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2932{
2933 u64 tsc = 0;
2934
2935 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2936 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
2937 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
2938 return X86EMUL_CONTINUE;
2939}
2940
2941static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2942{
2943 u64 pmc;
2944
2945 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
2946 return emulate_gp(ctxt, 0);
2947 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
2948 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
2949 return X86EMUL_CONTINUE;
2950}
2951
2952static int em_mov(struct x86_emulate_ctxt *ctxt)
2953{
2954 memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
2955 return X86EMUL_CONTINUE;
2956}
2957
2958#define FFL(x) bit(X86_FEATURE_##x)
2959
2960static int em_movbe(struct x86_emulate_ctxt *ctxt)
2961{
2962 u32 ebx, ecx, edx, eax = 1;
2963 u16 tmp;
2964
2965 /*
2966 * Check that MOVBE is set in the guest-visible CPUID leaf.
2967 */
2968 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2969 if (!(ecx & FFL(MOVBE)))
2970 return emulate_ud(ctxt);
2971
2972 switch (ctxt->op_bytes) {
2973 case 2:
2974 /*
2975 * From the MOVBE definition: "...When the operand size is 16 bits,
2976 * the upper word of the destination register remains unchanged..."
2977 *
2978 * Casting ->valptr or ->val to u16 would break strict-aliasing
2979 * rules, so the 16-bit byte swap is done by hand on a temporary and
2980 * merged into the low word of the destination.
2981 */
2982 tmp = (u16)ctxt->src.val;
2983 ctxt->dst.val &= ~0xffffUL;
2984 ctxt->dst.val |= (unsigned long)swab16(tmp);
2985 break;
2986 case 4:
2987 ctxt->dst.val = swab32((u32)ctxt->src.val);
2988 break;
2989 case 8:
2990 ctxt->dst.val = swab64(ctxt->src.val);
2991 break;
2992 default:
2993 return X86EMUL_PROPAGATE_FAULT;
2994 }
2995 return X86EMUL_CONTINUE;
2996}
2997
2998static int em_cr_write(struct x86_emulate_ctxt *ctxt)
2999{
3000 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3001 return emulate_gp(ctxt, 0);
3002
3003 /* Disable writeback. */
3004 ctxt->dst.type = OP_NONE;
3005 return X86EMUL_CONTINUE;
3006}
3007
3008static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3009{
3010 unsigned long val;
3011
3012 if (ctxt->mode == X86EMUL_MODE_PROT64)
3013 val = ctxt->src.val & ~0ULL;
3014 else
3015 val = ctxt->src.val & ~0U;
3016
3017 /* #UD condition is already handled. */
3018 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3019 return emulate_gp(ctxt, 0);
3020
3021 /* Disable writeback. */
3022 ctxt->dst.type = OP_NONE;
3023 return X86EMUL_CONTINUE;
3024}
3025
3026static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3027{
3028 u64 msr_data;
3029
3030 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3031 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3032 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3033 return emulate_gp(ctxt, 0);
3034
3035 return X86EMUL_CONTINUE;
3036}
3037
3038static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3039{
3040 u64 msr_data;
3041
3042 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3043 return emulate_gp(ctxt, 0);
3044
3045 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3046 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3047 return X86EMUL_CONTINUE;
3048}
3049
3050static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3051{
3052 if (ctxt->modrm_reg > VCPU_SREG_GS)
3053 return emulate_ud(ctxt);
3054
3055 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3056 return X86EMUL_CONTINUE;
3057}
3058
3059static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3060{
3061 u16 sel = ctxt->src.val;
3062
3063 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3064 return emulate_ud(ctxt);
3065
3066 if (ctxt->modrm_reg == VCPU_SREG_SS)
3067 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3068
3069 /* Disable writeback. */
3070 ctxt->dst.type = OP_NONE;
3071 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3072}
3073
3074static int em_lldt(struct x86_emulate_ctxt *ctxt)
3075{
3076 u16 sel = ctxt->src.val;
3077
3078
3079 ctxt->dst.type = OP_NONE;
3080 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3081}
3082
3083static int em_ltr(struct x86_emulate_ctxt *ctxt)
3084{
3085 u16 sel = ctxt->src.val;
3086
3087
3088 ctxt->dst.type = OP_NONE;
3089 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3090}
3091
3092static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3093{
3094 int rc;
3095 ulong linear;
3096
3097 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3098 if (rc == X86EMUL_CONTINUE)
3099 ctxt->ops->invlpg(ctxt, linear);
3100
3101 ctxt->dst.type = OP_NONE;
3102 return X86EMUL_CONTINUE;
3103}
3104
3105static int em_clts(struct x86_emulate_ctxt *ctxt)
3106{
3107 ulong cr0;
3108
3109 cr0 = ctxt->ops->get_cr(ctxt, 0);
3110 cr0 &= ~X86_CR0_TS;
3111 ctxt->ops->set_cr(ctxt, 0, cr0);
3112 return X86EMUL_CONTINUE;
3113}
3114
3115static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3116{
3117 int rc;
3118
3119 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
3120 return X86EMUL_UNHANDLEABLE;
3121
3122 rc = ctxt->ops->fix_hypercall(ctxt);
3123 if (rc != X86EMUL_CONTINUE)
3124 return rc;
3125
3126 /* Let the processor re-execute the fixed hypercall */
3127 ctxt->_eip = ctxt->eip;
3128 /* Disable writeback. */
3129 ctxt->dst.type = OP_NONE;
3130 return X86EMUL_CONTINUE;
3131}
3132
3133static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3134 void (*get)(struct x86_emulate_ctxt *ctxt,
3135 struct desc_ptr *ptr))
3136{
3137 struct desc_ptr desc_ptr;
3138
3139 if (ctxt->mode == X86EMUL_MODE_PROT64)
3140 ctxt->op_bytes = 8;
3141 get(ctxt, &desc_ptr);
3142 if (ctxt->op_bytes == 2) {
3143 ctxt->op_bytes = 4;
3144 desc_ptr.address &= 0x00ffffff;
3145 }
3146
3147 ctxt->dst.type = OP_NONE;
3148 return segmented_write(ctxt, ctxt->dst.addr.mem,
3149 &desc_ptr, 2 + ctxt->op_bytes);
3150}
3151
3152static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3153{
3154 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3155}
3156
3157static int em_sidt(struct x86_emulate_ctxt *ctxt)
3158{
3159 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3160}
3161
3162static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3163{
3164 struct desc_ptr desc_ptr;
3165 int rc;
3166
3167 if (ctxt->mode == X86EMUL_MODE_PROT64)
3168 ctxt->op_bytes = 8;
3169 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3170 &desc_ptr.size, &desc_ptr.address,
3171 ctxt->op_bytes);
3172 if (rc != X86EMUL_CONTINUE)
3173 return rc;
3174 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3175
3176 ctxt->dst.type = OP_NONE;
3177 return X86EMUL_CONTINUE;
3178}
3179
3180static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3181{
3182 int rc;
3183
3184 rc = ctxt->ops->fix_hypercall(ctxt);
3185
3186
3187 ctxt->dst.type = OP_NONE;
3188 return rc;
3189}
3190
3191static int em_lidt(struct x86_emulate_ctxt *ctxt)
3192{
3193 struct desc_ptr desc_ptr;
3194 int rc;
3195
3196 if (ctxt->mode == X86EMUL_MODE_PROT64)
3197 ctxt->op_bytes = 8;
3198 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3199 &desc_ptr.size, &desc_ptr.address,
3200 ctxt->op_bytes);
3201 if (rc != X86EMUL_CONTINUE)
3202 return rc;
3203 ctxt->ops->set_idt(ctxt, &desc_ptr);
3204
3205 ctxt->dst.type = OP_NONE;
3206 return X86EMUL_CONTINUE;
3207}
3208
3209static int em_smsw(struct x86_emulate_ctxt *ctxt)
3210{
3211 ctxt->dst.bytes = 2;
3212 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3213 return X86EMUL_CONTINUE;
3214}
3215
3216static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3217{
3218 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3219 | (ctxt->src.val & 0x0f));
3220 ctxt->dst.type = OP_NONE;
3221 return X86EMUL_CONTINUE;
3222}
3223
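/*
 * LOOP/LOOPE/LOOPNE (0xe0-0xe2): decrement rCX and take the branch while it
 * is non-zero; for 0xe0/0xe1 the ZF condition must also hold, which is what
 * test_cc(ctxt->b ^ 0x5, ...) evaluates.
 */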
3224static int em_loop(struct x86_emulate_ctxt *ctxt)
3225{
3226 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3227 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3228 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3229 jmp_rel(ctxt, ctxt->src.val);
3230
3231 return X86EMUL_CONTINUE;
3232}
3233
3234static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3235{
3236 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3237 jmp_rel(ctxt, ctxt->src.val);
3238
3239 return X86EMUL_CONTINUE;
3240}
3241
3242static int em_in(struct x86_emulate_ctxt *ctxt)
3243{
3244 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3245 &ctxt->dst.val))
3246 return X86EMUL_IO_NEEDED;
3247
3248 return X86EMUL_CONTINUE;
3249}
3250
3251static int em_out(struct x86_emulate_ctxt *ctxt)
3252{
3253 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3254 &ctxt->src.val, 1);
3255
3256 ctxt->dst.type = OP_NONE;
3257 return X86EMUL_CONTINUE;
3258}
3259
3260static int em_cli(struct x86_emulate_ctxt *ctxt)
3261{
3262 if (emulator_bad_iopl(ctxt))
3263 return emulate_gp(ctxt, 0);
3264
3265 ctxt->eflags &= ~X86_EFLAGS_IF;
3266 return X86EMUL_CONTINUE;
3267}
3268
3269static int em_sti(struct x86_emulate_ctxt *ctxt)
3270{
3271 if (emulator_bad_iopl(ctxt))
3272 return emulate_gp(ctxt, 0);
3273
3274 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3275 ctxt->eflags |= X86_EFLAGS_IF;
3276 return X86EMUL_CONTINUE;
3277}
3278
3279static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3280{
3281 u32 eax, ebx, ecx, edx;
3282
3283 eax = reg_read(ctxt, VCPU_REGS_RAX);
3284 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3285 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3286 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3287 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3288 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3289 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3290 return X86EMUL_CONTINUE;
3291}
3292
3293static int em_sahf(struct x86_emulate_ctxt *ctxt)
3294{
3295 u32 flags;
3296
3297 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3298 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3299
3300 ctxt->eflags &= ~0xffUL;
3301 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3302 return X86EMUL_CONTINUE;
3303}
3304
3305static int em_lahf(struct x86_emulate_ctxt *ctxt)
3306{
3307 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3308 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3309 return X86EMUL_CONTINUE;
3310}
3311
3312static int em_bswap(struct x86_emulate_ctxt *ctxt)
3313{
3314 switch (ctxt->op_bytes) {
3315#ifdef CONFIG_X86_64
3316 case 8:
3317 asm("bswap %0" : "+r"(ctxt->dst.val));
3318 break;
3319#endif
3320 default:
3321 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3322 break;
3323 }
3324 return X86EMUL_CONTINUE;
3325}
3326
3327static bool valid_cr(int nr)
3328{
3329 switch (nr) {
3330 case 0:
3331 case 2 ... 4:
3332 case 8:
3333 return true;
3334 default:
3335 return false;
3336 }
3337}
3338
3339static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3340{
3341 if (!valid_cr(ctxt->modrm_reg))
3342 return emulate_ud(ctxt);
3343
3344 return X86EMUL_CONTINUE;
3345}
3346
3347static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3348{
3349 u64 new_val = ctxt->src.val64;
3350 int cr = ctxt->modrm_reg;
3351 u64 efer = 0;
3352
3353 static u64 cr_reserved_bits[] = {
3354 0xffffffff00000000ULL,
3355 0, 0, 0,
3356 CR4_RESERVED_BITS,
3357 0, 0, 0,
3358 CR8_RESERVED_BITS,
3359 };
3360
3361 if (!valid_cr(cr))
3362 return emulate_ud(ctxt);
3363
3364 if (new_val & cr_reserved_bits[cr])
3365 return emulate_gp(ctxt, 0);
3366
3367 switch (cr) {
3368 case 0: {
3369 u64 cr4;
3370 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3371 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3372 return emulate_gp(ctxt, 0);
3373
3374 cr4 = ctxt->ops->get_cr(ctxt, 4);
3375 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3376
3377 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3378 !(cr4 & X86_CR4_PAE))
3379 return emulate_gp(ctxt, 0);
3380
3381 break;
3382 }
3383 case 3: {
3384 u64 rsvd = 0;
3385
3386 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3387 if (efer & EFER_LMA)
3388 rsvd = CR3_L_MODE_RESERVED_BITS;
3389 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3390 rsvd = CR3_PAE_RESERVED_BITS;
3391 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3392 rsvd = CR3_NONPAE_RESERVED_BITS;
3393
3394 if (new_val & rsvd)
3395 return emulate_gp(ctxt, 0);
3396
3397 break;
3398 }
3399 case 4: {
3400 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3401
3402 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3403 return emulate_gp(ctxt, 0);
3404
3405 break;
3406 }
3407 }
3408
3409 return X86EMUL_CONTINUE;
3410}
3411
3412static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3413{
3414 unsigned long dr7;
3415
3416 ctxt->ops->get_dr(ctxt, 7, &dr7);
3417
3418 /* Check if DR7.Global_Enable is set */
3419 return dr7 & (1 << 13);
3420}
3421
3422static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3423{
3424 int dr = ctxt->modrm_reg;
3425 u64 cr4;
3426
3427 if (dr > 7)
3428 return emulate_ud(ctxt);
3429
3430 cr4 = ctxt->ops->get_cr(ctxt, 4);
3431 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3432 return emulate_ud(ctxt);
3433
3434 if (check_dr7_gd(ctxt))
3435 return emulate_db(ctxt);
3436
3437 return X86EMUL_CONTINUE;
3438}
3439
3440static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3441{
3442 u64 new_val = ctxt->src.val64;
3443 int dr = ctxt->modrm_reg;
3444
3445 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3446 return emulate_gp(ctxt, 0);
3447
3448 return check_dr_read(ctxt);
3449}
3450
3451static int check_svme(struct x86_emulate_ctxt *ctxt)
3452{
3453 u64 efer;
3454
3455 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3456
3457 if (!(efer & EFER_SVME))
3458 return emulate_ud(ctxt);
3459
3460 return X86EMUL_CONTINUE;
3461}
3462
3463static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3464{
3465 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3466
3467 /* Valid physical address? */
3468 if (rax & 0xffff000000000000ULL)
3469 return emulate_gp(ctxt, 0);
3470
3471 return check_svme(ctxt);
3472}
3473
3474static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3475{
3476 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3477
3478 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3479 return emulate_ud(ctxt);
3480
3481 return X86EMUL_CONTINUE;
3482}
3483
3484static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3485{
3486 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3487 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3488
3489 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3490 (rcx > 3))
3491 return emulate_gp(ctxt, 0);
3492
3493 return X86EMUL_CONTINUE;
3494}
3495
3496static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3497{
3498 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3499 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3500 return emulate_gp(ctxt, 0);
3501
3502 return X86EMUL_CONTINUE;
3503}
3504
3505static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3506{
3507 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3508 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3509 return emulate_gp(ctxt, 0);
3510
3511 return X86EMUL_CONTINUE;
3512}
3513
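/*
 * Builders for the opcode tables below: D() records decode flags only, I()
 * adds an ->execute handler, F() a fastop handler, DI()/II() also name an
 * intercept and the *IP variants attach a ->check_perm callback. G/GD/EXT/E/GP
 * redirect to group, group-dual, ModRM-extension, escape and mandatory-prefix
 * sub-tables, while the 2bv/F6ALU wrappers expand one definition into the
 * byte and word/long encodings of an instruction.
 */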
3514#define D(_y) { .flags = (_y) }
3515#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3516#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3517 .check_perm = (_p) }
3518#define N D(NotImpl)
3519#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3520#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3521#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3522#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3523#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3524#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3525#define II(_f, _e, _i) \
3526 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3527#define IIP(_f, _e, _i, _p) \
3528 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3529 .check_perm = (_p) }
3530#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3531
3532#define D2bv(_f) D((_f) | ByteOp), D(_f)
3533#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3534#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3535#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3536#define I2bvIP(_f, _e, _i, _p) \
3537 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3538
3539#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3540 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3541 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
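/*
 * For example, F6ALU(Lock, em_add) yields the six classic ADD encodings
 * 0x00-0x05: r/m,reg and reg,r/m in byte and word/long forms plus the
 * AL/eAX,imm forms, with Lock kept only on the memory-destination pair.
 */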
3542
3543static const struct opcode group7_rm1[] = {
3544 DI(SrcNone | Priv, monitor),
3545 DI(SrcNone | Priv, mwait),
3546 N, N, N, N, N, N,
3547};
3548
3549static const struct opcode group7_rm3[] = {
3550 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3551 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3552 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3553 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3554 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3555 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3556 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3557 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3558};
3559
3560static const struct opcode group7_rm7[] = {
3561 N,
3562 DIP(SrcNone, rdtscp, check_rdtsc),
3563 N, N, N, N, N, N,
3564};
3565
3566static const struct opcode group1[] = {
3567 F(Lock, em_add),
3568 F(Lock | PageTable, em_or),
3569 F(Lock, em_adc),
3570 F(Lock, em_sbb),
3571 F(Lock | PageTable, em_and),
3572 F(Lock, em_sub),
3573 F(Lock, em_xor),
3574 F(NoWrite, em_cmp),
3575};
3576
3577static const struct opcode group1A[] = {
3578 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3579};
3580
3581static const struct opcode group2[] = {
3582 F(DstMem | ModRM, em_rol),
3583 F(DstMem | ModRM, em_ror),
3584 F(DstMem | ModRM, em_rcl),
3585 F(DstMem | ModRM, em_rcr),
3586 F(DstMem | ModRM, em_shl),
3587 F(DstMem | ModRM, em_shr),
3588 F(DstMem | ModRM, em_shl),
3589 F(DstMem | ModRM, em_sar),
3590};
3591
3592static const struct opcode group3[] = {
3593 F(DstMem | SrcImm | NoWrite, em_test),
3594 F(DstMem | SrcImm | NoWrite, em_test),
3595 F(DstMem | SrcNone | Lock, em_not),
3596 F(DstMem | SrcNone | Lock, em_neg),
3597 F(DstXacc | Src2Mem, em_mul_ex),
3598 F(DstXacc | Src2Mem, em_imul_ex),
3599 F(DstXacc | Src2Mem, em_div_ex),
3600 F(DstXacc | Src2Mem, em_idiv_ex),
3601};
3602
3603static const struct opcode group4[] = {
3604 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3605 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3606 N, N, N, N, N, N,
3607};
3608
3609static const struct opcode group5[] = {
3610 F(DstMem | SrcNone | Lock, em_inc),
3611 F(DstMem | SrcNone | Lock, em_dec),
3612 I(SrcMem | Stack, em_grp45),
3613 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3614 I(SrcMem | Stack, em_grp45),
3615 I(SrcMemFAddr | ImplicitOps, em_grp45),
3616 I(SrcMem | Stack, em_grp45), D(Undefined),
3617};
3618
3619static const struct opcode group6[] = {
3620 DI(Prot, sldt),
3621 DI(Prot, str),
3622 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3623 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3624 N, N, N, N,
3625};
3626
3627static const struct group_dual group7 = { {
3628 II(Mov | DstMem | Priv, em_sgdt, sgdt),
3629 II(Mov | DstMem | Priv, em_sidt, sidt),
3630 II(SrcMem | Priv, em_lgdt, lgdt),
3631 II(SrcMem | Priv, em_lidt, lidt),
3632 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3633 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3634 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3635}, {
3636 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3637 EXT(0, group7_rm1),
3638 N, EXT(0, group7_rm3),
3639 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3640 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3641 EXT(0, group7_rm7),
3642} };
3643
3644static const struct opcode group8[] = {
3645 N, N, N, N,
3646 F(DstMem | SrcImmByte | NoWrite, em_bt),
3647 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3648 F(DstMem | SrcImmByte | Lock, em_btr),
3649 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3650};
3651
3652static const struct group_dual group9 = { {
3653 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3654}, {
3655 N, N, N, N, N, N, N, N,
3656} };
3657
3658static const struct opcode group11[] = {
3659 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3660 X7(D(Undefined)),
3661};
3662
3663static const struct gprefix pfx_0f_6f_0f_7f = {
3664 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3665};
3666
3667static const struct gprefix pfx_vmovntpx = {
3668 I(0, em_mov), N, N, N,
3669};
3670
3671static const struct gprefix pfx_0f_28_0f_29 = {
3672 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3673};
3674
3675static const struct escape escape_d9 = { {
3676 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3677}, {
3678
3679 N, N, N, N, N, N, N, N,
3680
3681 N, N, N, N, N, N, N, N,
3682
3683 N, N, N, N, N, N, N, N,
3684
3685 N, N, N, N, N, N, N, N,
3686
3687 N, N, N, N, N, N, N, N,
3688
3689 N, N, N, N, N, N, N, N,
3690
3691 N, N, N, N, N, N, N, N,
3692
3693 N, N, N, N, N, N, N, N,
3694} };
3695
3696static const struct escape escape_db = { {
3697 N, N, N, N, N, N, N, N,
3698}, {
3699
3700 N, N, N, N, N, N, N, N,
3701
3702 N, N, N, N, N, N, N, N,
3703
3704 N, N, N, N, N, N, N, N,
3705
3706 N, N, N, N, N, N, N, N,
3707
3708 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3709
3710 N, N, N, N, N, N, N, N,
3711
3712 N, N, N, N, N, N, N, N,
3713
3714 N, N, N, N, N, N, N, N,
3715} };
3716
3717static const struct escape escape_dd = { {
3718 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3719}, {
3720
3721 N, N, N, N, N, N, N, N,
3722
3723 N, N, N, N, N, N, N, N,
3724
3725 N, N, N, N, N, N, N, N,
3726
3727 N, N, N, N, N, N, N, N,
3728
3729 N, N, N, N, N, N, N, N,
3730
3731 N, N, N, N, N, N, N, N,
3732
3733 N, N, N, N, N, N, N, N,
3734
3735 N, N, N, N, N, N, N, N,
3736} };
3737
3738static const struct opcode opcode_table[256] = {
3739
3740 F6ALU(Lock, em_add),
3741 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3742 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3743
3744 F6ALU(Lock | PageTable, em_or),
3745 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3746 N,
3747
3748 F6ALU(Lock, em_adc),
3749 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3750 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3751
3752 F6ALU(Lock, em_sbb),
3753 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3754 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3755
3756 F6ALU(Lock | PageTable, em_and), N, N,
3757
3758 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3759
3760 F6ALU(Lock, em_xor), N, N,
3761
3762 F6ALU(NoWrite, em_cmp), N, N,
3763
3764 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3765
3766 X8(I(SrcReg | Stack, em_push)),
3767
3768 X8(I(DstReg | Stack, em_pop)),
3769
3770 I(ImplicitOps | Stack | No64, em_pusha),
3771 I(ImplicitOps | Stack | No64, em_popa),
3772 N, D(DstReg | SrcMem32 | ModRM | Mov) ,
3773 N, N, N, N,
3774
3775 I(SrcImm | Mov | Stack, em_push),
3776 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3777 I(SrcImmByte | Mov | Stack, em_push),
3778 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3779 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
3780 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
3781
3782 X16(D(SrcImmByte)),
3783
3784 G(ByteOp | DstMem | SrcImm, group1),
3785 G(DstMem | SrcImm, group1),
3786 G(ByteOp | DstMem | SrcImm | No64, group1),
3787 G(DstMem | SrcImmByte, group1),
3788 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3789 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3790
3791 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3792 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3793 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3794 D(ModRM | SrcMem | NoAccess | DstReg),
3795 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3796 G(0, group1A),
3797
3798 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3799
3800 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3801 I(SrcImmFAddr | No64, em_call_far), N,
3802 II(ImplicitOps | Stack, em_pushf, pushf),
3803 II(ImplicitOps | Stack, em_popf, popf),
3804 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3805
3806 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3807 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3808 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3809 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3810
3811 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3812 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3813 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3814 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
3815
3816 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3817
3818 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
3819
3820 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
3821 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3822 I(ImplicitOps | Stack, em_ret),
3823 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3824 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3825 G(ByteOp, group11), G(0, group11),
3826
3827 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
3828 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
3829 I(ImplicitOps | Stack, em_ret_far),
3830 D(ImplicitOps), DI(SrcImmByte, intn),
3831 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3832
3833 G(Src2One | ByteOp, group2), G(Src2One, group2),
3834 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
3835 I(DstAcc | SrcImmUByte | No64, em_aam),
3836 I(DstAcc | SrcImmUByte | No64, em_aad),
3837 F(DstAcc | ByteOp | No64, em_salc),
3838 I(DstAcc | SrcXLat | ByteOp, em_mov),
3839
3840 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
3841
3842 X3(I(SrcImmByte, em_loop)),
3843 I(SrcImmByte, em_jcxz),
3844 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
3845 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
3846
3847 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3848 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3849 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
3850 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
3851
3852 N, DI(ImplicitOps, icebp), N, N,
3853 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3854 G(ByteOp, group3), G(0, group3),
3855
3856 D(ImplicitOps), D(ImplicitOps),
3857 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3858 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3859};
3860
3861static const struct opcode twobyte_table[256] = {
3862
3863 G(0, group6), GD(0, &group7), N, N,
3864 N, I(ImplicitOps | EmulateOnUD, em_syscall),
3865 II(ImplicitOps | Priv, em_clts, clts), N,
3866 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3867 N, D(ImplicitOps | ModRM), N, N,
3868
3869 N, N, N, N, N, N, N, N,
3870 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
3871
3872 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3873 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3874 IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
3875 IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
3876 N, N, N, N,
3877 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
3878 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
3879 N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
3880 N, N, N, N,
3881
3882 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
3883 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3884 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
3885 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
3886 I(ImplicitOps | EmulateOnUD, em_sysenter),
3887 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
3888 N, N,
3889 N, N, N, N, N, N, N, N,
3890
3891 X16(D(DstReg | SrcMem | ModRM | Mov)),
3892
3893 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3894
3895 N, N, N, N,
3896 N, N, N, N,
3897 N, N, N, N,
3898 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3899
3900 N, N, N, N,
3901 N, N, N, N,
3902 N, N, N, N,
3903 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3904
3905 X16(D(SrcImm)),
3906
3907 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3908
3909 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3910 II(ImplicitOps, em_cpuid, cpuid),
3911 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
3912 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
3913 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
3914
3915 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3916 DI(ImplicitOps, rsm),
3917 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
3918 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
3919 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
3920 D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
3921
3922 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
3923 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3924 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
3925 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3926 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3927 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3928
3929 N, N,
3930 G(BitOp, group8),
3931 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
3932 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
3933 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3934
3935 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
3936 N, D(DstMem | SrcReg | ModRM | Mov),
3937 N, N, N, GD(0, &group9),
3938
3939 X8(I(DstReg, em_bswap)),
3940
3941 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3942
3943 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3944
3945 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3946};
3947
3948static const struct gprefix three_byte_0f_38_f0 = {
3949 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
3950};
3951
3952static const struct gprefix three_byte_0f_38_f1 = {
3953 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
3954};
3955
3956
3957
3958
3959
3960static const struct opcode opcode_map_0f_38[256] = {
3961
3962 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
3963
3964 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
3965
3966 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
3967 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
3968
3969 N, N, X4(N), X8(N)
3970};
3971
3972#undef D
3973#undef N
3974#undef G
3975#undef GD
3976#undef I
3977#undef GP
3978#undef EXT
3979
3980#undef D2bv
3981#undef D2bvIP
3982#undef I2bv
3983#undef I2bvIP
3984#undef I6ALU
3985
3986static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3987{
3988 unsigned size;
3989
3990 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3991 if (size == 8)
3992 size = 4;
3993 return size;
3994}
3995
3996static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3997 unsigned size, bool sign_extension)
3998{
3999 int rc = X86EMUL_CONTINUE;
4000
4001 op->type = OP_IMM;
4002 op->bytes = size;
4003 op->addr.mem.ea = ctxt->_eip;
4004
4005 switch (op->bytes) {
4006 case 1:
4007 op->val = insn_fetch(s8, ctxt);
4008 break;
4009 case 2:
4010 op->val = insn_fetch(s16, ctxt);
4011 break;
4012 case 4:
4013 op->val = insn_fetch(s32, ctxt);
4014 break;
4015 case 8:
4016 op->val = insn_fetch(s64, ctxt);
4017 break;
4018 }
4019 if (!sign_extension) {
4020 switch (op->bytes) {
4021 case 1:
4022 op->val &= 0xff;
4023 break;
4024 case 2:
4025 op->val &= 0xffff;
4026 break;
4027 case 4:
4028 op->val &= 0xffffffff;
4029 break;
4030 }
4031 }
4032done:
4033 return rc;
4034}
4035
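/*
 * Materialize a single operand from its Op* selector: register, immediate,
 * implied accumulator, string (SI/DI/XLAT) memory, segment constant, or the
 * shared ModRM memory operand kept in ctxt->memop.
 */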
4036static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4037 unsigned d)
4038{
4039 int rc = X86EMUL_CONTINUE;
4040
4041 switch (d) {
4042 case OpReg:
4043 decode_register_operand(ctxt, op);
4044 break;
4045 case OpImmUByte:
4046 rc = decode_imm(ctxt, op, 1, false);
4047 break;
4048 case OpMem:
4049 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4050 mem_common:
4051 *op = ctxt->memop;
4052 ctxt->memopp = op;
4053 if ((ctxt->d & BitOp) && op == &ctxt->dst)
4054 fetch_bit_operand(ctxt);
4055 op->orig_val = op->val;
4056 break;
4057 case OpMem64:
4058 ctxt->memop.bytes = 8;
4059 goto mem_common;
4060 case OpAcc:
4061 op->type = OP_REG;
4062 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4063 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4064 fetch_register_operand(op);
4065 op->orig_val = op->val;
4066 break;
4067 case OpAccLo:
4068 op->type = OP_REG;
4069 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4070 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4071 fetch_register_operand(op);
4072 op->orig_val = op->val;
4073 break;
4074 case OpAccHi:
4075 if (ctxt->d & ByteOp) {
4076 op->type = OP_NONE;
4077 break;
4078 }
4079 op->type = OP_REG;
4080 op->bytes = ctxt->op_bytes;
4081 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4082 fetch_register_operand(op);
4083 op->orig_val = op->val;
4084 break;
4085 case OpDI:
4086 op->type = OP_MEM;
4087 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4088 op->addr.mem.ea =
4089 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4090 op->addr.mem.seg = VCPU_SREG_ES;
4091 op->val = 0;
4092 op->count = 1;
4093 break;
4094 case OpDX:
4095 op->type = OP_REG;
4096 op->bytes = 2;
4097 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4098 fetch_register_operand(op);
4099 break;
4100 case OpCL:
4101 op->bytes = 1;
4102 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4103 break;
4104 case OpImmByte:
4105 rc = decode_imm(ctxt, op, 1, true);
4106 break;
4107 case OpOne:
4108 op->bytes = 1;
4109 op->val = 1;
4110 break;
4111 case OpImm:
4112 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4113 break;
4114 case OpImm64:
4115 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4116 break;
4117 case OpMem8:
4118 ctxt->memop.bytes = 1;
4119 if (ctxt->memop.type == OP_REG) {
4120 ctxt->memop.addr.reg = decode_register(ctxt,
4121 ctxt->modrm_rm, true);
4122 fetch_register_operand(&ctxt->memop);
4123 }
4124 goto mem_common;
4125 case OpMem16:
4126 ctxt->memop.bytes = 2;
4127 goto mem_common;
4128 case OpMem32:
4129 ctxt->memop.bytes = 4;
4130 goto mem_common;
4131 case OpImmU16:
4132 rc = decode_imm(ctxt, op, 2, false);
4133 break;
4134 case OpImmU:
4135 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4136 break;
4137 case OpSI:
4138 op->type = OP_MEM;
4139 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4140 op->addr.mem.ea =
4141 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4142 op->addr.mem.seg = seg_override(ctxt);
4143 op->val = 0;
4144 op->count = 1;
4145 break;
4146 case OpXLat:
4147 op->type = OP_MEM;
4148 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4149 op->addr.mem.ea =
4150 register_address(ctxt,
4151 reg_read(ctxt, VCPU_REGS_RBX) +
4152 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4153 op->addr.mem.seg = seg_override(ctxt);
4154 op->val = 0;
4155 break;
4156 case OpImmFAddr:
4157 op->type = OP_IMM;
4158 op->addr.mem.ea = ctxt->_eip;
4159 op->bytes = ctxt->op_bytes + 2;
4160 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4161 break;
4162 case OpMemFAddr:
4163 ctxt->memop.bytes = ctxt->op_bytes + 2;
4164 goto mem_common;
4165 case OpES:
4166 op->val = VCPU_SREG_ES;
4167 break;
4168 case OpCS:
4169 op->val = VCPU_SREG_CS;
4170 break;
4171 case OpSS:
4172 op->val = VCPU_SREG_SS;
4173 break;
4174 case OpDS:
4175 op->val = VCPU_SREG_DS;
4176 break;
4177 case OpFS:
4178 op->val = VCPU_SREG_FS;
4179 break;
4180 case OpGS:
4181 op->val = VCPU_SREG_GS;
4182 break;
4183 case OpImplicit:
4184 /* Special instructions do their own operand decoding. */
4185 default:
4186 op->type = OP_NONE;
4187 break;
4188 }
4189
4190done:
4191 return rc;
4192}
4193
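/*
 * Decode one guest instruction: pick the default operand/address sizes for
 * the current mode, consume legacy and REX prefixes, look the opcode up in
 * the one-, two- or three-byte tables (descending through group, group-dual,
 * RM-extension, escape and mandatory-prefix sub-tables), and finally decode
 * the ModRM/absolute memory operand and the src, src2 and dst operands
 * described by the accumulated flags.
 */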
4194int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4195{
4196 int rc = X86EMUL_CONTINUE;
4197 int mode = ctxt->mode;
4198 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4199 bool op_prefix = false;
4200 struct opcode opcode;
4201
4202 ctxt->memop.type = OP_NONE;
4203 ctxt->memopp = NULL;
4204 ctxt->_eip = ctxt->eip;
4205 ctxt->fetch.start = ctxt->_eip;
4206 ctxt->fetch.end = ctxt->fetch.start + insn_len;
4207 ctxt->opcode_len = 1;
4208 if (insn_len > 0)
4209 memcpy(ctxt->fetch.data, insn, insn_len);
4210
4211 switch (mode) {
4212 case X86EMUL_MODE_REAL:
4213 case X86EMUL_MODE_VM86:
4214 case X86EMUL_MODE_PROT16:
4215 def_op_bytes = def_ad_bytes = 2;
4216 break;
4217 case X86EMUL_MODE_PROT32:
4218 def_op_bytes = def_ad_bytes = 4;
4219 break;
4220#ifdef CONFIG_X86_64
4221 case X86EMUL_MODE_PROT64:
4222 def_op_bytes = 4;
4223 def_ad_bytes = 8;
4224 break;
4225#endif
4226 default:
4227 return EMULATION_FAILED;
4228 }
4229
4230 ctxt->op_bytes = def_op_bytes;
4231 ctxt->ad_bytes = def_ad_bytes;
4232
4233 /* Legacy prefixes. */
4234 for (;;) {
4235 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4236 case 0x66:
4237 op_prefix = true;
4238 /* switch between 2/4 bytes */
4239 ctxt->op_bytes = def_op_bytes ^ 6;
4240 break;
4241 case 0x67:
4242 if (mode == X86EMUL_MODE_PROT64)
4243 /* switch between 4/8 bytes */
4244 ctxt->ad_bytes = def_ad_bytes ^ 12;
4245 else
4246 /* switch between 2/4 bytes */
4247 ctxt->ad_bytes = def_ad_bytes ^ 6;
4248 break;
4249 case 0x26: /* ES override */
4250 case 0x2e: /* CS override */
4251 case 0x36: /* SS override */
4252 case 0x3e: /* DS override */
4253 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
4254 break;
4255 case 0x64: /* FS override */
4256 case 0x65: /* GS override */
4257 set_seg_override(ctxt, ctxt->b & 7);
4258 break;
4259 case 0x40 ... 0x4f:
4260 if (mode != X86EMUL_MODE_PROT64)
4261 goto done_prefixes;
4262 ctxt->rex_prefix = ctxt->b;
4263 continue;
4264 case 0xf0:
4265 ctxt->lock_prefix = 1;
4266 break;
4267 case 0xf2:
4268 case 0xf3:
4269 ctxt->rep_prefix = ctxt->b;
4270 break;
4271 default:
4272 goto done_prefixes;
4273 }
4274
4275 /* Any legacy prefix after a REX prefix nullifies its effect. */
4276
4277 ctxt->rex_prefix = 0;
4278 }
4279
4280done_prefixes:
4281
4282 /* REX prefix. */
4283 if (ctxt->rex_prefix & 8)
4284 ctxt->op_bytes = 8;
4285
4286 /* Opcode byte(s). */
4287 opcode = opcode_table[ctxt->b];
4288
4289 if (ctxt->b == 0x0f) {
4290 ctxt->opcode_len = 2;
4291 ctxt->b = insn_fetch(u8, ctxt);
4292 opcode = twobyte_table[ctxt->b];
4293
4294 /* 0F_38 opcode map */
4295 if (ctxt->b == 0x38) {
4296 ctxt->opcode_len = 3;
4297 ctxt->b = insn_fetch(u8, ctxt);
4298 opcode = opcode_map_0f_38[ctxt->b];
4299 }
4300 }
4301 ctxt->d = opcode.flags;
4302
4303 if (ctxt->d & ModRM)
4304 ctxt->modrm = insn_fetch(u8, ctxt);
4305
4306 while (ctxt->d & GroupMask) {
4307 switch (ctxt->d & GroupMask) {
4308 case Group:
4309 goffset = (ctxt->modrm >> 3) & 7;
4310 opcode = opcode.u.group[goffset];
4311 break;
4312 case GroupDual:
4313 goffset = (ctxt->modrm >> 3) & 7;
4314 if ((ctxt->modrm >> 6) == 3)
4315 opcode = opcode.u.gdual->mod3[goffset];
4316 else
4317 opcode = opcode.u.gdual->mod012[goffset];
4318 break;
4319 case RMExt:
4320 goffset = ctxt->modrm & 7;
4321 opcode = opcode.u.group[goffset];
4322 break;
4323 case Prefix:
4324 if (ctxt->rep_prefix && op_prefix)
4325 return EMULATION_FAILED;
4326 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4327 switch (simd_prefix) {
4328 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4329 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4330 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4331 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4332 }
4333 break;
4334 case Escape:
4335 if (ctxt->modrm > 0xbf)
4336 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4337 else
4338 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4339 break;
4340 default:
4341 return EMULATION_FAILED;
4342 }
4343
4344 ctxt->d &= ~(u64)GroupMask;
4345 ctxt->d |= opcode.flags;
4346 }
4347
4348 ctxt->execute = opcode.u.execute;
4349 ctxt->check_perm = opcode.check_perm;
4350 ctxt->intercept = opcode.intercept;
4351
4352 /* Unrecognised? */
4353 if (ctxt->d == 0 || (ctxt->d & NotImpl))
4354 return EMULATION_FAILED;
4355
4356 if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
4357 return EMULATION_FAILED;
4358
4359 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4360 ctxt->op_bytes = 8;
4361
4362 if (ctxt->d & Op3264) {
4363 if (mode == X86EMUL_MODE_PROT64)
4364 ctxt->op_bytes = 8;
4365 else
4366 ctxt->op_bytes = 4;
4367 }
4368
4369 if (ctxt->d & Sse)
4370 ctxt->op_bytes = 16;
4371 else if (ctxt->d & Mmx)
4372 ctxt->op_bytes = 8;
4373
4374
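	/* ModRM and SIB bytes. */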
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!ctxt->has_seg_override)
			set_seg_override(ctxt, ctxt->modrm_seg);
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!ctxt->has_seg_override)
		set_seg_override(ctxt, VCPU_SREG_DS);

	ctxt->memop.addr.mem.seg = seg_override(ctxt);

	if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

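	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */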
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

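	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */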
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

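	/* Decode and fetch the destination operand: register or memory. */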
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
	if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
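	/*
	 * The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
	 * termination condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */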
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	/*
	 * fwait forces delivery of any pending x87 exception; the exception
	 * table fixup at 3: records it in 'fault' so it can be reflected
	 * into the guest as #MF below.
	 */
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

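/*
 * Dispatch to a fastop handler: the size-specific variants are laid out
 * FASTOP_SIZE bytes apart, so the right one is reached by adding
 * log2(operand size) * FASTOP_SIZE to the function pointer. Flags are
 * passed to and from the handler through the real RFLAGS around the call,
 * and a handler that faults reports it by clearing the pointer register.
 */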
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
	    (ctxt->d & Undefined)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

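	/* LOCK prefix is allowed only with some instructions */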
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
	    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	if (ctxt->d & Mmx) {
		rc = flush_pending_x87_faults(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
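		/*
		 * Now that we know the fpu is exception safe, we can fetch
		 * operands from it.
		 */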
		fetch_possible_mmx_operand(ctxt, &ctxt->src);
		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
		if (!(ctxt->d & Mov))
			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

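	/* Privileged instruction can be executed only in CPL=0 */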
	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

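	/* Instruction can only be executed in protected mode */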
	if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
		rc = emulate_ud(ctxt);
		goto done;
	}

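	/* Do instruction specific permission checks */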
	if (ctxt->check_perm) {
		rc = ctxt->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String)) {
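		/* All REP prefixes have the same first termination condition */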
		if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
			ctxt->eip = ctxt->_eip;
			goto done;
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
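		/* optimisation - avoid slow emulated read if Mov */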
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			break;
		rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

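	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */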
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);

		if (!string_insn_completed(ctxt)) {
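			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */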
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
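				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * only after the decode is done, this does not
				 * hurt the cache.
				 */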
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done;
		}
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
		if (!test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
							(u64) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}