#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"

#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
69
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)  /* 8-bit operands. */
81
82#define DstShift 1
83#define ImplicitOps (OpImplicit << DstShift)
84#define DstReg (OpReg << DstShift)
85#define DstMem (OpMem << DstShift)
86#define DstAcc (OpAcc << DstShift)
87#define DstDI (OpDI << DstShift)
88#define DstMem64 (OpMem64 << DstShift)
89#define DstMem16 (OpMem16 << DstShift)
90#define DstImmUByte (OpImmUByte << DstShift)
91#define DstDX (OpDX << DstShift)
92#define DstAccLo (OpAccLo << DstShift)
93#define DstMask (OpMask << DstShift)
94
95#define SrcShift 6
96#define SrcNone (OpNone << SrcShift)
97#define SrcReg (OpReg << SrcShift)
98#define SrcMem (OpMem << SrcShift)
99#define SrcMem16 (OpMem16 << SrcShift)
100#define SrcMem32 (OpMem32 << SrcShift)
101#define SrcImm (OpImm << SrcShift)
102#define SrcImmByte (OpImmByte << SrcShift)
103#define SrcOne (OpOne << SrcShift)
104#define SrcImmUByte (OpImmUByte << SrcShift)
105#define SrcImmU (OpImmU << SrcShift)
106#define SrcSI (OpSI << SrcShift)
107#define SrcXLat (OpXLat << SrcShift)
108#define SrcImmFAddr (OpImmFAddr << SrcShift)
109#define SrcMemFAddr (OpMemFAddr << SrcShift)
110#define SrcAcc (OpAcc << SrcShift)
111#define SrcImmU16 (OpImmU16 << SrcShift)
112#define SrcImm64 (OpImm64 << SrcShift)
113#define SrcDX (OpDX << SrcShift)
114#define SrcMem8 (OpMem8 << SrcShift)
115#define SrcAccHi (OpAccHi << SrcShift)
116#define SrcMask (OpMask << SrcShift)
117#define BitOp (1<<11)
118#define MemAbs (1<<12)
119#define String (1<<13)
120#define Stack (1<<14)
121#define GroupMask (7<<15)
122#define Group (1<<15)
123#define GroupDual (2<<15)
124#define Prefix (3<<15)
125#define RMExt (4<<15)
126#define Escape (5<<15)
127#define InstrDual (6<<15)
128#define ModeDual (7<<15)
129#define Sse (1<<18)
130
131#define ModRM (1<<19)
132
133#define Mov (1<<20)
134
135#define Prot (1<<21)
136#define EmulateOnUD (1<<22)
137#define NoAccess (1<<23)
138#define Op3264 (1<<24)
139#define Undefined (1<<25)
140#define Lock (1<<26)
141#define Priv (1<<27)
142#define No64 (1<<28)
143#define PageTable (1 << 29)
144#define NotImpl (1 << 30)
145
146#define Src2Shift (31)
147#define Src2None (OpNone << Src2Shift)
148#define Src2Mem (OpMem << Src2Shift)
149#define Src2CL (OpCL << Src2Shift)
150#define Src2ImmByte (OpImmByte << Src2Shift)
151#define Src2One (OpOne << Src2Shift)
152#define Src2Imm (OpImm << Src2Shift)
153#define Src2ES (OpES << Src2Shift)
154#define Src2CS (OpCS << Src2Shift)
155#define Src2SS (OpSS << Src2Shift)
156#define Src2DS (OpDS << Src2Shift)
157#define Src2FS (OpFS << Src2Shift)
158#define Src2GS (OpGS << Src2Shift)
159#define Src2Mask (OpMask << Src2Shift)
160#define Mmx ((u64)1 << 40)
161#define AlignMask ((u64)7 << 41)
162#define Aligned ((u64)1 << 41)
163#define Unaligned ((u64)2 << 41)
164#define Avx ((u64)3 << 41)
165#define Aligned16 ((u64)4 << 41)
166#define Fastop ((u64)1 << 44)
167#define NoWrite ((u64)1 << 45)
168#define SrcWrite ((u64)1 << 46)
169#define NoMod ((u64)1 << 47)
170#define Intercept ((u64)1 << 48)
171#define CheckPerm ((u64)1 << 49)
172#define PrivUD ((u64)1 << 51)
173#define NearBranch ((u64)1 << 52)
174#define No16 ((u64)1 << 53)
175#define IncSP ((u64)1 << 54)
176#define TwoMemOp ((u64)1 << 55)
177
178#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
179
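
/*
 * X2..X16 simply repeat their argument list: X4(em_push), for instance,
 * expands to four copies of em_push.  They are used to fill runs of
 * identical entries in the opcode tables later in the file.
 */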
180#define X2(x...) x, x
181#define X3(x...) X2(x), x
182#define X4(x...) X2(x), X2(x)
183#define X5(x...) X4(x), x
184#define X6(x...) X4(x), X2(x)
185#define X7(x...) X4(x), X3(x)
186#define X8(x...) X4(x), X4(x)
187#define X16(x...) X8(x), X8(x)
188
189#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
190#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in: fastop pointer, out: zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-used fake parameter, so
 * that they can't be called from C directly.
 */
209struct fastop;
210
211struct opcode {
212 u64 flags : 56;
213 u64 intercept : 8;
214 union {
215 int (*execute)(struct x86_emulate_ctxt *ctxt);
216 const struct opcode *group;
217 const struct group_dual *gdual;
218 const struct gprefix *gprefix;
219 const struct escape *esc;
220 const struct instr_dual *idual;
221 const struct mode_dual *mdual;
222 void (*fastop)(struct fastop *fake);
223 } u;
224 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
225};
226
227struct group_dual {
228 struct opcode mod012[8];
229 struct opcode mod3[8];
230};
231
232struct gprefix {
233 struct opcode pfx_no;
234 struct opcode pfx_66;
235 struct opcode pfx_f2;
236 struct opcode pfx_f3;
237};
238
239struct escape {
240 struct opcode op[8];
241 struct opcode high[64];
242};
243
244struct instr_dual {
245 struct opcode mod012;
246 struct opcode mod3;
247};
248
249struct mode_dual {
250 struct opcode mode32;
251 struct opcode mode64;
252};
253
254#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
255
256enum x86_transfer_type {
257 X86_TRANSFER_NONE,
258 X86_TRANSFER_CALL_JMP,
259 X86_TRANSFER_RET,
260 X86_TRANSFER_TASK_SWITCH,
261};
262
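/*
 * Guest GPRs are read lazily: reg_read() pulls a register from the vcpu on
 * first use and caches it in ctxt->_regs (tracked by the regs_valid bitmap);
 * reg_write()/reg_rmw() additionally mark it in regs_dirty so that
 * writeback_registers() knows which cached values to push back.
 */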
263static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
264{
265 if (!(ctxt->regs_valid & (1 << nr))) {
266 ctxt->regs_valid |= 1 << nr;
267 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
268 }
269 return ctxt->_regs[nr];
270}
271
272static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
273{
274 ctxt->regs_valid |= 1 << nr;
275 ctxt->regs_dirty |= 1 << nr;
276 return &ctxt->_regs[nr];
277}
278
279static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
280{
281 reg_read(ctxt, nr);
282 return reg_write(ctxt, nr);
283}
284
285static void writeback_registers(struct x86_emulate_ctxt *ctxt)
286{
287 unsigned reg;
288
289 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
290 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
291}
292
293static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
294{
295 ctxt->regs_dirty = 0;
296 ctxt->regs_valid = 0;
297}
298
/*
 * These EFLAGS bits are restored from the saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
303#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
304 X86_EFLAGS_PF|X86_EFLAGS_CF)
305
306#ifdef CONFIG_X86_64
307#define ON64(x) x
308#else
309#define ON64(x)
310#endif
311
312static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
313
314#define FOP_FUNC(name) \
315 ".align " __stringify(FASTOP_SIZE) " \n\t" \
316 ".type " name ", @function \n\t" \
317 name ":\n\t"
318
319#define FOP_RET "ret \n\t"
320
321#define FOP_START(op) \
322 extern void em_##op(struct fastop *fake); \
323 asm(".pushsection .text, \"ax\" \n\t" \
324 ".global em_" #op " \n\t" \
325 FOP_FUNC("em_" #op)
326
327#define FOP_END \
328 ".popsection")
329
330#define FOPNOP() \
331 FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
332 FOP_RET
333
334#define FOP1E(op, dst) \
335 FOP_FUNC(#op "_" #dst) \
336 "10: " #op " %" #dst " \n\t" FOP_RET
337
338#define FOP1EEX(op, dst) \
339 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
340
341#define FASTOP1(op) \
342 FOP_START(op) \
343 FOP1E(op##b, al) \
344 FOP1E(op##w, ax) \
345 FOP1E(op##l, eax) \
346 ON64(FOP1E(op##q, rax)) \
347 FOP_END
348
349
350#define FASTOP1SRC2(op, name) \
351 FOP_START(name) \
352 FOP1E(op, cl) \
353 FOP1E(op, cx) \
354 FOP1E(op, ecx) \
355 ON64(FOP1E(op, rcx)) \
356 FOP_END
357
358
359#define FASTOP1SRC2EX(op, name) \
360 FOP_START(name) \
361 FOP1EEX(op, cl) \
362 FOP1EEX(op, cx) \
363 FOP1EEX(op, ecx) \
364 ON64(FOP1EEX(op, rcx)) \
365 FOP_END
366
367#define FOP2E(op, dst, src) \
368 FOP_FUNC(#op "_" #dst "_" #src) \
369 #op " %" #src ", %" #dst " \n\t" FOP_RET
370
371#define FASTOP2(op) \
372 FOP_START(op) \
373 FOP2E(op##b, al, dl) \
374 FOP2E(op##w, ax, dx) \
375 FOP2E(op##l, eax, edx) \
376 ON64(FOP2E(op##q, rax, rdx)) \
377 FOP_END
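
/*
 * As an illustration, the FASTOP2(add) instantiation further down emits
 * roughly the following, one FASTOP_SIZE-aligned stub per operand size:
 *
 *   em_add:                  addb %dl,  %al  ; ret
 *   em_add + FASTOP_SIZE:    addw %dx,  %ax  ; ret
 *   em_add + 2*FASTOP_SIZE:  addl %edx, %eax ; ret
 *   em_add + 3*FASTOP_SIZE:  addq %rdx, %rax ; ret   (64-bit builds only)
 *
 * The fastop() dispatcher declared above (and defined later in the file)
 * reaches the stub for the current operand size by adding the matching
 * multiple of FASTOP_SIZE to the em_##op entry point.
 */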
378
379
380#define FASTOP2W(op) \
381 FOP_START(op) \
382 FOPNOP() \
383 FOP2E(op##w, ax, dx) \
384 FOP2E(op##l, eax, edx) \
385 ON64(FOP2E(op##q, rax, rdx)) \
386 FOP_END
387
388
389#define FASTOP2CL(op) \
390 FOP_START(op) \
391 FOP2E(op##b, al, cl) \
392 FOP2E(op##w, ax, cl) \
393 FOP2E(op##l, eax, cl) \
394 ON64(FOP2E(op##q, rax, cl)) \
395 FOP_END
396
397
398#define FASTOP2R(op, name) \
399 FOP_START(name) \
400 FOP2E(op##b, dl, al) \
401 FOP2E(op##w, dx, ax) \
402 FOP2E(op##l, edx, eax) \
403 ON64(FOP2E(op##q, rdx, rax)) \
404 FOP_END
405
406#define FOP3E(op, dst, src, src2) \
407 FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
408 #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
409
410
411#define FASTOP3WCL(op) \
412 FOP_START(op) \
413 FOPNOP() \
414 FOP3E(op##w, ax, dx, cl) \
415 FOP3E(op##l, eax, edx, cl) \
416 ON64(FOP3E(op##q, rax, rdx, cl)) \
417 FOP_END
418
419
420#define FOP_SETCC(op) \
421 ".align 4 \n\t" \
422 ".type " #op ", @function \n\t" \
423 #op ": \n\t" \
424 #op " %al \n\t" \
425 FOP_RET
426
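/*
 * Landing pad for the exception-table entries installed by FOP1EEX (div and
 * idiv): it zeroes %esi, the register that carries the fastop pointer across
 * the call, so the fastop() helper declared above (and defined later in the
 * file) can see that the instruction faulted and inject #DE instead.
 */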
427asm(".global kvm_fastop_exception \n"
428 "kvm_fastop_exception: xor %esi, %esi; ret");
429
430FOP_START(setcc)
431FOP_SETCC(seto)
432FOP_SETCC(setno)
433FOP_SETCC(setc)
434FOP_SETCC(setnc)
435FOP_SETCC(setz)
436FOP_SETCC(setnz)
437FOP_SETCC(setbe)
438FOP_SETCC(setnbe)
439FOP_SETCC(sets)
440FOP_SETCC(setns)
441FOP_SETCC(setp)
442FOP_SETCC(setnp)
443FOP_SETCC(setl)
444FOP_SETCC(setnl)
445FOP_SETCC(setle)
446FOP_SETCC(setnle)
447FOP_END;
448
449FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
450FOP_END;
451
/*
 * Run a single instruction under an exception fixup: if the instruction
 * faults, the fixup sets _fault and the macro evaluates to
 * X86EMUL_UNHANDLEABLE instead of X86EMUL_CONTINUE.
 */
456#define asm_safe(insn, inoutclob...) \
457({ \
458 int _fault = 0; \
459 \
460 asm volatile("1:" insn "\n" \
461 "2:\n" \
462 ".pushsection .fixup, \"ax\"\n" \
463 "3: movl $1, %[_fault]\n" \
464 " jmp 2b\n" \
465 ".popsection\n" \
466 _ASM_EXTABLE(1b, 3b) \
467 : [_fault] "+qm"(_fault) inoutclob ); \
468 \
469 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
470})
471
472static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
473 enum x86_intercept intercept,
474 enum x86_intercept_stage stage)
475{
476 struct x86_instruction_info info = {
477 .intercept = intercept,
478 .rep_prefix = ctxt->rep_prefix,
479 .modrm_mod = ctxt->modrm_mod,
480 .modrm_reg = ctxt->modrm_reg,
481 .modrm_rm = ctxt->modrm_rm,
482 .src_val = ctxt->src.val64,
483 .dst_val = ctxt->dst.val64,
484 .src_bytes = ctxt->src.bytes,
485 .dst_bytes = ctxt->dst.bytes,
486 .ad_bytes = ctxt->ad_bytes,
487 .next_rip = ctxt->eip,
488 };
489
490 return ctxt->ops->intercept(ctxt, &info, stage);
491}
492
493static void assign_masked(ulong *dest, ulong src, ulong mask)
494{
495 *dest = (*dest & ~mask) | (src & mask);
496}
497
498static void assign_register(unsigned long *reg, u64 val, int bytes)
499{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
501 switch (bytes) {
502 case 1:
503 *(u8 *)reg = (u8)val;
504 break;
505 case 2:
506 *(u16 *)reg = (u16)val;
507 break;
508 case 4:
509 *reg = (u32)val;
510 break;
511 case 8:
512 *reg = val;
513 break;
514 }
515}
516
517static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
518{
519 return (1UL << (ctxt->ad_bytes << 3)) - 1;
520}
521
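/*
 * Width of the stack pointer: 64-bit mode uses the full RSP; otherwise the
 * mask is 0xffffffff or 0xffff depending on SS.D (the "big" bit), which is
 * what the (ss.d ^ 1) * 16 shift below computes.
 */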
522static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
523{
524 u16 sel;
525 struct desc_struct ss;
526
527 if (ctxt->mode == X86EMUL_MODE_PROT64)
528 return ~0UL;
529 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
530 return ~0U >> ((ss.d ^ 1) * 16);
531}
532
533static int stack_size(struct x86_emulate_ctxt *ctxt)
534{
535 return (__fls(stack_mask(ctxt)) + 1) >> 3;
536}
537
538
539static inline unsigned long
540address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
541{
542 if (ctxt->ad_bytes == sizeof(unsigned long))
543 return reg;
544 else
545 return reg & ad_mask(ctxt);
546}
547
548static inline unsigned long
549register_address(struct x86_emulate_ctxt *ctxt, int reg)
550{
551 return address_mask(ctxt, reg_read(ctxt, reg));
552}
553
554static void masked_increment(ulong *reg, ulong mask, int inc)
555{
556 assign_masked(reg, *reg + inc, mask);
557}
558
559static inline void
560register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
561{
562 ulong *preg = reg_rmw(ctxt, reg);
563
564 assign_register(preg, *preg + inc, ctxt->ad_bytes);
565}
566
567static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
568{
569 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
570}
571
572static u32 desc_limit_scaled(struct desc_struct *desc)
573{
574 u32 limit = get_desc_limit(desc);
575
576 return desc->g ? (limit << 12) | 0xfff : limit;
577}
578
579static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
580{
581 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
582 return 0;
583
584 return ctxt->ops->get_cached_segment_base(ctxt, seg);
585}
586
587static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
588 u32 error, bool valid)
589{
590 WARN_ON(vec > 0x1f);
591 ctxt->exception.vector = vec;
592 ctxt->exception.error_code = error;
593 ctxt->exception.error_code_valid = valid;
594 return X86EMUL_PROPAGATE_FAULT;
595}
596
597static int emulate_db(struct x86_emulate_ctxt *ctxt)
598{
599 return emulate_exception(ctxt, DB_VECTOR, 0, false);
600}
601
602static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
603{
604 return emulate_exception(ctxt, GP_VECTOR, err, true);
605}
606
607static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
608{
609 return emulate_exception(ctxt, SS_VECTOR, err, true);
610}
611
612static int emulate_ud(struct x86_emulate_ctxt *ctxt)
613{
614 return emulate_exception(ctxt, UD_VECTOR, 0, false);
615}
616
617static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
618{
619 return emulate_exception(ctxt, TS_VECTOR, err, true);
620}
621
622static int emulate_de(struct x86_emulate_ctxt *ctxt)
623{
624 return emulate_exception(ctxt, DE_VECTOR, 0, false);
625}
626
627static int emulate_nm(struct x86_emulate_ctxt *ctxt)
628{
629 return emulate_exception(ctxt, NM_VECTOR, 0, false);
630}
631
632static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
633{
634 u16 selector;
635 struct desc_struct desc;
636
637 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
638 return selector;
639}
640
641static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
642 unsigned seg)
643{
644 u16 dummy;
645 u32 base3;
646 struct desc_struct desc;
647
648 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
649 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
650}
651
/*
 * x86 defines three classes of vector instructions: explicitly aligned,
 * explicitly unaligned, and the rest, which change behaviour depending on
 * whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B, which is not a vector instruction but is
 * subject to the same 16-byte alignment check.
 */
661static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
662{
663 u64 alignment = ctxt->d & AlignMask;
664
665 if (likely(size < 16))
666 return 1;
667
668 switch (alignment) {
669 case Unaligned:
670 case Avx:
671 return 1;
672 case Aligned16:
673 return 16;
674 case Aligned:
675 default:
676 return size;
677 }
678}
679
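/*
 * Translate a segmented address into a linear address, applying canonical
 * checks in 64-bit mode and segment type/limit checks otherwise, plus the
 * alignment rules from insn_alignment().  On success *max_size is set to the
 * number of bytes that can be accessed at addr without faulting; @fetch marks
 * an instruction fetch, which is allowed from execute-only code segments that
 * ordinary reads are not.
 */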
680static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
681 struct segmented_address addr,
682 unsigned *max_size, unsigned size,
683 bool write, bool fetch,
684 enum x86emul_mode mode, ulong *linear)
685{
686 struct desc_struct desc;
687 bool usable;
688 ulong la;
689 u32 lim;
690 u16 sel;
691
692 la = seg_base(ctxt, addr.seg) + addr.ea;
693 *max_size = 0;
694 switch (mode) {
695 case X86EMUL_MODE_PROT64:
696 *linear = la;
697 if (is_noncanonical_address(la))
698 goto bad;
699
700 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
701 if (size > *max_size)
702 goto bad;
703 break;
704 default:
705 *linear = la = (u32)la;
706 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
707 addr.seg);
708 if (!usable)
709 goto bad;
		/* code segment in protected mode or read-only data segment */
711 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
712 || !(desc.type & 2)) && write)
713 goto bad;
		/* unreadable code segment */
715 if (!fetch && (desc.type & 8) && !(desc.type & 2))
716 goto bad;
717 lim = desc_limit_scaled(&desc);
718 if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
720 if (addr.ea <= lim)
721 goto bad;
722 lim = desc.d ? 0xffffffff : 0xffff;
723 }
724 if (addr.ea > lim)
725 goto bad;
726 if (lim == 0xffffffff)
727 *max_size = ~0u;
728 else {
729 *max_size = (u64)lim + 1 - addr.ea;
730 if (size > *max_size)
731 goto bad;
732 }
733 break;
734 }
735 if (la & (insn_alignment(ctxt, size) - 1))
736 return emulate_gp(ctxt, 0);
737 return X86EMUL_CONTINUE;
738bad:
739 if (addr.seg == VCPU_SREG_SS)
740 return emulate_ss(ctxt, 0);
741 else
742 return emulate_gp(ctxt, 0);
743}
744
745static int linearize(struct x86_emulate_ctxt *ctxt,
746 struct segmented_address addr,
747 unsigned size, bool write,
748 ulong *linear)
749{
750 unsigned max_size;
751 return __linearize(ctxt, addr, &max_size, size, write, false,
752 ctxt->mode, linear);
753}
754
755static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
756 enum x86emul_mode mode)
757{
758 ulong linear;
759 int rc;
760 unsigned max_size;
761 struct segmented_address addr = { .seg = VCPU_SREG_CS,
762 .ea = dst };
763
764 if (ctxt->op_bytes != sizeof(unsigned long))
765 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
766 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
767 if (rc == X86EMUL_CONTINUE)
768 ctxt->_eip = addr.ea;
769 return rc;
770}
771
772static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
773{
774 return assign_eip(ctxt, dst, ctxt->mode);
775}
776
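/*
 * Far transfers can switch CPU mode: if the target code segment is a
 * long-mode segment (CS.L set) and EFER.LMA is enabled we end up in 64-bit
 * mode, otherwise CS.D selects between 32-bit and 16-bit protected mode.
 */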
777static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
778 const struct desc_struct *cs_desc)
779{
780 enum x86emul_mode mode = ctxt->mode;
781 int rc;
782
783#ifdef CONFIG_X86_64
784 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
785 if (cs_desc->l) {
786 u64 efer = 0;
787
788 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
789 if (efer & EFER_LMA)
790 mode = X86EMUL_MODE_PROT64;
791 } else
792 mode = X86EMUL_MODE_PROT32;
793 }
794#endif
795 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
796 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
797 rc = assign_eip(ctxt, dst, mode);
798 if (rc == X86EMUL_CONTINUE)
799 ctxt->mode = mode;
800 return rc;
801}
802
803static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
804{
805 return assign_eip_near(ctxt, ctxt->_eip + rel);
806}
807
808static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
809 struct segmented_address addr,
810 void *data,
811 unsigned size)
812{
813 int rc;
814 ulong linear;
815
816 rc = linearize(ctxt, addr, size, false, &linear);
817 if (rc != X86EMUL_CONTINUE)
818 return rc;
819 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
820}
821
822static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
823 struct segmented_address addr,
824 void *data,
825 unsigned int size)
826{
827 int rc;
828 ulong linear;
829
830 rc = linearize(ctxt, addr, size, true, &linear);
831 if (rc != X86EMUL_CONTINUE)
832 return rc;
833 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
834}
835
/*
 * Prefetch the remaining bytes of the instruction without crossing a page
 * boundary if they are not in the fetch cache yet.
 */
840static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
841{
842 int rc;
843 unsigned size, max_size;
844 unsigned long linear;
845 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
846 struct segmented_address addr = { .seg = VCPU_SREG_CS,
847 .ea = ctxt->eip + cur_size };
848
	/*
	 * Linearize with size 0: we only need the linear address and
	 * max_size, i.e. how many bytes may still be fetched before hitting
	 * the segment limit or the end of the canonical address space.
	 */
859 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
860 &linear);
861 if (unlikely(rc != X86EMUL_CONTINUE))
862 return rc;
863
864 size = min_t(unsigned, 15UL ^ cur_size, max_size);
865 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
	/*
	 * One instruction can only straddle two pages, and one page has
	 * already been loaded at the beginning of x86_decode_insn.  So, if
	 * there still are not enough bytes, we must have hit the 15-byte
	 * instruction-length boundary.
	 */
873 if (unlikely(size < op_size))
874 return emulate_gp(ctxt, 0);
875
876 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
877 size, &ctxt->exception);
878 if (unlikely(rc != X86EMUL_CONTINUE))
879 return rc;
880 ctxt->fetch.end += size;
881 return X86EMUL_CONTINUE;
882}
883
884static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
885 unsigned size)
886{
887 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
888
889 if (unlikely(done_size < size))
890 return __do_insn_fetch_bytes(ctxt, size - done_size);
891 else
892 return X86EMUL_CONTINUE;
893}
894
895
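/*
 * Pull the next sizeof(_type) bytes (or _size bytes for insn_fetch_arr) out
 * of the prefetch cache, topping it up with do_insn_fetch_bytes() first.
 * Both macros expect an 'rc' variable and a 'done' label in the caller and
 * jump there on failure.
 */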
896#define insn_fetch(_type, _ctxt) \
897({ _type _x; \
898 \
899 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
900 if (rc != X86EMUL_CONTINUE) \
901 goto done; \
902 ctxt->_eip += sizeof(_type); \
903 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
904 ctxt->fetch.ptr += sizeof(_type); \
905 _x; \
906})
907
908#define insn_fetch_arr(_arr, _size, _ctxt) \
909({ \
910 rc = do_insn_fetch_bytes(_ctxt, _size); \
911 if (rc != X86EMUL_CONTINUE) \
912 goto done; \
913 ctxt->_eip += (_size); \
914 memcpy(_arr, ctxt->fetch.ptr, _size); \
915 ctxt->fetch.ptr += (_size); \
916})
917
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop specifies whether the high-byte registers AH,CH,DH,BH may be
 * decoded (only when there is no REX prefix).
 */
923static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
924 int byteop)
925{
926 void *p;
927 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
928
929 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
930 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
931 else
932 p = reg_rmw(ctxt, modrm_reg);
933 return p;
934}
935
936static int read_descriptor(struct x86_emulate_ctxt *ctxt,
937 struct segmented_address addr,
938 u16 *size, unsigned long *address, int op_bytes)
939{
940 int rc;
941
942 if (op_bytes == 2)
943 op_bytes = 3;
944 *address = 0;
945 rc = segmented_read_std(ctxt, addr, size, 2);
946 if (rc != X86EMUL_CONTINUE)
947 return rc;
948 addr.ea += 2;
949 rc = segmented_read_std(ctxt, addr, address, op_bytes);
950 return rc;
951}
952
953FASTOP2(add);
954FASTOP2(or);
955FASTOP2(adc);
956FASTOP2(sbb);
957FASTOP2(and);
958FASTOP2(sub);
959FASTOP2(xor);
960FASTOP2(cmp);
961FASTOP2(test);
962
963FASTOP1SRC2(mul, mul_ex);
964FASTOP1SRC2(imul, imul_ex);
965FASTOP1SRC2EX(div, div_ex);
966FASTOP1SRC2EX(idiv, idiv_ex);
967
968FASTOP3WCL(shld);
969FASTOP3WCL(shrd);
970
971FASTOP2W(imul);
972
973FASTOP1(not);
974FASTOP1(neg);
975FASTOP1(inc);
976FASTOP1(dec);
977
978FASTOP2CL(rol);
979FASTOP2CL(ror);
980FASTOP2CL(rcl);
981FASTOP2CL(rcr);
982FASTOP2CL(shl);
983FASTOP2CL(shr);
984FASTOP2CL(sar);
985
986FASTOP2W(bsf);
987FASTOP2W(bsr);
988FASTOP2W(bt);
989FASTOP2W(bts);
990FASTOP2W(btr);
991FASTOP2W(btc);
992
993FASTOP2(xadd);
994
995FASTOP2R(cmp, cmp_r);
996
997static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
998{
	/* If src is zero, do not writeback, but update flags */
1000 if (ctxt->src.val == 0)
1001 ctxt->dst.type = OP_NONE;
1002 return fastop(ctxt, em_bsf);
1003}
1004
1005static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1006{
	/* If src is zero, do not writeback, but update flags */
1008 if (ctxt->src.val == 0)
1009 ctxt->dst.type = OP_NONE;
1010 return fastop(ctxt, em_bsr);
1011}
1012
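/*
 * Evaluate a condition code (0-15) against the given flags by loading the
 * flags into RFLAGS and calling the matching stub in the em_setcc table
 * above; each stub is 4 bytes long, hence the "+ 4 * (condition & 0xf)".
 */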
1013static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1014{
1015 u8 rc;
1016 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1017
1018 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1019 asm("push %[flags]; popf; call *%[fastop]"
1020 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
1021 return rc;
1022}
1023
1024static void fetch_register_operand(struct operand *op)
1025{
1026 switch (op->bytes) {
1027 case 1:
1028 op->val = *(u8 *)op->addr.reg;
1029 break;
1030 case 2:
1031 op->val = *(u16 *)op->addr.reg;
1032 break;
1033 case 4:
1034 op->val = *(u32 *)op->addr.reg;
1035 break;
1036 case 8:
1037 op->val = *(u64 *)op->addr.reg;
1038 break;
1039 }
1040}
1041
1042static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1043{
1044 ctxt->ops->get_fpu(ctxt);
1045 switch (reg) {
1046 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1047 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1048 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1049 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1050 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1051 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1052 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1053 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1054#ifdef CONFIG_X86_64
1055 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1056 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1057 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1058 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1059 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1060 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1061 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1062 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1063#endif
1064 default: BUG();
1065 }
1066 ctxt->ops->put_fpu(ctxt);
1067}
1068
1069static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1070 int reg)
1071{
1072 ctxt->ops->get_fpu(ctxt);
1073 switch (reg) {
1074 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1075 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1076 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1077 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1078 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1079 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1080 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1081 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1082#ifdef CONFIG_X86_64
1083 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1084 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1085 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1086 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1087 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1088 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1089 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1090 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1091#endif
1092 default: BUG();
1093 }
1094 ctxt->ops->put_fpu(ctxt);
1095}
1096
1097static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1098{
1099 ctxt->ops->get_fpu(ctxt);
1100 switch (reg) {
1101 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1102 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1103 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1104 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1105 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1106 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1107 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1108 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1109 default: BUG();
1110 }
1111 ctxt->ops->put_fpu(ctxt);
1112}
1113
1114static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1115{
1116 ctxt->ops->get_fpu(ctxt);
1117 switch (reg) {
1118 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1119 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1120 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1121 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1122 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1123 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1124 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1125 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1126 default: BUG();
1127 }
1128 ctxt->ops->put_fpu(ctxt);
1129}
1130
1131static int em_fninit(struct x86_emulate_ctxt *ctxt)
1132{
1133 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1134 return emulate_nm(ctxt);
1135
1136 ctxt->ops->get_fpu(ctxt);
1137 asm volatile("fninit");
1138 ctxt->ops->put_fpu(ctxt);
1139 return X86EMUL_CONTINUE;
1140}
1141
1142static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1143{
1144 u16 fcw;
1145
1146 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1147 return emulate_nm(ctxt);
1148
1149 ctxt->ops->get_fpu(ctxt);
1150 asm volatile("fnstcw %0": "+m"(fcw));
1151 ctxt->ops->put_fpu(ctxt);
1152
1153 ctxt->dst.val = fcw;
1154
1155 return X86EMUL_CONTINUE;
1156}
1157
1158static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1159{
1160 u16 fsw;
1161
1162 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1163 return emulate_nm(ctxt);
1164
1165 ctxt->ops->get_fpu(ctxt);
1166 asm volatile("fnstsw %0": "+m"(fsw));
1167 ctxt->ops->put_fpu(ctxt);
1168
1169 ctxt->dst.val = fsw;
1170
1171 return X86EMUL_CONTINUE;
1172}
1173
1174static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1175 struct operand *op)
1176{
1177 unsigned reg = ctxt->modrm_reg;
1178
1179 if (!(ctxt->d & ModRM))
1180 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1181
1182 if (ctxt->d & Sse) {
1183 op->type = OP_XMM;
1184 op->bytes = 16;
1185 op->addr.xmm = reg;
1186 read_sse_reg(ctxt, &op->vec_val, reg);
1187 return;
1188 }
1189 if (ctxt->d & Mmx) {
1190 reg &= 7;
1191 op->type = OP_MM;
1192 op->bytes = 8;
1193 op->addr.mm = reg;
1194 return;
1195 }
1196
1197 op->type = OP_REG;
1198 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1199 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1200
1201 fetch_register_operand(op);
1202 op->orig_val = op->val;
1203}
1204
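/* ModRM addressing that uses RBP or RSP as a base defaults to the SS segment. */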
1205static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1206{
1207 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1208 ctxt->modrm_seg = VCPU_SREG_SS;
1209}
1210
1211static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1212 struct operand *op)
1213{
1214 u8 sib;
1215 int index_reg, base_reg, scale;
1216 int rc = X86EMUL_CONTINUE;
1217 ulong modrm_ea = 0;
1218
1219 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1220 index_reg = (ctxt->rex_prefix << 2) & 8;
1221 base_reg = (ctxt->rex_prefix << 3) & 8;
1222
1223 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1224 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1225 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1226 ctxt->modrm_seg = VCPU_SREG_DS;
1227
1228 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1229 op->type = OP_REG;
1230 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1231 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1232 ctxt->d & ByteOp);
1233 if (ctxt->d & Sse) {
1234 op->type = OP_XMM;
1235 op->bytes = 16;
1236 op->addr.xmm = ctxt->modrm_rm;
1237 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1238 return rc;
1239 }
1240 if (ctxt->d & Mmx) {
1241 op->type = OP_MM;
1242 op->bytes = 8;
1243 op->addr.mm = ctxt->modrm_rm & 7;
1244 return rc;
1245 }
1246 fetch_register_operand(op);
1247 return rc;
1248 }
1249
1250 op->type = OP_MEM;
1251
1252 if (ctxt->ad_bytes == 2) {
1253 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1254 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1255 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1256 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1257
1258
1259 switch (ctxt->modrm_mod) {
1260 case 0:
1261 if (ctxt->modrm_rm == 6)
1262 modrm_ea += insn_fetch(u16, ctxt);
1263 break;
1264 case 1:
1265 modrm_ea += insn_fetch(s8, ctxt);
1266 break;
1267 case 2:
1268 modrm_ea += insn_fetch(u16, ctxt);
1269 break;
1270 }
1271 switch (ctxt->modrm_rm) {
1272 case 0:
1273 modrm_ea += bx + si;
1274 break;
1275 case 1:
1276 modrm_ea += bx + di;
1277 break;
1278 case 2:
1279 modrm_ea += bp + si;
1280 break;
1281 case 3:
1282 modrm_ea += bp + di;
1283 break;
1284 case 4:
1285 modrm_ea += si;
1286 break;
1287 case 5:
1288 modrm_ea += di;
1289 break;
1290 case 6:
1291 if (ctxt->modrm_mod != 0)
1292 modrm_ea += bp;
1293 break;
1294 case 7:
1295 modrm_ea += bx;
1296 break;
1297 }
1298 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1299 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1300 ctxt->modrm_seg = VCPU_SREG_SS;
1301 modrm_ea = (u16)modrm_ea;
1302 } else {
1303
1304 if ((ctxt->modrm_rm & 7) == 4) {
1305 sib = insn_fetch(u8, ctxt);
1306 index_reg |= (sib >> 3) & 7;
1307 base_reg |= sib & 7;
1308 scale = sib >> 6;
1309
1310 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1311 modrm_ea += insn_fetch(s32, ctxt);
1312 else {
1313 modrm_ea += reg_read(ctxt, base_reg);
1314 adjust_modrm_seg(ctxt, base_reg);
1315
1316 if ((ctxt->d & IncSP) &&
1317 base_reg == VCPU_REGS_RSP)
1318 modrm_ea += ctxt->op_bytes;
1319 }
1320 if (index_reg != 4)
1321 modrm_ea += reg_read(ctxt, index_reg) << scale;
1322 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1323 modrm_ea += insn_fetch(s32, ctxt);
1324 if (ctxt->mode == X86EMUL_MODE_PROT64)
1325 ctxt->rip_relative = 1;
1326 } else {
1327 base_reg = ctxt->modrm_rm;
1328 modrm_ea += reg_read(ctxt, base_reg);
1329 adjust_modrm_seg(ctxt, base_reg);
1330 }
1331 switch (ctxt->modrm_mod) {
1332 case 1:
1333 modrm_ea += insn_fetch(s8, ctxt);
1334 break;
1335 case 2:
1336 modrm_ea += insn_fetch(s32, ctxt);
1337 break;
1338 }
1339 }
1340 op->addr.mem.ea = modrm_ea;
1341 if (ctxt->ad_bytes != 8)
1342 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1343
1344done:
1345 return rc;
1346}
1347
1348static int decode_abs(struct x86_emulate_ctxt *ctxt,
1349 struct operand *op)
1350{
1351 int rc = X86EMUL_CONTINUE;
1352
1353 op->type = OP_MEM;
1354 switch (ctxt->ad_bytes) {
1355 case 2:
1356 op->addr.mem.ea = insn_fetch(u16, ctxt);
1357 break;
1358 case 4:
1359 op->addr.mem.ea = insn_fetch(u32, ctxt);
1360 break;
1361 case 8:
1362 op->addr.mem.ea = insn_fetch(u64, ctxt);
1363 break;
1364 }
1365done:
1366 return rc;
1367}
1368
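/*
 * For bit operations (BT/BTS/BTR/BTC) with a register bit offset and a memory
 * destination, the offset may address any bit relative to the operand: fold
 * the whole-operand part of the offset into the effective address and keep
 * only the sub-word part in src.val.
 */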
1369static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1370{
1371 long sv = 0, mask;
1372
1373 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1374 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1375
1376 if (ctxt->src.bytes == 2)
1377 sv = (s16)ctxt->src.val & (s16)mask;
1378 else if (ctxt->src.bytes == 4)
1379 sv = (s32)ctxt->src.val & (s32)mask;
1380 else
1381 sv = (s64)ctxt->src.val & (s64)mask;
1382
1383 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1384 ctxt->dst.addr.mem.ea + (sv >> 3));
1385 }

	/* only subword offset */
1388 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1389}
1390
1391static int read_emulated(struct x86_emulate_ctxt *ctxt,
1392 unsigned long addr, void *dest, unsigned size)
1393{
1394 int rc;
1395 struct read_cache *mc = &ctxt->mem_read;
1396
1397 if (mc->pos < mc->end)
1398 goto read_cached;
1399
1400 WARN_ON((mc->end + size) >= sizeof(mc->data));
1401
1402 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1403 &ctxt->exception);
1404 if (rc != X86EMUL_CONTINUE)
1405 return rc;
1406
1407 mc->end += size;
1408
1409read_cached:
1410 memcpy(dest, mc->data + mc->pos, size);
1411 mc->pos += size;
1412 return X86EMUL_CONTINUE;
1413}
1414
1415static int segmented_read(struct x86_emulate_ctxt *ctxt,
1416 struct segmented_address addr,
1417 void *data,
1418 unsigned size)
1419{
1420 int rc;
1421 ulong linear;
1422
1423 rc = linearize(ctxt, addr, size, false, &linear);
1424 if (rc != X86EMUL_CONTINUE)
1425 return rc;
1426 return read_emulated(ctxt, linear, data, size);
1427}
1428
1429static int segmented_write(struct x86_emulate_ctxt *ctxt,
1430 struct segmented_address addr,
1431 const void *data,
1432 unsigned size)
1433{
1434 int rc;
1435 ulong linear;
1436
1437 rc = linearize(ctxt, addr, size, true, &linear);
1438 if (rc != X86EMUL_CONTINUE)
1439 return rc;
1440 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1441 &ctxt->exception);
1442}
1443
1444static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1445 struct segmented_address addr,
1446 const void *orig_data, const void *data,
1447 unsigned size)
1448{
1449 int rc;
1450 ulong linear;
1451
1452 rc = linearize(ctxt, addr, size, true, &linear);
1453 if (rc != X86EMUL_CONTINUE)
1454 return rc;
1455 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1456 size, &ctxt->exception);
1457}
1458
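/*
 * Emulated IN/INS: for a rep-prefixed string input, read as many units as the
 * rep count, the read cache and the current page at ES:RDI allow in one go,
 * then hand the buffered data back piecewise on subsequent iterations.
 */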
1459static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1460 unsigned int size, unsigned short port,
1461 void *dest)
1462{
1463 struct read_cache *rc = &ctxt->io_read;
1464
1465 if (rc->pos == rc->end) {
1466 unsigned int in_page, n;
1467 unsigned int count = ctxt->rep_prefix ?
1468 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1469 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1470 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1471 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1472 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1473 if (n == 0)
1474 n = 1;
1475 rc->pos = rc->end = 0;
1476 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1477 return 0;
1478 rc->end = n * size;
1479 }
1480
1481 if (ctxt->rep_prefix && (ctxt->d & String) &&
1482 !(ctxt->eflags & X86_EFLAGS_DF)) {
1483 ctxt->dst.data = rc->data + rc->pos;
1484 ctxt->dst.type = OP_MEM_STR;
1485 ctxt->dst.count = (rc->end - rc->pos) / size;
1486 rc->pos = rc->end;
1487 } else {
1488 memcpy(dest, rc->data + rc->pos, size);
1489 rc->pos += size;
1490 }
1491 return 1;
1492}
1493
1494static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1495 u16 index, struct desc_struct *desc)
1496{
1497 struct desc_ptr dt;
1498 ulong addr;
1499
1500 ctxt->ops->get_idt(ctxt, &dt);
1501
1502 if (dt.size < index * 8 + 7)
1503 return emulate_gp(ctxt, index << 3 | 0x2);
1504
1505 addr = dt.address + index * 8;
1506 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1507 &ctxt->exception);
1508}
1509
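/* Bit 2 of a selector (TI) chooses between the LDT and the GDT. */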
1510static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1511 u16 selector, struct desc_ptr *dt)
1512{
1513 const struct x86_emulate_ops *ops = ctxt->ops;
1514 u32 base3 = 0;
1515
1516 if (selector & 1 << 2) {
1517 struct desc_struct desc;
1518 u16 sel;
1519
1520 memset (dt, 0, sizeof *dt);
1521 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1522 VCPU_SREG_LDTR))
1523 return;
1524
1525 dt->size = desc_limit_scaled(&desc);
1526 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1527 } else
1528 ops->get_gdt(ctxt, dt);
1529}
1530
1531static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1532 u16 selector, ulong *desc_addr_p)
1533{
1534 struct desc_ptr dt;
1535 u16 index = selector >> 3;
1536 ulong addr;
1537
1538 get_descriptor_table_ptr(ctxt, selector, &dt);
1539
1540 if (dt.size < index * 8 + 7)
1541 return emulate_gp(ctxt, selector & 0xfffc);
1542
1543 addr = dt.address + index * 8;
1544
1545#ifdef CONFIG_X86_64
1546 if (addr >> 32 != 0) {
1547 u64 efer = 0;
1548
1549 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1550 if (!(efer & EFER_LMA))
1551 addr &= (u32)-1;
1552 }
1553#endif
1554
1555 *desc_addr_p = addr;
1556 return X86EMUL_CONTINUE;
1557}
1558
1559
1560static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1561 u16 selector, struct desc_struct *desc,
1562 ulong *desc_addr_p)
1563{
1564 int rc;
1565
1566 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1567 if (rc != X86EMUL_CONTINUE)
1568 return rc;
1569
1570 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1571 &ctxt->exception);
1572}
1573
1574
1575static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1576 u16 selector, struct desc_struct *desc)
1577{
1578 int rc;
1579 ulong addr;
1580
1581 rc = get_descriptor_ptr(ctxt, selector, &addr);
1582 if (rc != X86EMUL_CONTINUE)
1583 return rc;
1584
1585 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1586 &ctxt->exception);
1587}
1588
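/*
 * Load a segment register in protected mode: fetch the descriptor, apply the
 * privilege and type checks appropriate for the segment and the kind of
 * transfer, set the accessed/busy bit where required, and finally install the
 * selector and descriptor via ->set_segment().  Real mode and VM86 take the
 * early shortcuts at the top of the function.
 */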
1589static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1590 u16 selector, int seg, u8 cpl,
1591 enum x86_transfer_type transfer,
1592 struct desc_struct *desc)
1593{
1594 struct desc_struct seg_desc, old_desc;
1595 u8 dpl, rpl;
1596 unsigned err_vec = GP_VECTOR;
1597 u32 err_code = 0;
1598 bool null_selector = !(selector & ~0x3);
1599 ulong desc_addr;
1600 int ret;
1601 u16 dummy;
1602 u32 base3 = 0;
1603
1604 memset(&seg_desc, 0, sizeof seg_desc);
1605
1606 if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
1609 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1610 set_desc_base(&seg_desc, selector << 4);
1611 goto load;
1612 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
1614 set_desc_base(&seg_desc, selector << 4);
1615 set_desc_limit(&seg_desc, 0xffff);
1616 seg_desc.type = 3;
1617 seg_desc.p = 1;
1618 seg_desc.s = 1;
1619 seg_desc.dpl = 3;
1620 goto load;
1621 }
1622
1623 rpl = selector & 3;
1624
1625
1626 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1627 goto exception;
1628
1629
1630 if (null_selector) {
1631 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1632 goto exception;
1633
1634 if (seg == VCPU_SREG_SS) {
1635 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1636 goto exception;
1637
1638
1639
1640
1641
1642 seg_desc.type = 3;
1643 seg_desc.p = 1;
1644 seg_desc.s = 1;
1645 seg_desc.dpl = cpl;
1646 seg_desc.d = 1;
1647 seg_desc.g = 1;
1648 }
1649
1650
1651 goto load;
1652 }
1653
1654 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1655 if (ret != X86EMUL_CONTINUE)
1656 return ret;
1657
1658 err_code = selector & 0xfffc;
1659 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1660 GP_VECTOR;
1661
1662
1663 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1664 if (transfer == X86_TRANSFER_CALL_JMP)
1665 return X86EMUL_UNHANDLEABLE;
1666 goto exception;
1667 }
1668
1669 if (!seg_desc.p) {
1670 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1671 goto exception;
1672 }
1673
1674 dpl = seg_desc.dpl;
1675
1676 switch (seg) {
1677 case VCPU_SREG_SS:
1678
1679
1680
1681
1682 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1683 goto exception;
1684 break;
1685 case VCPU_SREG_CS:
1686 if (!(seg_desc.type & 8))
1687 goto exception;
1688
1689 if (seg_desc.type & 4) {
1690
1691 if (dpl > cpl)
1692 goto exception;
1693 } else {
1694
1695 if (rpl > cpl || dpl != cpl)
1696 goto exception;
1697 }
1698
1699 if (seg_desc.d && seg_desc.l) {
1700 u64 efer = 0;
1701
1702 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1703 if (efer & EFER_LMA)
1704 goto exception;
1705 }
1706
1707
1708 selector = (selector & 0xfffc) | cpl;
1709 break;
1710 case VCPU_SREG_TR:
1711 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1712 goto exception;
1713 old_desc = seg_desc;
1714 seg_desc.type |= 2;
1715 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1716 sizeof(seg_desc), &ctxt->exception);
1717 if (ret != X86EMUL_CONTINUE)
1718 return ret;
1719 break;
1720 case VCPU_SREG_LDTR:
1721 if (seg_desc.s || seg_desc.type != 2)
1722 goto exception;
1723 break;
1724 default:
1725
1726
1727
1728
1729
1730 if ((seg_desc.type & 0xa) == 0x8 ||
1731 (((seg_desc.type & 0xc) != 0xc) &&
1732 (rpl > dpl && cpl > dpl)))
1733 goto exception;
1734 break;
1735 }
1736
1737 if (seg_desc.s) {
1738
1739 if (!(seg_desc.type & 1)) {
1740 seg_desc.type |= 1;
1741 ret = write_segment_descriptor(ctxt, selector,
1742 &seg_desc);
1743 if (ret != X86EMUL_CONTINUE)
1744 return ret;
1745 }
1746 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1747 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1748 sizeof(base3), &ctxt->exception);
1749 if (ret != X86EMUL_CONTINUE)
1750 return ret;
1751 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1752 ((u64)base3 << 32)))
1753 return emulate_gp(ctxt, 0);
1754 }
1755load:
1756 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1757 if (desc)
1758 *desc = seg_desc;
1759 return X86EMUL_CONTINUE;
1760exception:
1761 return emulate_exception(ctxt, err_vec, err_code, true);
1762}
1763
1764static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1765 u16 selector, int seg)
1766{
1767 u8 cpl = ctxt->ops->cpl(ctxt);
1768
	/*
	 * A NULL SS selector is acceptable in 64-bit mode only at CPL < 3;
	 * selector 3 is a NULL selector with RPL = 3 and must #GP here
	 * rather than being waved through by the generic NULL-selector
	 * handling in __load_segment_descriptor().
	 */
1779 if (seg == VCPU_SREG_SS && selector == 3 &&
1780 ctxt->mode == X86EMUL_MODE_PROT64)
1781 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1782
1783 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1784 X86_TRANSFER_NONE, NULL);
1785}
1786
1787static void write_register_operand(struct operand *op)
1788{
1789 return assign_register(op->addr.reg, op->val, op->bytes);
1790}
1791
1792static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1793{
1794 switch (op->type) {
1795 case OP_REG:
1796 write_register_operand(op);
1797 break;
1798 case OP_MEM:
1799 if (ctxt->lock_prefix)
1800 return segmented_cmpxchg(ctxt,
1801 op->addr.mem,
1802 &op->orig_val,
1803 &op->val,
1804 op->bytes);
1805 else
1806 return segmented_write(ctxt,
1807 op->addr.mem,
1808 &op->val,
1809 op->bytes);
1810 break;
1811 case OP_MEM_STR:
1812 return segmented_write(ctxt,
1813 op->addr.mem,
1814 op->data,
1815 op->bytes * op->count);
1816 break;
1817 case OP_XMM:
1818 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1819 break;
1820 case OP_MM:
1821 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1822 break;
1823 case OP_NONE:
1824
1825 break;
1826 default:
1827 break;
1828 }
1829 return X86EMUL_CONTINUE;
1830}
1831
1832static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1833{
1834 struct segmented_address addr;
1835
1836 rsp_increment(ctxt, -bytes);
1837 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1838 addr.seg = VCPU_SREG_SS;
1839
1840 return segmented_write(ctxt, addr, data, bytes);
1841}
1842
1843static int em_push(struct x86_emulate_ctxt *ctxt)
1844{
	/* Disable writeback. */
1846 ctxt->dst.type = OP_NONE;
1847 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1848}
1849
1850static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1851 void *dest, int len)
1852{
1853 int rc;
1854 struct segmented_address addr;
1855
1856 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1857 addr.seg = VCPU_SREG_SS;
1858 rc = segmented_read(ctxt, addr, dest, len);
1859 if (rc != X86EMUL_CONTINUE)
1860 return rc;
1861
1862 rsp_increment(ctxt, len);
1863 return rc;
1864}
1865
1866static int em_pop(struct x86_emulate_ctxt *ctxt)
1867{
1868 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1869}
1870
1871static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1872 void *dest, int len)
1873{
1874 int rc;
1875 unsigned long val, change_mask;
1876 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1877 int cpl = ctxt->ops->cpl(ctxt);
1878
1879 rc = emulate_pop(ctxt, &val, len);
1880 if (rc != X86EMUL_CONTINUE)
1881 return rc;
1882
1883 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1884 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1885 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1886 X86_EFLAGS_AC | X86_EFLAGS_ID;
1887
1888 switch(ctxt->mode) {
1889 case X86EMUL_MODE_PROT64:
1890 case X86EMUL_MODE_PROT32:
1891 case X86EMUL_MODE_PROT16:
1892 if (cpl == 0)
1893 change_mask |= X86_EFLAGS_IOPL;
1894 if (cpl <= iopl)
1895 change_mask |= X86_EFLAGS_IF;
1896 break;
1897 case X86EMUL_MODE_VM86:
1898 if (iopl < 3)
1899 return emulate_gp(ctxt, 0);
1900 change_mask |= X86_EFLAGS_IF;
1901 break;
1902 default:
1903 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1904 break;
1905 }
1906
1907 *(unsigned long *)dest =
1908 (ctxt->eflags & ~change_mask) | (val & change_mask);
1909
1910 return rc;
1911}
1912
1913static int em_popf(struct x86_emulate_ctxt *ctxt)
1914{
1915 ctxt->dst.type = OP_REG;
1916 ctxt->dst.addr.reg = &ctxt->eflags;
1917 ctxt->dst.bytes = ctxt->op_bytes;
1918 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1919}
1920
1921static int em_enter(struct x86_emulate_ctxt *ctxt)
1922{
1923 int rc;
1924 unsigned frame_size = ctxt->src.val;
1925 unsigned nesting_level = ctxt->src2.val & 31;
1926 ulong rbp;
1927
1928 if (nesting_level)
1929 return X86EMUL_UNHANDLEABLE;
1930
1931 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1932 rc = push(ctxt, &rbp, stack_size(ctxt));
1933 if (rc != X86EMUL_CONTINUE)
1934 return rc;
1935 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1936 stack_mask(ctxt));
1937 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1938 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1939 stack_mask(ctxt));
1940 return X86EMUL_CONTINUE;
1941}
1942
1943static int em_leave(struct x86_emulate_ctxt *ctxt)
1944{
1945 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1946 stack_mask(ctxt));
1947 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1948}
1949
1950static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1951{
1952 int seg = ctxt->src2.val;
1953
1954 ctxt->src.val = get_segment_selector(ctxt, seg);
1955 if (ctxt->op_bytes == 4) {
1956 rsp_increment(ctxt, -2);
1957 ctxt->op_bytes = 2;
1958 }
1959
1960 return em_push(ctxt);
1961}
1962
1963static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1964{
1965 int seg = ctxt->src2.val;
1966 unsigned long selector;
1967 int rc;
1968
1969 rc = emulate_pop(ctxt, &selector, 2);
1970 if (rc != X86EMUL_CONTINUE)
1971 return rc;
1972
1973 if (ctxt->modrm_reg == VCPU_SREG_SS)
1974 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1975 if (ctxt->op_bytes > 2)
1976 rsp_increment(ctxt, ctxt->op_bytes - 2);
1977
1978 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1979 return rc;
1980}
1981
1982static int em_pusha(struct x86_emulate_ctxt *ctxt)
1983{
1984 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1985 int rc = X86EMUL_CONTINUE;
1986 int reg = VCPU_REGS_RAX;
1987
1988 while (reg <= VCPU_REGS_RDI) {
1989 (reg == VCPU_REGS_RSP) ?
1990 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1991
1992 rc = em_push(ctxt);
1993 if (rc != X86EMUL_CONTINUE)
1994 return rc;
1995
1996 ++reg;
1997 }
1998
1999 return rc;
2000}
2001
2002static int em_pushf(struct x86_emulate_ctxt *ctxt)
2003{
2004 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2005 return em_push(ctxt);
2006}
2007
2008static int em_popa(struct x86_emulate_ctxt *ctxt)
2009{
2010 int rc = X86EMUL_CONTINUE;
2011 int reg = VCPU_REGS_RDI;
2012 u32 val;
2013
2014 while (reg >= VCPU_REGS_RAX) {
2015 if (reg == VCPU_REGS_RSP) {
2016 rsp_increment(ctxt, ctxt->op_bytes);
2017 --reg;
2018 }
2019
2020 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2021 if (rc != X86EMUL_CONTINUE)
2022 break;
2023 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2024 --reg;
2025 }
2026 return rc;
2027}
2028
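/*
 * Real-mode interrupt/INT n: push FLAGS, CS and IP, clear IF/TF/AC, and load
 * CS:IP from the 4-byte IVT entry at idt.address + irq * 4.
 */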
2029static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2030{
2031 const struct x86_emulate_ops *ops = ctxt->ops;
2032 int rc;
2033 struct desc_ptr dt;
2034 gva_t cs_addr;
2035 gva_t eip_addr;
2036 u16 cs, eip;
2037
2038
2039 ctxt->src.val = ctxt->eflags;
2040 rc = em_push(ctxt);
2041 if (rc != X86EMUL_CONTINUE)
2042 return rc;
2043
2044 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2045
2046 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2047 rc = em_push(ctxt);
2048 if (rc != X86EMUL_CONTINUE)
2049 return rc;
2050
2051 ctxt->src.val = ctxt->_eip;
2052 rc = em_push(ctxt);
2053 if (rc != X86EMUL_CONTINUE)
2054 return rc;
2055
2056 ops->get_idt(ctxt, &dt);
2057
2058 eip_addr = dt.address + (irq << 2);
2059 cs_addr = dt.address + (irq << 2) + 2;
2060
2061 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
2062 if (rc != X86EMUL_CONTINUE)
2063 return rc;
2064
2065 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
2066 if (rc != X86EMUL_CONTINUE)
2067 return rc;
2068
2069 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2070 if (rc != X86EMUL_CONTINUE)
2071 return rc;
2072
2073 ctxt->_eip = eip;
2074
2075 return rc;
2076}
2077
2078int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2079{
2080 int rc;
2081
2082 invalidate_registers(ctxt);
2083 rc = __emulate_int_real(ctxt, irq);
2084 if (rc == X86EMUL_CONTINUE)
2085 writeback_registers(ctxt);
2086 return rc;
2087}
2088
2089static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2090{
2091 switch(ctxt->mode) {
2092 case X86EMUL_MODE_REAL:
2093 return __emulate_int_real(ctxt, irq);
2094 case X86EMUL_MODE_VM86:
2095 case X86EMUL_MODE_PROT16:
2096 case X86EMUL_MODE_PROT32:
2097 case X86EMUL_MODE_PROT64:
2098 default:
		/* Protected mode interrupts are not handled here yet. */
2100 return X86EMUL_UNHANDLEABLE;
2101 }
2102}
2103
2104static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2105{
2106 int rc = X86EMUL_CONTINUE;
2107 unsigned long temp_eip = 0;
2108 unsigned long temp_eflags = 0;
2109 unsigned long cs = 0;
2110 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2111 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2112 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2113 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2114 X86_EFLAGS_AC | X86_EFLAGS_ID |
2115 X86_EFLAGS_FIXED;
2116 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2117 X86_EFLAGS_VIP;
2118
2119
2120
2121 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2122
2123 if (rc != X86EMUL_CONTINUE)
2124 return rc;
2125
2126 if (temp_eip & ~0xffff)
2127 return emulate_gp(ctxt, 0);
2128
2129 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2130
2131 if (rc != X86EMUL_CONTINUE)
2132 return rc;
2133
2134 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2135
2136 if (rc != X86EMUL_CONTINUE)
2137 return rc;
2138
2139 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2140
2141 if (rc != X86EMUL_CONTINUE)
2142 return rc;
2143
2144 ctxt->_eip = temp_eip;
2145
2146 if (ctxt->op_bytes == 4)
2147 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2148 else if (ctxt->op_bytes == 2) {
2149 ctxt->eflags &= ~0xffff;
2150 ctxt->eflags |= temp_eflags;
2151 }
2152
2153 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2154 ctxt->eflags |= X86_EFLAGS_FIXED;
2155 ctxt->ops->set_nmi_mask(ctxt, false);
2156
2157 return rc;
2158}
2159
2160static int em_iret(struct x86_emulate_ctxt *ctxt)
2161{
2162 switch(ctxt->mode) {
2163 case X86EMUL_MODE_REAL:
2164 return emulate_iret_real(ctxt);
2165 case X86EMUL_MODE_VM86:
2166 case X86EMUL_MODE_PROT16:
2167 case X86EMUL_MODE_PROT32:
2168 case X86EMUL_MODE_PROT64:
2169 default:
		/* IRET from protected mode is not handled here yet. */
2171 return X86EMUL_UNHANDLEABLE;
2172 }
2173}
2174
2175static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2176{
2177 int rc;
2178 unsigned short sel;
2179 struct desc_struct new_desc;
2180 u8 cpl = ctxt->ops->cpl(ctxt);
2181
2182 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2183
2184 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2185 X86_TRANSFER_CALL_JMP,
2186 &new_desc);
2187 if (rc != X86EMUL_CONTINUE)
2188 return rc;
2189
2190 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2191
2192 if (rc != X86EMUL_CONTINUE)
2193 return X86EMUL_UNHANDLEABLE;
2194
2195 return rc;
2196}
2197
2198static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2199{
2200 return assign_eip_near(ctxt, ctxt->src.val);
2201}
2202
2203static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2204{
2205 int rc;
2206 long int old_eip;
2207
2208 old_eip = ctxt->_eip;
2209 rc = assign_eip_near(ctxt, ctxt->src.val);
2210 if (rc != X86EMUL_CONTINUE)
2211 return rc;
2212 ctxt->src.val = old_eip;
2213 rc = em_push(ctxt);
2214 return rc;
2215}
2216
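/*
 * CMPXCHG8B: the 64-bit destination has already been read into
 * dst.orig_val64 (the 16-byte CMPXCHG16B form is punted as unhandleable).
 * If it differs from EDX:EAX, load it into EDX:EAX and clear ZF; otherwise
 * set ZF and let the common writeback path store ECX:EBX via dst.val64.
 */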
2217static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2218{
2219 u64 old = ctxt->dst.orig_val64;
2220
2221 if (ctxt->dst.bytes == 16)
2222 return X86EMUL_UNHANDLEABLE;
2223
2224 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2225 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2226 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2227 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2228 ctxt->eflags &= ~X86_EFLAGS_ZF;
2229 } else {
2230 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2231 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2232
2233 ctxt->eflags |= X86_EFLAGS_ZF;
2234 }
2235 return X86EMUL_CONTINUE;
2236}
2237
2238static int em_ret(struct x86_emulate_ctxt *ctxt)
2239{
2240 int rc;
2241 unsigned long eip;
2242
2243 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2244 if (rc != X86EMUL_CONTINUE)
2245 return rc;
2246
2247 return assign_eip_near(ctxt, eip);
2248}
2249
2250static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2251{
2252 int rc;
2253 unsigned long eip, cs;
2254 int cpl = ctxt->ops->cpl(ctxt);
2255 struct desc_struct new_desc;
2256
2257 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2258 if (rc != X86EMUL_CONTINUE)
2259 return rc;
2260 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2261 if (rc != X86EMUL_CONTINUE)
2262 return rc;
2263
2264 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2265 return X86EMUL_UNHANDLEABLE;
2266 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2267 X86_TRANSFER_RET,
2268 &new_desc);
2269 if (rc != X86EMUL_CONTINUE)
2270 return rc;
2271 rc = assign_eip_far(ctxt, eip, &new_desc);
2272
2273 if (rc != X86EMUL_CONTINUE)
2274 return X86EMUL_UNHANDLEABLE;
2275
2276 return rc;
2277}
2278
2279static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2280{
2281 int rc;
2282
2283 rc = em_ret_far(ctxt);
2284 if (rc != X86EMUL_CONTINUE)
2285 return rc;
2286 rsp_increment(ctxt, ctxt->src.val);
2287 return X86EMUL_CONTINUE;
2288}
2289
2290static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2291{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX. */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create a write cycle to dest by writing the same value. */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
2310 }
2311 return X86EMUL_CONTINUE;
2312}
2313
2314static int em_lseg(struct x86_emulate_ctxt *ctxt)
2315{
2316 int seg = ctxt->src2.val;
2317 unsigned short sel;
2318 int rc;
2319
2320 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2321
2322 rc = load_segment_descriptor(ctxt, sel, seg);
2323 if (rc != X86EMUL_CONTINUE)
2324 return rc;
2325
2326 ctxt->dst.val = ctxt->src.val;
2327 return rc;
2328}
2329
2330static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2331{
2332 u32 eax, ebx, ecx, edx;
2333
2334 eax = 0x80000001;
2335 ecx = 0;
2336 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2337 return edx & bit(X86_FEATURE_LM);
2338}
2339
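/*
 * GET_SMSTATE() reads one field of the SMM state-save area via a physical
 * read.  It is a GNU statement expression: on a failed read it executes
 * "return X86EMUL_UNHANDLEABLE;" from the *calling* function, so it can only
 * be used inside helpers that return an emulator status.  Typical use, as in
 * rsm_load_state_32() below:
 *
 *	cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
 */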
2340#define GET_SMSTATE(type, smbase, offset) \
2341 ({ \
2342 type __val; \
2343 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
2344 sizeof(__val)); \
2345 if (r != X86EMUL_CONTINUE) \
2346 return X86EMUL_UNHANDLEABLE; \
2347 __val; \
2348 })
2349
2350static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2351{
2352 desc->g = (flags >> 23) & 1;
2353 desc->d = (flags >> 22) & 1;
2354 desc->l = (flags >> 21) & 1;
2355 desc->avl = (flags >> 20) & 1;
2356 desc->p = (flags >> 15) & 1;
2357 desc->dpl = (flags >> 13) & 3;
2358 desc->s = (flags >> 12) & 1;
2359 desc->type = (flags >> 8) & 15;
2360}
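/*
 * The "flags" word consumed above uses the layout of bits 8-23 of a segment
 * descriptor's high dword (type, S, DPL, P, AVL, L, D/B, G); callers that
 * only have the packed 16-bit attribute word shift it left by 8 first.
 */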
2361
2362static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2363{
2364 struct desc_struct desc;
2365 int offset;
2366 u16 selector;
2367
2368 selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2369
2370 if (n < 3)
2371 offset = 0x7f84 + n * 12;
2372 else
2373 offset = 0x7f2c + (n - 3) * 12;
2374
2375 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2376 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2377 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2378 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2379 return X86EMUL_CONTINUE;
2380}
2381
2382static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2383{
2384 struct desc_struct desc;
2385 int offset;
2386 u16 selector;
2387 u32 base3;
2388
2389 offset = 0x7e00 + n * 16;
2390
2391 selector = GET_SMSTATE(u16, smbase, offset);
2392 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2393 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2394 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2395 base3 = GET_SMSTATE(u32, smbase, offset + 12);
2396
2397 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2398 return X86EMUL_CONTINUE;
2399}
2400
2401static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2402 u64 cr0, u64 cr4)
2403{
2404 int bad;
2405
	/*
	 * First write CR4 with PCIDE cleared, then CR0.  CR4.PCIDE can only
	 * be turned on again once long mode is active, so restore it as the
	 * last step if the saved value had it set.
	 */
2411 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2412 if (bad)
2413 return X86EMUL_UNHANDLEABLE;
2414
2415 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2416 if (bad)
2417 return X86EMUL_UNHANDLEABLE;
2418
2419 if (cr4 & X86_CR4_PCIDE) {
2420 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2421 if (bad)
2422 return X86EMUL_UNHANDLEABLE;
2423 }
2424
2425 return X86EMUL_CONTINUE;
2426}
2427
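/*
 * The rsm_load_state_{32,64}() helpers parse the SMM state-save area.  Note
 * that the "smbase" argument they are given is actually SMBASE + 0x8000 (see
 * em_rsm()), so the 0x7exx/0x7fxx offsets used below land in the
 * architectural save area at SMBASE + 0xfe00 .. SMBASE + 0xffff.  The exact
 * field offsets are assumed to match the layout KVM uses when it saves state
 * on SMM entry.
 */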
2428static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2429{
2430 struct desc_struct desc;
2431 struct desc_ptr dt;
2432 u16 selector;
2433 u32 val, cr0, cr4;
2434 int i;
2435
2436 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2437 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
2438 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2439 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2440
2441 for (i = 0; i < 8; i++)
2442 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2443
2444 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2445 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2446 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2447 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2448
2449 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2450 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2451 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2452 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2453 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2454
2455 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2456 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2457 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2458 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2459 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2460
2461 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2462 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2463 ctxt->ops->set_gdt(ctxt, &dt);
2464
2465 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2466 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2467 ctxt->ops->set_idt(ctxt, &dt);
2468
2469 for (i = 0; i < 6; i++) {
2470 int r = rsm_load_seg_32(ctxt, smbase, i);
2471 if (r != X86EMUL_CONTINUE)
2472 return r;
2473 }
2474
2475 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2476
2477 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2478
2479 return rsm_enter_protected_mode(ctxt, cr0, cr4);
2480}
2481
2482static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2483{
2484 struct desc_struct desc;
2485 struct desc_ptr dt;
2486 u64 val, cr0, cr4;
2487 u32 base3;
2488 u16 selector;
2489 int i, r;
2490
2491 for (i = 0; i < 16; i++)
2492 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2493
2494 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2495 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2496
2497 val = GET_SMSTATE(u32, smbase, 0x7f68);
2498 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2499 val = GET_SMSTATE(u32, smbase, 0x7f60);
2500 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2501
2502 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2503 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
2504 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2505 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2506 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2507 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2508
2509 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2510 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2511 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2512 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2513 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2514 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2515
2516 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2517 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2518 ctxt->ops->set_idt(ctxt, &dt);
2519
2520 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2521 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2522 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2523 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2524 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2525 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2526
2527 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2528 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2529 ctxt->ops->set_gdt(ctxt, &dt);
2530
2531 r = rsm_enter_protected_mode(ctxt, cr0, cr4);
2532 if (r != X86EMUL_CONTINUE)
2533 return r;
2534
2535 for (i = 0; i < 6; i++) {
2536 r = rsm_load_seg_64(ctxt, smbase, i);
2537 if (r != X86EMUL_CONTINUE)
2538 return r;
2539 }
2540
2541 return X86EMUL_CONTINUE;
2542}
2543
2544static int em_rsm(struct x86_emulate_ctxt *ctxt)
2545{
2546 unsigned long cr0, cr4, efer;
2547 u64 smbase;
2548 int ret;
2549
2550 if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
2551 return emulate_ud(ctxt);
2552
	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG. */
		if (cr4 & X86_CR4_PCIDE) {
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
			cr4 &= ~X86_CR4_PCIDE;
		}

		/* A 32-bit code segment is required to clear EFER.LMA. */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will also clear EFER.LMA. */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
	if (cr4 & X86_CR4_PAE)
		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

	/* And finally go back to 32-bit mode. */
	efer = 0;
	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2587
2588 smbase = ctxt->ops->get_smbase(ctxt);
2589 if (emulator_has_longmode(ctxt))
2590 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2591 else
2592 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2593
	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}
2598
2599 if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2600 ctxt->ops->set_nmi_mask(ctxt, false);
2601
2602 ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
2603 ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
2604 return X86EMUL_CONTINUE;
2605}
2606
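/*
 * Build the flat segments that SYSCALL/SYSENTER/SYSEXIT load: base 0, 4GiB
 * limit (G=1, limit 0xfffff), DPL 0, present; an execute/read code type
 * (0xb) for CS and a read/write data type (0x3) for SS.  Callers adjust
 * cs.l/cs.d and the DPLs afterwards where the instruction requires it.
 */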
2607static void
2608setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2609 struct desc_struct *cs, struct desc_struct *ss)
2610{
2611 cs->l = 0;
2612 set_desc_base(cs, 0);
2613 cs->g = 1;
2614 set_desc_limit(cs, 0xfffff);
2615 cs->type = 0x0b;
2616 cs->s = 1;
2617 cs->dpl = 0;
2618 cs->p = 1;
2619 cs->d = 1;
2620 cs->avl = 0;
2621
2622 set_desc_base(ss, 0);
2623 set_desc_limit(ss, 0xfffff);
2624 ss->g = 1;
2625 ss->s = 1;
2626 ss->type = 0x03;
2627 ss->d = 1;
2628 ss->dpl = 0;
2629 ss->p = 1;
2630 ss->l = 0;
2631 ss->avl = 0;
2632}
2633
2634static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2635{
2636 u32 eax, ebx, ecx, edx;
2637
2638 eax = ecx = 0;
2639 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2640 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2641 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2642 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2643}
2644
2645static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2646{
2647 const struct x86_emulate_ops *ops = ctxt->ops;
2648 u32 eax, ebx, ecx, edx;
2649
	/*
	 * SYSCALL should always be enabled in long mode, so only become
	 * vendor specific (via CPUID) if other modes are active.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;
2656
2657 eax = 0x00000000;
2658 ecx = 0x00000000;
2659 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2660
	/*
	 * Intel ("GenuineIntel"): Intel CPUs only support SYSCALL in 64-bit
	 * long mode, so a 64-bit guest running a 32-bit compat application
	 * would get #UD on real hardware.  Emulating the AMD behaviour here
	 * would make the emulator diverge from the CPU, so refuse.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* Default: not a known vendor string, apply Intel's stricter rule. */
	return false;
2687}
2688
2689static int em_syscall(struct x86_emulate_ctxt *ctxt)
2690{
2691 const struct x86_emulate_ops *ops = ctxt->ops;
2692 struct desc_struct cs, ss;
2693 u64 msr_data;
2694 u16 cs_sel, ss_sel;
2695 u64 efer = 0;
2696
	/* SYSCALL is not available in real mode or VM86 mode. */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);
2701
2702 if (!(em_syscall_is_enabled(ctxt)))
2703 return emulate_ud(ctxt);
2704
2705 ops->get_msr(ctxt, MSR_EFER, &efer);
2706 setup_syscalls_segments(ctxt, &cs, &ss);
2707
2708 if (!(efer & EFER_SCE))
2709 return emulate_ud(ctxt);
2710
2711 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2712 msr_data >>= 32;
2713 cs_sel = (u16)(msr_data & 0xfffc);
2714 ss_sel = (u16)(msr_data + 8);
2715
2716 if (efer & EFER_LMA) {
2717 cs.d = 0;
2718 cs.l = 1;
2719 }
2720 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2721 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2722
2723 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2724 if (efer & EFER_LMA) {
2725#ifdef CONFIG_X86_64
2726 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2727
2728 ops->get_msr(ctxt,
2729 ctxt->mode == X86EMUL_MODE_PROT64 ?
2730 MSR_LSTAR : MSR_CSTAR, &msr_data);
2731 ctxt->_eip = msr_data;
2732
2733 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2734 ctxt->eflags &= ~msr_data;
2735 ctxt->eflags |= X86_EFLAGS_FIXED;
2736#endif
2737 } else {
2738
2739 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2740 ctxt->_eip = (u32)msr_data;
2741
2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2743 }
2744
2745 return X86EMUL_CONTINUE;
2746}
2747
2748static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2749{
2750 const struct x86_emulate_ops *ops = ctxt->ops;
2751 struct desc_struct cs, ss;
2752 u64 msr_data;
2753 u16 cs_sel, ss_sel;
2754 u64 efer = 0;
2755
2756 ops->get_msr(ctxt, MSR_EFER, &efer);
2757
2758 if (ctxt->mode == X86EMUL_MODE_REAL)
2759 return emulate_gp(ctxt, 0);
2760
	/*
	 * SYSENTER is not recognized by AMD CPUs in compat mode (it is
	 * recognized in legacy protected mode, and always on Intel).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);
2768
	/* SYSENTER/SYSEXIT have not been emulated in 64-bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;
2772
2773 setup_syscalls_segments(ctxt, &cs, &ss);
2774
2775 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2776 if ((msr_data & 0xfffc) == 0x0)
2777 return emulate_gp(ctxt, 0);
2778
2779 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2780 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2781 ss_sel = cs_sel + 8;
2782 if (efer & EFER_LMA) {
2783 cs.d = 0;
2784 cs.l = 1;
2785 }
2786
2787 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2788 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2789
2790 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2791 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2792
2793 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2794 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2795 (u32)msr_data;
2796
2797 return X86EMUL_CONTINUE;
2798}
2799
2800static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2801{
2802 const struct x86_emulate_ops *ops = ctxt->ops;
2803 struct desc_struct cs, ss;
2804 u64 msr_data, rcx, rdx;
2805 int usermode;
2806 u16 cs_sel = 0, ss_sel = 0;
2807
	/* SYSEXIT raises #GP in real mode and VM86 mode. */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);
2812
2813 setup_syscalls_segments(ctxt, &cs, &ss);
2814
2815 if ((ctxt->rex_prefix & 0x8) != 0x0)
2816 usermode = X86EMUL_MODE_PROT64;
2817 else
2818 usermode = X86EMUL_MODE_PROT32;
2819
2820 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2821 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2822
2823 cs.dpl = 3;
2824 ss.dpl = 3;
2825 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2826 switch (usermode) {
2827 case X86EMUL_MODE_PROT32:
2828 cs_sel = (u16)(msr_data + 16);
2829 if ((msr_data & 0xfffc) == 0x0)
2830 return emulate_gp(ctxt, 0);
2831 ss_sel = (u16)(msr_data + 24);
2832 rcx = (u32)rcx;
2833 rdx = (u32)rdx;
2834 break;
2835 case X86EMUL_MODE_PROT64:
2836 cs_sel = (u16)(msr_data + 32);
2837 if (msr_data == 0x0)
2838 return emulate_gp(ctxt, 0);
2839 ss_sel = cs_sel + 8;
2840 cs.d = 0;
2841 cs.l = 1;
2842 if (is_noncanonical_address(rcx) ||
2843 is_noncanonical_address(rdx))
2844 return emulate_gp(ctxt, 0);
2845 break;
2846 }
2847 cs_sel |= SEGMENT_RPL_MASK;
2848 ss_sel |= SEGMENT_RPL_MASK;
2849
2850 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2851 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2852
2853 ctxt->_eip = rdx;
2854 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2855
2856 return X86EMUL_CONTINUE;
2857}
2858
2859static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2860{
2861 int iopl;
2862 if (ctxt->mode == X86EMUL_MODE_REAL)
2863 return false;
2864 if (ctxt->mode == X86EMUL_MODE_VM86)
2865 return true;
2866 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2867 return ctxt->ops->cpl(ctxt) > iopl;
2868}
2869
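/*
 * Consult the I/O permission bitmap in the TSS for a port access.  The
 * bitmap's 16-bit offset is stored at byte 102 of the TSS; the byte covering
 * a port is (port / 8) and the first relevant bit within it is (port & 7).
 * Worked example (illustrative values): port 0x3f8, len 2 -> bitmap byte
 * 0x7f, bit index 0, mask 0x3; the access is allowed only if both bits are
 * clear.
 */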
2870static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2871 u16 port, u16 len)
2872{
2873 const struct x86_emulate_ops *ops = ctxt->ops;
2874 struct desc_struct tr_seg;
2875 u32 base3;
2876 int r;
2877 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2878 unsigned mask = (1 << len) - 1;
2879 unsigned long base;
2880
2881 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2882 if (!tr_seg.p)
2883 return false;
2884 if (desc_limit_scaled(&tr_seg) < 103)
2885 return false;
2886 base = get_desc_base(&tr_seg);
2887#ifdef CONFIG_X86_64
2888 base |= ((u64)base3) << 32;
2889#endif
2890 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2891 if (r != X86EMUL_CONTINUE)
2892 return false;
2893 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2894 return false;
2895 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2896 if (r != X86EMUL_CONTINUE)
2897 return false;
2898 if ((perm >> bit_idx) & mask)
2899 return false;
2900 return true;
2901}
2902
2903static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2904 u16 port, u16 len)
2905{
2906 if (ctxt->perm_ok)
2907 return true;
2908
2909 if (emulator_bad_iopl(ctxt))
2910 if (!emulator_io_port_access_allowed(ctxt, port, len))
2911 return false;
2912
2913 ctxt->perm_ok = true;
2914
2915 return true;
2916}
2917
2918static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2919{
	/*
	 * Intel CPUs mask the counter and pointers in quite a strange manner
	 * when ECX is zero due to REP-string optimizations.
	 */
2924#ifdef CONFIG_X86_64
2925 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2926 return;
2927
2928 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2929
	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsw/movsd */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosw/stosd */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
2939#endif
2940}
2941
2942static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2943 struct tss_segment_16 *tss)
2944{
2945 tss->ip = ctxt->_eip;
2946 tss->flag = ctxt->eflags;
2947 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2948 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2949 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2950 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2951 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2952 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2953 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2954 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2955
2956 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2957 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2958 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2959 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2960 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2961}
2962
2963static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2964 struct tss_segment_16 *tss)
2965{
2966 int ret;
2967 u8 cpl;
2968
2969 ctxt->_eip = tss->ip;
2970 ctxt->eflags = tss->flag | 2;
2971 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2972 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2973 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2974 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2975 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2976 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2977 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2978 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2979
	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
2984 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2985 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2986 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2987 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2988 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2989
2990 cpl = tss->cs & 3;
2991
	/*
	 * Now load the segment descriptors.  If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
2996 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2997 X86_TRANSFER_TASK_SWITCH, NULL);
2998 if (ret != X86EMUL_CONTINUE)
2999 return ret;
3000 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3001 X86_TRANSFER_TASK_SWITCH, NULL);
3002 if (ret != X86EMUL_CONTINUE)
3003 return ret;
3004 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3005 X86_TRANSFER_TASK_SWITCH, NULL);
3006 if (ret != X86EMUL_CONTINUE)
3007 return ret;
3008 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3009 X86_TRANSFER_TASK_SWITCH, NULL);
3010 if (ret != X86EMUL_CONTINUE)
3011 return ret;
3012 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3013 X86_TRANSFER_TASK_SWITCH, NULL);
3014 if (ret != X86EMUL_CONTINUE)
3015 return ret;
3016
3017 return X86EMUL_CONTINUE;
3018}
3019
3020static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3021 u16 tss_selector, u16 old_tss_sel,
3022 ulong old_tss_base, struct desc_struct *new_desc)
3023{
3024 const struct x86_emulate_ops *ops = ctxt->ops;
3025 struct tss_segment_16 tss_seg;
3026 int ret;
3027 u32 new_tss_base = get_desc_base(new_desc);
3028
3029 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3030 &ctxt->exception);
3031 if (ret != X86EMUL_CONTINUE)
3032 return ret;
3033
3034 save_state_to_tss16(ctxt, &tss_seg);
3035
3036 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3037 &ctxt->exception);
3038 if (ret != X86EMUL_CONTINUE)
3039 return ret;
3040
3041 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3042 &ctxt->exception);
3043 if (ret != X86EMUL_CONTINUE)
3044 return ret;
3045
3046 if (old_tss_sel != 0xffff) {
3047 tss_seg.prev_task_link = old_tss_sel;
3048
3049 ret = ops->write_std(ctxt, new_tss_base,
3050 &tss_seg.prev_task_link,
3051 sizeof tss_seg.prev_task_link,
3052 &ctxt->exception);
3053 if (ret != X86EMUL_CONTINUE)
3054 return ret;
3055 }
3056
3057 return load_state_from_tss16(ctxt, &tss_seg);
3058}
3059
3060static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3061 struct tss_segment_32 *tss)
3062{
	/* CR3 and the LDT selector are not saved intentionally. */
3064 tss->eip = ctxt->_eip;
3065 tss->eflags = ctxt->eflags;
3066 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3067 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3068 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3069 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3070 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3071 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3072 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3073 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3074
3075 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3076 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3077 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3078 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3079 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3080 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3081}
3082
3083static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3084 struct tss_segment_32 *tss)
3085{
3086 int ret;
3087 u8 cpl;
3088
3089 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3090 return emulate_gp(ctxt, 0);
3091 ctxt->_eip = tss->eip;
3092 ctxt->eflags = tss->eflags | 2;
3093
3094
3095 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3096 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3097 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3098 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3099 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3100 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3101 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3102 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3103
	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
3109 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3110 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3111 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3112 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3113 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3114 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3115 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3116
	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
3122 if (ctxt->eflags & X86_EFLAGS_VM) {
3123 ctxt->mode = X86EMUL_MODE_VM86;
3124 cpl = 3;
3125 } else {
3126 ctxt->mode = X86EMUL_MODE_PROT32;
3127 cpl = tss->cs & 3;
3128 }
3129
	/*
	 * Now load the segment descriptors.  If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
3134 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3135 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3136 if (ret != X86EMUL_CONTINUE)
3137 return ret;
3138 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3139 X86_TRANSFER_TASK_SWITCH, NULL);
3140 if (ret != X86EMUL_CONTINUE)
3141 return ret;
3142 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3143 X86_TRANSFER_TASK_SWITCH, NULL);
3144 if (ret != X86EMUL_CONTINUE)
3145 return ret;
3146 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3147 X86_TRANSFER_TASK_SWITCH, NULL);
3148 if (ret != X86EMUL_CONTINUE)
3149 return ret;
3150 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3151 X86_TRANSFER_TASK_SWITCH, NULL);
3152 if (ret != X86EMUL_CONTINUE)
3153 return ret;
3154 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3155 X86_TRANSFER_TASK_SWITCH, NULL);
3156 if (ret != X86EMUL_CONTINUE)
3157 return ret;
3158 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3159 X86_TRANSFER_TASK_SWITCH, NULL);
3160
3161 return ret;
3162}
3163
3164static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3165 u16 tss_selector, u16 old_tss_sel,
3166 ulong old_tss_base, struct desc_struct *new_desc)
3167{
3168 const struct x86_emulate_ops *ops = ctxt->ops;
3169 struct tss_segment_32 tss_seg;
3170 int ret;
3171 u32 new_tss_base = get_desc_base(new_desc);
3172 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3173 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3174
3175 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3176 &ctxt->exception);
3177 if (ret != X86EMUL_CONTINUE)
3178 return ret;
3179
3180 save_state_to_tss32(ctxt, &tss_seg);
3181
	/* Only GP registers and segment selectors are saved. */
3183 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3184 ldt_sel_offset - eip_offset, &ctxt->exception);
3185 if (ret != X86EMUL_CONTINUE)
3186 return ret;
3187
3188 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3189 &ctxt->exception);
3190 if (ret != X86EMUL_CONTINUE)
3191 return ret;
3192
3193 if (old_tss_sel != 0xffff) {
3194 tss_seg.prev_task_link = old_tss_sel;
3195
3196 ret = ops->write_std(ctxt, new_tss_base,
3197 &tss_seg.prev_task_link,
3198 sizeof tss_seg.prev_task_link,
3199 &ctxt->exception);
3200 if (ret != X86EMUL_CONTINUE)
3201 return ret;
3202 }
3203
3204 return load_state_from_tss32(ctxt, &tss_seg);
3205}
3206
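/*
 * Common task-switch engine for CALL/JMP/INT/IRET through a TSS or task
 * gate: read and validate the old and new TSS descriptors, save the outgoing
 * state into the old TSS, load the incoming state from the new one (16- or
 * 32-bit format depending on the descriptor type), update the busy bits and
 * EFLAGS.NT as the switch reason dictates, then set CR0.TS and load TR.
 */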
3207static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3208 u16 tss_selector, int idt_index, int reason,
3209 bool has_error_code, u32 error_code)
3210{
3211 const struct x86_emulate_ops *ops = ctxt->ops;
3212 struct desc_struct curr_tss_desc, next_tss_desc;
3213 int ret;
3214 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3215 ulong old_tss_base =
3216 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3217 u32 desc_limit;
3218 ulong desc_addr, dr7;
3219
	/* FIXME: old_tss_base == ~0 ? */
3222 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3223 if (ret != X86EMUL_CONTINUE)
3224 return ret;
3225 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3226 if (ret != X86EMUL_CONTINUE)
3227 return ret;
3228
	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
3239 if (reason == TASK_SWITCH_GATE) {
3240 if (idt_index != -1) {
			/* Software interrupts */
3242 struct desc_struct task_gate_desc;
3243 int dpl;
3244
3245 ret = read_interrupt_descriptor(ctxt, idt_index,
3246 &task_gate_desc);
3247 if (ret != X86EMUL_CONTINUE)
3248 return ret;
3249
3250 dpl = task_gate_desc.dpl;
3251 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3252 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3253 }
3254 }
3255
3256 desc_limit = desc_limit_scaled(&next_tss_desc);
3257 if (!next_tss_desc.p ||
3258 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3259 desc_limit < 0x2b)) {
3260 return emulate_ts(ctxt, tss_selector & 0xfffc);
3261 }
3262
3263 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3264 curr_tss_desc.type &= ~(1 << 1);
3265 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3266 }
3267
3268 if (reason == TASK_SWITCH_IRET)
3269 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3270
	/*
	 * Write the back link to the previous task only for CALL and
	 * gate-based switches; note that old_tss_sel is not used after this
	 * point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;
3275
3276 if (next_tss_desc.type & 8)
3277 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3278 old_tss_base, &next_tss_desc);
3279 else
3280 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3281 old_tss_base, &next_tss_desc);
3282 if (ret != X86EMUL_CONTINUE)
3283 return ret;
3284
3285 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3286 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3287
3288 if (reason != TASK_SWITCH_IRET) {
3289 next_tss_desc.type |= (1 << 1);
3290 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3291 }
3292
3293 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3294 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3295
3296 if (has_error_code) {
3297 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3298 ctxt->lock_prefix = 0;
3299 ctxt->src.val = (unsigned long) error_code;
3300 ret = em_push(ctxt);
3301 }
3302
3303 ops->get_dr(ctxt, 7, &dr7);
3304 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3305
3306 return ret;
3307}
3308
3309int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3310 u16 tss_selector, int idt_index, int reason,
3311 bool has_error_code, u32 error_code)
3312{
3313 int rc;
3314
3315 invalidate_registers(ctxt);
3316 ctxt->_eip = ctxt->eip;
3317 ctxt->dst.type = OP_NONE;
3318
3319 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3320 has_error_code, error_code);
3321
3322 if (rc == X86EMUL_CONTINUE) {
3323 ctxt->eip = ctxt->_eip;
3324 writeback_registers(ctxt);
3325 }
3326
3327 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3328}
3329
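/*
 * Advance the SI/DI pointer used by a string instruction: the step is
 * op->bytes per element, scaled by op->count for batched REP iterations, and
 * negated when EFLAGS.DF is set.
 */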
3330static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3331 struct operand *op)
3332{
3333 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3334
3335 register_address_increment(ctxt, reg, df * op->bytes);
3336 op->addr.mem.ea = register_address(ctxt, reg);
3337}
3338
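/*
 * DAS: decimal adjust AL after subtraction.  If the low nibble is above 9 or
 * AF was set, 6 is subtracted from AL (the "al >= 250" test catches the
 * borrow out of that correction); if the original AL was above 0x99 or CF
 * was set, a further 0x60 is subtracted.  PF/ZF/SF are then refreshed below
 * by OR-ing AL with an immediate zero.
 */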
3339static int em_das(struct x86_emulate_ctxt *ctxt)
3340{
3341 u8 al, old_al;
3342 bool af, cf, old_cf;
3343
3344 cf = ctxt->eflags & X86_EFLAGS_CF;
3345 al = ctxt->dst.val;
3346
3347 old_al = al;
3348 old_cf = cf;
3349 cf = false;
3350 af = ctxt->eflags & X86_EFLAGS_AF;
3351 if ((al & 0x0f) > 9 || af) {
3352 al -= 6;
3353 cf = old_cf | (al >= 250);
3354 af = true;
3355 } else {
3356 af = false;
3357 }
3358 if (old_al > 0x99 || old_cf) {
3359 al -= 0x60;
3360 cf = true;
3361 }
3362
3363 ctxt->dst.val = al;
	/* Update PF, ZF and SF by OR-ing AL with an immediate zero. */
3365 ctxt->src.type = OP_IMM;
3366 ctxt->src.val = 0;
3367 ctxt->src.bytes = 1;
3368 fastop(ctxt, em_or);
3369 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3370 if (cf)
3371 ctxt->eflags |= X86_EFLAGS_CF;
3372 if (af)
3373 ctxt->eflags |= X86_EFLAGS_AF;
3374 return X86EMUL_CONTINUE;
3375}
3376
3377static int em_aam(struct x86_emulate_ctxt *ctxt)
3378{
3379 u8 al, ah;
3380
3381 if (ctxt->src.val == 0)
3382 return emulate_de(ctxt);
3383
3384 al = ctxt->dst.val & 0xff;
3385 ah = al / ctxt->src.val;
3386 al %= ctxt->src.val;
3387
3388 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3389
3390
3391 ctxt->src.type = OP_IMM;
3392 ctxt->src.val = 0;
3393 ctxt->src.bytes = 1;
3394 fastop(ctxt, em_or);
3395
3396 return X86EMUL_CONTINUE;
3397}
3398
3399static int em_aad(struct x86_emulate_ctxt *ctxt)
3400{
3401 u8 al = ctxt->dst.val & 0xff;
3402 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3403
3404 al = (al + (ah * ctxt->src.val)) & 0xff;
3405
3406 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3407
3408
3409 ctxt->src.type = OP_IMM;
3410 ctxt->src.val = 0;
3411 ctxt->src.bytes = 1;
3412 fastop(ctxt, em_or);
3413
3414 return X86EMUL_CONTINUE;
3415}
3416
3417static int em_call(struct x86_emulate_ctxt *ctxt)
3418{
3419 int rc;
3420 long rel = ctxt->src.val;
3421
3422 ctxt->src.val = (unsigned long)ctxt->_eip;
3423 rc = jmp_rel(ctxt, rel);
3424 if (rc != X86EMUL_CONTINUE)
3425 return rc;
3426 return em_push(ctxt);
3427}
3428
3429static int em_call_far(struct x86_emulate_ctxt *ctxt)
3430{
3431 u16 sel, old_cs;
3432 ulong old_eip;
3433 int rc;
3434 struct desc_struct old_desc, new_desc;
3435 const struct x86_emulate_ops *ops = ctxt->ops;
3436 int cpl = ctxt->ops->cpl(ctxt);
3437 enum x86emul_mode prev_mode = ctxt->mode;
3438
3439 old_eip = ctxt->_eip;
3440 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3441
3442 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3443 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3444 X86_TRANSFER_CALL_JMP, &new_desc);
3445 if (rc != X86EMUL_CONTINUE)
3446 return rc;
3447
3448 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3449 if (rc != X86EMUL_CONTINUE)
3450 goto fail;
3451
3452 ctxt->src.val = old_cs;
3453 rc = em_push(ctxt);
3454 if (rc != X86EMUL_CONTINUE)
3455 goto fail;
3456
3457 ctxt->src.val = old_eip;
3458 rc = em_push(ctxt);
3459
	/*
	 * If pushing the return address failed we have already tainted guest
	 * memory, but at the very least restore the old CS.
	 */
3461 if (rc != X86EMUL_CONTINUE) {
3462 pr_warn_once("faulting far call emulation tainted memory\n");
3463 goto fail;
3464 }
3465 return rc;
3466fail:
3467 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3468 ctxt->mode = prev_mode;
3469 return rc;
3470
3471}
3472
3473static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3474{
3475 int rc;
3476 unsigned long eip;
3477
3478 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3479 if (rc != X86EMUL_CONTINUE)
3480 return rc;
3481 rc = assign_eip_near(ctxt, eip);
3482 if (rc != X86EMUL_CONTINUE)
3483 return rc;
3484 rsp_increment(ctxt, ctxt->src.val);
3485 return X86EMUL_CONTINUE;
3486}
3487
3488static int em_xchg(struct x86_emulate_ctxt *ctxt)
3489{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
3498}
3499
3500static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3501{
3502 ctxt->dst.val = ctxt->src2.val;
3503 return fastop(ctxt, em_imul);
3504}
3505
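/*
 * CWD/CDQ/CQO: sign-extend the accumulator into DX/EDX/RDX.  The expression
 * below broadcasts the sign bit of the source: all ones if it is set, zero
 * otherwise.
 */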
3506static int em_cwd(struct x86_emulate_ctxt *ctxt)
3507{
3508 ctxt->dst.type = OP_REG;
3509 ctxt->dst.bytes = ctxt->src.bytes;
3510 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3511 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3512
3513 return X86EMUL_CONTINUE;
3514}
3515
3516static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3517{
3518 u64 tsc = 0;
3519
3520 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3521 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3522 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3523 return X86EMUL_CONTINUE;
3524}
3525
3526static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3527{
3528 u64 pmc;
3529
3530 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3531 return emulate_gp(ctxt, 0);
3532 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3533 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3534 return X86EMUL_CONTINUE;
3535}
3536
3537static int em_mov(struct x86_emulate_ctxt *ctxt)
3538{
3539 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3540 return X86EMUL_CONTINUE;
3541}
3542
3543#define FFL(x) bit(X86_FEATURE_##x)
3544
3545static int em_movbe(struct x86_emulate_ctxt *ctxt)
3546{
3547 u32 ebx, ecx, edx, eax = 1;
3548 u16 tmp;
3549
	/*
	 * Check that the guest-visible CPUID actually advertises MOVBE
	 * before emulating it.
	 */
3553 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3554 if (!(ecx & FFL(MOVBE)))
3555 return emulate_ud(ctxt);
3556
3557 switch (ctxt->op_bytes) {
3558 case 2:
		/*
		 * From the MOVBE definition: "...When the operand size is 16
		 * bits, the upper word of the destination register remains
		 * unchanged ..."
		 *
		 * Casting both ->valptr and ->val to u16 would break strict
		 * aliasing rules, so the swap is done almost by hand.
		 */
3567 tmp = (u16)ctxt->src.val;
3568 ctxt->dst.val &= ~0xffffUL;
3569 ctxt->dst.val |= (unsigned long)swab16(tmp);
3570 break;
3571 case 4:
3572 ctxt->dst.val = swab32((u32)ctxt->src.val);
3573 break;
3574 case 8:
3575 ctxt->dst.val = swab64(ctxt->src.val);
3576 break;
3577 default:
3578 BUG();
3579 }
3580 return X86EMUL_CONTINUE;
3581}
3582
3583static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3584{
3585 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3586 return emulate_gp(ctxt, 0);
3587
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
3590 return X86EMUL_CONTINUE;
3591}
3592
3593static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3594{
3595 unsigned long val;
3596
3597 if (ctxt->mode == X86EMUL_MODE_PROT64)
3598 val = ctxt->src.val & ~0ULL;
3599 else
3600 val = ctxt->src.val & ~0U;
3601
	/* The #UD conditions have already been checked. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
3608 return X86EMUL_CONTINUE;
3609}
3610
3611static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3612{
3613 u64 msr_data;
3614
3615 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3616 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3617 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3618 return emulate_gp(ctxt, 0);
3619
3620 return X86EMUL_CONTINUE;
3621}
3622
3623static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3624{
3625 u64 msr_data;
3626
3627 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3628 return emulate_gp(ctxt, 0);
3629
3630 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3631 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3632 return X86EMUL_CONTINUE;
3633}
3634
3635static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3636{
3637 if (ctxt->modrm_reg > VCPU_SREG_GS)
3638 return emulate_ud(ctxt);
3639
3640 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3641 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3642 ctxt->dst.bytes = 2;
3643 return X86EMUL_CONTINUE;
3644}
3645
3646static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3647{
3648 u16 sel = ctxt->src.val;
3649
3650 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3651 return emulate_ud(ctxt);
3652
3653 if (ctxt->modrm_reg == VCPU_SREG_SS)
3654 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3655
3656
3657 ctxt->dst.type = OP_NONE;
3658 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3659}
3660
3661static int em_lldt(struct x86_emulate_ctxt *ctxt)
3662{
3663 u16 sel = ctxt->src.val;
3664
3665
3666 ctxt->dst.type = OP_NONE;
3667 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3668}
3669
3670static int em_ltr(struct x86_emulate_ctxt *ctxt)
3671{
3672 u16 sel = ctxt->src.val;
3673
3674
3675 ctxt->dst.type = OP_NONE;
3676 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3677}
3678
3679static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3680{
3681 int rc;
3682 ulong linear;
3683
3684 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3685 if (rc == X86EMUL_CONTINUE)
3686 ctxt->ops->invlpg(ctxt, linear);
3687
3688 ctxt->dst.type = OP_NONE;
3689 return X86EMUL_CONTINUE;
3690}
3691
3692static int em_clts(struct x86_emulate_ctxt *ctxt)
3693{
3694 ulong cr0;
3695
3696 cr0 = ctxt->ops->get_cr(ctxt, 0);
3697 cr0 &= ~X86_CR0_TS;
3698 ctxt->ops->set_cr(ctxt, 0, cr0);
3699 return X86EMUL_CONTINUE;
3700}
3701
3702static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3703{
3704 int rc = ctxt->ops->fix_hypercall(ctxt);
3705
3706 if (rc != X86EMUL_CONTINUE)
3707 return rc;
3708
	/*
	 * fix_hypercall() may have patched the instruction; keep RIP pointing
	 * at it so the guest executes the patched hypercall next.
	 */
	ctxt->_eip = ctxt->eip;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
3713 return X86EMUL_CONTINUE;
3714}
3715
3716static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3717 void (*get)(struct x86_emulate_ctxt *ctxt,
3718 struct desc_ptr *ptr))
3719{
3720 struct desc_ptr desc_ptr;
3721
3722 if (ctxt->mode == X86EMUL_MODE_PROT64)
3723 ctxt->op_bytes = 8;
3724 get(ctxt, &desc_ptr);
3725 if (ctxt->op_bytes == 2) {
3726 ctxt->op_bytes = 4;
3727 desc_ptr.address &= 0x00ffffff;
3728 }
3729
3730 ctxt->dst.type = OP_NONE;
3731 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3732 &desc_ptr, 2 + ctxt->op_bytes);
3733}
3734
3735static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3736{
3737 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3738}
3739
3740static int em_sidt(struct x86_emulate_ctxt *ctxt)
3741{
3742 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3743}
3744
3745static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3746{
3747 struct desc_ptr desc_ptr;
3748 int rc;
3749
3750 if (ctxt->mode == X86EMUL_MODE_PROT64)
3751 ctxt->op_bytes = 8;
3752 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3753 &desc_ptr.size, &desc_ptr.address,
3754 ctxt->op_bytes);
3755 if (rc != X86EMUL_CONTINUE)
3756 return rc;
3757 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3758 is_noncanonical_address(desc_ptr.address))
3759 return emulate_gp(ctxt, 0);
3760 if (lgdt)
3761 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3762 else
3763 ctxt->ops->set_idt(ctxt, &desc_ptr);
3764
3765 ctxt->dst.type = OP_NONE;
3766 return X86EMUL_CONTINUE;
3767}
3768
3769static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3770{
3771 return em_lgdt_lidt(ctxt, true);
3772}
3773
3774static int em_lidt(struct x86_emulate_ctxt *ctxt)
3775{
3776 return em_lgdt_lidt(ctxt, false);
3777}
3778
3779static int em_smsw(struct x86_emulate_ctxt *ctxt)
3780{
3781 if (ctxt->dst.type == OP_MEM)
3782 ctxt->dst.bytes = 2;
3783 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3784 return X86EMUL_CONTINUE;
3785}
3786
3787static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3788{
3789 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3790 | (ctxt->src.val & 0x0f));
3791 ctxt->dst.type = OP_NONE;
3792 return X86EMUL_CONTINUE;
3793}
3794
3795static int em_loop(struct x86_emulate_ctxt *ctxt)
3796{
3797 int rc = X86EMUL_CONTINUE;
3798
3799 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3800 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3801 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3802 rc = jmp_rel(ctxt, ctxt->src.val);
3803
3804 return rc;
3805}
3806
3807static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3808{
3809 int rc = X86EMUL_CONTINUE;
3810
3811 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3812 rc = jmp_rel(ctxt, ctxt->src.val);
3813
3814 return rc;
3815}
3816
3817static int em_in(struct x86_emulate_ctxt *ctxt)
3818{
3819 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3820 &ctxt->dst.val))
3821 return X86EMUL_IO_NEEDED;
3822
3823 return X86EMUL_CONTINUE;
3824}
3825
3826static int em_out(struct x86_emulate_ctxt *ctxt)
3827{
3828 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3829 &ctxt->src.val, 1);
3830
3831 ctxt->dst.type = OP_NONE;
3832 return X86EMUL_CONTINUE;
3833}
3834
3835static int em_cli(struct x86_emulate_ctxt *ctxt)
3836{
3837 if (emulator_bad_iopl(ctxt))
3838 return emulate_gp(ctxt, 0);
3839
3840 ctxt->eflags &= ~X86_EFLAGS_IF;
3841 return X86EMUL_CONTINUE;
3842}
3843
3844static int em_sti(struct x86_emulate_ctxt *ctxt)
3845{
3846 if (emulator_bad_iopl(ctxt))
3847 return emulate_gp(ctxt, 0);
3848
3849 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3850 ctxt->eflags |= X86_EFLAGS_IF;
3851 return X86EMUL_CONTINUE;
3852}
3853
3854static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3855{
3856 u32 eax, ebx, ecx, edx;
3857
3858 eax = reg_read(ctxt, VCPU_REGS_RAX);
3859 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3860 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3861 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3862 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3863 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3864 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3865 return X86EMUL_CONTINUE;
3866}
3867
3868static int em_sahf(struct x86_emulate_ctxt *ctxt)
3869{
3870 u32 flags;
3871
3872 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3873 X86_EFLAGS_SF;
3874 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3875
3876 ctxt->eflags &= ~0xffUL;
3877 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3878 return X86EMUL_CONTINUE;
3879}
3880
3881static int em_lahf(struct x86_emulate_ctxt *ctxt)
3882{
3883 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3884 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3885 return X86EMUL_CONTINUE;
3886}
3887
3888static int em_bswap(struct x86_emulate_ctxt *ctxt)
3889{
3890 switch (ctxt->op_bytes) {
3891#ifdef CONFIG_X86_64
3892 case 8:
3893 asm("bswap %0" : "+r"(ctxt->dst.val));
3894 break;
3895#endif
3896 default:
3897 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3898 break;
3899 }
3900 return X86EMUL_CONTINUE;
3901}
3902
3903static int em_clflush(struct x86_emulate_ctxt *ctxt)
3904{
	/* Emulated regardless of the CPUID CLFSH bit; nothing to do. */
3906 return X86EMUL_CONTINUE;
3907}
3908
3909static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3910{
3911 ctxt->dst.val = (s32) ctxt->src.val;
3912 return X86EMUL_CONTINUE;
3913}
3914
3915static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3916{
3917 u32 eax = 1, ebx, ecx = 0, edx;
3918
3919 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3920 if (!(edx & FFL(FXSR)))
3921 return emulate_ud(ctxt);
3922
3923 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3924 return emulate_nm(ctxt);
3925
	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;
3932
3933 return X86EMUL_CONTINUE;
3934}
3935
/*
 * FXSAVE/FXRSTOR emulation.
 *
 * The guest-visible FXSAVE image depends on the execution mode and on
 * CR4.OSFXSR: XMM8-15 only exist in 64-bit mode, and with CR4.OSFXSR clear
 * the hardware need not save or restore XMM0-7 (MXCSR is handled
 * regardless).  The emulator therefore runs the host's native FXSAVE/FXRSTOR
 * on a scratch fxregs_state, writes back only the guest-visible part in
 * em_fxsave(), and patches the XMM portion in fxrstor_fixup() before
 * em_fxrstor() so that registers the guest must not touch are preserved.
 * 64-bit guest mode is rejected in check_fxsr() above.
 */
3954static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3955{
3956 struct fxregs_state fx_state;
3957 size_t size;
3958 int rc;
3959
3960 rc = check_fxsr(ctxt);
3961 if (rc != X86EMUL_CONTINUE)
3962 return rc;
3963
3964 ctxt->ops->get_fpu(ctxt);
3965
3966 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3967
3968 ctxt->ops->put_fpu(ctxt);
3969
3970 if (rc != X86EMUL_CONTINUE)
3971 return rc;
3972
3973 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
3974 size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
3975 else
3976 size = offsetof(struct fxregs_state, xmm_space[0]);
3977
3978 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3979}
3980
3981static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
3982 struct fxregs_state *new)
3983{
3984 int rc = X86EMUL_CONTINUE;
3985 struct fxregs_state old;
3986
3987 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
3988 if (rc != X86EMUL_CONTINUE)
3989 return rc;
3990
	/*
	 * A 64-bit host fxrstor would also load XMM8-15, which a non-64-bit
	 * guest cannot see.  Copy the host's current values over the loaded
	 * image so they survive the fxrstor that follows.
	 */
#ifdef CONFIG_X86_64
3998 memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
3999#endif
4000
	/*
	 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
	 * does save and restore MXCSR.  Keep the current XMM 0-7 values if
	 * the guest's CR4.OSFXSR is clear.
	 */
4005 if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
4006 memcpy(new->xmm_space, old.xmm_space, 8 * 16);
4007
4008 return rc;
4009}
4010
4011static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4012{
4013 struct fxregs_state fx_state;
4014 int rc;
4015
4016 rc = check_fxsr(ctxt);
4017 if (rc != X86EMUL_CONTINUE)
4018 return rc;
4019
4020 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
4021 if (rc != X86EMUL_CONTINUE)
4022 return rc;
4023
4024 if (fx_state.mxcsr >> 16)
4025 return emulate_gp(ctxt, 0);
4026
4027 ctxt->ops->get_fpu(ctxt);
4028
4029 if (ctxt->mode < X86EMUL_MODE_PROT64)
4030 rc = fxrstor_fixup(ctxt, &fx_state);
4031
4032 if (rc == X86EMUL_CONTINUE)
4033 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4034
4035 ctxt->ops->put_fpu(ctxt);
4036
4037 return rc;
4038}
4039
4040static bool valid_cr(int nr)
4041{
4042 switch (nr) {
4043 case 0:
4044 case 2 ... 4:
4045 case 8:
4046 return true;
4047 default:
4048 return false;
4049 }
4050}
4051
4052static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4053{
4054 if (!valid_cr(ctxt->modrm_reg))
4055 return emulate_ud(ctxt);
4056
4057 return X86EMUL_CONTINUE;
4058}
4059
4060static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4061{
4062 u64 new_val = ctxt->src.val64;
4063 int cr = ctxt->modrm_reg;
4064 u64 efer = 0;
4065
4066 static u64 cr_reserved_bits[] = {
4067 0xffffffff00000000ULL,
4068 0, 0, 0,
4069 CR4_RESERVED_BITS,
4070 0, 0, 0,
4071 CR8_RESERVED_BITS,
4072 };
4073
4074 if (!valid_cr(cr))
4075 return emulate_ud(ctxt);
4076
4077 if (new_val & cr_reserved_bits[cr])
4078 return emulate_gp(ctxt, 0);
4079
4080 switch (cr) {
4081 case 0: {
4082 u64 cr4;
4083 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4084 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4085 return emulate_gp(ctxt, 0);
4086
4087 cr4 = ctxt->ops->get_cr(ctxt, 4);
4088 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4089
4090 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4091 !(cr4 & X86_CR4_PAE))
4092 return emulate_gp(ctxt, 0);
4093
4094 break;
4095 }
4096 case 3: {
4097 u64 rsvd = 0;
4098
4099 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4100 if (efer & EFER_LMA)
4101 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
4102
4103 if (new_val & rsvd)
4104 return emulate_gp(ctxt, 0);
4105
4106 break;
4107 }
4108 case 4: {
4109 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4110
4111 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4112 return emulate_gp(ctxt, 0);
4113
4114 break;
4115 }
4116 }
4117
4118 return X86EMUL_CONTINUE;
4119}
4120
4121static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4122{
4123 unsigned long dr7;
4124
4125 ctxt->ops->get_dr(ctxt, 7, &dr7);
4126
	/* Check whether DR7.GD (general detect enable, bit 13) is set. */
4128 return dr7 & (1 << 13);
4129}
4130
4131static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4132{
4133 int dr = ctxt->modrm_reg;
4134 u64 cr4;
4135
4136 if (dr > 7)
4137 return emulate_ud(ctxt);
4138
4139 cr4 = ctxt->ops->get_cr(ctxt, 4);
4140 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4141 return emulate_ud(ctxt);
4142
4143 if (check_dr7_gd(ctxt)) {
4144 ulong dr6;
4145
4146 ctxt->ops->get_dr(ctxt, 6, &dr6);
4147 dr6 &= ~15;
4148 dr6 |= DR6_BD | DR6_RTM;
4149 ctxt->ops->set_dr(ctxt, 6, dr6);
4150 return emulate_db(ctxt);
4151 }
4152
4153 return X86EMUL_CONTINUE;
4154}
4155
4156static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4157{
4158 u64 new_val = ctxt->src.val64;
4159 int dr = ctxt->modrm_reg;
4160
4161 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4162 return emulate_gp(ctxt, 0);
4163
4164 return check_dr_read(ctxt);
4165}
4166
4167static int check_svme(struct x86_emulate_ctxt *ctxt)
4168{
4169 u64 efer;
4170
4171 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4172
4173 if (!(efer & EFER_SVME))
4174 return emulate_ud(ctxt);
4175
4176 return X86EMUL_CONTINUE;
4177}
4178
4179static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4180{
4181 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4182
	/* RAX holds the VMCB physical address; reject obviously bogus values. */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);
4186
4187 return check_svme(ctxt);
4188}
4189
4190static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4191{
4192 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4193
4194 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4195 return emulate_ud(ctxt);
4196
4197 return X86EMUL_CONTINUE;
4198}
4199
4200static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4201{
4202 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4203 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4204
4205 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4206 ctxt->ops->check_pmc(ctxt, rcx))
4207 return emulate_gp(ctxt, 0);
4208
4209 return X86EMUL_CONTINUE;
4210}
4211
4212static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4213{
4214 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4215 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4216 return emulate_gp(ctxt, 0);
4217
4218 return X86EMUL_CONTINUE;
4219}
4220
4221static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4222{
4223 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4224 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4225 return emulate_gp(ctxt, 0);
4226
4227 return X86EMUL_CONTINUE;
4228}
4229
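/*
 * The decode tables below are built from the flag bits defined at the top of
 * this file.  D() declares an opcode by flags alone, I() and F() attach an
 * emulation callback or fastop routine, G()/GD()/E()/GP()/ID()/MD() redirect
 * decoding to a group, group-dual, escape, mandatory-prefix,
 * instruction-dual or mode-dual table, and DI()/II()/DIP()/IIP() additionally
 * record an intercept id (and an optional permission-check hook).
 */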
4230#define D(_y) { .flags = (_y) }
4231#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4232#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4233 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4234#define N D(NotImpl)
4235#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4236#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4237#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4238#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4239#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4240#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4241#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4242#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4243#define II(_f, _e, _i) \
4244 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4245#define IIP(_f, _e, _i, _p) \
4246 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4247 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4248#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4249
4250#define D2bv(_f) D((_f) | ByteOp), D(_f)
4251#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4252#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4253#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4254#define I2bvIP(_f, _e, _i, _p) \
4255 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4256
4257#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4258 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4259 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
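/*
 * F6ALU() above expands to the six classic encodings of one ALU operation;
 * e.g. F6ALU(Lock, em_add) in opcode_table[] covers 0x00-0x05: ADD r/m8,r8;
 * ADD r/m,r; ADD r8,r/m8; ADD r,r/m; ADD AL,imm8; ADD eAX,imm.
 */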
4260
4261static const struct opcode group7_rm0[] = {
4262 N,
4263 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4264 N, N, N, N, N, N,
4265};
4266
4267static const struct opcode group7_rm1[] = {
4268 DI(SrcNone | Priv, monitor),
4269 DI(SrcNone | Priv, mwait),
4270 N, N, N, N, N, N,
4271};
4272
4273static const struct opcode group7_rm3[] = {
4274 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4275 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4276 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4277 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4278 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4279 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4280 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4281 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4282};
4283
4284static const struct opcode group7_rm7[] = {
4285 N,
4286 DIP(SrcNone, rdtscp, check_rdtsc),
4287 N, N, N, N, N, N,
4288};
4289
4290static const struct opcode group1[] = {
4291 F(Lock, em_add),
4292 F(Lock | PageTable, em_or),
4293 F(Lock, em_adc),
4294 F(Lock, em_sbb),
4295 F(Lock | PageTable, em_and),
4296 F(Lock, em_sub),
4297 F(Lock, em_xor),
4298 F(NoWrite, em_cmp),
4299};
4300
4301static const struct opcode group1A[] = {
4302 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4303};
4304
4305static const struct opcode group2[] = {
4306 F(DstMem | ModRM, em_rol),
4307 F(DstMem | ModRM, em_ror),
4308 F(DstMem | ModRM, em_rcl),
4309 F(DstMem | ModRM, em_rcr),
4310 F(DstMem | ModRM, em_shl),
4311 F(DstMem | ModRM, em_shr),
4312 F(DstMem | ModRM, em_shl),
4313 F(DstMem | ModRM, em_sar),
4314};
4315
4316static const struct opcode group3[] = {
4317 F(DstMem | SrcImm | NoWrite, em_test),
4318 F(DstMem | SrcImm | NoWrite, em_test),
4319 F(DstMem | SrcNone | Lock, em_not),
4320 F(DstMem | SrcNone | Lock, em_neg),
4321 F(DstXacc | Src2Mem, em_mul_ex),
4322 F(DstXacc | Src2Mem, em_imul_ex),
4323 F(DstXacc | Src2Mem, em_div_ex),
4324 F(DstXacc | Src2Mem, em_idiv_ex),
4325};
4326
4327static const struct opcode group4[] = {
4328 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4329 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4330 N, N, N, N, N, N,
4331};
4332
4333static const struct opcode group5[] = {
4334 F(DstMem | SrcNone | Lock, em_inc),
4335 F(DstMem | SrcNone | Lock, em_dec),
4336 I(SrcMem | NearBranch, em_call_near_abs),
4337 I(SrcMemFAddr | ImplicitOps, em_call_far),
4338 I(SrcMem | NearBranch, em_jmp_abs),
4339 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4340 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4341};
4342
4343static const struct opcode group6[] = {
4344 DI(Prot | DstMem, sldt),
4345 DI(Prot | DstMem, str),
4346 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4347 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4348 N, N, N, N,
4349};
4350
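/*
 * group7 is a group-dual table: the first row of eight entries is assumed to
 * cover the memory forms of 0F 01 (selected by ModRM.reg), the second row
 * the register forms, several of which are split further by ModRM.rm via
 * EXT() (monitor/mwait, the SVM instructions, rdtscp).
 */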
4351static const struct group_dual group7 = { {
4352 II(Mov | DstMem, em_sgdt, sgdt),
4353 II(Mov | DstMem, em_sidt, sidt),
4354 II(SrcMem | Priv, em_lgdt, lgdt),
4355 II(SrcMem | Priv, em_lidt, lidt),
4356 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4357 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4358 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4359}, {
4360 EXT(0, group7_rm0),
4361 EXT(0, group7_rm1),
4362 N, EXT(0, group7_rm3),
4363 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4364 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4365 EXT(0, group7_rm7),
4366} };
4367
4368static const struct opcode group8[] = {
4369 N, N, N, N,
4370 F(DstMem | SrcImmByte | NoWrite, em_bt),
4371 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4372 F(DstMem | SrcImmByte | Lock, em_btr),
4373 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4374};
4375
4376static const struct group_dual group9 = { {
4377 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4378}, {
4379 N, N, N, N, N, N, N, N,
4380} };
4381
4382static const struct opcode group11[] = {
4383 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4384 X7(D(Undefined)),
4385};
4386
4387static const struct gprefix pfx_0f_ae_7 = {
4388 I(SrcMem | ByteOp, em_clflush), N, N, N,
4389};
4390
4391static const struct group_dual group15 = { {
4392 I(ModRM | Aligned16, em_fxsave),
4393 I(ModRM | Aligned16, em_fxrstor),
4394 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4395}, {
4396 N, N, N, N, N, N, N, N,
4397} };
4398
4399static const struct gprefix pfx_0f_6f_0f_7f = {
4400 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4401};
4402
4403static const struct instr_dual instr_dual_0f_2b = {
4404 I(0, em_mov), N
4405};
4406
4407static const struct gprefix pfx_0f_2b = {
4408 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4409};
4410
4411static const struct gprefix pfx_0f_28_0f_29 = {
4412 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4413};
4414
4415static const struct gprefix pfx_0f_e7 = {
4416 N, I(Sse, em_mov), N, N,
4417};
4418
4419static const struct escape escape_d9 = { {
4420 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4421}, {
 /* 0xC0 - 0xC7 */
 N, N, N, N, N, N, N, N,
 /* 0xC8 - 0xCF */
 N, N, N, N, N, N, N, N,
 /* 0xD0 - 0xD7 */
 N, N, N, N, N, N, N, N,
 /* 0xD8 - 0xDF */
 N, N, N, N, N, N, N, N,
 /* 0xE0 - 0xE7 */
 N, N, N, N, N, N, N, N,
 /* 0xE8 - 0xEF */
 N, N, N, N, N, N, N, N,
 /* 0xF0 - 0xF7 */
 N, N, N, N, N, N, N, N,
 /* 0xF8 - 0xFF */
 N, N, N, N, N, N, N, N,
4438} };
4439
4440static const struct escape escape_db = { {
4441 N, N, N, N, N, N, N, N,
4442}, {
 /* 0xC0 - 0xC7 */
 N, N, N, N, N, N, N, N,
 /* 0xC8 - 0xCF */
 N, N, N, N, N, N, N, N,
 /* 0xD0 - 0xD7 */
 N, N, N, N, N, N, N, N,
 /* 0xD8 - 0xDF */
 N, N, N, N, N, N, N, N,
 /* 0xE0 - 0xE7 */
 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
 /* 0xE8 - 0xEF */
 N, N, N, N, N, N, N, N,
 /* 0xF0 - 0xF7 */
 N, N, N, N, N, N, N, N,
 /* 0xF8 - 0xFF */
 N, N, N, N, N, N, N, N,
4459} };
4460
4461static const struct escape escape_dd = { {
4462 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4463}, {
 /* 0xC0 - 0xC7 */
 N, N, N, N, N, N, N, N,
 /* 0xC8 - 0xCF */
 N, N, N, N, N, N, N, N,
 /* 0xD0 - 0xD7 */
 N, N, N, N, N, N, N, N,
 /* 0xD8 - 0xDF */
 N, N, N, N, N, N, N, N,
 /* 0xE0 - 0xE7 */
 N, N, N, N, N, N, N, N,
 /* 0xE8 - 0xEF */
 N, N, N, N, N, N, N, N,
 /* 0xF0 - 0xF7 */
 N, N, N, N, N, N, N, N,
 /* 0xF8 - 0xFF */
 N, N, N, N, N, N, N, N,
4480} };
4481
4482static const struct instr_dual instr_dual_0f_c3 = {
4483 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4484};
4485
4486static const struct mode_dual mode_dual_63 = {
4487 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4488};
4489
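/*
 * One-byte opcode map, indexed by the primary opcode byte. Entries name an
 * em_* handler (or a decode-only D()/N slot) and may defer to the group,
 * prefix and escape tables above.
 */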
4490static const struct opcode opcode_table[256] = {
 /* 0x00 - 0x07 */
4492 F6ALU(Lock, em_add),
4493 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4494 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
 /* 0x08 - 0x0F */
4496 F6ALU(Lock | PageTable, em_or),
4497 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4498 N,
 /* 0x10 - 0x17 */
4500 F6ALU(Lock, em_adc),
4501 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4502 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
 /* 0x18 - 0x1F */
4504 F6ALU(Lock, em_sbb),
4505 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4506 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
 /* 0x20 - 0x27 */
4508 F6ALU(Lock | PageTable, em_and), N, N,
 /* 0x28 - 0x2F */
4510 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
 /* 0x30 - 0x37 */
4512 F6ALU(Lock, em_xor), N, N,
 /* 0x38 - 0x3F */
4514 F6ALU(NoWrite, em_cmp), N, N,
 /* 0x40 - 0x4F */
4516 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
 /* 0x50 - 0x57 */
4518 X8(I(SrcReg | Stack, em_push)),
 /* 0x58 - 0x5F */
4520 X8(I(DstReg | Stack, em_pop)),
 /* 0x60 - 0x67 */
4522 I(ImplicitOps | Stack | No64, em_pusha),
4523 I(ImplicitOps | Stack | No64, em_popa),
4524 N, MD(ModRM, &mode_dual_63),
4525 N, N, N, N,
 /* 0x68 - 0x6F */
4527 I(SrcImm | Mov | Stack, em_push),
4528 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4529 I(SrcImmByte | Mov | Stack, em_push),
4530 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4531 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4532 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
 /* 0x70 - 0x7F */
4534 X16(D(SrcImmByte | NearBranch)),
 /* 0x80 - 0x87 */
4536 G(ByteOp | DstMem | SrcImm, group1),
4537 G(DstMem | SrcImm, group1),
4538 G(ByteOp | DstMem | SrcImm | No64, group1),
4539 G(DstMem | SrcImmByte, group1),
4540 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4541 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
 /* 0x88 - 0x8F */
4543 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4544 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4545 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4546 D(ModRM | SrcMem | NoAccess | DstReg),
4547 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4548 G(0, group1A),
 /* 0x90 - 0x97 */
4550 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
 /* 0x98 - 0x9F */
4552 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4553 I(SrcImmFAddr | No64, em_call_far), N,
4554 II(ImplicitOps | Stack, em_pushf, pushf),
4555 II(ImplicitOps | Stack, em_popf, popf),
4556 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
 /* 0xA0 - 0xA7 */
4558 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4559 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4560 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4561 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
 /* 0xA8 - 0xAF */
4563 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4564 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4565 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4566 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
 /* 0xB0 - 0xB7 */
4568 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
 /* 0xB8 - 0xBF */
4570 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
 /* 0xC0 - 0xC7 */
4572 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4573 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4574 I(ImplicitOps | NearBranch, em_ret),
4575 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4576 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4577 G(ByteOp, group11), G(0, group11),
 /* 0xC8 - 0xCF */
4579 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4580 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4581 I(ImplicitOps, em_ret_far),
4582 D(ImplicitOps), DI(SrcImmByte, intn),
4583 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
 /* 0xD0 - 0xD7 */
4585 G(Src2One | ByteOp, group2), G(Src2One, group2),
4586 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4587 I(DstAcc | SrcImmUByte | No64, em_aam),
4588 I(DstAcc | SrcImmUByte | No64, em_aad),
4589 F(DstAcc | ByteOp | No64, em_salc),
4590 I(DstAcc | SrcXLat | ByteOp, em_mov),
 /* 0xD8 - 0xDF */
4592 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
 /* 0xE0 - 0xE7 */
4594 X3(I(SrcImmByte | NearBranch, em_loop)),
4595 I(SrcImmByte | NearBranch, em_jcxz),
4596 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4597 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
 /* 0xE8 - 0xEF */
4599 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4600 I(SrcImmFAddr | No64, em_jmp_far),
4601 D(SrcImmByte | ImplicitOps | NearBranch),
4602 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4603 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
 /* 0xF0 - 0xF7 */
4605 N, DI(ImplicitOps, icebp), N, N,
4606 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4607 G(ByteOp, group3), G(0, group3),
 /* 0xF8 - 0xFF */
4609 D(ImplicitOps), D(ImplicitOps),
4610 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4611 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4612};
4613
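/*
 * Two-byte (0x0F-prefixed) opcode map, indexed by the second opcode byte.
 */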
4614static const struct opcode twobyte_table[256] = {
 /* 0x00 - 0x0F */
4616 G(0, group6), GD(0, &group7), N, N,
4617 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4618 II(ImplicitOps | Priv, em_clts, clts), N,
4619 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4620 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
 /* 0x10 - 0x1F */
4622 N, N, N, N, N, N, N, N,
4623 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4624 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
 /* 0x20 - 0x2F */
4626 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4627 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4628 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4629 check_cr_write),
4630 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4631 check_dr_write),
4632 N, N, N, N,
4633 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4634 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4635 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4636 N, N, N, N,
 /* 0x30 - 0x3F */
4638 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4639 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4640 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4641 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4642 I(ImplicitOps | EmulateOnUD, em_sysenter),
4643 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4644 N, N,
4645 N, N, N, N, N, N, N, N,
 /* 0x40 - 0x4F */
4647 X16(D(DstReg | SrcMem | ModRM)),
 /* 0x50 - 0x5F */
4649 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
 /* 0x60 - 0x6F */
4651 N, N, N, N,
4652 N, N, N, N,
4653 N, N, N, N,
4654 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
 /* 0x70 - 0x7F */
4656 N, N, N, N,
4657 N, N, N, N,
4658 N, N, N, N,
4659 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
 /* 0x80 - 0x8F */
4661 X16(D(SrcImm | NearBranch)),
 /* 0x90 - 0x9F */
4663 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
 /* 0xA0 - 0xA7 */
4665 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4666 II(ImplicitOps, em_cpuid, cpuid),
4667 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4668 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4669 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
 /* 0xA8 - 0xAF */
4671 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4672 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4673 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4674 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4675 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4676 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
 /* 0xB0 - 0xB7 */
4678 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4679 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4680 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4681 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4682 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4683 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
 /* 0xB8 - 0xBF */
4685 N, N,
4686 G(BitOp, group8),
4687 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4688 I(DstReg | SrcMem | ModRM, em_bsf_c),
4689 I(DstReg | SrcMem | ModRM, em_bsr_c),
4690 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
 /* 0xC0 - 0xC7 */
4692 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4693 N, ID(0, &instr_dual_0f_c3),
4694 N, N, N, GD(0, &group9),
 /* 0xC8 - 0xCF */
4696 X8(I(DstReg, em_bswap)),
 /* 0xD0 - 0xDF */
4698 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
 /* 0xE0 - 0xEF */
4700 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4701 N, N, N, N, N, N, N, N,
 /* 0xF0 - 0xFF */
4703 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4704};
4705
4706static const struct instr_dual instr_dual_0f_38_f0 = {
4707 I(DstReg | SrcMem | Mov, em_movbe), N
4708};
4709
4710static const struct instr_dual instr_dual_0f_38_f1 = {
4711 I(DstMem | SrcReg | Mov, em_movbe), N
4712};
4713
4714static const struct gprefix three_byte_0f_38_f0 = {
4715 ID(0, &instr_dual_0f_38_f0), N, N, N
4716};
4717
4718static const struct gprefix three_byte_0f_38_f1 = {
4719 ID(0, &instr_dual_0f_38_f1), N, N, N
4720};
4721
/*
 * The opcodes below are indexed by the third opcode byte; individual entries
 * then dispatch on the mandatory prefix (none/66/F2/F3) via the gprefix
 * tables above.
 */
4726static const struct opcode opcode_map_0f_38[256] = {
 /* 0x00 - 0x7f */
4728 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
 /* 0x80 - 0xef */
4730 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
 /* 0xf0 - 0xf1 */
4732 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4733 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
 /* 0xf2 - 0xff */
4735 N, N, X4(N), X8(N)
4736};
4737
4738#undef D
4739#undef N
4740#undef G
4741#undef GD
4742#undef I
4743#undef GP
4744#undef EXT
4745#undef MD
4746#undef ID
4747
4748#undef D2bv
4749#undef D2bvIP
4750#undef I2bv
4751#undef I2bvIP
4752#undef I6ALU
4753
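/*
 * Size in bytes of an immediate operand: 1 for byte ops, otherwise the
 * operand size, except that 64-bit operations still fetch a 4-byte
 * immediate (sign-extended later by decode_imm()).
 */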
4754static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4755{
4756 unsigned size;
4757
4758 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4759 if (size == 8)
4760 size = 4;
4761 return size;
4762}
4763
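/*
 * Fetch an immediate operand of 'size' bytes from the instruction stream at
 * ctxt->_eip into op->val; the fetched value is sign-extended, and masked
 * back down to 'size' bytes when sign_extension is false.
 */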
4764static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4765 unsigned size, bool sign_extension)
4766{
4767 int rc = X86EMUL_CONTINUE;
4768
4769 op->type = OP_IMM;
4770 op->bytes = size;
4771 op->addr.mem.ea = ctxt->_eip;
4772
4773 switch (op->bytes) {
4774 case 1:
4775 op->val = insn_fetch(s8, ctxt);
4776 break;
4777 case 2:
4778 op->val = insn_fetch(s16, ctxt);
4779 break;
4780 case 4:
4781 op->val = insn_fetch(s32, ctxt);
4782 break;
4783 case 8:
4784 op->val = insn_fetch(s64, ctxt);
4785 break;
4786 }
4787 if (!sign_extension) {
4788 switch (op->bytes) {
4789 case 1:
4790 op->val &= 0xff;
4791 break;
4792 case 2:
4793 op->val &= 0xffff;
4794 break;
4795 case 4:
4796 op->val &= 0xffffffff;
4797 break;
4798 }
4799 }
4800done:
4801 return rc;
4802}
4803
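/*
 * Decode one operand according to an Op* selector taken from ctxt->d
 * (the Dst/Src/Src2 fields filled in by the opcode tables). Memory-style
 * operands share the mem_common tail so that ctxt->memop, prepared by the
 * ModRM/absolute-address decoders, is reused.
 */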
4804static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4805 unsigned d)
4806{
4807 int rc = X86EMUL_CONTINUE;
4808
4809 switch (d) {
4810 case OpReg:
4811 decode_register_operand(ctxt, op);
4812 break;
4813 case OpImmUByte:
4814 rc = decode_imm(ctxt, op, 1, false);
4815 break;
4816 case OpMem:
4817 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4818 mem_common:
4819 *op = ctxt->memop;
4820 ctxt->memopp = op;
4821 if (ctxt->d & BitOp)
4822 fetch_bit_operand(ctxt);
4823 op->orig_val = op->val;
4824 break;
4825 case OpMem64:
4826 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4827 goto mem_common;
4828 case OpAcc:
4829 op->type = OP_REG;
4830 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4831 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4832 fetch_register_operand(op);
4833 op->orig_val = op->val;
4834 break;
4835 case OpAccLo:
4836 op->type = OP_REG;
4837 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4838 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4839 fetch_register_operand(op);
4840 op->orig_val = op->val;
4841 break;
4842 case OpAccHi:
4843 if (ctxt->d & ByteOp) {
4844 op->type = OP_NONE;
4845 break;
4846 }
4847 op->type = OP_REG;
4848 op->bytes = ctxt->op_bytes;
4849 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4850 fetch_register_operand(op);
4851 op->orig_val = op->val;
4852 break;
4853 case OpDI:
4854 op->type = OP_MEM;
4855 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4856 op->addr.mem.ea =
4857 register_address(ctxt, VCPU_REGS_RDI);
4858 op->addr.mem.seg = VCPU_SREG_ES;
4859 op->val = 0;
4860 op->count = 1;
4861 break;
4862 case OpDX:
4863 op->type = OP_REG;
4864 op->bytes = 2;
4865 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4866 fetch_register_operand(op);
4867 break;
4868 case OpCL:
4869 op->type = OP_IMM;
4870 op->bytes = 1;
4871 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4872 break;
4873 case OpImmByte:
4874 rc = decode_imm(ctxt, op, 1, true);
4875 break;
4876 case OpOne:
4877 op->type = OP_IMM;
4878 op->bytes = 1;
4879 op->val = 1;
4880 break;
4881 case OpImm:
4882 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4883 break;
4884 case OpImm64:
4885 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4886 break;
4887 case OpMem8:
4888 ctxt->memop.bytes = 1;
4889 if (ctxt->memop.type == OP_REG) {
4890 ctxt->memop.addr.reg = decode_register(ctxt,
4891 ctxt->modrm_rm, true);
4892 fetch_register_operand(&ctxt->memop);
4893 }
4894 goto mem_common;
4895 case OpMem16:
4896 ctxt->memop.bytes = 2;
4897 goto mem_common;
4898 case OpMem32:
4899 ctxt->memop.bytes = 4;
4900 goto mem_common;
4901 case OpImmU16:
4902 rc = decode_imm(ctxt, op, 2, false);
4903 break;
4904 case OpImmU:
4905 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4906 break;
4907 case OpSI:
4908 op->type = OP_MEM;
4909 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4910 op->addr.mem.ea =
4911 register_address(ctxt, VCPU_REGS_RSI);
4912 op->addr.mem.seg = ctxt->seg_override;
4913 op->val = 0;
4914 op->count = 1;
4915 break;
4916 case OpXLat:
4917 op->type = OP_MEM;
4918 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4919 op->addr.mem.ea =
4920 address_mask(ctxt,
4921 reg_read(ctxt, VCPU_REGS_RBX) +
4922 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4923 op->addr.mem.seg = ctxt->seg_override;
4924 op->val = 0;
4925 break;
4926 case OpImmFAddr:
4927 op->type = OP_IMM;
4928 op->addr.mem.ea = ctxt->_eip;
4929 op->bytes = ctxt->op_bytes + 2;
4930 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4931 break;
4932 case OpMemFAddr:
4933 ctxt->memop.bytes = ctxt->op_bytes + 2;
4934 goto mem_common;
4935 case OpES:
4936 op->type = OP_IMM;
4937 op->val = VCPU_SREG_ES;
4938 break;
4939 case OpCS:
4940 op->type = OP_IMM;
4941 op->val = VCPU_SREG_CS;
4942 break;
4943 case OpSS:
4944 op->type = OP_IMM;
4945 op->val = VCPU_SREG_SS;
4946 break;
4947 case OpDS:
4948 op->type = OP_IMM;
4949 op->val = VCPU_SREG_DS;
4950 break;
4951 case OpFS:
4952 op->type = OP_IMM;
4953 op->val = VCPU_SREG_FS;
4954 break;
4955 case OpGS:
4956 op->type = OP_IMM;
4957 op->val = VCPU_SREG_GS;
4958 break;
4959 case OpImplicit:
 /* Special instructions do their own operand decoding. */
 default:
 op->type = OP_NONE; /* Disable writeback. */
4963 break;
4964 }
4965
4966done:
4967 return rc;
4968}
4969
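/*
 * Main decode entry point: fetch legacy and REX prefixes, the one- to
 * three-byte opcode and any ModRM byte, resolve group/prefix/escape/dual
 * indirections in the opcode tables, then decode the source, second-source
 * and destination operands for x86_emulate_insn().
 */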
4970int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4971{
4972 int rc = X86EMUL_CONTINUE;
4973 int mode = ctxt->mode;
4974 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4975 bool op_prefix = false;
4976 bool has_seg_override = false;
4977 struct opcode opcode;
4978
4979 ctxt->memop.type = OP_NONE;
4980 ctxt->memopp = NULL;
4981 ctxt->_eip = ctxt->eip;
4982 ctxt->fetch.ptr = ctxt->fetch.data;
4983 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4984 ctxt->opcode_len = 1;
4985 if (insn_len > 0)
4986 memcpy(ctxt->fetch.data, insn, insn_len);
4987 else {
4988 rc = __do_insn_fetch_bytes(ctxt, 1);
4989 if (rc != X86EMUL_CONTINUE)
4990 return rc;
4991 }
4992
4993 switch (mode) {
4994 case X86EMUL_MODE_REAL:
4995 case X86EMUL_MODE_VM86:
4996 case X86EMUL_MODE_PROT16:
4997 def_op_bytes = def_ad_bytes = 2;
4998 break;
4999 case X86EMUL_MODE_PROT32:
5000 def_op_bytes = def_ad_bytes = 4;
5001 break;
5002#ifdef CONFIG_X86_64
5003 case X86EMUL_MODE_PROT64:
5004 def_op_bytes = 4;
5005 def_ad_bytes = 8;
5006 break;
5007#endif
5008 default:
5009 return EMULATION_FAILED;
5010 }
5011
5012 ctxt->op_bytes = def_op_bytes;
5013 ctxt->ad_bytes = def_ad_bytes;
5014
 /* Legacy prefixes. */
5016 for (;;) {
5017 switch (ctxt->b = insn_fetch(u8, ctxt)) {
 case 0x66: /* operand-size override */
5019 op_prefix = true;
 /* switch between 2/4 bytes */
5021 ctxt->op_bytes = def_op_bytes ^ 6;
5022 break;
 case 0x67: /* address-size override */
5024 if (mode == X86EMUL_MODE_PROT64)
 /* switch between 4/8 bytes */
5026 ctxt->ad_bytes = def_ad_bytes ^ 12;
5027 else
 /* switch between 2/4 bytes */
5029 ctxt->ad_bytes = def_ad_bytes ^ 6;
5030 break;
 case 0x26: /* ES override */
 case 0x2e: /* CS override */
 case 0x36: /* SS override */
 case 0x3e: /* DS override */
5035 has_seg_override = true;
5036 ctxt->seg_override = (ctxt->b >> 3) & 3;
5037 break;
 case 0x64: /* FS override */
 case 0x65: /* GS override */
5040 has_seg_override = true;
5041 ctxt->seg_override = ctxt->b & 7;
5042 break;
 case 0x40 ... 0x4f: /* REX prefix (64-bit mode only) */
5044 if (mode != X86EMUL_MODE_PROT64)
5045 goto done_prefixes;
5046 ctxt->rex_prefix = ctxt->b;
5047 continue;
 case 0xf0: /* LOCK */
5049 ctxt->lock_prefix = 1;
5050 break;
 case 0xf2: /* REPNE/REPNZ */
 case 0xf3: /* REP/REPE/REPZ */
5053 ctxt->rep_prefix = ctxt->b;
5054 break;
5055 default:
5056 goto done_prefixes;
5057 }
5058
 /* Any legacy prefix after a REX prefix nullifies its effect. */

5061 ctxt->rex_prefix = 0;
5062 }
5063
5064done_prefixes:
5065
 /* REX prefix: REX.W promotes the operand size to 64 bits. */
5067 if (ctxt->rex_prefix & 8)
5068 ctxt->op_bytes = 8;
5069
 /* Opcode byte(s). */
5071 opcode = opcode_table[ctxt->b];
 /* Two-byte opcode? */
5073 if (ctxt->b == 0x0f) {
5074 ctxt->opcode_len = 2;
5075 ctxt->b = insn_fetch(u8, ctxt);
5076 opcode = twobyte_table[ctxt->b];
5077
 /* 0F 38 opcode map (three-byte opcodes) */
5079 if (ctxt->b == 0x38) {
5080 ctxt->opcode_len = 3;
5081 ctxt->b = insn_fetch(u8, ctxt);
5082 opcode = opcode_map_0f_38[ctxt->b];
5083 }
5084 }
5085 ctxt->d = opcode.flags;
5086
5087 if (ctxt->d & ModRM)
5088 ctxt->modrm = insn_fetch(u8, ctxt);
5089
 /* vex-prefix instructions are not implemented */
5091 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5092 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5093 ctxt->d = NotImpl;
5094 }
5095
5096 while (ctxt->d & GroupMask) {
5097 switch (ctxt->d & GroupMask) {
5098 case Group:
5099 goffset = (ctxt->modrm >> 3) & 7;
5100 opcode = opcode.u.group[goffset];
5101 break;
5102 case GroupDual:
5103 goffset = (ctxt->modrm >> 3) & 7;
5104 if ((ctxt->modrm >> 6) == 3)
5105 opcode = opcode.u.gdual->mod3[goffset];
5106 else
5107 opcode = opcode.u.gdual->mod012[goffset];
5108 break;
5109 case RMExt:
5110 goffset = ctxt->modrm & 7;
5111 opcode = opcode.u.group[goffset];
5112 break;
5113 case Prefix:
5114 if (ctxt->rep_prefix && op_prefix)
5115 return EMULATION_FAILED;
5116 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5117 switch (simd_prefix) {
5118 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5119 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5120 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5121 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5122 }
5123 break;
5124 case Escape:
5125 if (ctxt->modrm > 0xbf)
5126 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5127 else
5128 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5129 break;
5130 case InstrDual:
5131 if ((ctxt->modrm >> 6) == 3)
5132 opcode = opcode.u.idual->mod3;
5133 else
5134 opcode = opcode.u.idual->mod012;
5135 break;
5136 case ModeDual:
5137 if (ctxt->mode == X86EMUL_MODE_PROT64)
5138 opcode = opcode.u.mdual->mode64;
5139 else
5140 opcode = opcode.u.mdual->mode32;
5141 break;
5142 default:
5143 return EMULATION_FAILED;
5144 }
5145
5146 ctxt->d &= ~(u64)GroupMask;
5147 ctxt->d |= opcode.flags;
5148 }
5149
 /* Unrecognised? */
5151 if (ctxt->d == 0)
5152 return EMULATION_FAILED;
5153
5154 ctxt->execute = opcode.u.execute;
5155
5156 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5157 return EMULATION_FAILED;
5158
5159 if (unlikely(ctxt->d &
5160 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5161 No16))) {
 /*
  * Copy the permission-check and intercept hooks here; they are
  * checked unconditionally in x86_emulate_insn().
  */
5166 ctxt->check_perm = opcode.check_perm;
5167 ctxt->intercept = opcode.intercept;
5168
5169 if (ctxt->d & NotImpl)
5170 return EMULATION_FAILED;
5171
5172 if (mode == X86EMUL_MODE_PROT64) {
5173 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5174 ctxt->op_bytes = 8;
5175 else if (ctxt->d & NearBranch)
5176 ctxt->op_bytes = 8;
5177 }
5178
5179 if (ctxt->d & Op3264) {
5180 if (mode == X86EMUL_MODE_PROT64)
5181 ctxt->op_bytes = 8;
5182 else
5183 ctxt->op_bytes = 4;
5184 }
5185
5186 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5187 ctxt->op_bytes = 4;
5188
5189 if (ctxt->d & Sse)
5190 ctxt->op_bytes = 16;
5191 else if (ctxt->d & Mmx)
5192 ctxt->op_bytes = 8;
5193 }
5194
 /* ModRM and SIB bytes, or an absolute moffset for MemAbs forms. */
5196 if (ctxt->d & ModRM) {
5197 rc = decode_modrm(ctxt, &ctxt->memop);
5198 if (!has_seg_override) {
5199 has_seg_override = true;
5200 ctxt->seg_override = ctxt->modrm_seg;
5201 }
5202 } else if (ctxt->d & MemAbs)
5203 rc = decode_abs(ctxt, &ctxt->memop);
5204 if (rc != X86EMUL_CONTINUE)
5205 goto done;
5206
5207 if (!has_seg_override)
5208 ctxt->seg_override = VCPU_SREG_DS;
5209
5210 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5211
 /*
  * Decode and fetch the source operand: register, memory
  * or immediate.
  */
5216 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5217 if (rc != X86EMUL_CONTINUE)
5218 goto done;
5219
 /*
  * Decode and fetch the second source operand: register, memory
  * or immediate.
  */
5224 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5225 if (rc != X86EMUL_CONTINUE)
5226 goto done;
5227
 /* Decode and fetch the destination operand: register or memory. */
5229 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5230
5231 if (ctxt->rip_relative && likely(ctxt->memopp))
5232 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5233 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5234
5235done:
5236 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5237}
5238
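/* True if the decoded instruction is flagged as one that may write guest page tables. */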
5239bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5240{
5241 return ctxt->d & PageTable;
5242}
5243
5244static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5245{
 /*
  * The second termination condition only applies for REPE and REPNE:
  * if the prefix is REPE/REPZ and ZF is clear, or the prefix is
  * REPNE/REPNZ and ZF is set, the string instruction is done. This
  * only matters for cmps/scas (0xa6/0xa7/0xae/0xaf); the other string
  * instructions ignore ZF.
  */
5253 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5254 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5255 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5256 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5257 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5258 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5259 return true;
5260
5261 return false;
5262}
5263
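/*
 * Force any pending x87 exception to be delivered before emulating an MMX
 * instruction: run a guarded fwait and convert a fault into a guest #MF.
 */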
5264static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5265{
5266 int rc;
5267
5268 ctxt->ops->get_fpu(ctxt);
5269 rc = asm_safe("fwait");
5270 ctxt->ops->put_fpu(ctxt);
5271
5272 if (unlikely(rc != X86EMUL_CONTINUE))
5273 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5274
5275 return X86EMUL_CONTINUE;
5276}
5277
5278static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5279 struct operand *op)
5280{
5281 if (op->type == OP_MM)
5282 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5283}
5284
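/*
 * Dispatch to a fastop stub: the stubs come in per-operand-size variants
 * laid out FASTOP_SIZE bytes apart, so for non-byte ops the pointer is
 * advanced by __ffs(dst.bytes) * FASTOP_SIZE. Guest flags are installed
 * around the call and read back afterwards. Treating a NULL 'fop' on
 * return as a divide error assumes the stubs' exception fixup clears the
 * pointer register on a faulting division (the stubs are not part of this
 * excerpt).
 */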
5285static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5286{
5287 register void *__sp asm(_ASM_SP);
5288 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5289
5290 if (!(ctxt->d & ByteOp))
5291 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5292
5293 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5294 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5295 [fastop]"+S"(fop), "+r"(__sp)
5296 : "c"(ctxt->src2.val));
5297
5298 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5299 if (!fop)
5300 return emulate_de(ctxt);
5301 return X86EMUL_CONTINUE;
5302}
5303
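/*
 * Clear the per-instruction decode state (the fields laid out between
 * rip_relative and modrm in the context) and reset the I/O and memory
 * read-ahead caches.
 */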
5304void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5305{
5306 memset(&ctxt->rip_relative, 0,
5307 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5308
5309 ctxt->io_read.pos = 0;
5310 ctxt->io_read.end = 0;
5311 ctxt->mem_read.end = 0;
5312}
5313
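/*
 * Execute a previously decoded instruction: check LOCK/CPL/mode/permission
 * constraints and intercepts, read the memory operands, run the ->execute
 * or fastop handler (or one of the open-coded cases below), then write back
 * the results, advance RIP and handle string-instruction restarts.
 */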
5314int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5315{
5316 const struct x86_emulate_ops *ops = ctxt->ops;
5317 int rc = X86EMUL_CONTINUE;
5318 int saved_dst_type = ctxt->dst.type;
5319
5320 ctxt->mem_read.pos = 0;
5321
 /* LOCK prefix is allowed only with some instructions */
5323 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5324 rc = emulate_ud(ctxt);
5325 goto done;
5326 }
5327
5328 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5329 rc = emulate_ud(ctxt);
5330 goto done;
5331 }
5332
5333 if (unlikely(ctxt->d &
5334 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5335 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5336 (ctxt->d & Undefined)) {
5337 rc = emulate_ud(ctxt);
5338 goto done;
5339 }
5340
5341 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5342 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5343 rc = emulate_ud(ctxt);
5344 goto done;
5345 }
5346
5347 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5348 rc = emulate_nm(ctxt);
5349 goto done;
5350 }
5351
5352 if (ctxt->d & Mmx) {
5353 rc = flush_pending_x87_faults(ctxt);
5354 if (rc != X86EMUL_CONTINUE)
5355 goto done;
 /*
  * Now that we know the FPU is exception-safe, we can fetch
  * operands from it.
  */
5360 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5361 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5362 if (!(ctxt->d & Mov))
5363 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5364 }
5365
5366 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5367 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5368 X86_ICPT_PRE_EXCEPT);
5369 if (rc != X86EMUL_CONTINUE)
5370 goto done;
5371 }
5372
 /* Instruction can only be executed in protected mode */
5374 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5375 rc = emulate_ud(ctxt);
5376 goto done;
5377 }
5378
 /* Privileged instruction can be executed only in CPL=0 */
5380 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5381 if (ctxt->d & PrivUD)
5382 rc = emulate_ud(ctxt);
5383 else
5384 rc = emulate_gp(ctxt, 0);
5385 goto done;
5386 }
5387
 /* Do instruction specific permission checks */
5389 if (ctxt->d & CheckPerm) {
5390 rc = ctxt->check_perm(ctxt);
5391 if (rc != X86EMUL_CONTINUE)
5392 goto done;
5393 }
5394
5395 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5396 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5397 X86_ICPT_POST_EXCEPT);
5398 if (rc != X86EMUL_CONTINUE)
5399 goto done;
5400 }
5401
5402 if (ctxt->rep_prefix && (ctxt->d & String)) {
 /* All REP prefixes have the same first termination condition: RCX == 0 */
5404 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5405 string_registers_quirk(ctxt);
5406 ctxt->eip = ctxt->_eip;
5407 ctxt->eflags &= ~X86_EFLAGS_RF;
5408 goto done;
5409 }
5410 }
5411 }
5412
5413 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5414 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5415 ctxt->src.valptr, ctxt->src.bytes);
5416 if (rc != X86EMUL_CONTINUE)
5417 goto done;
5418 ctxt->src.orig_val64 = ctxt->src.val64;
5419 }
5420
5421 if (ctxt->src2.type == OP_MEM) {
5422 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5423 &ctxt->src2.val, ctxt->src2.bytes);
5424 if (rc != X86EMUL_CONTINUE)
5425 goto done;
5426 }
5427
5428 if ((ctxt->d & DstMask) == ImplicitOps)
5429 goto special_insn;
5430
5431
5432 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
 /* optimisation - avoid slow emulated read if Mov */
5434 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5435 &ctxt->dst.val, ctxt->dst.bytes);
5436 if (rc != X86EMUL_CONTINUE) {
5437 if (!(ctxt->d & NoWrite) &&
5438 rc == X86EMUL_PROPAGATE_FAULT &&
5439 ctxt->exception.vector == PF_VECTOR)
5440 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5441 goto done;
5442 }
5443 }
5444
5445 ctxt->dst.orig_val64 = ctxt->dst.val64;
5446
5447special_insn:
5448
5449 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5450 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5451 X86_ICPT_POST_MEMACCESS);
5452 if (rc != X86EMUL_CONTINUE)
5453 goto done;
5454 }
5455
5456 if (ctxt->rep_prefix && (ctxt->d & String))
5457 ctxt->eflags |= X86_EFLAGS_RF;
5458 else
5459 ctxt->eflags &= ~X86_EFLAGS_RF;
5460
5461 if (ctxt->execute) {
5462 if (ctxt->d & Fastop) {
5463 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5464 rc = fastop(ctxt, fop);
5465 if (rc != X86EMUL_CONTINUE)
5466 goto done;
5467 goto writeback;
5468 }
5469 rc = ctxt->execute(ctxt);
5470 if (rc != X86EMUL_CONTINUE)
5471 goto done;
5472 goto writeback;
5473 }
5474
5475 if (ctxt->opcode_len == 2)
5476 goto twobyte_insn;
5477 else if (ctxt->opcode_len == 3)
5478 goto threebyte_insn;
5479
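 /*
  * One-byte opcodes that have no ->execute callback are handled
  * open-coded below (short jcc, lea, xchg/nop, sign extensions, int,
  * hlt and the flag-twiddling instructions).
  */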
5480 switch (ctxt->b) {
5481 case 0x70 ... 0x7f:
5482 if (test_cc(ctxt->b, ctxt->eflags))
5483 rc = jmp_rel(ctxt, ctxt->src.val);
5484 break;
 case 0x8d: /* lea r16/r32, m */
5486 ctxt->dst.val = ctxt->src.addr.mem.ea;
5487 break;
 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5489 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5490 ctxt->dst.type = OP_NONE;
5491 else
5492 rc = em_xchg(ctxt);
5493 break;
 case 0x98: /* cbw/cwde/cdqe */
5495 switch (ctxt->op_bytes) {
5496 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5497 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5498 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5499 }
5500 break;
5501 case 0xcc:
5502 rc = emulate_int(ctxt, 3);
5503 break;
5504 case 0xcd:
5505 rc = emulate_int(ctxt, ctxt->src.val);
5506 break;
5507 case 0xce:
5508 if (ctxt->eflags & X86_EFLAGS_OF)
5509 rc = emulate_int(ctxt, 4);
5510 break;
5511 case 0xe9:
5512 case 0xeb:
5513 rc = jmp_rel(ctxt, ctxt->src.val);
5514 ctxt->dst.type = OP_NONE;
5515 break;
5516 case 0xf4:
5517 ctxt->ops->halt(ctxt);
5518 break;
5519 case 0xf5:
 /* complement carry flag from eflags reg */
5521 ctxt->eflags ^= X86_EFLAGS_CF;
5522 break;
5523 case 0xf8:
5524 ctxt->eflags &= ~X86_EFLAGS_CF;
5525 break;
5526 case 0xf9:
5527 ctxt->eflags |= X86_EFLAGS_CF;
5528 break;
5529 case 0xfc:
5530 ctxt->eflags &= ~X86_EFLAGS_DF;
5531 break;
5532 case 0xfd:
5533 ctxt->eflags |= X86_EFLAGS_DF;
5534 break;
5535 default:
5536 goto cannot_emulate;
5537 }
5538
5539 if (rc != X86EMUL_CONTINUE)
5540 goto done;
5541
5542writeback:
5543 if (ctxt->d & SrcWrite) {
5544 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5545 rc = writeback(ctxt, &ctxt->src);
5546 if (rc != X86EMUL_CONTINUE)
5547 goto done;
5548 }
5549 if (!(ctxt->d & NoWrite)) {
5550 rc = writeback(ctxt, &ctxt->dst);
5551 if (rc != X86EMUL_CONTINUE)
5552 goto done;
5553 }
5554
 /*
  * Restore dst type in case the decoding will be reused
  * (happens for string instructions).
  */
5559 ctxt->dst.type = saved_dst_type;
5560
5561 if ((ctxt->d & SrcMask) == SrcSI)
5562 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5563
5564 if ((ctxt->d & DstMask) == DstDI)
5565 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5566
5567 if (ctxt->rep_prefix && (ctxt->d & String)) {
5568 unsigned int count;
5569 struct read_cache *r = &ctxt->io_read;
5570 if ((ctxt->d & SrcMask) == SrcSI)
5571 count = ctxt->src.count;
5572 else
5573 count = ctxt->dst.count;
5574 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5575
5576 if (!string_insn_completed(ctxt)) {
 /*
  * Re-enter the guest when the pio read-ahead buffer is empty or,
  * if it is not used, after each 1024 iterations.
  */
5581 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5582 (r->end == 0 || r->end != r->pos)) {
 /*
  * Reset the read cache. It usually happens before decode, but since
  * the instruction is restarted due to exceptions or when a string
  * instruction breaks into the debugger, reset it here.
  */
5588 ctxt->mem_read.end = 0;
5589 writeback_registers(ctxt);
5590 return EMULATION_RESTART;
5591 }
5592 goto done;
5593 }
5594 ctxt->eflags &= ~X86_EFLAGS_RF;
5595 }
5596
5597 ctxt->eip = ctxt->_eip;
5598
5599done:
5600 if (rc == X86EMUL_PROPAGATE_FAULT) {
5601 WARN_ON(ctxt->exception.vector > 0x1f);
5602 ctxt->have_exception = true;
5603 }
5604 if (rc == X86EMUL_INTERCEPTED)
5605 return EMULATION_INTERCEPTED;
5606
5607 if (rc == X86EMUL_CONTINUE)
5608 writeback_registers(ctxt);
5609
5610 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5611
5612twobyte_insn:
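 /*
  * Two-byte opcodes that have no ->execute callback are handled here:
  * mov to/from cr/dr, cmov, jcc, setcc, movzx/movsx, wbinvd and a few
  * no-op encodings.
  */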
5613 switch (ctxt->b) {
5614 case 0x09:
5615 (ctxt->ops->wbinvd)(ctxt);
5616 break;
 case 0x08: /* invd */
 case 0x0d: /* GrpP (prefetch) */
 case 0x18: /* Grp16 (prefetch/nop) */
 case 0x1f: /* nop */
5621 break;
 case 0x20: /* mov cr, reg */
5623 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5624 break;
 case 0x21: /* mov from dr to reg */
5626 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5627 break;
 case 0x40 ... 0x4f: /* cmov */
5629 if (test_cc(ctxt->b, ctxt->eflags))
5630 ctxt->dst.val = ctxt->src.val;
5631 else if (ctxt->op_bytes != 4)
5632 ctxt->dst.type = OP_NONE;
5633 break;
5634 case 0x80 ... 0x8f:
5635 if (test_cc(ctxt->b, ctxt->eflags))
5636 rc = jmp_rel(ctxt, ctxt->src.val);
5637 break;
5638 case 0x90 ... 0x9f:
5639 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5640 break;
 case 0xb6 ... 0xb7: /* movzx */
5642 ctxt->dst.bytes = ctxt->op_bytes;
5643 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5644 : (u16) ctxt->src.val;
5645 break;
 case 0xbe ... 0xbf: /* movsx */
5647 ctxt->dst.bytes = ctxt->op_bytes;
5648 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5649 (s16) ctxt->src.val;
5650 break;
5651 default:
5652 goto cannot_emulate;
5653 }
5654
5655threebyte_insn:
5656
5657 if (rc != X86EMUL_CONTINUE)
5658 goto done;
5659
5660 goto writeback;
5661
5662cannot_emulate:
5663 return EMULATION_FAILED;
5664}
5665
5666void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5667{
5668 invalidate_registers(ctxt);
5669}
5670
5671void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5672{
5673 writeback_registers(ctxt);
5674}
5675
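/*
 * A cached guest physical address can only be reused if the instruction
 * touches a single memory location; rep-string and TwoMemOp forms may
 * access more than one, so decline for those.
 */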
5676bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5677{
5678 if (ctxt->rep_prefix && (ctxt->d & String))
5679 return false;
5680
5681 if (ctxt->d & TwoMemOp)
5682 return false;
5683
5684 return true;
5685}
5686