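/*
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator for KVM.
 */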
23#include <linux/kvm_host.h>
24#include "kvm_cache_regs.h"
25#include <asm/kvm_emulate.h>
26#include <linux/stringify.h>
27#include <asm/debugreg.h>
28
29#include "x86.h"
30#include "tss.h"
31
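/*
 * Operand types
 */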
35#define OpNone 0ull
36#define OpImplicit 1ull
37#define OpReg 2ull
38#define OpMem 3ull
39#define OpAcc 4ull
40#define OpDI 5ull
41#define OpMem64 6ull
42#define OpImmUByte 7ull
43#define OpDX 8ull
44#define OpCL 9ull
45#define OpImmByte 10ull
46#define OpOne 11ull
47#define OpImm 12ull
48#define OpMem16 13ull
49#define OpMem32 14ull
50#define OpImmU 15ull
51#define OpSI 16ull
52#define OpImmFAddr 17ull
53#define OpMemFAddr 18ull
54#define OpImmU16 19ull
55#define OpES 20ull
56#define OpCS 21ull
57#define OpSS 22ull
58#define OpDS 23ull
59#define OpFS 24ull
60#define OpGS 25ull
61#define OpMem8 26ull
62#define OpImm64 27ull
63#define OpXLat 28ull
64#define OpAccLo 29ull
65#define OpAccHi 30ull
66
67#define OpBits 5
68#define OpMask ((1ull << OpBits) - 1)
69
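/*
 * Opcode decode flags, packed into the 64-bit flags word of struct opcode:
 * bit 0 selects byte-sized operands, bits 1-5 encode the destination operand
 * type (an Op* value shifted by DstShift), bits 6-10 the source operand type,
 * bits 31-35 a second source operand, and most other bits describe decode
 * and execution properties of the opcode.
 */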
80#define ByteOp (1<<0)
81
82#define DstShift 1
83#define ImplicitOps (OpImplicit << DstShift)
84#define DstReg (OpReg << DstShift)
85#define DstMem (OpMem << DstShift)
86#define DstAcc (OpAcc << DstShift)
87#define DstDI (OpDI << DstShift)
88#define DstMem64 (OpMem64 << DstShift)
89#define DstMem16 (OpMem16 << DstShift)
90#define DstImmUByte (OpImmUByte << DstShift)
91#define DstDX (OpDX << DstShift)
92#define DstAccLo (OpAccLo << DstShift)
93#define DstMask (OpMask << DstShift)
94
95#define SrcShift 6
96#define SrcNone (OpNone << SrcShift)
97#define SrcReg (OpReg << SrcShift)
98#define SrcMem (OpMem << SrcShift)
99#define SrcMem16 (OpMem16 << SrcShift)
100#define SrcMem32 (OpMem32 << SrcShift)
101#define SrcImm (OpImm << SrcShift)
102#define SrcImmByte (OpImmByte << SrcShift)
103#define SrcOne (OpOne << SrcShift)
104#define SrcImmUByte (OpImmUByte << SrcShift)
105#define SrcImmU (OpImmU << SrcShift)
106#define SrcSI (OpSI << SrcShift)
107#define SrcXLat (OpXLat << SrcShift)
108#define SrcImmFAddr (OpImmFAddr << SrcShift)
109#define SrcMemFAddr (OpMemFAddr << SrcShift)
110#define SrcAcc (OpAcc << SrcShift)
111#define SrcImmU16 (OpImmU16 << SrcShift)
112#define SrcImm64 (OpImm64 << SrcShift)
113#define SrcDX (OpDX << SrcShift)
114#define SrcMem8 (OpMem8 << SrcShift)
115#define SrcAccHi (OpAccHi << SrcShift)
116#define SrcMask (OpMask << SrcShift)
117#define BitOp (1<<11)
118#define MemAbs (1<<12)
119#define String (1<<13)
120#define Stack (1<<14)
121#define GroupMask (7<<15)
122#define Group (1<<15)
123#define GroupDual (2<<15)
124#define Prefix (3<<15)
125#define RMExt (4<<15)
126#define Escape (5<<15)
127#define InstrDual (6<<15)
128#define ModeDual (7<<15)
129#define Sse (1<<18)
130
131#define ModRM (1<<19)
132
133#define Mov (1<<20)
134
135#define Prot (1<<21)
136#define EmulateOnUD (1<<22)
137#define NoAccess (1<<23)
138#define Op3264 (1<<24)
139#define Undefined (1<<25)
140#define Lock (1<<26)
141#define Priv (1<<27)
142#define No64 (1<<28)
143#define PageTable (1 << 29)
144#define NotImpl (1 << 30)
145
146#define Src2Shift (31)
147#define Src2None (OpNone << Src2Shift)
148#define Src2Mem (OpMem << Src2Shift)
149#define Src2CL (OpCL << Src2Shift)
150#define Src2ImmByte (OpImmByte << Src2Shift)
151#define Src2One (OpOne << Src2Shift)
152#define Src2Imm (OpImm << Src2Shift)
153#define Src2ES (OpES << Src2Shift)
154#define Src2CS (OpCS << Src2Shift)
155#define Src2SS (OpSS << Src2Shift)
156#define Src2DS (OpDS << Src2Shift)
157#define Src2FS (OpFS << Src2Shift)
158#define Src2GS (OpGS << Src2Shift)
159#define Src2Mask (OpMask << Src2Shift)
160#define Mmx ((u64)1 << 40)
161#define AlignMask ((u64)7 << 41)
162#define Aligned ((u64)1 << 41)
163#define Unaligned ((u64)2 << 41)
164#define Avx ((u64)3 << 41)
165#define Aligned16 ((u64)4 << 41)
166#define Fastop ((u64)1 << 44)
167#define NoWrite ((u64)1 << 45)
168#define SrcWrite ((u64)1 << 46)
169#define NoMod ((u64)1 << 47)
170#define Intercept ((u64)1 << 48)
171#define CheckPerm ((u64)1 << 49)
172#define PrivUD ((u64)1 << 51)
173#define NearBranch ((u64)1 << 52)
174#define No16 ((u64)1 << 53)
175#define IncSP ((u64)1 << 54)
176
177#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
178
179#define X2(x...) x, x
180#define X3(x...) X2(x), x
181#define X4(x...) X2(x), X2(x)
182#define X5(x...) X4(x), x
183#define X6(x...) X4(x), X2(x)
184#define X7(x...) X4(x), X3(x)
185#define X8(x...) X4(x), X4(x)
186#define X16(x...) X8(x), X8(x)
187
188#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
189#define FASTOP_SIZE 8
190
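/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */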
208struct fastop;
209
210struct opcode {
211 u64 flags : 56;
212 u64 intercept : 8;
213 union {
214 int (*execute)(struct x86_emulate_ctxt *ctxt);
215 const struct opcode *group;
216 const struct group_dual *gdual;
217 const struct gprefix *gprefix;
218 const struct escape *esc;
219 const struct instr_dual *idual;
220 const struct mode_dual *mdual;
221 void (*fastop)(struct fastop *fake);
222 } u;
223 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
224};
225
226struct group_dual {
227 struct opcode mod012[8];
228 struct opcode mod3[8];
229};
230
231struct gprefix {
232 struct opcode pfx_no;
233 struct opcode pfx_66;
234 struct opcode pfx_f2;
235 struct opcode pfx_f3;
236};
237
238struct escape {
239 struct opcode op[8];
240 struct opcode high[64];
241};
242
243struct instr_dual {
244 struct opcode mod012;
245 struct opcode mod3;
246};
247
248struct mode_dual {
249 struct opcode mode32;
250 struct opcode mode64;
251};
252
253#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
254
255enum x86_transfer_type {
256 X86_TRANSFER_NONE,
257 X86_TRANSFER_CALL_JMP,
258 X86_TRANSFER_RET,
259 X86_TRANSFER_TASK_SWITCH,
260};
261
262static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
263{
264 if (!(ctxt->regs_valid & (1 << nr))) {
265 ctxt->regs_valid |= 1 << nr;
266 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
267 }
268 return ctxt->_regs[nr];
269}
270
271static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
272{
273 ctxt->regs_valid |= 1 << nr;
274 ctxt->regs_dirty |= 1 << nr;
275 return &ctxt->_regs[nr];
276}
277
278static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
279{
280 reg_read(ctxt, nr);
281 return reg_write(ctxt, nr);
282}
283
284static void writeback_registers(struct x86_emulate_ctxt *ctxt)
285{
286 unsigned reg;
287
288 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
289 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
290}
291
292static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
293{
294 ctxt->regs_dirty = 0;
295 ctxt->regs_valid = 0;
296}
297
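/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */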
302#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
303 X86_EFLAGS_PF|X86_EFLAGS_CF)
304
305#ifdef CONFIG_X86_64
306#define ON64(x) x
307#else
308#define ON64(x)
309#endif
310
311static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
312
313#define FOP_FUNC(name) \
314 ".align " __stringify(FASTOP_SIZE) " \n\t" \
315 ".type " name ", @function \n\t" \
316 name ":\n\t"
317
318#define FOP_RET "ret \n\t"
319
320#define FOP_START(op) \
321 extern void em_##op(struct fastop *fake); \
322 asm(".pushsection .text, \"ax\" \n\t" \
323 ".global em_" #op " \n\t" \
324 FOP_FUNC("em_" #op)
325
326#define FOP_END \
327 ".popsection")
328
329#define FOPNOP() \
330 FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
331 FOP_RET
332
333#define FOP1E(op, dst) \
334 FOP_FUNC(#op "_" #dst) \
335 "10: " #op " %" #dst " \n\t" FOP_RET
336
337#define FOP1EEX(op, dst) \
338 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
339
340#define FASTOP1(op) \
341 FOP_START(op) \
342 FOP1E(op##b, al) \
343 FOP1E(op##w, ax) \
344 FOP1E(op##l, eax) \
345 ON64(FOP1E(op##q, rax)) \
346 FOP_END
347
348
349#define FASTOP1SRC2(op, name) \
350 FOP_START(name) \
351 FOP1E(op, cl) \
352 FOP1E(op, cx) \
353 FOP1E(op, ecx) \
354 ON64(FOP1E(op, rcx)) \
355 FOP_END
356
357
358#define FASTOP1SRC2EX(op, name) \
359 FOP_START(name) \
360 FOP1EEX(op, cl) \
361 FOP1EEX(op, cx) \
362 FOP1EEX(op, ecx) \
363 ON64(FOP1EEX(op, rcx)) \
364 FOP_END
365
366#define FOP2E(op, dst, src) \
367 FOP_FUNC(#op "_" #dst "_" #src) \
368 #op " %" #src ", %" #dst " \n\t" FOP_RET
369
370#define FASTOP2(op) \
371 FOP_START(op) \
372 FOP2E(op##b, al, dl) \
373 FOP2E(op##w, ax, dx) \
374 FOP2E(op##l, eax, edx) \
375 ON64(FOP2E(op##q, rax, rdx)) \
376 FOP_END
377
378
379#define FASTOP2W(op) \
380 FOP_START(op) \
381 FOPNOP() \
382 FOP2E(op##w, ax, dx) \
383 FOP2E(op##l, eax, edx) \
384 ON64(FOP2E(op##q, rax, rdx)) \
385 FOP_END
386
387
388#define FASTOP2CL(op) \
389 FOP_START(op) \
390 FOP2E(op##b, al, cl) \
391 FOP2E(op##w, ax, cl) \
392 FOP2E(op##l, eax, cl) \
393 ON64(FOP2E(op##q, rax, cl)) \
394 FOP_END
395
396
397#define FASTOP2R(op, name) \
398 FOP_START(name) \
399 FOP2E(op##b, dl, al) \
400 FOP2E(op##w, dx, ax) \
401 FOP2E(op##l, edx, eax) \
402 ON64(FOP2E(op##q, rdx, rax)) \
403 FOP_END
404
405#define FOP3E(op, dst, src, src2) \
406 FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
407 #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
408
409
410#define FASTOP3WCL(op) \
411 FOP_START(op) \
412 FOPNOP() \
413 FOP3E(op##w, ax, dx, cl) \
414 FOP3E(op##l, eax, edx, cl) \
415 ON64(FOP3E(op##q, rax, rdx, cl)) \
416 FOP_END
417
418
419#define FOP_SETCC(op) \
420 ".align 4 \n\t" \
421 ".type " #op ", @function \n\t" \
422 #op ": \n\t" \
423 #op " %al \n\t" \
424 FOP_RET
425
426asm(".global kvm_fastop_exception \n"
427 "kvm_fastop_exception: xor %esi, %esi; ret");
428
429FOP_START(setcc)
430FOP_SETCC(seto)
431FOP_SETCC(setno)
432FOP_SETCC(setc)
433FOP_SETCC(setnc)
434FOP_SETCC(setz)
435FOP_SETCC(setnz)
436FOP_SETCC(setbe)
437FOP_SETCC(setnbe)
438FOP_SETCC(sets)
439FOP_SETCC(setns)
440FOP_SETCC(setp)
441FOP_SETCC(setnp)
442FOP_SETCC(setl)
443FOP_SETCC(setnl)
444FOP_SETCC(setle)
445FOP_SETCC(setnle)
446FOP_END;
447
448FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
449FOP_END;
450
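/*
 * asm_safe() runs a single instruction with an exception fixup: if the
 * instruction faults, execution resumes past it and the macro evaluates to
 * X86EMUL_UNHANDLEABLE instead of X86EMUL_CONTINUE.
 */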
455#define asm_safe(insn, inoutclob...) \
456({ \
457 int _fault = 0; \
458 \
459 asm volatile("1:" insn "\n" \
460 "2:\n" \
461 ".pushsection .fixup, \"ax\"\n" \
462 "3: movl $1, %[_fault]\n" \
463 " jmp 2b\n" \
464 ".popsection\n" \
465 _ASM_EXTABLE(1b, 3b) \
466 : [_fault] "+qm"(_fault) inoutclob ); \
467 \
468 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
469})
470
471static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
472 enum x86_intercept intercept,
473 enum x86_intercept_stage stage)
474{
475 struct x86_instruction_info info = {
476 .intercept = intercept,
477 .rep_prefix = ctxt->rep_prefix,
478 .modrm_mod = ctxt->modrm_mod,
479 .modrm_reg = ctxt->modrm_reg,
480 .modrm_rm = ctxt->modrm_rm,
481 .src_val = ctxt->src.val64,
482 .dst_val = ctxt->dst.val64,
483 .src_bytes = ctxt->src.bytes,
484 .dst_bytes = ctxt->dst.bytes,
485 .ad_bytes = ctxt->ad_bytes,
486 .next_rip = ctxt->eip,
487 };
488
489 return ctxt->ops->intercept(ctxt, &info, stage);
490}
491
492static void assign_masked(ulong *dest, ulong src, ulong mask)
493{
494 *dest = (*dest & ~mask) | (src & mask);
495}
496
497static void assign_register(unsigned long *reg, u64 val, int bytes)
498{
499
500 switch (bytes) {
501 case 1:
502 *(u8 *)reg = (u8)val;
503 break;
504 case 2:
505 *(u16 *)reg = (u16)val;
506 break;
507 case 4:
508 *reg = (u32)val;
509 break;
510 case 8:
511 *reg = val;
512 break;
513 }
514}
515
516static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
517{
518 return (1UL << (ctxt->ad_bytes << 3)) - 1;
519}
520
521static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
522{
523 u16 sel;
524 struct desc_struct ss;
525
526 if (ctxt->mode == X86EMUL_MODE_PROT64)
527 return ~0UL;
528 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
529 return ~0U >> ((ss.d ^ 1) * 16);
530}
531
532static int stack_size(struct x86_emulate_ctxt *ctxt)
533{
534 return (__fls(stack_mask(ctxt)) + 1) >> 3;
535}
536
537
538static inline unsigned long
539address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
540{
541 if (ctxt->ad_bytes == sizeof(unsigned long))
542 return reg;
543 else
544 return reg & ad_mask(ctxt);
545}
546
547static inline unsigned long
548register_address(struct x86_emulate_ctxt *ctxt, int reg)
549{
550 return address_mask(ctxt, reg_read(ctxt, reg));
551}
552
553static void masked_increment(ulong *reg, ulong mask, int inc)
554{
555 assign_masked(reg, *reg + inc, mask);
556}
557
558static inline void
559register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
560{
561 ulong *preg = reg_rmw(ctxt, reg);
562
563 assign_register(preg, *preg + inc, ctxt->ad_bytes);
564}
565
566static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
567{
568 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
569}
570
571static u32 desc_limit_scaled(struct desc_struct *desc)
572{
573 u32 limit = get_desc_limit(desc);
574
575 return desc->g ? (limit << 12) | 0xfff : limit;
576}
577
578static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
579{
580 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
581 return 0;
582
583 return ctxt->ops->get_cached_segment_base(ctxt, seg);
584}
585
586static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
587 u32 error, bool valid)
588{
589 WARN_ON(vec > 0x1f);
590 ctxt->exception.vector = vec;
591 ctxt->exception.error_code = error;
592 ctxt->exception.error_code_valid = valid;
593 return X86EMUL_PROPAGATE_FAULT;
594}
595
596static int emulate_db(struct x86_emulate_ctxt *ctxt)
597{
598 return emulate_exception(ctxt, DB_VECTOR, 0, false);
599}
600
601static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
602{
603 return emulate_exception(ctxt, GP_VECTOR, err, true);
604}
605
606static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
607{
608 return emulate_exception(ctxt, SS_VECTOR, err, true);
609}
610
611static int emulate_ud(struct x86_emulate_ctxt *ctxt)
612{
613 return emulate_exception(ctxt, UD_VECTOR, 0, false);
614}
615
616static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
617{
618 return emulate_exception(ctxt, TS_VECTOR, err, true);
619}
620
621static int emulate_de(struct x86_emulate_ctxt *ctxt)
622{
623 return emulate_exception(ctxt, DE_VECTOR, 0, false);
624}
625
626static int emulate_nm(struct x86_emulate_ctxt *ctxt)
627{
628 return emulate_exception(ctxt, NM_VECTOR, 0, false);
629}
630
631static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
632{
633 u16 selector;
634 struct desc_struct desc;
635
636 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
637 return selector;
638}
639
640static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
641 unsigned seg)
642{
643 u16 dummy;
644 u32 base3;
645 struct desc_struct desc;
646
647 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
648 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
649}
650
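/*
 * Return the required alignment for a memory access of @size bytes.
 * Accesses below 16 bytes are never checked; larger (SSE-style) accesses
 * use the AlignMask bits of the decode flags: Unaligned and Avx operands
 * are exempt, Aligned16 requires 16-byte alignment, and the default
 * (Aligned) requires natural alignment to @size.
 */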
660static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
661{
662 u64 alignment = ctxt->d & AlignMask;
663
664 if (likely(size < 16))
665 return 1;
666
667 switch (alignment) {
668 case Unaligned:
669 case Avx:
670 return 1;
671 case Aligned16:
672 return 16;
673 case Aligned:
674 default:
675 return size;
676 }
677}
678
679static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
680 struct segmented_address addr,
681 unsigned *max_size, unsigned size,
682 bool write, bool fetch,
683 enum x86emul_mode mode, ulong *linear)
684{
685 struct desc_struct desc;
686 bool usable;
687 ulong la;
688 u32 lim;
689 u16 sel;
690
691 la = seg_base(ctxt, addr.seg) + addr.ea;
692 *max_size = 0;
693 switch (mode) {
694 case X86EMUL_MODE_PROT64:
695 *linear = la;
696 if (is_noncanonical_address(la))
697 goto bad;
698
699 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
700 if (size > *max_size)
701 goto bad;
702 break;
703 default:
704 *linear = la = (u32)la;
705 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
706 addr.seg);
707 if (!usable)
708 goto bad;
709
710 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
711 || !(desc.type & 2)) && write)
712 goto bad;
713
714 if (!fetch && (desc.type & 8) && !(desc.type & 2))
715 goto bad;
716 lim = desc_limit_scaled(&desc);
717 if (!(desc.type & 8) && (desc.type & 4)) {
718
719 if (addr.ea <= lim)
720 goto bad;
721 lim = desc.d ? 0xffffffff : 0xffff;
722 }
723 if (addr.ea > lim)
724 goto bad;
725 if (lim == 0xffffffff)
726 *max_size = ~0u;
727 else {
728 *max_size = (u64)lim + 1 - addr.ea;
729 if (size > *max_size)
730 goto bad;
731 }
732 break;
733 }
734 if (la & (insn_alignment(ctxt, size) - 1))
735 return emulate_gp(ctxt, 0);
736 return X86EMUL_CONTINUE;
737bad:
738 if (addr.seg == VCPU_SREG_SS)
739 return emulate_ss(ctxt, 0);
740 else
741 return emulate_gp(ctxt, 0);
742}
743
744static int linearize(struct x86_emulate_ctxt *ctxt,
745 struct segmented_address addr,
746 unsigned size, bool write,
747 ulong *linear)
748{
749 unsigned max_size;
750 return __linearize(ctxt, addr, &max_size, size, write, false,
751 ctxt->mode, linear);
752}
753
754static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
755 enum x86emul_mode mode)
756{
757 ulong linear;
758 int rc;
759 unsigned max_size;
760 struct segmented_address addr = { .seg = VCPU_SREG_CS,
761 .ea = dst };
762
763 if (ctxt->op_bytes != sizeof(unsigned long))
764 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
765 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
766 if (rc == X86EMUL_CONTINUE)
767 ctxt->_eip = addr.ea;
768 return rc;
769}
770
771static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
772{
773 return assign_eip(ctxt, dst, ctxt->mode);
774}
775
776static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
777 const struct desc_struct *cs_desc)
778{
779 enum x86emul_mode mode = ctxt->mode;
780 int rc;
781
782#ifdef CONFIG_X86_64
783 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
784 if (cs_desc->l) {
785 u64 efer = 0;
786
787 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
788 if (efer & EFER_LMA)
789 mode = X86EMUL_MODE_PROT64;
790 } else
791 mode = X86EMUL_MODE_PROT32;
792 }
793#endif
794 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
795 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
796 rc = assign_eip(ctxt, dst, mode);
797 if (rc == X86EMUL_CONTINUE)
798 ctxt->mode = mode;
799 return rc;
800}
801
802static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
803{
804 return assign_eip_near(ctxt, ctxt->_eip + rel);
805}
806
807static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
808 struct segmented_address addr,
809 void *data,
810 unsigned size)
811{
812 int rc;
813 ulong linear;
814
815 rc = linearize(ctxt, addr, size, false, &linear);
816 if (rc != X86EMUL_CONTINUE)
817 return rc;
818 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
819}
820
821static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
822 struct segmented_address addr,
823 void *data,
824 unsigned int size)
825{
826 int rc;
827 ulong linear;
828
829 rc = linearize(ctxt, addr, size, true, &linear);
830 if (rc != X86EMUL_CONTINUE)
831 return rc;
832 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
833}
834
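/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */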
839static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
840{
841 int rc;
842 unsigned size, max_size;
843 unsigned long linear;
844 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
845 struct segmented_address addr = { .seg = VCPU_SREG_CS,
846 .ea = ctxt->eip + cur_size };
847
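	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */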
858 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
859 &linear);
860 if (unlikely(rc != X86EMUL_CONTINUE))
861 return rc;
862
863 size = min_t(unsigned, 15UL ^ cur_size, max_size);
864 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
865
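	/*
	 * One instruction can only cross the boundary between two pages,
	 * and one page's worth has already been fetched at decode time.
	 * So, if not enough bytes are available, we must have hit the
	 * 15-byte instruction length limit.
	 */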
872 if (unlikely(size < op_size))
873 return emulate_gp(ctxt, 0);
874
875 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
876 size, &ctxt->exception);
877 if (unlikely(rc != X86EMUL_CONTINUE))
878 return rc;
879 ctxt->fetch.end += size;
880 return X86EMUL_CONTINUE;
881}
882
883static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
884 unsigned size)
885{
886 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
887
888 if (unlikely(done_size < size))
889 return __do_insn_fetch_bytes(ctxt, size - done_size);
890 else
891 return X86EMUL_CONTINUE;
892}
893
894
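/* Fetch next part of the instruction being emulated. */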
895#define insn_fetch(_type, _ctxt) \
896({ _type _x; \
897 \
898 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
899 if (rc != X86EMUL_CONTINUE) \
900 goto done; \
901 ctxt->_eip += sizeof(_type); \
902 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
903 ctxt->fetch.ptr += sizeof(_type); \
904 _x; \
905})
906
907#define insn_fetch_arr(_arr, _size, _ctxt) \
908({ \
909 rc = do_insn_fetch_bytes(_ctxt, _size); \
910 if (rc != X86EMUL_CONTINUE) \
911 goto done; \
912 ctxt->_eip += (_size); \
913 memcpy(_arr, ctxt->fetch.ptr, _size); \
914 ctxt->fetch.ptr += (_size); \
915})
916
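/*
 * Given the 'reg' portion of a ModR/M byte, return a pointer to the
 * corresponding general-purpose register in the emulation context.  For
 * byte operands without a REX prefix, registers 4-7 address the legacy
 * high-byte registers AH, CH, DH and BH.
 */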
922static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
923 int byteop)
924{
925 void *p;
926 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
927
928 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
929 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
930 else
931 p = reg_rmw(ctxt, modrm_reg);
932 return p;
933}
934
935static int read_descriptor(struct x86_emulate_ctxt *ctxt,
936 struct segmented_address addr,
937 u16 *size, unsigned long *address, int op_bytes)
938{
939 int rc;
940
941 if (op_bytes == 2)
942 op_bytes = 3;
943 *address = 0;
944 rc = segmented_read_std(ctxt, addr, size, 2);
945 if (rc != X86EMUL_CONTINUE)
946 return rc;
947 addr.ea += 2;
948 rc = segmented_read_std(ctxt, addr, address, op_bytes);
949 return rc;
950}
951
952FASTOP2(add);
953FASTOP2(or);
954FASTOP2(adc);
955FASTOP2(sbb);
956FASTOP2(and);
957FASTOP2(sub);
958FASTOP2(xor);
959FASTOP2(cmp);
960FASTOP2(test);
961
962FASTOP1SRC2(mul, mul_ex);
963FASTOP1SRC2(imul, imul_ex);
964FASTOP1SRC2EX(div, div_ex);
965FASTOP1SRC2EX(idiv, idiv_ex);
966
967FASTOP3WCL(shld);
968FASTOP3WCL(shrd);
969
970FASTOP2W(imul);
971
972FASTOP1(not);
973FASTOP1(neg);
974FASTOP1(inc);
975FASTOP1(dec);
976
977FASTOP2CL(rol);
978FASTOP2CL(ror);
979FASTOP2CL(rcl);
980FASTOP2CL(rcr);
981FASTOP2CL(shl);
982FASTOP2CL(shr);
983FASTOP2CL(sar);
984
985FASTOP2W(bsf);
986FASTOP2W(bsr);
987FASTOP2W(bt);
988FASTOP2W(bts);
989FASTOP2W(btr);
990FASTOP2W(btc);
991
992FASTOP2(xadd);
993
994FASTOP2R(cmp, cmp_r);
995
996static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
997{
998
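	/* If src is zero, do not writeback, but update flags */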
999 if (ctxt->src.val == 0)
1000 ctxt->dst.type = OP_NONE;
1001 return fastop(ctxt, em_bsf);
1002}
1003
1004static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1005{
1006
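	/* If src is zero, do not writeback, but update flags */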
1007 if (ctxt->src.val == 0)
1008 ctxt->dst.type = OP_NONE;
1009 return fastop(ctxt, em_bsr);
1010}
1011
1012static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1013{
1014 u8 rc;
1015 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1016
1017 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1018 asm("push %[flags]; popf; call *%[fastop]"
1019 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
1020 return rc;
1021}
1022
1023static void fetch_register_operand(struct operand *op)
1024{
1025 switch (op->bytes) {
1026 case 1:
1027 op->val = *(u8 *)op->addr.reg;
1028 break;
1029 case 2:
1030 op->val = *(u16 *)op->addr.reg;
1031 break;
1032 case 4:
1033 op->val = *(u32 *)op->addr.reg;
1034 break;
1035 case 8:
1036 op->val = *(u64 *)op->addr.reg;
1037 break;
1038 }
1039}
1040
1041static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1042{
1043 ctxt->ops->get_fpu(ctxt);
1044 switch (reg) {
1045 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1046 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1047 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1048 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1049 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1050 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1051 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1052 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1053#ifdef CONFIG_X86_64
1054 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1055 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1056 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1057 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1058 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1059 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1060 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1061 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1062#endif
1063 default: BUG();
1064 }
1065 ctxt->ops->put_fpu(ctxt);
1066}
1067
1068static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1069 int reg)
1070{
1071 ctxt->ops->get_fpu(ctxt);
1072 switch (reg) {
1073 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1074 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1075 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1076 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1077 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1078 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1079 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1080 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1081#ifdef CONFIG_X86_64
1082 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1083 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1084 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1085 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1086 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1087 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1088 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1089 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1090#endif
1091 default: BUG();
1092 }
1093 ctxt->ops->put_fpu(ctxt);
1094}
1095
1096static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1097{
1098 ctxt->ops->get_fpu(ctxt);
1099 switch (reg) {
1100 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1101 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1102 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1103 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1104 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1105 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1106 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1107 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1108 default: BUG();
1109 }
1110 ctxt->ops->put_fpu(ctxt);
1111}
1112
1113static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1114{
1115 ctxt->ops->get_fpu(ctxt);
1116 switch (reg) {
1117 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1118 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1119 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1120 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1121 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1122 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1123 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1124 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1125 default: BUG();
1126 }
1127 ctxt->ops->put_fpu(ctxt);
1128}
1129
1130static int em_fninit(struct x86_emulate_ctxt *ctxt)
1131{
1132 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1133 return emulate_nm(ctxt);
1134
1135 ctxt->ops->get_fpu(ctxt);
1136 asm volatile("fninit");
1137 ctxt->ops->put_fpu(ctxt);
1138 return X86EMUL_CONTINUE;
1139}
1140
1141static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1142{
1143 u16 fcw;
1144
1145 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1146 return emulate_nm(ctxt);
1147
1148 ctxt->ops->get_fpu(ctxt);
1149 asm volatile("fnstcw %0": "+m"(fcw));
1150 ctxt->ops->put_fpu(ctxt);
1151
1152 ctxt->dst.val = fcw;
1153
1154 return X86EMUL_CONTINUE;
1155}
1156
1157static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1158{
1159 u16 fsw;
1160
1161 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1162 return emulate_nm(ctxt);
1163
1164 ctxt->ops->get_fpu(ctxt);
1165 asm volatile("fnstsw %0": "+m"(fsw));
1166 ctxt->ops->put_fpu(ctxt);
1167
1168 ctxt->dst.val = fsw;
1169
1170 return X86EMUL_CONTINUE;
1171}
1172
1173static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1174 struct operand *op)
1175{
1176 unsigned reg = ctxt->modrm_reg;
1177
1178 if (!(ctxt->d & ModRM))
1179 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1180
1181 if (ctxt->d & Sse) {
1182 op->type = OP_XMM;
1183 op->bytes = 16;
1184 op->addr.xmm = reg;
1185 read_sse_reg(ctxt, &op->vec_val, reg);
1186 return;
1187 }
1188 if (ctxt->d & Mmx) {
1189 reg &= 7;
1190 op->type = OP_MM;
1191 op->bytes = 8;
1192 op->addr.mm = reg;
1193 return;
1194 }
1195
1196 op->type = OP_REG;
1197 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1198 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1199
1200 fetch_register_operand(op);
1201 op->orig_val = op->val;
1202}
1203
1204static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1205{
1206 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1207 ctxt->modrm_seg = VCPU_SREG_SS;
1208}
1209
1210static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1211 struct operand *op)
1212{
1213 u8 sib;
1214 int index_reg, base_reg, scale;
1215 int rc = X86EMUL_CONTINUE;
1216 ulong modrm_ea = 0;
1217
1218 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1219 index_reg = (ctxt->rex_prefix << 2) & 8;
1220 base_reg = (ctxt->rex_prefix << 3) & 8;
1221
1222 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1223 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1224 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1225 ctxt->modrm_seg = VCPU_SREG_DS;
1226
1227 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1228 op->type = OP_REG;
1229 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1230 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1231 ctxt->d & ByteOp);
1232 if (ctxt->d & Sse) {
1233 op->type = OP_XMM;
1234 op->bytes = 16;
1235 op->addr.xmm = ctxt->modrm_rm;
1236 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1237 return rc;
1238 }
1239 if (ctxt->d & Mmx) {
1240 op->type = OP_MM;
1241 op->bytes = 8;
1242 op->addr.mm = ctxt->modrm_rm & 7;
1243 return rc;
1244 }
1245 fetch_register_operand(op);
1246 return rc;
1247 }
1248
1249 op->type = OP_MEM;
1250
1251 if (ctxt->ad_bytes == 2) {
1252 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1253 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1254 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1255 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1256
1257
1258 switch (ctxt->modrm_mod) {
1259 case 0:
1260 if (ctxt->modrm_rm == 6)
1261 modrm_ea += insn_fetch(u16, ctxt);
1262 break;
1263 case 1:
1264 modrm_ea += insn_fetch(s8, ctxt);
1265 break;
1266 case 2:
1267 modrm_ea += insn_fetch(u16, ctxt);
1268 break;
1269 }
1270 switch (ctxt->modrm_rm) {
1271 case 0:
1272 modrm_ea += bx + si;
1273 break;
1274 case 1:
1275 modrm_ea += bx + di;
1276 break;
1277 case 2:
1278 modrm_ea += bp + si;
1279 break;
1280 case 3:
1281 modrm_ea += bp + di;
1282 break;
1283 case 4:
1284 modrm_ea += si;
1285 break;
1286 case 5:
1287 modrm_ea += di;
1288 break;
1289 case 6:
1290 if (ctxt->modrm_mod != 0)
1291 modrm_ea += bp;
1292 break;
1293 case 7:
1294 modrm_ea += bx;
1295 break;
1296 }
1297 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1298 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1299 ctxt->modrm_seg = VCPU_SREG_SS;
1300 modrm_ea = (u16)modrm_ea;
1301 } else {
1302
1303 if ((ctxt->modrm_rm & 7) == 4) {
1304 sib = insn_fetch(u8, ctxt);
1305 index_reg |= (sib >> 3) & 7;
1306 base_reg |= sib & 7;
1307 scale = sib >> 6;
1308
1309 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1310 modrm_ea += insn_fetch(s32, ctxt);
1311 else {
1312 modrm_ea += reg_read(ctxt, base_reg);
1313 adjust_modrm_seg(ctxt, base_reg);
1314
1315 if ((ctxt->d & IncSP) &&
1316 base_reg == VCPU_REGS_RSP)
1317 modrm_ea += ctxt->op_bytes;
1318 }
1319 if (index_reg != 4)
1320 modrm_ea += reg_read(ctxt, index_reg) << scale;
1321 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1322 modrm_ea += insn_fetch(s32, ctxt);
1323 if (ctxt->mode == X86EMUL_MODE_PROT64)
1324 ctxt->rip_relative = 1;
1325 } else {
1326 base_reg = ctxt->modrm_rm;
1327 modrm_ea += reg_read(ctxt, base_reg);
1328 adjust_modrm_seg(ctxt, base_reg);
1329 }
1330 switch (ctxt->modrm_mod) {
1331 case 1:
1332 modrm_ea += insn_fetch(s8, ctxt);
1333 break;
1334 case 2:
1335 modrm_ea += insn_fetch(s32, ctxt);
1336 break;
1337 }
1338 }
1339 op->addr.mem.ea = modrm_ea;
1340 if (ctxt->ad_bytes != 8)
1341 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1342
1343done:
1344 return rc;
1345}
1346
1347static int decode_abs(struct x86_emulate_ctxt *ctxt,
1348 struct operand *op)
1349{
1350 int rc = X86EMUL_CONTINUE;
1351
1352 op->type = OP_MEM;
1353 switch (ctxt->ad_bytes) {
1354 case 2:
1355 op->addr.mem.ea = insn_fetch(u16, ctxt);
1356 break;
1357 case 4:
1358 op->addr.mem.ea = insn_fetch(u32, ctxt);
1359 break;
1360 case 8:
1361 op->addr.mem.ea = insn_fetch(u64, ctxt);
1362 break;
1363 }
1364done:
1365 return rc;
1366}
1367
1368static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1369{
1370 long sv = 0, mask;
1371
1372 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1373 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1374
1375 if (ctxt->src.bytes == 2)
1376 sv = (s16)ctxt->src.val & (s16)mask;
1377 else if (ctxt->src.bytes == 4)
1378 sv = (s32)ctxt->src.val & (s32)mask;
1379 else
1380 sv = (s64)ctxt->src.val & (s64)mask;
1381
1382 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1383 ctxt->dst.addr.mem.ea + (sv >> 3));
1384 }
1385
1386
1387 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1388}
1389
1390static int read_emulated(struct x86_emulate_ctxt *ctxt,
1391 unsigned long addr, void *dest, unsigned size)
1392{
1393 int rc;
1394 struct read_cache *mc = &ctxt->mem_read;
1395
1396 if (mc->pos < mc->end)
1397 goto read_cached;
1398
1399 WARN_ON((mc->end + size) >= sizeof(mc->data));
1400
1401 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1402 &ctxt->exception);
1403 if (rc != X86EMUL_CONTINUE)
1404 return rc;
1405
1406 mc->end += size;
1407
1408read_cached:
1409 memcpy(dest, mc->data + mc->pos, size);
1410 mc->pos += size;
1411 return X86EMUL_CONTINUE;
1412}
1413
1414static int segmented_read(struct x86_emulate_ctxt *ctxt,
1415 struct segmented_address addr,
1416 void *data,
1417 unsigned size)
1418{
1419 int rc;
1420 ulong linear;
1421
1422 rc = linearize(ctxt, addr, size, false, &linear);
1423 if (rc != X86EMUL_CONTINUE)
1424 return rc;
1425 return read_emulated(ctxt, linear, data, size);
1426}
1427
1428static int segmented_write(struct x86_emulate_ctxt *ctxt,
1429 struct segmented_address addr,
1430 const void *data,
1431 unsigned size)
1432{
1433 int rc;
1434 ulong linear;
1435
1436 rc = linearize(ctxt, addr, size, true, &linear);
1437 if (rc != X86EMUL_CONTINUE)
1438 return rc;
1439 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1440 &ctxt->exception);
1441}
1442
1443static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1444 struct segmented_address addr,
1445 const void *orig_data, const void *data,
1446 unsigned size)
1447{
1448 int rc;
1449 ulong linear;
1450
1451 rc = linearize(ctxt, addr, size, true, &linear);
1452 if (rc != X86EMUL_CONTINUE)
1453 return rc;
1454 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1455 size, &ctxt->exception);
1456}
1457
1458static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1459 unsigned int size, unsigned short port,
1460 void *dest)
1461{
1462 struct read_cache *rc = &ctxt->io_read;
1463
1464 if (rc->pos == rc->end) {
1465 unsigned int in_page, n;
1466 unsigned int count = ctxt->rep_prefix ?
1467 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1468 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1469 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1470 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1471 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1472 if (n == 0)
1473 n = 1;
1474 rc->pos = rc->end = 0;
1475 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1476 return 0;
1477 rc->end = n * size;
1478 }
1479
1480 if (ctxt->rep_prefix && (ctxt->d & String) &&
1481 !(ctxt->eflags & X86_EFLAGS_DF)) {
1482 ctxt->dst.data = rc->data + rc->pos;
1483 ctxt->dst.type = OP_MEM_STR;
1484 ctxt->dst.count = (rc->end - rc->pos) / size;
1485 rc->pos = rc->end;
1486 } else {
1487 memcpy(dest, rc->data + rc->pos, size);
1488 rc->pos += size;
1489 }
1490 return 1;
1491}
1492
1493static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1494 u16 index, struct desc_struct *desc)
1495{
1496 struct desc_ptr dt;
1497 ulong addr;
1498
1499 ctxt->ops->get_idt(ctxt, &dt);
1500
1501 if (dt.size < index * 8 + 7)
1502 return emulate_gp(ctxt, index << 3 | 0x2);
1503
1504 addr = dt.address + index * 8;
1505 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1506 &ctxt->exception);
1507}
1508
1509static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1510 u16 selector, struct desc_ptr *dt)
1511{
1512 const struct x86_emulate_ops *ops = ctxt->ops;
1513 u32 base3 = 0;
1514
1515 if (selector & 1 << 2) {
1516 struct desc_struct desc;
1517 u16 sel;
1518
1519 memset (dt, 0, sizeof *dt);
1520 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1521 VCPU_SREG_LDTR))
1522 return;
1523
1524 dt->size = desc_limit_scaled(&desc);
1525 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1526 } else
1527 ops->get_gdt(ctxt, dt);
1528}
1529
1530static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1531 u16 selector, ulong *desc_addr_p)
1532{
1533 struct desc_ptr dt;
1534 u16 index = selector >> 3;
1535 ulong addr;
1536
1537 get_descriptor_table_ptr(ctxt, selector, &dt);
1538
1539 if (dt.size < index * 8 + 7)
1540 return emulate_gp(ctxt, selector & 0xfffc);
1541
1542 addr = dt.address + index * 8;
1543
1544#ifdef CONFIG_X86_64
1545 if (addr >> 32 != 0) {
1546 u64 efer = 0;
1547
1548 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1549 if (!(efer & EFER_LMA))
1550 addr &= (u32)-1;
1551 }
1552#endif
1553
1554 *desc_addr_p = addr;
1555 return X86EMUL_CONTINUE;
1556}
1557
1558
1559static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1560 u16 selector, struct desc_struct *desc,
1561 ulong *desc_addr_p)
1562{
1563 int rc;
1564
1565 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1566 if (rc != X86EMUL_CONTINUE)
1567 return rc;
1568
1569 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1570 &ctxt->exception);
1571}
1572
1573
1574static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1575 u16 selector, struct desc_struct *desc)
1576{
1577 int rc;
1578 ulong addr;
1579
1580 rc = get_descriptor_ptr(ctxt, selector, &addr);
1581 if (rc != X86EMUL_CONTINUE)
1582 return rc;
1583
1584 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1585 &ctxt->exception);
1586}
1587
1588static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1589 u16 selector, int seg, u8 cpl,
1590 enum x86_transfer_type transfer,
1591 struct desc_struct *desc)
1592{
1593 struct desc_struct seg_desc, old_desc;
1594 u8 dpl, rpl;
1595 unsigned err_vec = GP_VECTOR;
1596 u32 err_code = 0;
1597 bool null_selector = !(selector & ~0x3);
1598 ulong desc_addr;
1599 int ret;
1600 u16 dummy;
1601 u32 base3 = 0;
1602
1603 memset(&seg_desc, 0, sizeof seg_desc);
1604
1605 if (ctxt->mode == X86EMUL_MODE_REAL) {
1606
1607
1608 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1609 set_desc_base(&seg_desc, selector << 4);
1610 goto load;
1611 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1612
1613 set_desc_base(&seg_desc, selector << 4);
1614 set_desc_limit(&seg_desc, 0xffff);
1615 seg_desc.type = 3;
1616 seg_desc.p = 1;
1617 seg_desc.s = 1;
1618 seg_desc.dpl = 3;
1619 goto load;
1620 }
1621
1622 rpl = selector & 3;
1623
1624
1625 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1626 goto exception;
1627
1628
1629 if (null_selector) {
1630 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1631 goto exception;
1632
1633 if (seg == VCPU_SREG_SS) {
1634 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1635 goto exception;
1636
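			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */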
1641 seg_desc.type = 3;
1642 seg_desc.p = 1;
1643 seg_desc.s = 1;
1644 seg_desc.dpl = cpl;
1645 seg_desc.d = 1;
1646 seg_desc.g = 1;
1647 }
1648
1649
1650 goto load;
1651 }
1652
1653 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1654 if (ret != X86EMUL_CONTINUE)
1655 return ret;
1656
1657 err_code = selector & 0xfffc;
1658 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1659 GP_VECTOR;
1660
1661
1662 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1663 if (transfer == X86_TRANSFER_CALL_JMP)
1664 return X86EMUL_UNHANDLEABLE;
1665 goto exception;
1666 }
1667
1668 if (!seg_desc.p) {
1669 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1670 goto exception;
1671 }
1672
1673 dpl = seg_desc.dpl;
1674
1675 switch (seg) {
1676 case VCPU_SREG_SS:
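		/*
		 * SS must be a writable data segment, and its RPL and DPL
		 * must both equal CPL.
		 */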
1681 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1682 goto exception;
1683 break;
1684 case VCPU_SREG_CS:
1685 if (!(seg_desc.type & 8))
1686 goto exception;
1687
1688 if (seg_desc.type & 4) {
1689
1690 if (dpl > cpl)
1691 goto exception;
1692 } else {
1693
1694 if (rpl > cpl || dpl != cpl)
1695 goto exception;
1696 }
1697
1698 if (seg_desc.d && seg_desc.l) {
1699 u64 efer = 0;
1700
1701 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1702 if (efer & EFER_LMA)
1703 goto exception;
1704 }
1705
1706
1707 selector = (selector & 0xfffc) | cpl;
1708 break;
1709 case VCPU_SREG_TR:
1710 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1711 goto exception;
1712 old_desc = seg_desc;
1713 seg_desc.type |= 2;
1714 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1715 sizeof(seg_desc), &ctxt->exception);
1716 if (ret != X86EMUL_CONTINUE)
1717 return ret;
1718 break;
1719 case VCPU_SREG_LDTR:
1720 if (seg_desc.s || seg_desc.type != 2)
1721 goto exception;
1722 break;
1723 default:
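		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */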
1729 if ((seg_desc.type & 0xa) == 0x8 ||
1730 (((seg_desc.type & 0xc) != 0xc) &&
1731 (rpl > dpl && cpl > dpl)))
1732 goto exception;
1733 break;
1734 }
1735
1736 if (seg_desc.s) {
1737
1738 if (!(seg_desc.type & 1)) {
1739 seg_desc.type |= 1;
1740 ret = write_segment_descriptor(ctxt, selector,
1741 &seg_desc);
1742 if (ret != X86EMUL_CONTINUE)
1743 return ret;
1744 }
1745 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1746 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1747 sizeof(base3), &ctxt->exception);
1748 if (ret != X86EMUL_CONTINUE)
1749 return ret;
1750 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1751 ((u64)base3 << 32)))
1752 return emulate_gp(ctxt, 0);
1753 }
1754load:
1755 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1756 if (desc)
1757 *desc = seg_desc;
1758 return X86EMUL_CONTINUE;
1759exception:
1760 return emulate_exception(ctxt, err_vec, err_code, true);
1761}
1762
1763static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1764 u16 selector, int seg)
1765{
1766 u8 cpl = ctxt->ops->cpl(ctxt);
1767
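	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3.
	 *
	 * Loading SS with selector 3 in 64-bit mode is forbidden here, but
	 * still allowed in __load_segment_descriptor so that interrupt
	 * delivery with IST=1/DPL=3 can set SS=3.
	 */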
1778 if (seg == VCPU_SREG_SS && selector == 3 &&
1779 ctxt->mode == X86EMUL_MODE_PROT64)
1780 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1781
1782 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1783 X86_TRANSFER_NONE, NULL);
1784}
1785
1786static void write_register_operand(struct operand *op)
1787{
1788 return assign_register(op->addr.reg, op->val, op->bytes);
1789}
1790
1791static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1792{
1793 switch (op->type) {
1794 case OP_REG:
1795 write_register_operand(op);
1796 break;
1797 case OP_MEM:
1798 if (ctxt->lock_prefix)
1799 return segmented_cmpxchg(ctxt,
1800 op->addr.mem,
1801 &op->orig_val,
1802 &op->val,
1803 op->bytes);
1804 else
1805 return segmented_write(ctxt,
1806 op->addr.mem,
1807 &op->val,
1808 op->bytes);
1809 break;
1810 case OP_MEM_STR:
1811 return segmented_write(ctxt,
1812 op->addr.mem,
1813 op->data,
1814 op->bytes * op->count);
1815 break;
1816 case OP_XMM:
1817 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1818 break;
1819 case OP_MM:
1820 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1821 break;
1822 case OP_NONE:
1823
1824 break;
1825 default:
1826 break;
1827 }
1828 return X86EMUL_CONTINUE;
1829}
1830
1831static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1832{
1833 struct segmented_address addr;
1834
1835 rsp_increment(ctxt, -bytes);
1836 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1837 addr.seg = VCPU_SREG_SS;
1838
1839 return segmented_write(ctxt, addr, data, bytes);
1840}
1841
1842static int em_push(struct x86_emulate_ctxt *ctxt)
1843{
1844
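	/* Disable writeback. */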
1845 ctxt->dst.type = OP_NONE;
1846 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1847}
1848
1849static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1850 void *dest, int len)
1851{
1852 int rc;
1853 struct segmented_address addr;
1854
1855 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1856 addr.seg = VCPU_SREG_SS;
1857 rc = segmented_read(ctxt, addr, dest, len);
1858 if (rc != X86EMUL_CONTINUE)
1859 return rc;
1860
1861 rsp_increment(ctxt, len);
1862 return rc;
1863}
1864
1865static int em_pop(struct x86_emulate_ctxt *ctxt)
1866{
1867 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1868}
1869
1870static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1871 void *dest, int len)
1872{
1873 int rc;
1874 unsigned long val, change_mask;
1875 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1876 int cpl = ctxt->ops->cpl(ctxt);
1877
1878 rc = emulate_pop(ctxt, &val, len);
1879 if (rc != X86EMUL_CONTINUE)
1880 return rc;
1881
1882 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1883 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1884 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1885 X86_EFLAGS_AC | X86_EFLAGS_ID;
1886
1887 switch(ctxt->mode) {
1888 case X86EMUL_MODE_PROT64:
1889 case X86EMUL_MODE_PROT32:
1890 case X86EMUL_MODE_PROT16:
1891 if (cpl == 0)
1892 change_mask |= X86_EFLAGS_IOPL;
1893 if (cpl <= iopl)
1894 change_mask |= X86_EFLAGS_IF;
1895 break;
1896 case X86EMUL_MODE_VM86:
1897 if (iopl < 3)
1898 return emulate_gp(ctxt, 0);
1899 change_mask |= X86_EFLAGS_IF;
1900 break;
1901 default:
1902 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1903 break;
1904 }
1905
1906 *(unsigned long *)dest =
1907 (ctxt->eflags & ~change_mask) | (val & change_mask);
1908
1909 return rc;
1910}
1911
1912static int em_popf(struct x86_emulate_ctxt *ctxt)
1913{
1914 ctxt->dst.type = OP_REG;
1915 ctxt->dst.addr.reg = &ctxt->eflags;
1916 ctxt->dst.bytes = ctxt->op_bytes;
1917 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1918}
1919
1920static int em_enter(struct x86_emulate_ctxt *ctxt)
1921{
1922 int rc;
1923 unsigned frame_size = ctxt->src.val;
1924 unsigned nesting_level = ctxt->src2.val & 31;
1925 ulong rbp;
1926
1927 if (nesting_level)
1928 return X86EMUL_UNHANDLEABLE;
1929
1930 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1931 rc = push(ctxt, &rbp, stack_size(ctxt));
1932 if (rc != X86EMUL_CONTINUE)
1933 return rc;
1934 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1935 stack_mask(ctxt));
1936 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1937 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1938 stack_mask(ctxt));
1939 return X86EMUL_CONTINUE;
1940}
1941
1942static int em_leave(struct x86_emulate_ctxt *ctxt)
1943{
1944 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1945 stack_mask(ctxt));
1946 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1947}
1948
1949static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1950{
1951 int seg = ctxt->src2.val;
1952
1953 ctxt->src.val = get_segment_selector(ctxt, seg);
1954 if (ctxt->op_bytes == 4) {
1955 rsp_increment(ctxt, -2);
1956 ctxt->op_bytes = 2;
1957 }
1958
1959 return em_push(ctxt);
1960}
1961
1962static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1963{
1964 int seg = ctxt->src2.val;
1965 unsigned long selector;
1966 int rc;
1967
1968 rc = emulate_pop(ctxt, &selector, 2);
1969 if (rc != X86EMUL_CONTINUE)
1970 return rc;
1971
1972 if (ctxt->modrm_reg == VCPU_SREG_SS)
1973 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1974 if (ctxt->op_bytes > 2)
1975 rsp_increment(ctxt, ctxt->op_bytes - 2);
1976
1977 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1978 return rc;
1979}
1980
1981static int em_pusha(struct x86_emulate_ctxt *ctxt)
1982{
1983 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1984 int rc = X86EMUL_CONTINUE;
1985 int reg = VCPU_REGS_RAX;
1986
1987 while (reg <= VCPU_REGS_RDI) {
1988 (reg == VCPU_REGS_RSP) ?
1989 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1990
1991 rc = em_push(ctxt);
1992 if (rc != X86EMUL_CONTINUE)
1993 return rc;
1994
1995 ++reg;
1996 }
1997
1998 return rc;
1999}
2000
2001static int em_pushf(struct x86_emulate_ctxt *ctxt)
2002{
2003 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2004 return em_push(ctxt);
2005}
2006
2007static int em_popa(struct x86_emulate_ctxt *ctxt)
2008{
2009 int rc = X86EMUL_CONTINUE;
2010 int reg = VCPU_REGS_RDI;
2011 u32 val;
2012
2013 while (reg >= VCPU_REGS_RAX) {
2014 if (reg == VCPU_REGS_RSP) {
2015 rsp_increment(ctxt, ctxt->op_bytes);
2016 --reg;
2017 }
2018
2019 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2020 if (rc != X86EMUL_CONTINUE)
2021 break;
2022 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2023 --reg;
2024 }
2025 return rc;
2026}
2027
2028static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2029{
2030 const struct x86_emulate_ops *ops = ctxt->ops;
2031 int rc;
2032 struct desc_ptr dt;
2033 gva_t cs_addr;
2034 gva_t eip_addr;
2035 u16 cs, eip;
2036
2037
2038 ctxt->src.val = ctxt->eflags;
2039 rc = em_push(ctxt);
2040 if (rc != X86EMUL_CONTINUE)
2041 return rc;
2042
2043 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2044
2045 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2046 rc = em_push(ctxt);
2047 if (rc != X86EMUL_CONTINUE)
2048 return rc;
2049
2050 ctxt->src.val = ctxt->_eip;
2051 rc = em_push(ctxt);
2052 if (rc != X86EMUL_CONTINUE)
2053 return rc;
2054
2055 ops->get_idt(ctxt, &dt);
2056
2057 eip_addr = dt.address + (irq << 2);
2058 cs_addr = dt.address + (irq << 2) + 2;
2059
2060 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
2061 if (rc != X86EMUL_CONTINUE)
2062 return rc;
2063
2064 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
2065 if (rc != X86EMUL_CONTINUE)
2066 return rc;
2067
2068 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2069 if (rc != X86EMUL_CONTINUE)
2070 return rc;
2071
2072 ctxt->_eip = eip;
2073
2074 return rc;
2075}
2076
2077int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2078{
2079 int rc;
2080
2081 invalidate_registers(ctxt);
2082 rc = __emulate_int_real(ctxt, irq);
2083 if (rc == X86EMUL_CONTINUE)
2084 writeback_registers(ctxt);
2085 return rc;
2086}
2087
2088static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2089{
2090 switch(ctxt->mode) {
2091 case X86EMUL_MODE_REAL:
2092 return __emulate_int_real(ctxt, irq);
2093 case X86EMUL_MODE_VM86:
2094 case X86EMUL_MODE_PROT16:
2095 case X86EMUL_MODE_PROT32:
2096 case X86EMUL_MODE_PROT64:
2097 default:
2098
2099 return X86EMUL_UNHANDLEABLE;
2100 }
2101}
2102
2103static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2104{
2105 int rc = X86EMUL_CONTINUE;
2106 unsigned long temp_eip = 0;
2107 unsigned long temp_eflags = 0;
2108 unsigned long cs = 0;
2109 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2110 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2111 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2112 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2113 X86_EFLAGS_AC | X86_EFLAGS_ID |
2114 X86_EFLAGS_FIXED;
2115 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2116 X86_EFLAGS_VIP;
2117
2118
2119
2120 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2121
2122 if (rc != X86EMUL_CONTINUE)
2123 return rc;
2124
2125 if (temp_eip & ~0xffff)
2126 return emulate_gp(ctxt, 0);
2127
2128 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2129
2130 if (rc != X86EMUL_CONTINUE)
2131 return rc;
2132
2133 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2134
2135 if (rc != X86EMUL_CONTINUE)
2136 return rc;
2137
2138 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2139
2140 if (rc != X86EMUL_CONTINUE)
2141 return rc;
2142
2143 ctxt->_eip = temp_eip;
2144
2145 if (ctxt->op_bytes == 4)
2146 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2147 else if (ctxt->op_bytes == 2) {
2148 ctxt->eflags &= ~0xffff;
2149 ctxt->eflags |= temp_eflags;
2150 }
2151
2152 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2153 ctxt->eflags |= X86_EFLAGS_FIXED;
2154 ctxt->ops->set_nmi_mask(ctxt, false);
2155
2156 return rc;
2157}
2158
2159static int em_iret(struct x86_emulate_ctxt *ctxt)
2160{
2161 switch(ctxt->mode) {
2162 case X86EMUL_MODE_REAL:
2163 return emulate_iret_real(ctxt);
2164 case X86EMUL_MODE_VM86:
2165 case X86EMUL_MODE_PROT16:
2166 case X86EMUL_MODE_PROT32:
2167 case X86EMUL_MODE_PROT64:
2168 default:
2169
2170 return X86EMUL_UNHANDLEABLE;
2171 }
2172}
2173
2174static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2175{
2176 int rc;
2177 unsigned short sel;
2178 struct desc_struct new_desc;
2179 u8 cpl = ctxt->ops->cpl(ctxt);
2180
2181 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2182
2183 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2184 X86_TRANSFER_CALL_JMP,
2185 &new_desc);
2186 if (rc != X86EMUL_CONTINUE)
2187 return rc;
2188
2189 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2190
2191 if (rc != X86EMUL_CONTINUE)
2192 return X86EMUL_UNHANDLEABLE;
2193
2194 return rc;
2195}
2196
2197static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2198{
2199 return assign_eip_near(ctxt, ctxt->src.val);
2200}
2201
2202static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2203{
2204 int rc;
2205 long int old_eip;
2206
2207 old_eip = ctxt->_eip;
2208 rc = assign_eip_near(ctxt, ctxt->src.val);
2209 if (rc != X86EMUL_CONTINUE)
2210 return rc;
2211 ctxt->src.val = old_eip;
2212 rc = em_push(ctxt);
2213 return rc;
2214}
2215
2216static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2217{
2218 u64 old = ctxt->dst.orig_val64;
2219
2220 if (ctxt->dst.bytes == 16)
2221 return X86EMUL_UNHANDLEABLE;
2222
2223 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2224 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2225 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2226 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2227 ctxt->eflags &= ~X86_EFLAGS_ZF;
2228 } else {
2229 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2230 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2231
2232 ctxt->eflags |= X86_EFLAGS_ZF;
2233 }
2234 return X86EMUL_CONTINUE;
2235}
2236
2237static int em_ret(struct x86_emulate_ctxt *ctxt)
2238{
2239 int rc;
2240 unsigned long eip;
2241
2242 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2243 if (rc != X86EMUL_CONTINUE)
2244 return rc;
2245
2246 return assign_eip_near(ctxt, eip);
2247}
2248
2249static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2250{
2251 int rc;
2252 unsigned long eip, cs;
2253 int cpl = ctxt->ops->cpl(ctxt);
2254 struct desc_struct new_desc;
2255
2256 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2257 if (rc != X86EMUL_CONTINUE)
2258 return rc;
2259 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2260 if (rc != X86EMUL_CONTINUE)
2261 return rc;
2262
2263 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2264 return X86EMUL_UNHANDLEABLE;
2265 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2266 X86_TRANSFER_RET,
2267 &new_desc);
2268 if (rc != X86EMUL_CONTINUE)
2269 return rc;
2270 rc = assign_eip_far(ctxt, eip, &new_desc);
2271
2272 if (rc != X86EMUL_CONTINUE)
2273 return X86EMUL_UNHANDLEABLE;
2274
2275 return rc;
2276}
2277
2278static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2279{
2280 int rc;
2281
2282 rc = em_ret_far(ctxt);
2283 if (rc != X86EMUL_CONTINUE)
2284 return rc;
2285 rsp_increment(ctxt, ctxt->src.val);
2286 return X86EMUL_CONTINUE;
2287}
2288
2289static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2290{
2291
2292 ctxt->dst.orig_val = ctxt->dst.val;
2293 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2294 ctxt->src.orig_val = ctxt->src.val;
2295 ctxt->src.val = ctxt->dst.orig_val;
2296 fastop(ctxt, em_cmp);
2297
2298 if (ctxt->eflags & X86_EFLAGS_ZF) {
2299
2300 ctxt->src.type = OP_NONE;
2301 ctxt->dst.val = ctxt->src.orig_val;
2302 } else {
2303
2304 ctxt->src.type = OP_REG;
2305 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2306 ctxt->src.val = ctxt->dst.orig_val;
2307
2308 ctxt->dst.val = ctxt->dst.orig_val;
2309 }
2310 return X86EMUL_CONTINUE;
2311}
2312
2313static int em_lseg(struct x86_emulate_ctxt *ctxt)
2314{
2315 int seg = ctxt->src2.val;
2316 unsigned short sel;
2317 int rc;
2318
2319 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2320
2321 rc = load_segment_descriptor(ctxt, sel, seg);
2322 if (rc != X86EMUL_CONTINUE)
2323 return rc;
2324
2325 ctxt->dst.val = ctxt->src.val;
2326 return rc;
2327}
2328
2329static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2330{
2331 u32 eax, ebx, ecx, edx;
2332
2333 eax = 0x80000001;
2334 ecx = 0;
2335 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2336 return edx & bit(X86_FEATURE_LM);
2337}
2338
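/*
 * Read one field of the SMRAM state-save area.  A failed physical read
 * bails out of the calling function with X86EMUL_UNHANDLEABLE.
 */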
2339#define GET_SMSTATE(type, smbase, offset) \
2340 ({ \
2341 type __val; \
2342 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
2343 sizeof(__val)); \
2344 if (r != X86EMUL_CONTINUE) \
2345 return X86EMUL_UNHANDLEABLE; \
2346 __val; \
2347 })
2348
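/*
 * Unpack segment attributes from the descriptor-flags dword layout used in
 * the save area: type at bit 8, S at 12, DPL at 13, P at 15, AVL/L/D/G at
 * bits 20-23.
 */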
2349static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2350{
2351 desc->g = (flags >> 23) & 1;
2352 desc->d = (flags >> 22) & 1;
2353 desc->l = (flags >> 21) & 1;
2354 desc->avl = (flags >> 20) & 1;
2355 desc->p = (flags >> 15) & 1;
2356 desc->dpl = (flags >> 13) & 3;
2357 desc->s = (flags >> 12) & 1;
2358 desc->type = (flags >> 8) & 15;
2359}
2360
2361static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2362{
2363 struct desc_struct desc;
2364 int offset;
2365 u16 selector;
2366
2367 selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2368
2369 if (n < 3)
2370 offset = 0x7f84 + n * 12;
2371 else
2372 offset = 0x7f2c + (n - 3) * 12;
2373
2374 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2375 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2376 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2377 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2378 return X86EMUL_CONTINUE;
2379}
2380
2381static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2382{
2383 struct desc_struct desc;
2384 int offset;
2385 u16 selector;
2386 u32 base3;
2387
2388 offset = 0x7e00 + n * 16;
2389
2390 selector = GET_SMSTATE(u16, smbase, offset);
2391 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2392 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2393 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2394 base3 = GET_SMSTATE(u32, smbase, offset + 12);
2395
2396 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2397 return X86EMUL_CONTINUE;
2398}
2399
2400static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2401 u64 cr0, u64 cr4)
2402{
2403 int bad;
2404
	/*
	 * CR4 is written first with PCIDE cleared: PCIDE may only be turned
	 * on once CR0/EFER describe a long-mode environment, so it is
	 * re-enabled separately at the end if the target CR4 asks for it.
	 */
2410 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2411 if (bad)
2412 return X86EMUL_UNHANDLEABLE;
2413
2414 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2415 if (bad)
2416 return X86EMUL_UNHANDLEABLE;
2417
2418 if (cr4 & X86_CR4_PCIDE) {
2419 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2420 if (bad)
2421 return X86EMUL_UNHANDLEABLE;
2422 }
2423
2424 return X86EMUL_CONTINUE;
2425}
2426
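/*
 * Restore the 32-bit register, descriptor-table and control-register state
 * from the SMRAM save area.  The smbase argument already includes the
 * 0x8000 offset added by em_rsm(), so the offsets below land in the
 * state-save region at the top of SMRAM.
 */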
2427static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2428{
2429 struct desc_struct desc;
2430 struct desc_ptr dt;
2431 u16 selector;
2432 u32 val, cr0, cr4;
2433 int i;
2434
2435 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2436 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
2437 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2438 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2439
2440 for (i = 0; i < 8; i++)
2441 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2442
2443 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2444 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2445 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2446 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2447
2448 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2449 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2450 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2451 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2452 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2453
2454 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2455 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2456 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2457 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2458 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2459
2460 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2461 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2462 ctxt->ops->set_gdt(ctxt, &dt);
2463
2464 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2465 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2466 ctxt->ops->set_idt(ctxt, &dt);
2467
2468 for (i = 0; i < 6; i++) {
2469 int r = rsm_load_seg_32(ctxt, smbase, i);
2470 if (r != X86EMUL_CONTINUE)
2471 return r;
2472 }
2473
2474 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2475
2476 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2477
2478 return rsm_enter_protected_mode(ctxt, cr0, cr4);
2479}
2480
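/*
 * 64-bit counterpart of rsm_load_state_32(): restores the GPRs, control and
 * debug registers, EFER (with LMA masked off), TR/LDTR, IDT/GDT and the
 * data/code segments from the 64-bit save-state layout.
 */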
2481static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2482{
2483 struct desc_struct desc;
2484 struct desc_ptr dt;
2485 u64 val, cr0, cr4;
2486 u32 base3;
2487 u16 selector;
2488 int i, r;
2489
2490 for (i = 0; i < 16; i++)
2491 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2492
2493 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2494 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2495
2496 val = GET_SMSTATE(u32, smbase, 0x7f68);
2497 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2498 val = GET_SMSTATE(u32, smbase, 0x7f60);
2499 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2500
2501 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2502 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
2503 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2504 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2505 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2506 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2507
2508 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2509 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2510 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2511 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2512 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2513 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2514
2515 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2516 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2517 ctxt->ops->set_idt(ctxt, &dt);
2518
2519 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2520 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2521 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2522 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2523 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2524 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2525
2526 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2527 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2528 ctxt->ops->set_gdt(ctxt, &dt);
2529
2530 r = rsm_enter_protected_mode(ctxt, cr0, cr4);
2531 if (r != X86EMUL_CONTINUE)
2532 return r;
2533
2534 for (i = 0; i < 6; i++) {
2535 r = rsm_load_seg_64(ctxt, smbase, i);
2536 if (r != X86EMUL_CONTINUE)
2537 return r;
2538 }
2539
2540 return X86EMUL_CONTINUE;
2541}
2542
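/*
 * RSM: leave System Management Mode.  The CPU is first forced into a plain
 * 32-bit environment with paging, protection, PAE and EFER cleared so that
 * the saved CR0/CR3/CR4/EFER values can be reloaded safely, then the SMRAM
 * save area is replayed.
 */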
2543static int em_rsm(struct x86_emulate_ctxt *ctxt)
2544{
2545 unsigned long cr0, cr4, efer;
2546 u64 smbase;
2547 int ret;
2548
2549 if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
2550 return emulate_ud(ctxt);
2551
	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  This is a bit more involved if the vCPU
	 * supports long mode.
	 */
2557 cr4 = ctxt->ops->get_cr(ctxt, 4);
2558 if (emulator_has_longmode(ctxt)) {
2559 struct desc_struct cs_desc;
2560
		/* Zero CR4.PCIDE before CR0.PG is cleared below. */
2562 if (cr4 & X86_CR4_PCIDE) {
2563 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2564 cr4 &= ~X86_CR4_PCIDE;
2565 }
2566
		/* Install a 32-bit code segment so that EFER.LMA can be cleared below. */
2568 memset(&cs_desc, 0, sizeof(cs_desc));
2569 cs_desc.type = 0xb;
2570 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2571 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2572 }
2573
2574
2575 cr0 = ctxt->ops->get_cr(ctxt, 0);
2576 if (cr0 & X86_CR0_PE)
2577 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2578
2579
2580 if (cr4 & X86_CR4_PAE)
2581 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2582
2583
2584 efer = 0;
2585 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2586
2587 smbase = ctxt->ops->get_smbase(ctxt);
2588 if (emulator_has_longmode(ctxt))
2589 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2590 else
2591 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2592
2593 if (ret != X86EMUL_CONTINUE) {
		/* FIXME: a failed RSM reload should arguably shut the vCPU down. */
2595 return X86EMUL_UNHANDLEABLE;
2596 }
2597
2598 if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2599 ctxt->ops->set_nmi_mask(ctxt, false);
2600
2601 ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
2602 ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
2603 return X86EMUL_CONTINUE;
2604}
2605
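/*
 * Build the flat 4GiB code and data segments that SYSCALL/SYSENTER style
 * transfers install for CS and SS.
 */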
2606static void
2607setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2608 struct desc_struct *cs, struct desc_struct *ss)
2609{
2610 cs->l = 0;
2611 set_desc_base(cs, 0);
2612 cs->g = 1;
2613 set_desc_limit(cs, 0xfffff);
2614 cs->type = 0x0b;
2615 cs->s = 1;
2616 cs->dpl = 0;
2617 cs->p = 1;
2618 cs->d = 1;
2619 cs->avl = 0;
2620
2621 set_desc_base(ss, 0);
2622 set_desc_limit(ss, 0xfffff);
2623 ss->g = 1;
2624 ss->s = 1;
2625 ss->type = 0x03;
2626 ss->d = 1;
2627 ss->dpl = 0;
2628 ss->p = 1;
2629 ss->l = 0;
2630 ss->avl = 0;
2631}
2632
2633static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2634{
2635 u32 eax, ebx, ecx, edx;
2636
2637 eax = ecx = 0;
2638 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2639 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2640 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2641 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2642}
2643
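/*
 * SYSCALL is always available in 64-bit mode.  Outside of it, Intel parts
 * raise #UD on SYSCALL while AMD parts accept it, so the guest's CPUID
 * vendor string decides whether the emulation proceeds.
 */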
2644static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2645{
2646 const struct x86_emulate_ops *ops = ctxt->ops;
2647 u32 eax, ebx, ecx, edx;
2648
2649
2650
2651
2652
2653 if (ctxt->mode == X86EMUL_MODE_PROT64)
2654 return true;
2655
2656 eax = 0x00000000;
2657 ecx = 0x00000000;
2658 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2659
	/*
	 * Intel ("GenuineIntel"): the hardware only accepts SYSCALL in
	 * 64-bit long mode, so refusing it here keeps the emulation in line
	 * with what the same guest would see on bare metal.
	 */
2667 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2668 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2669 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2670 return false;
2671
	/* AMD ("AuthenticAMD") */
2673 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2674 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2675 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2676 return true;
2677
	/* AMD ("AMDisbetter!") */
2679 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2680 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2681 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2682 return true;
2683
	/* Neither Intel nor AMD: be conservative and refuse SYSCALL here. */
2685 return false;
2686}
2687
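/*
 * SYSCALL: load CS/SS from the high half of MSR_STAR and stash the return
 * RIP in RCX.  In 64-bit mode RFLAGS is also saved in R11, the entry point
 * comes from MSR_LSTAR (or MSR_CSTAR for compat mode) and the flags are
 * masked by MSR_SYSCALL_MASK; legacy mode takes the entry point from the
 * low half of MSR_STAR and only clears VM and IF.
 */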
2688static int em_syscall(struct x86_emulate_ctxt *ctxt)
2689{
2690 const struct x86_emulate_ops *ops = ctxt->ops;
2691 struct desc_struct cs, ss;
2692 u64 msr_data;
2693 u16 cs_sel, ss_sel;
2694 u64 efer = 0;
2695
2696
2697 if (ctxt->mode == X86EMUL_MODE_REAL ||
2698 ctxt->mode == X86EMUL_MODE_VM86)
2699 return emulate_ud(ctxt);
2700
2701 if (!(em_syscall_is_enabled(ctxt)))
2702 return emulate_ud(ctxt);
2703
2704 ops->get_msr(ctxt, MSR_EFER, &efer);
2705 setup_syscalls_segments(ctxt, &cs, &ss);
2706
2707 if (!(efer & EFER_SCE))
2708 return emulate_ud(ctxt);
2709
2710 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2711 msr_data >>= 32;
2712 cs_sel = (u16)(msr_data & 0xfffc);
2713 ss_sel = (u16)(msr_data + 8);
2714
2715 if (efer & EFER_LMA) {
2716 cs.d = 0;
2717 cs.l = 1;
2718 }
2719 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2720 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2721
2722 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2723 if (efer & EFER_LMA) {
2724#ifdef CONFIG_X86_64
2725 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2726
2727 ops->get_msr(ctxt,
2728 ctxt->mode == X86EMUL_MODE_PROT64 ?
2729 MSR_LSTAR : MSR_CSTAR, &msr_data);
2730 ctxt->_eip = msr_data;
2731
2732 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2733 ctxt->eflags &= ~msr_data;
2734 ctxt->eflags |= X86_EFLAGS_FIXED;
2735#endif
2736 } else {
		/* legacy (non-long) mode */
2738 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2739 ctxt->_eip = (u32)msr_data;
2740
2741 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2742 }
2743
2744 return X86EMUL_CONTINUE;
2745}
2746
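/*
 * SYSENTER: #GP in real mode, not emulated in 64-bit mode, and #UD for a
 * non-Intel guest that has EFER.LMA set while outside 64-bit mode.  CS/SS
 * are derived from MSR_IA32_SYSENTER_CS, RIP/RSP from the SYSENTER_EIP/ESP
 * MSRs.
 */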
2747static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2748{
2749 const struct x86_emulate_ops *ops = ctxt->ops;
2750 struct desc_struct cs, ss;
2751 u64 msr_data;
2752 u16 cs_sel, ss_sel;
2753 u64 efer = 0;
2754
2755 ops->get_msr(ctxt, MSR_EFER, &efer);
2756
2757 if (ctxt->mode == X86EMUL_MODE_REAL)
2758 return emulate_gp(ctxt, 0);
2759
2760
2761
2762
2763
2764 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2765 && !vendor_intel(ctxt))
2766 return emulate_ud(ctxt);
2767
2768
2769 if (ctxt->mode == X86EMUL_MODE_PROT64)
2770 return X86EMUL_UNHANDLEABLE;
2771
2772 setup_syscalls_segments(ctxt, &cs, &ss);
2773
2774 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2775 if ((msr_data & 0xfffc) == 0x0)
2776 return emulate_gp(ctxt, 0);
2777
2778 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2779 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2780 ss_sel = cs_sel + 8;
2781 if (efer & EFER_LMA) {
2782 cs.d = 0;
2783 cs.l = 1;
2784 }
2785
2786 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2787 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2788
2789 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2790 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2791
2792 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2793 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2794 (u32)msr_data;
2795
2796 return X86EMUL_CONTINUE;
2797}
2798
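/*
 * SYSEXIT: the REX.W prefix selects a 64-bit return.  The CS/SS selectors
 * are derived from MSR_IA32_SYSENTER_CS (with RPL forced to 3), the return
 * RIP comes from RDX and the user RSP from RCX; both must be canonical in
 * the 64-bit case.
 */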
2799static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2800{
2801 const struct x86_emulate_ops *ops = ctxt->ops;
2802 struct desc_struct cs, ss;
2803 u64 msr_data, rcx, rdx;
2804 int usermode;
2805 u16 cs_sel = 0, ss_sel = 0;
2806
2807
2808 if (ctxt->mode == X86EMUL_MODE_REAL ||
2809 ctxt->mode == X86EMUL_MODE_VM86)
2810 return emulate_gp(ctxt, 0);
2811
2812 setup_syscalls_segments(ctxt, &cs, &ss);
2813
2814 if ((ctxt->rex_prefix & 0x8) != 0x0)
2815 usermode = X86EMUL_MODE_PROT64;
2816 else
2817 usermode = X86EMUL_MODE_PROT32;
2818
2819 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2820 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2821
2822 cs.dpl = 3;
2823 ss.dpl = 3;
2824 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2825 switch (usermode) {
2826 case X86EMUL_MODE_PROT32:
2827 cs_sel = (u16)(msr_data + 16);
2828 if ((msr_data & 0xfffc) == 0x0)
2829 return emulate_gp(ctxt, 0);
2830 ss_sel = (u16)(msr_data + 24);
2831 rcx = (u32)rcx;
2832 rdx = (u32)rdx;
2833 break;
2834 case X86EMUL_MODE_PROT64:
2835 cs_sel = (u16)(msr_data + 32);
2836 if (msr_data == 0x0)
2837 return emulate_gp(ctxt, 0);
2838 ss_sel = cs_sel + 8;
2839 cs.d = 0;
2840 cs.l = 1;
2841 if (is_noncanonical_address(rcx) ||
2842 is_noncanonical_address(rdx))
2843 return emulate_gp(ctxt, 0);
2844 break;
2845 }
2846 cs_sel |= SEGMENT_RPL_MASK;
2847 ss_sel |= SEGMENT_RPL_MASK;
2848
2849 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2850 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2851
2852 ctxt->_eip = rdx;
2853 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2854
2855 return X86EMUL_CONTINUE;
2856}
2857
2858static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2859{
2860 int iopl;
2861 if (ctxt->mode == X86EMUL_MODE_REAL)
2862 return false;
2863 if (ctxt->mode == X86EMUL_MODE_VM86)
2864 return true;
2865 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2866 return ctxt->ops->cpl(ctxt) > iopl;
2867}
2868
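/*
 * Consult the I/O permission bitmap hanging off the TSS: the access is
 * allowed only if the bitmap lies within the TSS limit and every bit
 * covering the port range is clear.
 */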
2869static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2870 u16 port, u16 len)
2871{
2872 const struct x86_emulate_ops *ops = ctxt->ops;
2873 struct desc_struct tr_seg;
2874 u32 base3;
2875 int r;
2876 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2877 unsigned mask = (1 << len) - 1;
2878 unsigned long base;
2879
2880 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2881 if (!tr_seg.p)
2882 return false;
2883 if (desc_limit_scaled(&tr_seg) < 103)
2884 return false;
2885 base = get_desc_base(&tr_seg);
2886#ifdef CONFIG_X86_64
2887 base |= ((u64)base3) << 32;
2888#endif
2889 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2890 if (r != X86EMUL_CONTINUE)
2891 return false;
2892 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2893 return false;
2894 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2895 if (r != X86EMUL_CONTINUE)
2896 return false;
2897 if ((perm >> bit_idx) & mask)
2898 return false;
2899 return true;
2900}
2901
2902static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2903 u16 port, u16 len)
2904{
2905 if (ctxt->perm_ok)
2906 return true;
2907
2908 if (emulator_bad_iopl(ctxt))
2909 if (!emulator_io_port_access_allowed(ctxt, port, len))
2910 return false;
2911
2912 ctxt->perm_ok = true;
2913
2914 return true;
2915}
2916
2917static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2918{
	/*
	 * Intel CPUs mask the count and pointer registers in a peculiar way
	 * when a rep-string instruction runs with a 32-bit address size and
	 * ECX is zero; mirror that here for Intel guests only.
	 */
2923#ifdef CONFIG_X86_64
2924 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2925 return;
2926
2927 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2928
2929 switch (ctxt->b) {
2930 case 0xa4:
2931 case 0xa5:
2932 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through - MOVS updates both RSI and RDI */
2934 case 0xaa:
2935 case 0xab:
2936 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2937 }
2938#endif
2939}
2940
2941static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2942 struct tss_segment_16 *tss)
2943{
2944 tss->ip = ctxt->_eip;
2945 tss->flag = ctxt->eflags;
2946 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2947 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2948 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2949 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2950 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2951 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2952 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2953 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2954
2955 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2956 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2957 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2958 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2959 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2960}
2961
2962static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2963 struct tss_segment_16 *tss)
2964{
2965 int ret;
2966 u8 cpl;
2967
2968 ctxt->_eip = tss->ip;
2969 ctxt->eflags = tss->flag | 2;
2970 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2971 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2972 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2973 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2974 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2975 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2976 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2977 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2978
	/*
	 * Per the SDM, segment selectors are loaded before the segment
	 * descriptors are validated.
	 */
2983 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2984 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2985 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2986 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2987 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2988
2989 cpl = tss->cs & 3;
	/*
	 * Now load the segment descriptors.  If a fault happens at this
	 * stage it is handled in the context of the new task.
	 */
2995 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2996 X86_TRANSFER_TASK_SWITCH, NULL);
2997 if (ret != X86EMUL_CONTINUE)
2998 return ret;
2999 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3000 X86_TRANSFER_TASK_SWITCH, NULL);
3001 if (ret != X86EMUL_CONTINUE)
3002 return ret;
3003 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3004 X86_TRANSFER_TASK_SWITCH, NULL);
3005 if (ret != X86EMUL_CONTINUE)
3006 return ret;
3007 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3008 X86_TRANSFER_TASK_SWITCH, NULL);
3009 if (ret != X86EMUL_CONTINUE)
3010 return ret;
3011 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3012 X86_TRANSFER_TASK_SWITCH, NULL);
3013 if (ret != X86EMUL_CONTINUE)
3014 return ret;
3015
3016 return X86EMUL_CONTINUE;
3017}
3018
3019static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3020 u16 tss_selector, u16 old_tss_sel,
3021 ulong old_tss_base, struct desc_struct *new_desc)
3022{
3023 const struct x86_emulate_ops *ops = ctxt->ops;
3024 struct tss_segment_16 tss_seg;
3025 int ret;
3026 u32 new_tss_base = get_desc_base(new_desc);
3027
3028 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3029 &ctxt->exception);
3030 if (ret != X86EMUL_CONTINUE)
3031 return ret;
3032
3033 save_state_to_tss16(ctxt, &tss_seg);
3034
3035 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3036 &ctxt->exception);
3037 if (ret != X86EMUL_CONTINUE)
3038 return ret;
3039
3040 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3041 &ctxt->exception);
3042 if (ret != X86EMUL_CONTINUE)
3043 return ret;
3044
3045 if (old_tss_sel != 0xffff) {
3046 tss_seg.prev_task_link = old_tss_sel;
3047
3048 ret = ops->write_std(ctxt, new_tss_base,
3049 &tss_seg.prev_task_link,
3050 sizeof tss_seg.prev_task_link,
3051 &ctxt->exception);
3052 if (ret != X86EMUL_CONTINUE)
3053 return ret;
3054 }
3055
3056 return load_state_from_tss16(ctxt, &tss_seg);
3057}
3058
3059static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3060 struct tss_segment_32 *tss)
3061{
3062
3063 tss->eip = ctxt->_eip;
3064 tss->eflags = ctxt->eflags;
3065 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3066 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3067 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3068 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3069 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3070 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3071 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3072 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3073
3074 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3075 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3076 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3077 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3078 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3079 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3080}
3081
3082static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3083 struct tss_segment_32 *tss)
3084{
3085 int ret;
3086 u8 cpl;
3087
3088 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3089 return emulate_gp(ctxt, 0);
3090 ctxt->_eip = tss->eip;
3091 ctxt->eflags = tss->eflags | 2;
3092
3093
3094 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3095 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3096 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3097 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3098 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3099 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3100 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3101 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3102
	/*
	 * Per the SDM, segment selectors are loaded before the segment
	 * descriptors are validated.
	 */
3108 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3109 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3110 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3111 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3112 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3113 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3114 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3115
	/*
	 * The emulation mode must be updated before the descriptors are
	 * loaded, so that the selectors are interpreted against the right
	 * mode (VM86 vs. protected mode).
	 */
3121 if (ctxt->eflags & X86_EFLAGS_VM) {
3122 ctxt->mode = X86EMUL_MODE_VM86;
3123 cpl = 3;
3124 } else {
3125 ctxt->mode = X86EMUL_MODE_PROT32;
3126 cpl = tss->cs & 3;
3127 }
3128
3129
3130
3131
3132
3133 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3134 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3135 if (ret != X86EMUL_CONTINUE)
3136 return ret;
3137 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3138 X86_TRANSFER_TASK_SWITCH, NULL);
3139 if (ret != X86EMUL_CONTINUE)
3140 return ret;
3141 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3142 X86_TRANSFER_TASK_SWITCH, NULL);
3143 if (ret != X86EMUL_CONTINUE)
3144 return ret;
3145 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3146 X86_TRANSFER_TASK_SWITCH, NULL);
3147 if (ret != X86EMUL_CONTINUE)
3148 return ret;
3149 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3150 X86_TRANSFER_TASK_SWITCH, NULL);
3151 if (ret != X86EMUL_CONTINUE)
3152 return ret;
3153 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3154 X86_TRANSFER_TASK_SWITCH, NULL);
3155 if (ret != X86EMUL_CONTINUE)
3156 return ret;
3157 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3158 X86_TRANSFER_TASK_SWITCH, NULL);
3159
3160 return ret;
3161}
3162
3163static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3164 u16 tss_selector, u16 old_tss_sel,
3165 ulong old_tss_base, struct desc_struct *new_desc)
3166{
3167 const struct x86_emulate_ops *ops = ctxt->ops;
3168 struct tss_segment_32 tss_seg;
3169 int ret;
3170 u32 new_tss_base = get_desc_base(new_desc);
3171 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3172 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3173
3174 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3175 &ctxt->exception);
3176 if (ret != X86EMUL_CONTINUE)
3177 return ret;
3178
3179 save_state_to_tss32(ctxt, &tss_seg);
3180
	/* Only EIP/EFLAGS, the GP registers and the segment selectors are written back. */
3182 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3183 ldt_sel_offset - eip_offset, &ctxt->exception);
3184 if (ret != X86EMUL_CONTINUE)
3185 return ret;
3186
3187 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3188 &ctxt->exception);
3189 if (ret != X86EMUL_CONTINUE)
3190 return ret;
3191
3192 if (old_tss_sel != 0xffff) {
3193 tss_seg.prev_task_link = old_tss_sel;
3194
3195 ret = ops->write_std(ctxt, new_tss_base,
3196 &tss_seg.prev_task_link,
3197 sizeof tss_seg.prev_task_link,
3198 &ctxt->exception);
3199 if (ret != X86EMUL_CONTINUE)
3200 return ret;
3201 }
3202
3203 return load_state_from_tss32(ctxt, &tss_seg);
3204}
3205
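/*
 * Common task-switch path: validate the new TSS descriptor, mark the old
 * TSS not-busy for IRET/JMP, transfer the register state through the 16- or
 * 32-bit TSS image, adjust NT, the busy bit and CR0.TS, and push an error
 * code for exception-induced switches.
 */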
3206static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3207 u16 tss_selector, int idt_index, int reason,
3208 bool has_error_code, u32 error_code)
3209{
3210 const struct x86_emulate_ops *ops = ctxt->ops;
3211 struct desc_struct curr_tss_desc, next_tss_desc;
3212 int ret;
3213 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3214 ulong old_tss_base =
3215 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3216 u32 desc_limit;
3217 ulong desc_addr, dr7;
3218
3219
3220
3221 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3222 if (ret != X86EMUL_CONTINUE)
3223 return ret;
3224 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3225 if (ret != X86EMUL_CONTINUE)
3226 return ret;
3227
	/*
	 * Privilege checks only apply to switches through a task gate: the
	 * gate's DPL must not be exceeded by the selector RPL or the current
	 * CPL.  Switches caused by IRET, exceptions or a direct JMP/CALL to
	 * a TSS are checked by hardware before the exit reaches us.
	 */
3238 if (reason == TASK_SWITCH_GATE) {
3239 if (idt_index != -1) {
3240
3241 struct desc_struct task_gate_desc;
3242 int dpl;
3243
3244 ret = read_interrupt_descriptor(ctxt, idt_index,
3245 &task_gate_desc);
3246 if (ret != X86EMUL_CONTINUE)
3247 return ret;
3248
3249 dpl = task_gate_desc.dpl;
3250 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3251 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3252 }
3253 }
3254
3255 desc_limit = desc_limit_scaled(&next_tss_desc);
3256 if (!next_tss_desc.p ||
3257 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3258 desc_limit < 0x2b)) {
3259 return emulate_ts(ctxt, tss_selector & 0xfffc);
3260 }
3261
3262 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3263 curr_tss_desc.type &= ~(1 << 1);
3264 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3265 }
3266
3267 if (reason == TASK_SWITCH_IRET)
3268 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3269
	/*
	 * Only CALL and gate-initiated switches record a back link to the
	 * previous task; for the rest the old selector is neutralised here.
	 */
3272 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3273 old_tss_sel = 0xffff;
3274
3275 if (next_tss_desc.type & 8)
3276 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3277 old_tss_base, &next_tss_desc);
3278 else
3279 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3280 old_tss_base, &next_tss_desc);
3281 if (ret != X86EMUL_CONTINUE)
3282 return ret;
3283
3284 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3285 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3286
3287 if (reason != TASK_SWITCH_IRET) {
3288 next_tss_desc.type |= (1 << 1);
3289 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3290 }
3291
3292 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3293 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3294
3295 if (has_error_code) {
3296 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3297 ctxt->lock_prefix = 0;
3298 ctxt->src.val = (unsigned long) error_code;
3299 ret = em_push(ctxt);
3300 }
3301
3302 ops->get_dr(ctxt, 7, &dr7);
3303 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3304
3305 return ret;
3306}
3307
3308int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3309 u16 tss_selector, int idt_index, int reason,
3310 bool has_error_code, u32 error_code)
3311{
3312 int rc;
3313
3314 invalidate_registers(ctxt);
3315 ctxt->_eip = ctxt->eip;
3316 ctxt->dst.type = OP_NONE;
3317
3318 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3319 has_error_code, error_code);
3320
3321 if (rc == X86EMUL_CONTINUE) {
3322 ctxt->eip = ctxt->_eip;
3323 writeback_registers(ctxt);
3324 }
3325
3326 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3327}
3328
3329static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3330 struct operand *op)
3331{
3332 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3333
3334 register_address_increment(ctxt, reg, df * op->bytes);
3335 op->addr.mem.ea = register_address(ctxt, reg);
3336}
3337
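/*
 * DAS: decimal-adjust AL after subtraction.  CF and AF are computed by
 * hand below; the remaining arithmetic flags are refreshed with a dummy
 * "or al, 0".
 */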
3338static int em_das(struct x86_emulate_ctxt *ctxt)
3339{
3340 u8 al, old_al;
3341 bool af, cf, old_cf;
3342
3343 cf = ctxt->eflags & X86_EFLAGS_CF;
3344 al = ctxt->dst.val;
3345
3346 old_al = al;
3347 old_cf = cf;
3348 cf = false;
3349 af = ctxt->eflags & X86_EFLAGS_AF;
3350 if ((al & 0x0f) > 9 || af) {
3351 al -= 6;
3352 cf = old_cf | (al >= 250);
3353 af = true;
3354 } else {
3355 af = false;
3356 }
3357 if (old_al > 0x99 || old_cf) {
3358 al -= 0x60;
3359 cf = true;
3360 }
3361
3362 ctxt->dst.val = al;
3363
3364 ctxt->src.type = OP_IMM;
3365 ctxt->src.val = 0;
3366 ctxt->src.bytes = 1;
3367 fastop(ctxt, em_or);
3368 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3369 if (cf)
3370 ctxt->eflags |= X86_EFLAGS_CF;
3371 if (af)
3372 ctxt->eflags |= X86_EFLAGS_AF;
3373 return X86EMUL_CONTINUE;
3374}
3375
3376static int em_aam(struct x86_emulate_ctxt *ctxt)
3377{
3378 u8 al, ah;
3379
3380 if (ctxt->src.val == 0)
3381 return emulate_de(ctxt);
3382
3383 al = ctxt->dst.val & 0xff;
3384 ah = al / ctxt->src.val;
3385 al %= ctxt->src.val;
3386
3387 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3388
3389
3390 ctxt->src.type = OP_IMM;
3391 ctxt->src.val = 0;
3392 ctxt->src.bytes = 1;
3393 fastop(ctxt, em_or);
3394
3395 return X86EMUL_CONTINUE;
3396}
3397
3398static int em_aad(struct x86_emulate_ctxt *ctxt)
3399{
3400 u8 al = ctxt->dst.val & 0xff;
3401 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3402
3403 al = (al + (ah * ctxt->src.val)) & 0xff;
3404
3405 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3406
3407
3408 ctxt->src.type = OP_IMM;
3409 ctxt->src.val = 0;
3410 ctxt->src.bytes = 1;
3411 fastop(ctxt, em_or);
3412
3413 return X86EMUL_CONTINUE;
3414}
3415
3416static int em_call(struct x86_emulate_ctxt *ctxt)
3417{
3418 int rc;
3419 long rel = ctxt->src.val;
3420
3421 ctxt->src.val = (unsigned long)ctxt->_eip;
3422 rc = jmp_rel(ctxt, rel);
3423 if (rc != X86EMUL_CONTINUE)
3424 return rc;
3425 return em_push(ctxt);
3426}
3427
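/*
 * Far CALL: switch to the new CS first, then push the old CS:IP.  If one
 * of the pushes faults, the original CS and mode are restored, although
 * memory already written by an earlier push cannot be undone.
 */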
3428static int em_call_far(struct x86_emulate_ctxt *ctxt)
3429{
3430 u16 sel, old_cs;
3431 ulong old_eip;
3432 int rc;
3433 struct desc_struct old_desc, new_desc;
3434 const struct x86_emulate_ops *ops = ctxt->ops;
3435 int cpl = ctxt->ops->cpl(ctxt);
3436 enum x86emul_mode prev_mode = ctxt->mode;
3437
3438 old_eip = ctxt->_eip;
3439 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3440
3441 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3442 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3443 X86_TRANSFER_CALL_JMP, &new_desc);
3444 if (rc != X86EMUL_CONTINUE)
3445 return rc;
3446
3447 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3448 if (rc != X86EMUL_CONTINUE)
3449 goto fail;
3450
3451 ctxt->src.val = old_cs;
3452 rc = em_push(ctxt);
3453 if (rc != X86EMUL_CONTINUE)
3454 goto fail;
3455
3456 ctxt->src.val = old_eip;
3457 rc = em_push(ctxt);
3458
3459
3460 if (rc != X86EMUL_CONTINUE) {
3461 pr_warn_once("faulting far call emulation tainted memory\n");
3462 goto fail;
3463 }
3464 return rc;
3465fail:
3466 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3467 ctxt->mode = prev_mode;
3468 return rc;
3469
3470}
3471
3472static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3473{
3474 int rc;
3475 unsigned long eip;
3476
3477 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3478 if (rc != X86EMUL_CONTINUE)
3479 return rc;
3480 rc = assign_eip_near(ctxt, eip);
3481 if (rc != X86EMUL_CONTINUE)
3482 return rc;
3483 rsp_increment(ctxt, ctxt->src.val);
3484 return X86EMUL_CONTINUE;
3485}
3486
3487static int em_xchg(struct x86_emulate_ctxt *ctxt)
3488{
3489
3490 ctxt->src.val = ctxt->dst.val;
3491 write_register_operand(&ctxt->src);
3492
3493
3494 ctxt->dst.val = ctxt->src.orig_val;
3495 ctxt->lock_prefix = 1;
3496 return X86EMUL_CONTINUE;
3497}
3498
3499static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3500{
3501 ctxt->dst.val = ctxt->src2.val;
3502 return fastop(ctxt, em_imul);
3503}
3504
3505static int em_cwd(struct x86_emulate_ctxt *ctxt)
3506{
3507 ctxt->dst.type = OP_REG;
3508 ctxt->dst.bytes = ctxt->src.bytes;
3509 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3510 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3511
3512 return X86EMUL_CONTINUE;
3513}
3514
3515static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3516{
3517 u64 tsc = 0;
3518
3519 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3520 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3521 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3522 return X86EMUL_CONTINUE;
3523}
3524
3525static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3526{
3527 u64 pmc;
3528
3529 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3530 return emulate_gp(ctxt, 0);
3531 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3532 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3533 return X86EMUL_CONTINUE;
3534}
3535
3536static int em_mov(struct x86_emulate_ctxt *ctxt)
3537{
3538 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3539 return X86EMUL_CONTINUE;
3540}
3541
3542#define FFL(x) bit(X86_FEATURE_##x)
3543
3544static int em_movbe(struct x86_emulate_ctxt *ctxt)
3545{
3546 u32 ebx, ecx, edx, eax = 1;
3547 u16 tmp;
3548
	/* MOVBE is only legal if the guest's CPUID advertises it. */
3552 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3553 if (!(ecx & FFL(MOVBE)))
3554 return emulate_ud(ctxt);
3555
3556 switch (ctxt->op_bytes) {
3557 case 2:
		/*
		 * With a 16-bit operand the upper word of the destination
		 * must stay intact, so the byte swap is applied to a
		 * temporary and merged back by hand.
		 */
3566 tmp = (u16)ctxt->src.val;
3567 ctxt->dst.val &= ~0xffffUL;
3568 ctxt->dst.val |= (unsigned long)swab16(tmp);
3569 break;
3570 case 4:
3571 ctxt->dst.val = swab32((u32)ctxt->src.val);
3572 break;
3573 case 8:
3574 ctxt->dst.val = swab64(ctxt->src.val);
3575 break;
3576 default:
3577 BUG();
3578 }
3579 return X86EMUL_CONTINUE;
3580}
3581
3582static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3583{
3584 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3585 return emulate_gp(ctxt, 0);
3586
3587
3588 ctxt->dst.type = OP_NONE;
3589 return X86EMUL_CONTINUE;
3590}
3591
3592static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3593{
3594 unsigned long val;
3595
3596 if (ctxt->mode == X86EMUL_MODE_PROT64)
3597 val = ctxt->src.val & ~0ULL;
3598 else
3599 val = ctxt->src.val & ~0U;
3600
3601
3602 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3603 return emulate_gp(ctxt, 0);
3604
3605
3606 ctxt->dst.type = OP_NONE;
3607 return X86EMUL_CONTINUE;
3608}
3609
3610static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3611{
3612 u64 msr_data;
3613
3614 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3615 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3616 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3617 return emulate_gp(ctxt, 0);
3618
3619 return X86EMUL_CONTINUE;
3620}
3621
3622static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3623{
3624 u64 msr_data;
3625
3626 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3627 return emulate_gp(ctxt, 0);
3628
3629 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3630 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3631 return X86EMUL_CONTINUE;
3632}
3633
3634static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3635{
3636 if (ctxt->modrm_reg > VCPU_SREG_GS)
3637 return emulate_ud(ctxt);
3638
3639 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3640 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3641 ctxt->dst.bytes = 2;
3642 return X86EMUL_CONTINUE;
3643}
3644
3645static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3646{
3647 u16 sel = ctxt->src.val;
3648
3649 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3650 return emulate_ud(ctxt);
3651
3652 if (ctxt->modrm_reg == VCPU_SREG_SS)
3653 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3654
3655
3656 ctxt->dst.type = OP_NONE;
3657 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3658}
3659
3660static int em_lldt(struct x86_emulate_ctxt *ctxt)
3661{
3662 u16 sel = ctxt->src.val;
3663
3664
3665 ctxt->dst.type = OP_NONE;
3666 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3667}
3668
3669static int em_ltr(struct x86_emulate_ctxt *ctxt)
3670{
3671 u16 sel = ctxt->src.val;
3672
3673
3674 ctxt->dst.type = OP_NONE;
3675 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3676}
3677
3678static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3679{
3680 int rc;
3681 ulong linear;
3682
3683 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3684 if (rc == X86EMUL_CONTINUE)
3685 ctxt->ops->invlpg(ctxt, linear);
3686
3687 ctxt->dst.type = OP_NONE;
3688 return X86EMUL_CONTINUE;
3689}
3690
3691static int em_clts(struct x86_emulate_ctxt *ctxt)
3692{
3693 ulong cr0;
3694
3695 cr0 = ctxt->ops->get_cr(ctxt, 0);
3696 cr0 &= ~X86_CR0_TS;
3697 ctxt->ops->set_cr(ctxt, 0, cr0);
3698 return X86EMUL_CONTINUE;
3699}
3700
3701static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3702{
3703 int rc = ctxt->ops->fix_hypercall(ctxt);
3704
3705 if (rc != X86EMUL_CONTINUE)
3706 return rc;
3707
3708
3709 ctxt->_eip = ctxt->eip;
3710
3711 ctxt->dst.type = OP_NONE;
3712 return X86EMUL_CONTINUE;
3713}
3714
3715static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3716 void (*get)(struct x86_emulate_ctxt *ctxt,
3717 struct desc_ptr *ptr))
3718{
3719 struct desc_ptr desc_ptr;
3720
3721 if (ctxt->mode == X86EMUL_MODE_PROT64)
3722 ctxt->op_bytes = 8;
3723 get(ctxt, &desc_ptr);
3724 if (ctxt->op_bytes == 2) {
3725 ctxt->op_bytes = 4;
3726 desc_ptr.address &= 0x00ffffff;
3727 }
3728
3729 ctxt->dst.type = OP_NONE;
3730 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3731 &desc_ptr, 2 + ctxt->op_bytes);
3732}
3733
3734static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3735{
3736 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3737}
3738
3739static int em_sidt(struct x86_emulate_ctxt *ctxt)
3740{
3741 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3742}
3743
3744static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3745{
3746 struct desc_ptr desc_ptr;
3747 int rc;
3748
3749 if (ctxt->mode == X86EMUL_MODE_PROT64)
3750 ctxt->op_bytes = 8;
3751 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3752 &desc_ptr.size, &desc_ptr.address,
3753 ctxt->op_bytes);
3754 if (rc != X86EMUL_CONTINUE)
3755 return rc;
3756 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3757 is_noncanonical_address(desc_ptr.address))
3758 return emulate_gp(ctxt, 0);
3759 if (lgdt)
3760 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3761 else
3762 ctxt->ops->set_idt(ctxt, &desc_ptr);
3763
3764 ctxt->dst.type = OP_NONE;
3765 return X86EMUL_CONTINUE;
3766}
3767
3768static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3769{
3770 return em_lgdt_lidt(ctxt, true);
3771}
3772
3773static int em_lidt(struct x86_emulate_ctxt *ctxt)
3774{
3775 return em_lgdt_lidt(ctxt, false);
3776}
3777
3778static int em_smsw(struct x86_emulate_ctxt *ctxt)
3779{
3780 if (ctxt->dst.type == OP_MEM)
3781 ctxt->dst.bytes = 2;
3782 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3783 return X86EMUL_CONTINUE;
3784}
3785
3786static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3787{
3788 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3789 | (ctxt->src.val & 0x0f));
3790 ctxt->dst.type = OP_NONE;
3791 return X86EMUL_CONTINUE;
3792}
3793
3794static int em_loop(struct x86_emulate_ctxt *ctxt)
3795{
3796 int rc = X86EMUL_CONTINUE;
3797
3798 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3799 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3800 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3801 rc = jmp_rel(ctxt, ctxt->src.val);
3802
3803 return rc;
3804}
3805
3806static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3807{
3808 int rc = X86EMUL_CONTINUE;
3809
3810 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3811 rc = jmp_rel(ctxt, ctxt->src.val);
3812
3813 return rc;
3814}
3815
3816static int em_in(struct x86_emulate_ctxt *ctxt)
3817{
3818 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3819 &ctxt->dst.val))
3820 return X86EMUL_IO_NEEDED;
3821
3822 return X86EMUL_CONTINUE;
3823}
3824
3825static int em_out(struct x86_emulate_ctxt *ctxt)
3826{
3827 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3828 &ctxt->src.val, 1);
3829
3830 ctxt->dst.type = OP_NONE;
3831 return X86EMUL_CONTINUE;
3832}
3833
3834static int em_cli(struct x86_emulate_ctxt *ctxt)
3835{
3836 if (emulator_bad_iopl(ctxt))
3837 return emulate_gp(ctxt, 0);
3838
3839 ctxt->eflags &= ~X86_EFLAGS_IF;
3840 return X86EMUL_CONTINUE;
3841}
3842
3843static int em_sti(struct x86_emulate_ctxt *ctxt)
3844{
3845 if (emulator_bad_iopl(ctxt))
3846 return emulate_gp(ctxt, 0);
3847
3848 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3849 ctxt->eflags |= X86_EFLAGS_IF;
3850 return X86EMUL_CONTINUE;
3851}
3852
3853static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3854{
3855 u32 eax, ebx, ecx, edx;
3856
3857 eax = reg_read(ctxt, VCPU_REGS_RAX);
3858 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3859 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3860 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3861 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3862 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3863 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3864 return X86EMUL_CONTINUE;
3865}
3866
3867static int em_sahf(struct x86_emulate_ctxt *ctxt)
3868{
3869 u32 flags;
3870
3871 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3872 X86_EFLAGS_SF;
3873 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3874
3875 ctxt->eflags &= ~0xffUL;
3876 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3877 return X86EMUL_CONTINUE;
3878}
3879
3880static int em_lahf(struct x86_emulate_ctxt *ctxt)
3881{
3882 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3883 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3884 return X86EMUL_CONTINUE;
3885}
3886
3887static int em_bswap(struct x86_emulate_ctxt *ctxt)
3888{
3889 switch (ctxt->op_bytes) {
3890#ifdef CONFIG_X86_64
3891 case 8:
3892 asm("bswap %0" : "+r"(ctxt->dst.val));
3893 break;
3894#endif
3895 default:
3896 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3897 break;
3898 }
3899 return X86EMUL_CONTINUE;
3900}
3901
3902static int em_clflush(struct x86_emulate_ctxt *ctxt)
3903{
3904
3905 return X86EMUL_CONTINUE;
3906}
3907
3908static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3909{
3910 ctxt->dst.val = (s32) ctxt->src.val;
3911 return X86EMUL_CONTINUE;
3912}
3913
3914static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3915{
3916 u32 eax = 1, ebx, ecx = 0, edx;
3917
3918 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3919 if (!(edx & FFL(FXSR)))
3920 return emulate_ud(ctxt);
3921
3922 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3923 return emulate_nm(ctxt);
3924
	/*
	 * The 64-bit (REX.W) forms would need fxsave64/fxrstor64 and are
	 * not emulated here.
	 */
3929 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3930 return X86EMUL_UNHANDLEABLE;
3931
3932 return X86EMUL_CONTINUE;
3933}
3934
/*
 * FXSAVE/FXRSTOR emulation notes, summarising what the helpers below do:
 *
 * - em_fxsave() executes a host fxsave and copies the image to guest
 *   memory.  The copy stops before XMM8-15 and, when CR4.OSFXSR is clear,
 *   before XMM0-7 as well, since the guest cannot rely on those fields.
 *
 * - em_fxrstor() reads a full 512-byte image from the guest, rejects
 *   reserved high MXCSR bits with #GP and, for non-64-bit guests, first
 *   patches the image via fxrstor_fixup() so that the host's XMM8-15 (and
 *   XMM0-7 when CR4.OSFXSR is clear) survive the host fxrstor.
 */
3953static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3954{
3955 struct fxregs_state fx_state;
3956 size_t size;
3957 int rc;
3958
3959 rc = check_fxsr(ctxt);
3960 if (rc != X86EMUL_CONTINUE)
3961 return rc;
3962
3963 ctxt->ops->get_fpu(ctxt);
3964
3965 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3966
3967 ctxt->ops->put_fpu(ctxt);
3968
3969 if (rc != X86EMUL_CONTINUE)
3970 return rc;
3971
3972 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
3973 size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
3974 else
3975 size = offsetof(struct fxregs_state, xmm_space[0]);
3976
3977 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3978}
3979
3980static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
3981 struct fxregs_state *new)
3982{
3983 int rc = X86EMUL_CONTINUE;
3984 struct fxregs_state old;
3985
3986 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
3987 if (rc != X86EMUL_CONTINUE)
3988 return rc;
3989
3990
3991
3992
3993
3994
3995#ifdef CONFIG_X86_64
3996
3997 memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
3998#endif
3999
4000
4001
4002
4003
4004 if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
4005 memcpy(new->xmm_space, old.xmm_space, 8 * 16);
4006
4007 return rc;
4008}
4009
4010static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4011{
4012 struct fxregs_state fx_state;
4013 int rc;
4014
4015 rc = check_fxsr(ctxt);
4016 if (rc != X86EMUL_CONTINUE)
4017 return rc;
4018
4019 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
4020 if (rc != X86EMUL_CONTINUE)
4021 return rc;
4022
4023 if (fx_state.mxcsr >> 16)
4024 return emulate_gp(ctxt, 0);
4025
4026 ctxt->ops->get_fpu(ctxt);
4027
4028 if (ctxt->mode < X86EMUL_MODE_PROT64)
4029 rc = fxrstor_fixup(ctxt, &fx_state);
4030
4031 if (rc == X86EMUL_CONTINUE)
4032 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4033
4034 ctxt->ops->put_fpu(ctxt);
4035
4036 return rc;
4037}
4038
4039static bool valid_cr(int nr)
4040{
4041 switch (nr) {
4042 case 0:
4043 case 2 ... 4:
4044 case 8:
4045 return true;
4046 default:
4047 return false;
4048 }
4049}
4050
4051static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4052{
4053 if (!valid_cr(ctxt->modrm_reg))
4054 return emulate_ud(ctxt);
4055
4056 return X86EMUL_CONTINUE;
4057}
4058
4059static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4060{
4061 u64 new_val = ctxt->src.val64;
4062 int cr = ctxt->modrm_reg;
4063 u64 efer = 0;
4064
4065 static u64 cr_reserved_bits[] = {
4066 0xffffffff00000000ULL,
4067 0, 0, 0,
4068 CR4_RESERVED_BITS,
4069 0, 0, 0,
4070 CR8_RESERVED_BITS,
4071 };
4072
4073 if (!valid_cr(cr))
4074 return emulate_ud(ctxt);
4075
4076 if (new_val & cr_reserved_bits[cr])
4077 return emulate_gp(ctxt, 0);
4078
4079 switch (cr) {
4080 case 0: {
4081 u64 cr4;
4082 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4083 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4084 return emulate_gp(ctxt, 0);
4085
4086 cr4 = ctxt->ops->get_cr(ctxt, 4);
4087 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4088
4089 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4090 !(cr4 & X86_CR4_PAE))
4091 return emulate_gp(ctxt, 0);
4092
4093 break;
4094 }
4095 case 3: {
4096 u64 rsvd = 0;
4097
4098 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4099 if (efer & EFER_LMA)
4100 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
4101
4102 if (new_val & rsvd)
4103 return emulate_gp(ctxt, 0);
4104
4105 break;
4106 }
4107 case 4: {
4108 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4109
4110 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4111 return emulate_gp(ctxt, 0);
4112
4113 break;
4114 }
4115 }
4116
4117 return X86EMUL_CONTINUE;
4118}
4119
4120static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4121{
4122 unsigned long dr7;
4123
4124 ctxt->ops->get_dr(ctxt, 7, &dr7);
4125
	/* DR7.GD (bit 13): general detect - any debug-register access raises #DB. */
4127 return dr7 & (1 << 13);
4128}
4129
4130static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4131{
4132 int dr = ctxt->modrm_reg;
4133 u64 cr4;
4134
4135 if (dr > 7)
4136 return emulate_ud(ctxt);
4137
4138 cr4 = ctxt->ops->get_cr(ctxt, 4);
4139 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4140 return emulate_ud(ctxt);
4141
4142 if (check_dr7_gd(ctxt)) {
4143 ulong dr6;
4144
4145 ctxt->ops->get_dr(ctxt, 6, &dr6);
4146 dr6 &= ~15;
4147 dr6 |= DR6_BD | DR6_RTM;
4148 ctxt->ops->set_dr(ctxt, 6, dr6);
4149 return emulate_db(ctxt);
4150 }
4151
4152 return X86EMUL_CONTINUE;
4153}
4154
4155static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4156{
4157 u64 new_val = ctxt->src.val64;
4158 int dr = ctxt->modrm_reg;
4159
4160 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4161 return emulate_gp(ctxt, 0);
4162
4163 return check_dr_read(ctxt);
4164}
4165
4166static int check_svme(struct x86_emulate_ctxt *ctxt)
4167{
4168 u64 efer;
4169
4170 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4171
4172 if (!(efer & EFER_SVME))
4173 return emulate_ud(ctxt);
4174
4175 return X86EMUL_CONTINUE;
4176}
4177
4178static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4179{
4180 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4181
4182
4183 if (rax & 0xffff000000000000ULL)
4184 return emulate_gp(ctxt, 0);
4185
4186 return check_svme(ctxt);
4187}
4188
4189static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4190{
4191 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4192
4193 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4194 return emulate_ud(ctxt);
4195
4196 return X86EMUL_CONTINUE;
4197}
4198
4199static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4200{
4201 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4202 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4203
4204 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4205 ctxt->ops->check_pmc(ctxt, rcx))
4206 return emulate_gp(ctxt, 0);
4207
4208 return X86EMUL_CONTINUE;
4209}
4210
4211static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4212{
4213 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4214 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4215 return emulate_gp(ctxt, 0);
4216
4217 return X86EMUL_CONTINUE;
4218}
4219
4220static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4221{
4222 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4223 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4224 return emulate_gp(ctxt, 0);
4225
4226 return X86EMUL_CONTINUE;
4227}
4228
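/*
 * Decode-table entry constructors: D() declares an opcode by flags only,
 * N marks it not-implemented, I()/F() attach an emulation callback or a
 * fastop, DI/II/DIP/IIP add intercept and permission-check hooks, and
 * G/GD/EXT/E/ID/MD/GP redirect decoding to group, dual, escape, prefix or
 * mode-dependent sub-tables.
 */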
4229#define D(_y) { .flags = (_y) }
4230#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4231#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4232 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4233#define N D(NotImpl)
4234#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4235#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4236#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4237#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4238#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4239#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4240#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4241#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4242#define II(_f, _e, _i) \
4243 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4244#define IIP(_f, _e, _i, _p) \
4245 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4246 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4247#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4248
4249#define D2bv(_f) D((_f) | ByteOp), D(_f)
4250#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4251#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4252#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4253#define I2bvIP(_f, _e, _i, _p) \
4254 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4255
4256#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4257 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4258 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4259
4260static const struct opcode group7_rm0[] = {
4261 N,
4262 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4263 N, N, N, N, N, N,
4264};
4265
4266static const struct opcode group7_rm1[] = {
4267 DI(SrcNone | Priv, monitor),
4268 DI(SrcNone | Priv, mwait),
4269 N, N, N, N, N, N,
4270};
4271
4272static const struct opcode group7_rm3[] = {
4273 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4274 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4275 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4276 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4277 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4278 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4279 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4280 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4281};
4282
4283static const struct opcode group7_rm7[] = {
4284 N,
4285 DIP(SrcNone, rdtscp, check_rdtsc),
4286 N, N, N, N, N, N,
4287};
4288
4289static const struct opcode group1[] = {
4290 F(Lock, em_add),
4291 F(Lock | PageTable, em_or),
4292 F(Lock, em_adc),
4293 F(Lock, em_sbb),
4294 F(Lock | PageTable, em_and),
4295 F(Lock, em_sub),
4296 F(Lock, em_xor),
4297 F(NoWrite, em_cmp),
4298};
4299
4300static const struct opcode group1A[] = {
4301 I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
4302};
4303
4304static const struct opcode group2[] = {
4305 F(DstMem | ModRM, em_rol),
4306 F(DstMem | ModRM, em_ror),
4307 F(DstMem | ModRM, em_rcl),
4308 F(DstMem | ModRM, em_rcr),
4309 F(DstMem | ModRM, em_shl),
4310 F(DstMem | ModRM, em_shr),
4311 F(DstMem | ModRM, em_shl),
4312 F(DstMem | ModRM, em_sar),
4313};
4314
4315static const struct opcode group3[] = {
4316 F(DstMem | SrcImm | NoWrite, em_test),
4317 F(DstMem | SrcImm | NoWrite, em_test),
4318 F(DstMem | SrcNone | Lock, em_not),
4319 F(DstMem | SrcNone | Lock, em_neg),
4320 F(DstXacc | Src2Mem, em_mul_ex),
4321 F(DstXacc | Src2Mem, em_imul_ex),
4322 F(DstXacc | Src2Mem, em_div_ex),
4323 F(DstXacc | Src2Mem, em_idiv_ex),
4324};
4325
4326static const struct opcode group4[] = {
4327 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4328 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4329 N, N, N, N, N, N,
4330};
4331
4332static const struct opcode group5[] = {
4333 F(DstMem | SrcNone | Lock, em_inc),
4334 F(DstMem | SrcNone | Lock, em_dec),
4335 I(SrcMem | NearBranch, em_call_near_abs),
4336 I(SrcMemFAddr | ImplicitOps, em_call_far),
4337 I(SrcMem | NearBranch, em_jmp_abs),
4338 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4339 I(SrcMem | Stack, em_push), D(Undefined),
4340};
4341
4342static const struct opcode group6[] = {
4343 DI(Prot | DstMem, sldt),
4344 DI(Prot | DstMem, str),
4345 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4346 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4347 N, N, N, N,
4348};
4349
4350static const struct group_dual group7 = { {
4351 II(Mov | DstMem, em_sgdt, sgdt),
4352 II(Mov | DstMem, em_sidt, sidt),
4353 II(SrcMem | Priv, em_lgdt, lgdt),
4354 II(SrcMem | Priv, em_lidt, lidt),
4355 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4356 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4357 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4358}, {
4359 EXT(0, group7_rm0),
4360 EXT(0, group7_rm1),
4361 N, EXT(0, group7_rm3),
4362 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4363 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4364 EXT(0, group7_rm7),
4365} };
4366
4367static const struct opcode group8[] = {
4368 N, N, N, N,
4369 F(DstMem | SrcImmByte | NoWrite, em_bt),
4370 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4371 F(DstMem | SrcImmByte | Lock, em_btr),
4372 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4373};
4374
4375static const struct group_dual group9 = { {
4376 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4377}, {
4378 N, N, N, N, N, N, N, N,
4379} };
4380
4381static const struct opcode group11[] = {
4382 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4383 X7(D(Undefined)),
4384};
4385
4386static const struct gprefix pfx_0f_ae_7 = {
4387 I(SrcMem | ByteOp, em_clflush), N, N, N,
4388};
4389
4390static const struct group_dual group15 = { {
4391 I(ModRM | Aligned16, em_fxsave),
4392 I(ModRM | Aligned16, em_fxrstor),
4393 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4394}, {
4395 N, N, N, N, N, N, N, N,
4396} };
4397
4398static const struct gprefix pfx_0f_6f_0f_7f = {
4399 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4400};
4401
4402static const struct instr_dual instr_dual_0f_2b = {
4403 I(0, em_mov), N
4404};
4405
4406static const struct gprefix pfx_0f_2b = {
4407 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4408};
4409
4410static const struct gprefix pfx_0f_28_0f_29 = {
4411 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4412};
4413
4414static const struct gprefix pfx_0f_e7 = {
4415 N, I(Sse, em_mov), N, N,
4416};
4417
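/*
 * x87 escape tables (opcodes 0xD9, 0xDB, 0xDD).  The first array holds
 * the memory forms, indexed by ModRM.reg; the second holds the register
 * forms, indexed by ModRM - 0xC0 (see the Escape case in
 * x86_decode_insn()).  Only fnstcw, fninit and fnstsw are emulated;
 * every other FPU encoding is left as N and fails emulation.
 */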
4418static const struct escape escape_d9 = { {
4419 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4420}, {
4421
4422 N, N, N, N, N, N, N, N,
4423
4424 N, N, N, N, N, N, N, N,
4425
4426 N, N, N, N, N, N, N, N,
4427
4428 N, N, N, N, N, N, N, N,
4429
4430 N, N, N, N, N, N, N, N,
4431
4432 N, N, N, N, N, N, N, N,
4433
4434 N, N, N, N, N, N, N, N,
4435
4436 N, N, N, N, N, N, N, N,
4437} };
4438
4439static const struct escape escape_db = { {
4440 N, N, N, N, N, N, N, N,
4441}, {
4442
4443 N, N, N, N, N, N, N, N,
4444
4445 N, N, N, N, N, N, N, N,
4446
4447 N, N, N, N, N, N, N, N,
4448
4449 N, N, N, N, N, N, N, N,
4450
4451 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4452
4453 N, N, N, N, N, N, N, N,
4454
4455 N, N, N, N, N, N, N, N,
4456
4457 N, N, N, N, N, N, N, N,
4458} };
4459
4460static const struct escape escape_dd = { {
4461 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4462}, {
4463
4464 N, N, N, N, N, N, N, N,
4465
4466 N, N, N, N, N, N, N, N,
4467
4468 N, N, N, N, N, N, N, N,
4469
4470 N, N, N, N, N, N, N, N,
4471
4472 N, N, N, N, N, N, N, N,
4473
4474 N, N, N, N, N, N, N, N,
4475
4476 N, N, N, N, N, N, N, N,
4477
4478 N, N, N, N, N, N, N, N,
4479} };
4480
4481static const struct instr_dual instr_dual_0f_c3 = {
4482 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4483};
4484
4485static const struct mode_dual mode_dual_63 = {
4486 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4487};
4488
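/*
 * Main decode tables.  opcode_table[] is indexed by the first opcode
 * byte, twobyte_table[] by the second byte of a 0x0F opcode, and
 * opcode_map_0f_38[] by the third byte of a 0x0F 0x38 opcode.  Entries
 * carrying a GroupMask value are resolved further in x86_decode_insn():
 * Group/GroupDual/RMExt index a sub-table by ModRM bits, Prefix selects
 * by SIMD prefix (none/0x66/0xF2/0xF3), Escape covers the x87 opcodes,
 * and InstrDual/ModeDual choose between two forms based on ModRM.mod or
 * on 64-bit mode.
 */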
4489static const struct opcode opcode_table[256] = {
4490 /* 0x00 - 0x07 */
4491 F6ALU(Lock, em_add),
4492 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4493 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4494 /* 0x08 - 0x0F */
4495 F6ALU(Lock | PageTable, em_or),
4496 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4497 N,
4498 /* 0x10 - 0x17 */
4499 F6ALU(Lock, em_adc),
4500 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4501 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4502 /* 0x18 - 0x1F */
4503 F6ALU(Lock, em_sbb),
4504 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4505 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4506 /* 0x20 - 0x27 */
4507 F6ALU(Lock | PageTable, em_and), N, N,
4508 /* 0x28 - 0x2F */
4509 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4510 /* 0x30 - 0x37 */
4511 F6ALU(Lock, em_xor), N, N,
4512 /* 0x38 - 0x3F */
4513 F6ALU(NoWrite, em_cmp), N, N,
4514 /* 0x40 - 0x4F */
4515 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4516 /* 0x50 - 0x57 */
4517 X8(I(SrcReg | Stack, em_push)),
4518 /* 0x58 - 0x5F */
4519 X8(I(DstReg | Stack, em_pop)),
4520 /* 0x60 - 0x67 */
4521 I(ImplicitOps | Stack | No64, em_pusha),
4522 I(ImplicitOps | Stack | No64, em_popa),
4523 N, MD(ModRM, &mode_dual_63),
4524 N, N, N, N,
4525 /* 0x68 - 0x6F */
4526 I(SrcImm | Mov | Stack, em_push),
4527 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4528 I(SrcImmByte | Mov | Stack, em_push),
4529 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4530 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4531 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
4532 /* 0x70 - 0x7F */
4533 X16(D(SrcImmByte | NearBranch)),
4534 /* 0x80 - 0x87 */
4535 G(ByteOp | DstMem | SrcImm, group1),
4536 G(DstMem | SrcImm, group1),
4537 G(ByteOp | DstMem | SrcImm | No64, group1),
4538 G(DstMem | SrcImmByte, group1),
4539 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4540 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4541 /* 0x88 - 0x8F */
4542 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4543 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4544 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4545 D(ModRM | SrcMem | NoAccess | DstReg),
4546 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4547 G(0, group1A),
4548 /* 0x90 - 0x97 */
4549 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4550 /* 0x98 - 0x9F */
4551 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4552 I(SrcImmFAddr | No64, em_call_far), N,
4553 II(ImplicitOps | Stack, em_pushf, pushf),
4554 II(ImplicitOps | Stack, em_popf, popf),
4555 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4556 /* 0xA0 - 0xA7 */
4557 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4558 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4559 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4560 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4561 /* 0xA8 - 0xAF */
4562 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4563 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4564 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4565 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4566 /* 0xB0 - 0xB7 */
4567 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4568 /* 0xB8 - 0xBF */
4569 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4570 /* 0xC0 - 0xC7 */
4571 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4572 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4573 I(ImplicitOps | NearBranch, em_ret),
4574 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4575 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4576 G(ByteOp, group11), G(0, group11),
4577 /* 0xC8 - 0xCF */
4578 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4579 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4580 I(ImplicitOps, em_ret_far),
4581 D(ImplicitOps), DI(SrcImmByte, intn),
4582 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4583 /* 0xD0 - 0xD7 */
4584 G(Src2One | ByteOp, group2), G(Src2One, group2),
4585 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4586 I(DstAcc | SrcImmUByte | No64, em_aam),
4587 I(DstAcc | SrcImmUByte | No64, em_aad),
4588 F(DstAcc | ByteOp | No64, em_salc),
4589 I(DstAcc | SrcXLat | ByteOp, em_mov),
4590 /* 0xD8 - 0xDF (FPU escapes) */
4591 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4592 /* 0xE0 - 0xE7 */
4593 X3(I(SrcImmByte | NearBranch, em_loop)),
4594 I(SrcImmByte | NearBranch, em_jcxz),
4595 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4596 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4597 /* 0xE8 - 0xEF */
4598 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4599 I(SrcImmFAddr | No64, em_jmp_far),
4600 D(SrcImmByte | ImplicitOps | NearBranch),
4601 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4602 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4603 /* 0xF0 - 0xF7 */
4604 N, DI(ImplicitOps, icebp), N, N,
4605 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4606 G(ByteOp, group3), G(0, group3),
4607 /* 0xF8 - 0xFF */
4608 D(ImplicitOps), D(ImplicitOps),
4609 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4610 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4611};
4612
4613static const struct opcode twobyte_table[256] = {
4614 /* 0x00 - 0x0F */
4615 G(0, group6), GD(0, &group7), N, N,
4616 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4617 II(ImplicitOps | Priv, em_clts, clts), N,
4618 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4619 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4620 /* 0x10 - 0x1F */
4621 N, N, N, N, N, N, N, N,
4622 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4623 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4624 /* 0x20 - 0x2F */
4625 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4626 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4627 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4628 check_cr_write),
4629 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4630 check_dr_write),
4631 N, N, N, N,
4632 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4633 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4634 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4635 N, N, N, N,
4636 /* 0x30 - 0x3F */
4637 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4638 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4639 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4640 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4641 I(ImplicitOps | EmulateOnUD, em_sysenter),
4642 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4643 N, N,
4644 N, N, N, N, N, N, N, N,
4645 /* 0x40 - 0x4F */
4646 X16(D(DstReg | SrcMem | ModRM)),
4647 /* 0x50 - 0x5F */
4648 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4649 /* 0x60 - 0x6F */
4650 N, N, N, N,
4651 N, N, N, N,
4652 N, N, N, N,
4653 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4654 /* 0x70 - 0x7F */
4655 N, N, N, N,
4656 N, N, N, N,
4657 N, N, N, N,
4658 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4659 /* 0x80 - 0x8F */
4660 X16(D(SrcImm | NearBranch)),
4661 /* 0x90 - 0x9F */
4662 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4663 /* 0xA0 - 0xA7 */
4664 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4665 II(ImplicitOps, em_cpuid, cpuid),
4666 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4667 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4668 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4669 /* 0xA8 - 0xAF */
4670 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4671 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4672 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4673 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4674 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4675 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4676 /* 0xB0 - 0xB7 */
4677 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4678 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4679 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4680 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4681 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4682 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4683 /* 0xB8 - 0xBF */
4684 N, N,
4685 G(BitOp, group8),
4686 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4687 I(DstReg | SrcMem | ModRM, em_bsf_c),
4688 I(DstReg | SrcMem | ModRM, em_bsr_c),
4689 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4690 /* 0xC0 - 0xC7 */
4691 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4692 N, ID(0, &instr_dual_0f_c3),
4693 N, N, N, GD(0, &group9),
4694 /* 0xC8 - 0xCF */
4695 X8(I(DstReg, em_bswap)),
4696 /* 0xD0 - 0xDF */
4697 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4698 /* 0xE0 - 0xEF */
4699 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4700 N, N, N, N, N, N, N, N,
4701 /* 0xF0 - 0xFF */
4702 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4703};
4704
4705static const struct instr_dual instr_dual_0f_38_f0 = {
4706 I(DstReg | SrcMem | Mov, em_movbe), N
4707};
4708
4709static const struct instr_dual instr_dual_0f_38_f1 = {
4710 I(DstMem | SrcReg | Mov, em_movbe), N
4711};
4712
4713static const struct gprefix three_byte_0f_38_f0 = {
4714 ID(0, &instr_dual_0f_38_f0), N, N, N
4715};
4716
4717static const struct gprefix three_byte_0f_38_f1 = {
4718 ID(0, &instr_dual_0f_38_f1), N, N, N
4719};
4720
4721
4722
4723
4724
4725static const struct opcode opcode_map_0f_38[256] = {
4726 /* 0x00 - 0x7F */
4727 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4728 /* 0x80 - 0xEF */
4729 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4730 /* 0xF0 - 0xF1 */
4731 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4732 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4733 /* 0xF2 - 0xFF */
4734 N, N, X4(N), X8(N)
4735};
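/*
 * Only 0F 38 F0/F1 (MOVBE) with no SIMD prefix and a memory operand are
 * implemented in the three-byte map; every other entry is N.
 */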
4736
4737#undef D
4738#undef N
4739#undef G
4740#undef GD
4741#undef I
4742#undef GP
4743#undef EXT
4744#undef MD
4745#undef ID
4746
4747#undef D2bv
4748#undef D2bvIP
4749#undef I2bv
4750#undef I2bvIP
4751#undef I6ALU
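/*
 * Everything below consumes the opcode.flags value cached in ctxt->d
 * rather than building tables.  The packed operand selectors are pulled
 * out with expressions such as (ctxt->d >> SrcShift) & OpMask, and
 * decode_operand() turns the resulting OpXXX code into a struct operand.
 */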
4752
4753static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4754{
4755 unsigned size;
4756
4757 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4758 if (size == 8)
4759 size = 4;
4760 return size;
4761}
4762
4763static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4764 unsigned size, bool sign_extension)
4765{
4766 int rc = X86EMUL_CONTINUE;
4767
4768 op->type = OP_IMM;
4769 op->bytes = size;
4770 op->addr.mem.ea = ctxt->_eip;
4771
4772 switch (op->bytes) {
4773 case 1:
4774 op->val = insn_fetch(s8, ctxt);
4775 break;
4776 case 2:
4777 op->val = insn_fetch(s16, ctxt);
4778 break;
4779 case 4:
4780 op->val = insn_fetch(s32, ctxt);
4781 break;
4782 case 8:
4783 op->val = insn_fetch(s64, ctxt);
4784 break;
4785 }
4786 if (!sign_extension) {
4787 switch (op->bytes) {
4788 case 1:
4789 op->val &= 0xff;
4790 break;
4791 case 2:
4792 op->val &= 0xffff;
4793 break;
4794 case 4:
4795 op->val &= 0xffffffff;
4796 break;
4797 }
4798 }
4799done:
4800 return rc;
4801}
4802
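/*
 * Materialize a single operand described by an OpXXX selector: a
 * register, a memory reference (shared through ctxt->memop between the
 * operands that address memory), an immediate fetched from the
 * instruction stream, or a fixed value such as a segment number.
 */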
4803static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4804 unsigned d)
4805{
4806 int rc = X86EMUL_CONTINUE;
4807
4808 switch (d) {
4809 case OpReg:
4810 decode_register_operand(ctxt, op);
4811 break;
4812 case OpImmUByte:
4813 rc = decode_imm(ctxt, op, 1, false);
4814 break;
4815 case OpMem:
4816 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4817 mem_common:
4818 *op = ctxt->memop;
4819 ctxt->memopp = op;
4820 if (ctxt->d & BitOp)
4821 fetch_bit_operand(ctxt);
4822 op->orig_val = op->val;
4823 break;
4824 case OpMem64:
4825 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4826 goto mem_common;
4827 case OpAcc:
4828 op->type = OP_REG;
4829 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4830 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4831 fetch_register_operand(op);
4832 op->orig_val = op->val;
4833 break;
4834 case OpAccLo:
4835 op->type = OP_REG;
4836 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4837 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4838 fetch_register_operand(op);
4839 op->orig_val = op->val;
4840 break;
4841 case OpAccHi:
4842 if (ctxt->d & ByteOp) {
4843 op->type = OP_NONE;
4844 break;
4845 }
4846 op->type = OP_REG;
4847 op->bytes = ctxt->op_bytes;
4848 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4849 fetch_register_operand(op);
4850 op->orig_val = op->val;
4851 break;
4852 case OpDI:
4853 op->type = OP_MEM;
4854 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4855 op->addr.mem.ea =
4856 register_address(ctxt, VCPU_REGS_RDI);
4857 op->addr.mem.seg = VCPU_SREG_ES;
4858 op->val = 0;
4859 op->count = 1;
4860 break;
4861 case OpDX:
4862 op->type = OP_REG;
4863 op->bytes = 2;
4864 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4865 fetch_register_operand(op);
4866 break;
4867 case OpCL:
4868 op->type = OP_IMM;
4869 op->bytes = 1;
4870 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4871 break;
4872 case OpImmByte:
4873 rc = decode_imm(ctxt, op, 1, true);
4874 break;
4875 case OpOne:
4876 op->type = OP_IMM;
4877 op->bytes = 1;
4878 op->val = 1;
4879 break;
4880 case OpImm:
4881 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4882 break;
4883 case OpImm64:
4884 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4885 break;
4886 case OpMem8:
4887 ctxt->memop.bytes = 1;
4888 if (ctxt->memop.type == OP_REG) {
4889 ctxt->memop.addr.reg = decode_register(ctxt,
4890 ctxt->modrm_rm, true);
4891 fetch_register_operand(&ctxt->memop);
4892 }
4893 goto mem_common;
4894 case OpMem16:
4895 ctxt->memop.bytes = 2;
4896 goto mem_common;
4897 case OpMem32:
4898 ctxt->memop.bytes = 4;
4899 goto mem_common;
4900 case OpImmU16:
4901 rc = decode_imm(ctxt, op, 2, false);
4902 break;
4903 case OpImmU:
4904 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4905 break;
4906 case OpSI:
4907 op->type = OP_MEM;
4908 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4909 op->addr.mem.ea =
4910 register_address(ctxt, VCPU_REGS_RSI);
4911 op->addr.mem.seg = ctxt->seg_override;
4912 op->val = 0;
4913 op->count = 1;
4914 break;
4915 case OpXLat:
4916 op->type = OP_MEM;
4917 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4918 op->addr.mem.ea =
4919 address_mask(ctxt,
4920 reg_read(ctxt, VCPU_REGS_RBX) +
4921 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4922 op->addr.mem.seg = ctxt->seg_override;
4923 op->val = 0;
4924 break;
4925 case OpImmFAddr:
4926 op->type = OP_IMM;
4927 op->addr.mem.ea = ctxt->_eip;
4928 op->bytes = ctxt->op_bytes + 2;
4929 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4930 break;
4931 case OpMemFAddr:
4932 ctxt->memop.bytes = ctxt->op_bytes + 2;
4933 goto mem_common;
4934 case OpES:
4935 op->type = OP_IMM;
4936 op->val = VCPU_SREG_ES;
4937 break;
4938 case OpCS:
4939 op->type = OP_IMM;
4940 op->val = VCPU_SREG_CS;
4941 break;
4942 case OpSS:
4943 op->type = OP_IMM;
4944 op->val = VCPU_SREG_SS;
4945 break;
4946 case OpDS:
4947 op->type = OP_IMM;
4948 op->val = VCPU_SREG_DS;
4949 break;
4950 case OpFS:
4951 op->type = OP_IMM;
4952 op->val = VCPU_SREG_FS;
4953 break;
4954 case OpGS:
4955 op->type = OP_IMM;
4956 op->val = VCPU_SREG_GS;
4957 break;
4958 case OpImplicit:
4959 /* Special instructions do their own operand decoding. */
4960 default:
4961 op->type = OP_NONE; /* Disable writeback. */
4962 break;
4963 }
4964
4965done:
4966 return rc;
4967}
4968
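/*
 * Decode one instruction into ctxt: legacy and REX prefixes first, then
 * one to three opcode bytes, then the ModRM byte when the table entry
 * requires one.  Group, prefix and escape entries are resolved until
 * ctxt->d holds the final flag set, after which the source, second
 * source and destination operands are decoded for x86_emulate_insn().
 */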
4969int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4970{
4971 int rc = X86EMUL_CONTINUE;
4972 int mode = ctxt->mode;
4973 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4974 bool op_prefix = false;
4975 bool has_seg_override = false;
4976 struct opcode opcode;
4977
4978 ctxt->memop.type = OP_NONE;
4979 ctxt->memopp = NULL;
4980 ctxt->_eip = ctxt->eip;
4981 ctxt->fetch.ptr = ctxt->fetch.data;
4982 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4983 ctxt->opcode_len = 1;
4984 if (insn_len > 0)
4985 memcpy(ctxt->fetch.data, insn, insn_len);
4986 else {
4987 rc = __do_insn_fetch_bytes(ctxt, 1);
4988 if (rc != X86EMUL_CONTINUE)
4989 return rc;
4990 }
4991
4992 switch (mode) {
4993 case X86EMUL_MODE_REAL:
4994 case X86EMUL_MODE_VM86:
4995 case X86EMUL_MODE_PROT16:
4996 def_op_bytes = def_ad_bytes = 2;
4997 break;
4998 case X86EMUL_MODE_PROT32:
4999 def_op_bytes = def_ad_bytes = 4;
5000 break;
5001#ifdef CONFIG_X86_64
5002 case X86EMUL_MODE_PROT64:
5003 def_op_bytes = 4;
5004 def_ad_bytes = 8;
5005 break;
5006#endif
5007 default:
5008 return EMULATION_FAILED;
5009 }
5010
5011 ctxt->op_bytes = def_op_bytes;
5012 ctxt->ad_bytes = def_ad_bytes;
5013
5014 /* Legacy prefixes. */
5015 for (;;) {
5016 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5017 case 0x66: /* operand-size override */
5018 op_prefix = true;
5019 /* switch between 2/4 bytes */
5020 ctxt->op_bytes = def_op_bytes ^ 6;
5021 break;
5022 case 0x67: /* address-size override */
5023 if (mode == X86EMUL_MODE_PROT64)
5024 /* switch between 4/8 bytes */
5025 ctxt->ad_bytes = def_ad_bytes ^ 12;
5026 else
5027 /* switch between 2/4 bytes */
5028 ctxt->ad_bytes = def_ad_bytes ^ 6;
5029 break;
5030 case 0x26: /* ES override */
5031 case 0x2e: /* CS override */
5032 case 0x36: /* SS override */
5033 case 0x3e: /* DS override */
5034 has_seg_override = true;
5035 ctxt->seg_override = (ctxt->b >> 3) & 3;
5036 break;
5037 case 0x64: /* FS override */
5038 case 0x65: /* GS override */
5039 has_seg_override = true;
5040 ctxt->seg_override = ctxt->b & 7;
5041 break;
5042 case 0x40 ... 0x4f: /* REX */
5043 if (mode != X86EMUL_MODE_PROT64)
5044 goto done_prefixes;
5045 ctxt->rex_prefix = ctxt->b;
5046 continue;
5047 case 0xf0: /* LOCK */
5048 ctxt->lock_prefix = 1;
5049 break;
5050 case 0xf2: /* REPNE/REPNZ */
5051 case 0xf3: /* REP/REPE/REPZ */
5052 ctxt->rep_prefix = ctxt->b;
5053 break;
5054 default:
5055 goto done_prefixes;
5056 }
5057
5058 /* Any legacy prefix after a REX prefix nullifies its effect. */
5059
5060 ctxt->rex_prefix = 0;
5061 }
5062
5063done_prefixes:
5064
5065 /* REX prefix. */
5066 if (ctxt->rex_prefix & 8)
5067 ctxt->op_bytes = 8;
5068
5069 /* Opcode byte(s). */
5070 opcode = opcode_table[ctxt->b];
5071 /* Two-byte opcode? */
5072 if (ctxt->b == 0x0f) {
5073 ctxt->opcode_len = 2;
5074 ctxt->b = insn_fetch(u8, ctxt);
5075 opcode = twobyte_table[ctxt->b];
5076
5077 /* 0F 38 opcode map */
5078 if (ctxt->b == 0x38) {
5079 ctxt->opcode_len = 3;
5080 ctxt->b = insn_fetch(u8, ctxt);
5081 opcode = opcode_map_0f_38[ctxt->b];
5082 }
5083 }
5084 ctxt->d = opcode.flags;
5085
5086 if (ctxt->d & ModRM)
5087 ctxt->modrm = insn_fetch(u8, ctxt);
5088
5089 /* vex-prefix instructions are not implemented */
5090 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5091 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5092 ctxt->d = NotImpl;
5093 }
5094
5095 while (ctxt->d & GroupMask) {
5096 switch (ctxt->d & GroupMask) {
5097 case Group:
5098 goffset = (ctxt->modrm >> 3) & 7;
5099 opcode = opcode.u.group[goffset];
5100 break;
5101 case GroupDual:
5102 goffset = (ctxt->modrm >> 3) & 7;
5103 if ((ctxt->modrm >> 6) == 3)
5104 opcode = opcode.u.gdual->mod3[goffset];
5105 else
5106 opcode = opcode.u.gdual->mod012[goffset];
5107 break;
5108 case RMExt:
5109 goffset = ctxt->modrm & 7;
5110 opcode = opcode.u.group[goffset];
5111 break;
5112 case Prefix:
5113 if (ctxt->rep_prefix && op_prefix)
5114 return EMULATION_FAILED;
5115 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5116 switch (simd_prefix) {
5117 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5118 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5119 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5120 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5121 }
5122 break;
5123 case Escape:
5124 if (ctxt->modrm > 0xbf)
5125 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5126 else
5127 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5128 break;
5129 case InstrDual:
5130 if ((ctxt->modrm >> 6) == 3)
5131 opcode = opcode.u.idual->mod3;
5132 else
5133 opcode = opcode.u.idual->mod012;
5134 break;
5135 case ModeDual:
5136 if (ctxt->mode == X86EMUL_MODE_PROT64)
5137 opcode = opcode.u.mdual->mode64;
5138 else
5139 opcode = opcode.u.mdual->mode32;
5140 break;
5141 default:
5142 return EMULATION_FAILED;
5143 }
5144
5145 ctxt->d &= ~(u64)GroupMask;
5146 ctxt->d |= opcode.flags;
5147 }
5148
5149 /* Unrecognised instruction? */
5150 if (ctxt->d == 0)
5151 return EMULATION_FAILED;
5152
5153 ctxt->execute = opcode.u.execute;
5154
5155 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5156 return EMULATION_FAILED;
5157
5158 if (unlikely(ctxt->d &
5159 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5160 No16))) {
5161 /*
5162 * These are copied unconditionally here, and checked unconditionally
5163 * in x86_emulate_insn().
5164 */
5165 ctxt->check_perm = opcode.check_perm;
5166 ctxt->intercept = opcode.intercept;
5167
5168 if (ctxt->d & NotImpl)
5169 return EMULATION_FAILED;
5170
5171 if (mode == X86EMUL_MODE_PROT64) {
5172 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5173 ctxt->op_bytes = 8;
5174 else if (ctxt->d & NearBranch)
5175 ctxt->op_bytes = 8;
5176 }
5177
5178 if (ctxt->d & Op3264) {
5179 if (mode == X86EMUL_MODE_PROT64)
5180 ctxt->op_bytes = 8;
5181 else
5182 ctxt->op_bytes = 4;
5183 }
5184
5185 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5186 ctxt->op_bytes = 4;
5187
5188 if (ctxt->d & Sse)
5189 ctxt->op_bytes = 16;
5190 else if (ctxt->d & Mmx)
5191 ctxt->op_bytes = 8;
5192 }
5193
5194 /* ModRM and SIB bytes. */
5195 if (ctxt->d & ModRM) {
5196 rc = decode_modrm(ctxt, &ctxt->memop);
5197 if (!has_seg_override) {
5198 has_seg_override = true;
5199 ctxt->seg_override = ctxt->modrm_seg;
5200 }
5201 } else if (ctxt->d & MemAbs)
5202 rc = decode_abs(ctxt, &ctxt->memop);
5203 if (rc != X86EMUL_CONTINUE)
5204 goto done;
5205
5206 if (!has_seg_override)
5207 ctxt->seg_override = VCPU_SREG_DS;
5208
5209 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5210
5211 /*
5212 * Decode and fetch the source operand: register, memory
5213 * or immediate.
5214 */
5215 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5216 if (rc != X86EMUL_CONTINUE)
5217 goto done;
5218
5219 /*
5220 * Decode and fetch the second source operand: register,
5221 * memory or immediate.
5222 */
5223 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5224 if (rc != X86EMUL_CONTINUE)
5225 goto done;
5226
5227 /* Decode and fetch the destination operand: register or memory. */
5228 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5229
5230 if (ctxt->rip_relative && likely(ctxt->memopp))
5231 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5232 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5233
5234done:
5235 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5236}
5237
5238bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5239{
5240 return ctxt->d & PageTable;
5241}
5242
5243static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5244{
5245 /*
5246 * The second termination condition only applies to REPE and REPNE.
5247 * Test whether the repeat prefix is REPE/REPZ or REPNE/REPNZ and,
5248 * if so, check the corresponding termination condition:
5249 * - if REPE/REPZ and ZF = 0 then done
5250 * - if REPNE/REPNZ and ZF = 1 then done
5251 */
5252 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5253 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5254 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5255 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5256 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5257 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5258 return true;
5259
5260 return false;
5261}
5262
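/*
 * Execute fwait with the guest FPU state loaded so that any pending x87
 * exception is raised now and reported back to the guest as #MF, before
 * an MMX instruction is emulated.
 */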
5263static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5264{
5265 int rc;
5266
5267 ctxt->ops->get_fpu(ctxt);
5268 rc = asm_safe("fwait");
5269 ctxt->ops->put_fpu(ctxt);
5270
5271 if (unlikely(rc != X86EMUL_CONTINUE))
5272 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5273
5274 return X86EMUL_CONTINUE;
5275}
5276
5277static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5278 struct operand *op)
5279{
5280 if (op->type == OP_MM)
5281 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5282}
5283
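/*
 * Invoke a fastop stub: dst.val is passed in RAX, src.val in RDX,
 * src2.val in RCX and the emulated flags in the DI register, with
 * EFLAGS swapped in and out around the call via pushf/popf.  For
 * non-byte operations the stub pointer is advanced by
 * __ffs(dst.bytes) * FASTOP_SIZE to select the 2-, 4- or 8-byte
 * variant, and a NULL fop on return signals a divide-error exception.
 */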
5284static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5285{
5286 register void *__sp asm(_ASM_SP);
5287 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5288
5289 if (!(ctxt->d & ByteOp))
5290 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5291
5292 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5293 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5294 [fastop]"+S"(fop), "+r"(__sp)
5295 : "c"(ctxt->src2.val));
5296
5297 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5298 if (!fop)
5299 return emulate_de(ctxt);
5300 return X86EMUL_CONTINUE;
5301}
5302
5303void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5304{
5305 memset(&ctxt->rip_relative, 0,
5306 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5307
5308 ctxt->io_read.pos = 0;
5309 ctxt->io_read.end = 0;
5310 ctxt->mem_read.end = 0;
5311}
5312
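/*
 * Execute a previously decoded instruction: perform the Lock/Priv/Prot
 * and intercept checks, read memory source operands, dispatch to the
 * ->execute or fastop handler (or to the open-coded cases below), write
 * back the destination, advance RIP and handle REP string iteration and
 * restart.
 */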
5313int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5314{
5315 const struct x86_emulate_ops *ops = ctxt->ops;
5316 int rc = X86EMUL_CONTINUE;
5317 int saved_dst_type = ctxt->dst.type;
5318
5319 ctxt->mem_read.pos = 0;
5320
5321 /* LOCK prefix is allowed only with some instructions */
5322 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5323 rc = emulate_ud(ctxt);
5324 goto done;
5325 }
5326
5327 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5328 rc = emulate_ud(ctxt);
5329 goto done;
5330 }
5331
5332 if (unlikely(ctxt->d &
5333 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5334 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5335 (ctxt->d & Undefined)) {
5336 rc = emulate_ud(ctxt);
5337 goto done;
5338 }
5339
5340 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5341 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5342 rc = emulate_ud(ctxt);
5343 goto done;
5344 }
5345
5346 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5347 rc = emulate_nm(ctxt);
5348 goto done;
5349 }
5350
5351 if (ctxt->d & Mmx) {
5352 rc = flush_pending_x87_faults(ctxt);
5353 if (rc != X86EMUL_CONTINUE)
5354 goto done;
5355 /*
5356 * Now that we know the FPU is exception safe, we can fetch
5357 * operands from it. They will be restored on fwait.
5358 */
5359 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5360 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5361 if (!(ctxt->d & Mov))
5362 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5363 }
5364
5365 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5366 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5367 X86_ICPT_PRE_EXCEPT);
5368 if (rc != X86EMUL_CONTINUE)
5369 goto done;
5370 }
5371
5372 /* Instruction can only be executed in protected mode */
5373 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5374 rc = emulate_ud(ctxt);
5375 goto done;
5376 }
5377
5378 /* Privileged instruction can be executed only in CPL=0 */
5379 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5380 if (ctxt->d & PrivUD)
5381 rc = emulate_ud(ctxt);
5382 else
5383 rc = emulate_gp(ctxt, 0);
5384 goto done;
5385 }
5386
5387 /* Do instruction-specific permission checks */
5388 if (ctxt->d & CheckPerm) {
5389 rc = ctxt->check_perm(ctxt);
5390 if (rc != X86EMUL_CONTINUE)
5391 goto done;
5392 }
5393
5394 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5395 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5396 X86_ICPT_POST_EXCEPT);
5397 if (rc != X86EMUL_CONTINUE)
5398 goto done;
5399 }
5400
5401 if (ctxt->rep_prefix && (ctxt->d & String)) {
5402 /* All REP prefixes have the same first termination condition */
5403 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5404 string_registers_quirk(ctxt);
5405 ctxt->eip = ctxt->_eip;
5406 ctxt->eflags &= ~X86_EFLAGS_RF;
5407 goto done;
5408 }
5409 }
5410 }
5411
5412 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5413 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5414 ctxt->src.valptr, ctxt->src.bytes);
5415 if (rc != X86EMUL_CONTINUE)
5416 goto done;
5417 ctxt->src.orig_val64 = ctxt->src.val64;
5418 }
5419
5420 if (ctxt->src2.type == OP_MEM) {
5421 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5422 &ctxt->src2.val, ctxt->src2.bytes);
5423 if (rc != X86EMUL_CONTINUE)
5424 goto done;
5425 }
5426
5427 if ((ctxt->d & DstMask) == ImplicitOps)
5428 goto special_insn;
5429
5430
5431 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5432 /* optimisation - avoid slow emulated read if Mov */
5433 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5434 &ctxt->dst.val, ctxt->dst.bytes);
5435 if (rc != X86EMUL_CONTINUE) {
5436 if (!(ctxt->d & NoWrite) &&
5437 rc == X86EMUL_PROPAGATE_FAULT &&
5438 ctxt->exception.vector == PF_VECTOR)
5439 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5440 goto done;
5441 }
5442 }
5443
5444 ctxt->dst.orig_val64 = ctxt->dst.val64;
5445
5446special_insn:
5447
5448 if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5449 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5450 X86_ICPT_POST_MEMACCESS);
5451 if (rc != X86EMUL_CONTINUE)
5452 goto done;
5453 }
5454
5455 if (ctxt->rep_prefix && (ctxt->d & String))
5456 ctxt->eflags |= X86_EFLAGS_RF;
5457 else
5458 ctxt->eflags &= ~X86_EFLAGS_RF;
5459
5460 if (ctxt->execute) {
5461 if (ctxt->d & Fastop) {
5462 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5463 rc = fastop(ctxt, fop);
5464 if (rc != X86EMUL_CONTINUE)
5465 goto done;
5466 goto writeback;
5467 }
5468 rc = ctxt->execute(ctxt);
5469 if (rc != X86EMUL_CONTINUE)
5470 goto done;
5471 goto writeback;
5472 }
5473
5474 if (ctxt->opcode_len == 2)
5475 goto twobyte_insn;
5476 else if (ctxt->opcode_len == 3)
5477 goto threebyte_insn;
5478
5479 switch (ctxt->b) {
5480 case 0x70 ... 0x7f:
5481 if (test_cc(ctxt->b, ctxt->eflags))
5482 rc = jmp_rel(ctxt, ctxt->src.val);
5483 break;
5484 case 0x8d:
5485 ctxt->dst.val = ctxt->src.addr.mem.ea;
5486 break;
5487 case 0x90 ... 0x97:
5488 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5489 ctxt->dst.type = OP_NONE;
5490 else
5491 rc = em_xchg(ctxt);
5492 break;
5493 case 0x98:
5494 switch (ctxt->op_bytes) {
5495 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5496 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5497 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5498 }
5499 break;
5500 case 0xcc:
5501 rc = emulate_int(ctxt, 3);
5502 break;
5503 case 0xcd:
5504 rc = emulate_int(ctxt, ctxt->src.val);
5505 break;
5506 case 0xce:
5507 if (ctxt->eflags & X86_EFLAGS_OF)
5508 rc = emulate_int(ctxt, 4);
5509 break;
5510 case 0xe9:
5511 case 0xeb:
5512 rc = jmp_rel(ctxt, ctxt->src.val);
5513 ctxt->dst.type = OP_NONE;
5514 break;
5515 case 0xf4:
5516 ctxt->ops->halt(ctxt);
5517 break;
5518 case 0xf5:
5519 /* cmc: complement carry flag */
5520 ctxt->eflags ^= X86_EFLAGS_CF;
5521 break;
5522 case 0xf8:
5523 ctxt->eflags &= ~X86_EFLAGS_CF;
5524 break;
5525 case 0xf9:
5526 ctxt->eflags |= X86_EFLAGS_CF;
5527 break;
5528 case 0xfc:
5529 ctxt->eflags &= ~X86_EFLAGS_DF;
5530 break;
5531 case 0xfd:
5532 ctxt->eflags |= X86_EFLAGS_DF;
5533 break;
5534 default:
5535 goto cannot_emulate;
5536 }
5537
5538 if (rc != X86EMUL_CONTINUE)
5539 goto done;
5540
5541writeback:
5542 if (ctxt->d & SrcWrite) {
5543 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5544 rc = writeback(ctxt, &ctxt->src);
5545 if (rc != X86EMUL_CONTINUE)
5546 goto done;
5547 }
5548 if (!(ctxt->d & NoWrite)) {
5549 rc = writeback(ctxt, &ctxt->dst);
5550 if (rc != X86EMUL_CONTINUE)
5551 goto done;
5552 }
5553
5554 /*
5555 * Restore dst type in case the decoding will be reused
5556 * (happens for string instructions).
5557 */
5558 ctxt->dst.type = saved_dst_type;
5559
5560 if ((ctxt->d & SrcMask) == SrcSI)
5561 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5562
5563 if ((ctxt->d & DstMask) == DstDI)
5564 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5565
5566 if (ctxt->rep_prefix && (ctxt->d & String)) {
5567 unsigned int count;
5568 struct read_cache *r = &ctxt->io_read;
5569 if ((ctxt->d & SrcMask) == SrcSI)
5570 count = ctxt->src.count;
5571 else
5572 count = ctxt->dst.count;
5573 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5574
5575 if (!string_insn_completed(ctxt)) {
5576 /*
5577 * Re-enter the guest when the pio read-ahead buffer is empty
5578 * or, if it is not used, after each 1024 iterations.
5579 */
5580 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5581 (r->end == 0 || r->end != r->pos)) {
5582 /*
5583 * Reset the read cache. This usually happens before
5584 * decode, but since the instruction is restarted only
5585 * after decode, it must be reset here.
5586 */
5587 ctxt->mem_read.end = 0;
5588 writeback_registers(ctxt);
5589 return EMULATION_RESTART;
5590 }
5591 goto done;
5592 }
5593 ctxt->eflags &= ~X86_EFLAGS_RF;
5594 }
5595
5596 ctxt->eip = ctxt->_eip;
5597
5598done:
5599 if (rc == X86EMUL_PROPAGATE_FAULT) {
5600 WARN_ON(ctxt->exception.vector > 0x1f);
5601 ctxt->have_exception = true;
5602 }
5603 if (rc == X86EMUL_INTERCEPTED)
5604 return EMULATION_INTERCEPTED;
5605
5606 if (rc == X86EMUL_CONTINUE)
5607 writeback_registers(ctxt);
5608
5609 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5610
5611twobyte_insn:
5612 switch (ctxt->b) {
5613 case 0x09:
5614 (ctxt->ops->wbinvd)(ctxt);
5615 break;
5616 case 0x08: /* invd */
5617 case 0x0d: /* GrpP (prefetch) */
5618 case 0x18: /* Grp16 (prefetch/nop) */
5619 case 0x1f: /* nop */
5620 break;
5621 case 0x20:
5622 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5623 break;
5624 case 0x21:
5625 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5626 break;
5627 case 0x40 ... 0x4f:
5628 if (test_cc(ctxt->b, ctxt->eflags))
5629 ctxt->dst.val = ctxt->src.val;
5630 else if (ctxt->op_bytes != 4)
5631 ctxt->dst.type = OP_NONE;
5632 break;
5633 case 0x80 ... 0x8f:
5634 if (test_cc(ctxt->b, ctxt->eflags))
5635 rc = jmp_rel(ctxt, ctxt->src.val);
5636 break;
5637 case 0x90 ... 0x9f:
5638 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5639 break;
5640 case 0xb6 ... 0xb7:
5641 ctxt->dst.bytes = ctxt->op_bytes;
5642 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5643 : (u16) ctxt->src.val;
5644 break;
5645 case 0xbe ... 0xbf:
5646 ctxt->dst.bytes = ctxt->op_bytes;
5647 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5648 (s16) ctxt->src.val;
5649 break;
5650 default:
5651 goto cannot_emulate;
5652 }
5653
5654threebyte_insn:
5655
5656 if (rc != X86EMUL_CONTINUE)
5657 goto done;
5658
5659 goto writeback;
5660
5661cannot_emulate:
5662 return EMULATION_FAILED;
5663}
5664
5665void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5666{
5667 invalidate_registers(ctxt);
5668}
5669
5670void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5671{
5672 writeback_registers(ctxt);
5673}
5674