// SPDX-License-Identifier: GPL-2.0-only
/*
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator,
 * used by KVM to emulate guest instructions that cannot be executed
 * directly (e.g. MMIO accesses and legacy real-mode code).
 */
21#include <linux/kvm_host.h>
22#include "kvm_cache_regs.h"
23#include "kvm_emulate.h"
24#include <linux/stringify.h>
25#include <asm/fpu/api.h>
26#include <asm/debugreg.h>
27#include <asm/nospec-branch.h>
28
29#include "x86.h"
30#include "tss.h"
31#include "mmu.h"
32#include "pmu.h"
33
/*
 * Operand types
 */
37#define OpNone 0ull
38#define OpImplicit 1ull
39#define OpReg 2ull
40#define OpMem 3ull
41#define OpAcc 4ull
42#define OpDI 5ull
43#define OpMem64 6ull
44#define OpImmUByte 7ull
45#define OpDX 8ull
46#define OpCL 9ull
47#define OpImmByte 10ull
48#define OpOne 11ull
49#define OpImm 12ull
50#define OpMem16 13ull
51#define OpMem32 14ull
52#define OpImmU 15ull
53#define OpSI 16ull
54#define OpImmFAddr 17ull
55#define OpMemFAddr 18ull
56#define OpImmU16 19ull
57#define OpES 20ull
58#define OpCS 21ull
59#define OpSS 22ull
60#define OpDS 23ull
61#define OpFS 24ull
62#define OpGS 25ull
63#define OpMem8 26ull
64#define OpImm64 27ull
65#define OpXLat 28ull
66#define OpAccLo 29ull
67#define OpAccHi 30ull
68
69#define OpBits 5
70#define OpMask ((1ull << OpBits) - 1)
71
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at most one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
82#define ByteOp (1<<0)
83
84#define DstShift 1
85#define ImplicitOps (OpImplicit << DstShift)
86#define DstReg (OpReg << DstShift)
87#define DstMem (OpMem << DstShift)
88#define DstAcc (OpAcc << DstShift)
89#define DstDI (OpDI << DstShift)
90#define DstMem64 (OpMem64 << DstShift)
91#define DstMem16 (OpMem16 << DstShift)
92#define DstImmUByte (OpImmUByte << DstShift)
93#define DstDX (OpDX << DstShift)
94#define DstAccLo (OpAccLo << DstShift)
95#define DstMask (OpMask << DstShift)
96
97#define SrcShift 6
98#define SrcNone (OpNone << SrcShift)
99#define SrcReg (OpReg << SrcShift)
100#define SrcMem (OpMem << SrcShift)
101#define SrcMem16 (OpMem16 << SrcShift)
102#define SrcMem32 (OpMem32 << SrcShift)
103#define SrcImm (OpImm << SrcShift)
104#define SrcImmByte (OpImmByte << SrcShift)
105#define SrcOne (OpOne << SrcShift)
106#define SrcImmUByte (OpImmUByte << SrcShift)
107#define SrcImmU (OpImmU << SrcShift)
108#define SrcSI (OpSI << SrcShift)
109#define SrcXLat (OpXLat << SrcShift)
110#define SrcImmFAddr (OpImmFAddr << SrcShift)
111#define SrcMemFAddr (OpMemFAddr << SrcShift)
112#define SrcAcc (OpAcc << SrcShift)
113#define SrcImmU16 (OpImmU16 << SrcShift)
114#define SrcImm64 (OpImm64 << SrcShift)
115#define SrcDX (OpDX << SrcShift)
116#define SrcMem8 (OpMem8 << SrcShift)
117#define SrcAccHi (OpAccHi << SrcShift)
118#define SrcMask (OpMask << SrcShift)
119#define BitOp (1<<11)
120#define MemAbs (1<<12)
121#define String (1<<13)
122#define Stack (1<<14)
123#define GroupMask (7<<15)
124#define Group (1<<15)
125#define GroupDual (2<<15)
126#define Prefix (3<<15)
127#define RMExt (4<<15)
128#define Escape (5<<15)
129#define InstrDual (6<<15)
130#define ModeDual (7<<15)
131#define Sse (1<<18)
132
133#define ModRM (1<<19)
134
135#define Mov (1<<20)
136
137#define Prot (1<<21)
138#define EmulateOnUD (1<<22)
139#define NoAccess (1<<23)
140#define Op3264 (1<<24)
141#define Undefined (1<<25)
142#define Lock (1<<26)
143#define Priv (1<<27)
144#define No64 (1<<28)
145#define PageTable (1 << 29)
146#define NotImpl (1 << 30)
147
148#define Src2Shift (31)
149#define Src2None (OpNone << Src2Shift)
150#define Src2Mem (OpMem << Src2Shift)
151#define Src2CL (OpCL << Src2Shift)
152#define Src2ImmByte (OpImmByte << Src2Shift)
153#define Src2One (OpOne << Src2Shift)
154#define Src2Imm (OpImm << Src2Shift)
155#define Src2ES (OpES << Src2Shift)
156#define Src2CS (OpCS << Src2Shift)
157#define Src2SS (OpSS << Src2Shift)
158#define Src2DS (OpDS << Src2Shift)
159#define Src2FS (OpFS << Src2Shift)
160#define Src2GS (OpGS << Src2Shift)
161#define Src2Mask (OpMask << Src2Shift)
162#define Mmx ((u64)1 << 40)
163#define AlignMask ((u64)7 << 41)
164#define Aligned ((u64)1 << 41)
165#define Unaligned ((u64)2 << 41)
166#define Avx ((u64)3 << 41)
167#define Aligned16 ((u64)4 << 41)
168#define Fastop ((u64)1 << 44)
169#define NoWrite ((u64)1 << 45)
170#define SrcWrite ((u64)1 << 46)
171#define NoMod ((u64)1 << 47)
172#define Intercept ((u64)1 << 48)
173#define CheckPerm ((u64)1 << 49)
174#define PrivUD ((u64)1 << 51)
175#define NearBranch ((u64)1 << 52)
176#define No16 ((u64)1 << 53)
177#define IncSP ((u64)1 << 54)
178#define TwoMemOp ((u64)1 << 55)
179
180#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
181
182#define X2(x...) x, x
183#define X3(x...) X2(x), x
184#define X4(x...) X2(x), X2(x)
185#define X5(x...) X4(x), x
186#define X6(x...) X4(x), X2(x)
187#define X7(x...) X4(x), X3(x)
188#define X8(x...) X4(x), X4(x)
189#define X16(x...) X8(x), X8(x)
190
191#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
192#define FASTOP_SIZE 8
193
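/*
 * One entry in the opcode decode tables: 56 bits of decode flags, an
 * intercept code, and either an execute callback, a fastop stub, or a
 * pointer to a sub-table (group, prefix, escape or dual decode).
 */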
194struct opcode {
195 u64 flags : 56;
196 u64 intercept : 8;
197 union {
198 int (*execute)(struct x86_emulate_ctxt *ctxt);
199 const struct opcode *group;
200 const struct group_dual *gdual;
201 const struct gprefix *gprefix;
202 const struct escape *esc;
203 const struct instr_dual *idual;
204 const struct mode_dual *mdual;
205 void (*fastop)(struct fastop *fake);
206 } u;
207 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
208};
209
210struct group_dual {
211 struct opcode mod012[8];
212 struct opcode mod3[8];
213};
214
215struct gprefix {
216 struct opcode pfx_no;
217 struct opcode pfx_66;
218 struct opcode pfx_f2;
219 struct opcode pfx_f3;
220};
221
222struct escape {
223 struct opcode op[8];
224 struct opcode high[64];
225};
226
227struct instr_dual {
228 struct opcode mod012;
229 struct opcode mod3;
230};
231
232struct mode_dual {
233 struct opcode mode32;
234 struct opcode mode64;
235};
236
237#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
238
239enum x86_transfer_type {
240 X86_TRANSFER_NONE,
241 X86_TRANSFER_CALL_JMP,
242 X86_TRANSFER_RET,
243 X86_TRANSFER_TASK_SWITCH,
244};
245
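/*
 * Guest GPRs are read lazily into ctxt->_regs and written back only for
 * registers marked dirty; regs_valid/regs_dirty track the cached state.
 */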
246static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
247{
248 if (!(ctxt->regs_valid & (1 << nr))) {
249 ctxt->regs_valid |= 1 << nr;
250 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
251 }
252 return ctxt->_regs[nr];
253}
254
255static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
256{
257 ctxt->regs_valid |= 1 << nr;
258 ctxt->regs_dirty |= 1 << nr;
259 return &ctxt->_regs[nr];
260}
261
262static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
263{
264 reg_read(ctxt, nr);
265 return reg_write(ctxt, nr);
266}
267
268static void writeback_registers(struct x86_emulate_ctxt *ctxt)
269{
270 unsigned reg;
271
272 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
273 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
274}
275
276static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
277{
278 ctxt->regs_dirty = 0;
279 ctxt->regs_valid = 0;
280}
281
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
286#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
287 X86_EFLAGS_PF|X86_EFLAGS_CF)
288
289#ifdef CONFIG_X86_64
290#define ON64(x) x
291#else
292#define ON64(x)
293#endif
294
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   cl         (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 */

typedef void (*fastop_t)(struct fastop *);
308static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
309
310#define __FOP_FUNC(name) \
311 ".align " __stringify(FASTOP_SIZE) " \n\t" \
312 ".type " name ", @function \n\t" \
313 name ":\n\t"
314
315#define FOP_FUNC(name) \
316 __FOP_FUNC(#name)
317
318#define __FOP_RET(name) \
319 "ret \n\t" \
320 ".size " name ", .-" name "\n\t"
321
322#define FOP_RET(name) \
323 __FOP_RET(#name)
324
325#define FOP_START(op) \
326 extern void em_##op(struct fastop *fake); \
327 asm(".pushsection .text, \"ax\" \n\t" \
328 ".global em_" #op " \n\t" \
329 ".align " __stringify(FASTOP_SIZE) " \n\t" \
330 "em_" #op ":\n\t"
331
332#define FOP_END \
333 ".popsection")
334
335#define __FOPNOP(name) \
336 __FOP_FUNC(name) \
337 __FOP_RET(name)
338
339#define FOPNOP() \
340 __FOPNOP(__stringify(__UNIQUE_ID(nop)))
341
342#define FOP1E(op, dst) \
343 __FOP_FUNC(#op "_" #dst) \
344 "10: " #op " %" #dst " \n\t" \
345 __FOP_RET(#op "_" #dst)
346
347#define FOP1EEX(op, dst) \
348 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
349
350#define FASTOP1(op) \
351 FOP_START(op) \
352 FOP1E(op##b, al) \
353 FOP1E(op##w, ax) \
354 FOP1E(op##l, eax) \
355 ON64(FOP1E(op##q, rax)) \
356 FOP_END
357
358
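/* 1-operand, using src2 (for MUL/DIV r/m) */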
359#define FASTOP1SRC2(op, name) \
360 FOP_START(name) \
361 FOP1E(op, cl) \
362 FOP1E(op, cx) \
363 FOP1E(op, ecx) \
364 ON64(FOP1E(op, rcx)) \
365 FOP_END
366
367
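/* Same as FASTOP1SRC2, but with an exception-table fixup (DIV/IDIV can fault) */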
368#define FASTOP1SRC2EX(op, name) \
369 FOP_START(name) \
370 FOP1EEX(op, cl) \
371 FOP1EEX(op, cx) \
372 FOP1EEX(op, ecx) \
373 ON64(FOP1EEX(op, rcx)) \
374 FOP_END
375
376#define FOP2E(op, dst, src) \
377 __FOP_FUNC(#op "_" #dst "_" #src) \
378 #op " %" #src ", %" #dst " \n\t" \
379 __FOP_RET(#op "_" #dst "_" #src)
380
381#define FASTOP2(op) \
382 FOP_START(op) \
383 FOP2E(op##b, al, dl) \
384 FOP2E(op##w, ax, dx) \
385 FOP2E(op##l, eax, edx) \
386 ON64(FOP2E(op##q, rax, rdx)) \
387 FOP_END
388
389
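/* 2-operand, word/dword/qword only (no byte form) */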
390#define FASTOP2W(op) \
391 FOP_START(op) \
392 FOPNOP() \
393 FOP2E(op##w, ax, dx) \
394 FOP2E(op##l, eax, edx) \
395 ON64(FOP2E(op##q, rax, rdx)) \
396 FOP_END
397
398
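/* 2 operand, src is CL */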
399#define FASTOP2CL(op) \
400 FOP_START(op) \
401 FOP2E(op##b, al, cl) \
402 FOP2E(op##w, ax, cl) \
403 FOP2E(op##l, eax, cl) \
404 ON64(FOP2E(op##q, rax, cl)) \
405 FOP_END
406
407
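/* 2 operand, src and dest are reversed */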
408#define FASTOP2R(op, name) \
409 FOP_START(name) \
410 FOP2E(op##b, dl, al) \
411 FOP2E(op##w, dx, ax) \
412 FOP2E(op##l, edx, eax) \
413 ON64(FOP2E(op##q, rdx, rax)) \
414 FOP_END
415
416#define FOP3E(op, dst, src, src2) \
417 __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
418 #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
419 __FOP_RET(#op "_" #dst "_" #src "_" #src2)
420
421
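/* 3-operand, word only, src2 = cl */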
422#define FASTOP3WCL(op) \
423 FOP_START(op) \
424 FOPNOP() \
425 FOP3E(op##w, ax, dx, cl) \
426 FOP3E(op##l, eax, edx, cl) \
427 ON64(FOP3E(op##q, rax, rdx, cl)) \
428 FOP_END
429
430
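/*
 * SETcc stubs: each stub is 4 bytes (aligned), so test_cc() can reach the
 * stub for a condition code as em_setcc + 4 * (cc & 0xf).
 */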
431#define FOP_SETCC(op) \
432 ".align 4 \n\t" \
433 ".type " #op ", @function \n\t" \
434 #op ": \n\t" \
435 #op " %al \n\t" \
436 __FOP_RET(#op)
437
438asm(".pushsection .fixup, \"ax\"\n"
439 ".global kvm_fastop_exception \n"
440 "kvm_fastop_exception: xor %esi, %esi; ret\n"
441 ".popsection");
442
443FOP_START(setcc)
444FOP_SETCC(seto)
445FOP_SETCC(setno)
446FOP_SETCC(setc)
447FOP_SETCC(setnc)
448FOP_SETCC(setz)
449FOP_SETCC(setnz)
450FOP_SETCC(setbe)
451FOP_SETCC(setnbe)
452FOP_SETCC(sets)
453FOP_SETCC(setns)
454FOP_SETCC(setp)
455FOP_SETCC(setnp)
456FOP_SETCC(setl)
457FOP_SETCC(setnl)
458FOP_SETCC(setle)
459FOP_SETCC(setnle)
460FOP_END;
461
462FOP_START(salc)
463FOP_FUNC(salc)
464"pushf; sbb %al, %al; popf \n\t"
465FOP_RET(salc)
466FOP_END;
467
/*
 * Run a single instruction under an exception-table fixup: evaluates to
 * X86EMUL_UNHANDLEABLE if the instruction faults and X86EMUL_CONTINUE
 * otherwise.  "inoutclob" supplies the asm's output/input/clobber lists.
 */
472#define asm_safe(insn, inoutclob...) \
473({ \
474 int _fault = 0; \
475 \
476 asm volatile("1:" insn "\n" \
477 "2:\n" \
478 ".pushsection .fixup, \"ax\"\n" \
479 "3: movl $1, %[_fault]\n" \
480 " jmp 2b\n" \
481 ".popsection\n" \
482 _ASM_EXTABLE(1b, 3b) \
483 : [_fault] "+qm"(_fault) inoutclob ); \
484 \
485 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
486})
487
488static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
489 enum x86_intercept intercept,
490 enum x86_intercept_stage stage)
491{
492 struct x86_instruction_info info = {
493 .intercept = intercept,
494 .rep_prefix = ctxt->rep_prefix,
495 .modrm_mod = ctxt->modrm_mod,
496 .modrm_reg = ctxt->modrm_reg,
497 .modrm_rm = ctxt->modrm_rm,
498 .src_val = ctxt->src.val64,
499 .dst_val = ctxt->dst.val64,
500 .src_bytes = ctxt->src.bytes,
501 .dst_bytes = ctxt->dst.bytes,
502 .ad_bytes = ctxt->ad_bytes,
503 .next_rip = ctxt->eip,
504 };
505
506 return ctxt->ops->intercept(ctxt, &info, stage);
507}
508
509static void assign_masked(ulong *dest, ulong src, ulong mask)
510{
511 *dest = (*dest & ~mask) | (src & mask);
512}
513
514static void assign_register(unsigned long *reg, u64 val, int bytes)
515{
516
517 switch (bytes) {
518 case 1:
519 *(u8 *)reg = (u8)val;
520 break;
521 case 2:
522 *(u16 *)reg = (u16)val;
523 break;
524 case 4:
525 *reg = (u32)val;
526 break;
527 case 8:
528 *reg = val;
529 break;
530 }
531}
532
533static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
534{
535 return (1UL << (ctxt->ad_bytes << 3)) - 1;
536}
537
538static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
539{
540 u16 sel;
541 struct desc_struct ss;
542
543 if (ctxt->mode == X86EMUL_MODE_PROT64)
544 return ~0UL;
545 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
546 return ~0U >> ((ss.d ^ 1) * 16);
547}
548
549static int stack_size(struct x86_emulate_ctxt *ctxt)
550{
551 return (__fls(stack_mask(ctxt)) + 1) >> 3;
552}
553
554
555static inline unsigned long
556address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
557{
558 if (ctxt->ad_bytes == sizeof(unsigned long))
559 return reg;
560 else
561 return reg & ad_mask(ctxt);
562}
563
564static inline unsigned long
565register_address(struct x86_emulate_ctxt *ctxt, int reg)
566{
567 return address_mask(ctxt, reg_read(ctxt, reg));
568}
569
570static void masked_increment(ulong *reg, ulong mask, int inc)
571{
572 assign_masked(reg, *reg + inc, mask);
573}
574
575static inline void
576register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
577{
578 ulong *preg = reg_rmw(ctxt, reg);
579
580 assign_register(preg, *preg + inc, ctxt->ad_bytes);
581}
582
583static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
584{
585 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
586}
587
588static u32 desc_limit_scaled(struct desc_struct *desc)
589{
590 u32 limit = get_desc_limit(desc);
591
592 return desc->g ? (limit << 12) | 0xfff : limit;
593}
594
595static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
596{
597 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
598 return 0;
599
600 return ctxt->ops->get_cached_segment_base(ctxt, seg);
601}
602
603static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
604 u32 error, bool valid)
605{
606 WARN_ON(vec > 0x1f);
607 ctxt->exception.vector = vec;
608 ctxt->exception.error_code = error;
609 ctxt->exception.error_code_valid = valid;
610 return X86EMUL_PROPAGATE_FAULT;
611}
612
613static int emulate_db(struct x86_emulate_ctxt *ctxt)
614{
615 return emulate_exception(ctxt, DB_VECTOR, 0, false);
616}
617
618static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
619{
620 return emulate_exception(ctxt, GP_VECTOR, err, true);
621}
622
623static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
624{
625 return emulate_exception(ctxt, SS_VECTOR, err, true);
626}
627
628static int emulate_ud(struct x86_emulate_ctxt *ctxt)
629{
630 return emulate_exception(ctxt, UD_VECTOR, 0, false);
631}
632
633static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
634{
635 return emulate_exception(ctxt, TS_VECTOR, err, true);
636}
637
638static int emulate_de(struct x86_emulate_ctxt *ctxt)
639{
640 return emulate_exception(ctxt, DE_VECTOR, 0, false);
641}
642
643static int emulate_nm(struct x86_emulate_ctxt *ctxt)
644{
645 return emulate_exception(ctxt, NM_VECTOR, 0, false);
646}
647
648static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
649{
650 u16 selector;
651 struct desc_struct desc;
652
653 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
654 return selector;
655}
656
657static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
658 unsigned seg)
659{
660 u16 dummy;
661 u32 base3;
662 struct desc_struct desc;
663
664 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
665 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
666}
667
668static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
669{
670 return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
671}
672
673static inline bool emul_is_noncanonical_address(u64 la,
674 struct x86_emulate_ctxt *ctxt)
675{
676 return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
677}
678
679
680
681
682
683
684
685
686
687
688static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
689{
690 u64 alignment = ctxt->d & AlignMask;
691
692 if (likely(size < 16))
693 return 1;
694
695 switch (alignment) {
696 case Unaligned:
697 case Avx:
698 return 1;
699 case Aligned16:
700 return 16;
701 case Aligned:
702 default:
703 return size;
704 }
705}
706
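/*
 * Translate a segmented address into a linear address, checking
 * canonicality (64-bit mode) or segment usability/type/limit (other modes)
 * plus the required alignment; failures become #SS for stack accesses and
 * #GP otherwise.
 */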
707static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
708 struct segmented_address addr,
709 unsigned *max_size, unsigned size,
710 bool write, bool fetch,
711 enum x86emul_mode mode, ulong *linear)
712{
713 struct desc_struct desc;
714 bool usable;
715 ulong la;
716 u32 lim;
717 u16 sel;
718 u8 va_bits;
719
720 la = seg_base(ctxt, addr.seg) + addr.ea;
721 *max_size = 0;
722 switch (mode) {
723 case X86EMUL_MODE_PROT64:
724 *linear = la;
725 va_bits = ctxt_virt_addr_bits(ctxt);
726 if (get_canonical(la, va_bits) != la)
727 goto bad;
728
729 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
730 if (size > *max_size)
731 goto bad;
732 break;
733 default:
734 *linear = la = (u32)la;
735 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
736 addr.seg);
737 if (!usable)
738 goto bad;
739
740 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
741 || !(desc.type & 2)) && write)
742 goto bad;
743
744 if (!fetch && (desc.type & 8) && !(desc.type & 2))
745 goto bad;
746 lim = desc_limit_scaled(&desc);
747 if (!(desc.type & 8) && (desc.type & 4)) {
748
749 if (addr.ea <= lim)
750 goto bad;
751 lim = desc.d ? 0xffffffff : 0xffff;
752 }
753 if (addr.ea > lim)
754 goto bad;
755 if (lim == 0xffffffff)
756 *max_size = ~0u;
757 else {
758 *max_size = (u64)lim + 1 - addr.ea;
759 if (size > *max_size)
760 goto bad;
761 }
762 break;
763 }
764 if (la & (insn_alignment(ctxt, size) - 1))
765 return emulate_gp(ctxt, 0);
766 return X86EMUL_CONTINUE;
767bad:
768 if (addr.seg == VCPU_SREG_SS)
769 return emulate_ss(ctxt, 0);
770 else
771 return emulate_gp(ctxt, 0);
772}
773
774static int linearize(struct x86_emulate_ctxt *ctxt,
775 struct segmented_address addr,
776 unsigned size, bool write,
777 ulong *linear)
778{
779 unsigned max_size;
780 return __linearize(ctxt, addr, &max_size, size, write, false,
781 ctxt->mode, linear);
782}
783
784static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
785 enum x86emul_mode mode)
786{
787 ulong linear;
788 int rc;
789 unsigned max_size;
790 struct segmented_address addr = { .seg = VCPU_SREG_CS,
791 .ea = dst };
792
793 if (ctxt->op_bytes != sizeof(unsigned long))
794 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
795 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
796 if (rc == X86EMUL_CONTINUE)
797 ctxt->_eip = addr.ea;
798 return rc;
799}
800
801static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
802{
803 return assign_eip(ctxt, dst, ctxt->mode);
804}
805
806static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
807 const struct desc_struct *cs_desc)
808{
809 enum x86emul_mode mode = ctxt->mode;
810 int rc;
811
812#ifdef CONFIG_X86_64
813 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
814 if (cs_desc->l) {
815 u64 efer = 0;
816
817 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
818 if (efer & EFER_LMA)
819 mode = X86EMUL_MODE_PROT64;
820 } else
821 mode = X86EMUL_MODE_PROT32;
822 }
823#endif
824 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
825 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
826 rc = assign_eip(ctxt, dst, mode);
827 if (rc == X86EMUL_CONTINUE)
828 ctxt->mode = mode;
829 return rc;
830}
831
832static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
833{
834 return assign_eip_near(ctxt, ctxt->_eip + rel);
835}
836
837static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
838 void *data, unsigned size)
839{
840 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
841}
842
843static int linear_write_system(struct x86_emulate_ctxt *ctxt,
844 ulong linear, void *data,
845 unsigned int size)
846{
847 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
848}
849
850static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
851 struct segmented_address addr,
852 void *data,
853 unsigned size)
854{
855 int rc;
856 ulong linear;
857
858 rc = linearize(ctxt, addr, size, false, &linear);
859 if (rc != X86EMUL_CONTINUE)
860 return rc;
861 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
862}
863
864static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
865 struct segmented_address addr,
866 void *data,
867 unsigned int size)
868{
869 int rc;
870 ulong linear;
871
872 rc = linearize(ctxt, addr, size, true, &linear);
873 if (rc != X86EMUL_CONTINUE)
874 return rc;
875 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
876}
877
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
882static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
883{
884 int rc;
885 unsigned size, max_size;
886 unsigned long linear;
887 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
888 struct segmented_address addr = { .seg = VCPU_SREG_CS,
889 .ea = ctxt->eip + cur_size };
890
	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, and the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
901 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
902 &linear);
903 if (unlikely(rc != X86EMUL_CONTINUE))
904 return rc;
905
906 size = min_t(unsigned, 15UL ^ cur_size, max_size);
907 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
908
	/*
	 * 15UL ^ cur_size equals 15 - cur_size here (cur_size is at most 15),
	 * so "size" is capped by the 15-byte instruction-length limit, the
	 * segment limit and the end of the current page.  If fewer than
	 * op_size bytes can be fetched under those constraints, inject #GP.
	 */
915 if (unlikely(size < op_size))
916 return emulate_gp(ctxt, 0);
917
918 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
919 size, &ctxt->exception);
920 if (unlikely(rc != X86EMUL_CONTINUE))
921 return rc;
922 ctxt->fetch.end += size;
923 return X86EMUL_CONTINUE;
924}
925
926static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
927 unsigned size)
928{
929 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
930
931 if (unlikely(done_size < size))
932 return __do_insn_fetch_bytes(ctxt, size - done_size);
933 else
934 return X86EMUL_CONTINUE;
935}
936
937
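/* Fetch next part of the instruction being emulated. */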
938#define insn_fetch(_type, _ctxt) \
939({ _type _x; \
940 \
941 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
942 if (rc != X86EMUL_CONTINUE) \
943 goto done; \
944 ctxt->_eip += sizeof(_type); \
945 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
946 ctxt->fetch.ptr += sizeof(_type); \
947 _x; \
948})
949
950#define insn_fetch_arr(_arr, _size, _ctxt) \
951({ \
952 rc = do_insn_fetch_bytes(_ctxt, _size); \
953 if (rc != X86EMUL_CONTINUE) \
954 goto done; \
955 ctxt->_eip += (_size); \
956 memcpy(_arr, ctxt->fetch.ptr, _size); \
957 ctxt->fetch.ptr += (_size); \
958})
959
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
965static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
966 int byteop)
967{
968 void *p;
969 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
970
971 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
972 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
973 else
974 p = reg_rmw(ctxt, modrm_reg);
975 return p;
976}
977
978static int read_descriptor(struct x86_emulate_ctxt *ctxt,
979 struct segmented_address addr,
980 u16 *size, unsigned long *address, int op_bytes)
981{
982 int rc;
983
984 if (op_bytes == 2)
985 op_bytes = 3;
986 *address = 0;
987 rc = segmented_read_std(ctxt, addr, size, 2);
988 if (rc != X86EMUL_CONTINUE)
989 return rc;
990 addr.ea += 2;
991 rc = segmented_read_std(ctxt, addr, address, op_bytes);
992 return rc;
993}
994
995FASTOP2(add);
996FASTOP2(or);
997FASTOP2(adc);
998FASTOP2(sbb);
999FASTOP2(and);
1000FASTOP2(sub);
1001FASTOP2(xor);
1002FASTOP2(cmp);
1003FASTOP2(test);
1004
1005FASTOP1SRC2(mul, mul_ex);
1006FASTOP1SRC2(imul, imul_ex);
1007FASTOP1SRC2EX(div, div_ex);
1008FASTOP1SRC2EX(idiv, idiv_ex);
1009
1010FASTOP3WCL(shld);
1011FASTOP3WCL(shrd);
1012
1013FASTOP2W(imul);
1014
1015FASTOP1(not);
1016FASTOP1(neg);
1017FASTOP1(inc);
1018FASTOP1(dec);
1019
1020FASTOP2CL(rol);
1021FASTOP2CL(ror);
1022FASTOP2CL(rcl);
1023FASTOP2CL(rcr);
1024FASTOP2CL(shl);
1025FASTOP2CL(shr);
1026FASTOP2CL(sar);
1027
1028FASTOP2W(bsf);
1029FASTOP2W(bsr);
1030FASTOP2W(bt);
1031FASTOP2W(bts);
1032FASTOP2W(btr);
1033FASTOP2W(btc);
1034
1035FASTOP2(xadd);
1036
1037FASTOP2R(cmp, cmp_r);
1038
1039static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1040{
	/* If src is zero, do not writeback, but update flags */
1042 if (ctxt->src.val == 0)
1043 ctxt->dst.type = OP_NONE;
1044 return fastop(ctxt, em_bsf);
1045}
1046
1047static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1048{
	/* If src is zero, do not writeback, but update flags */
1050 if (ctxt->src.val == 0)
1051 ctxt->dst.type = OP_NONE;
1052 return fastop(ctxt, em_bsr);
1053}
1054
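/*
 * Evaluate a condition code by loading the relevant guest flags into RFLAGS
 * and calling the matching SETcc stub; returns the resulting AL (0 or 1).
 */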
1055static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1056{
1057 u8 rc;
1058 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1059
1060 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1061 asm("push %[flags]; popf; " CALL_NOSPEC
1062 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1063 return rc;
1064}
1065
1066static void fetch_register_operand(struct operand *op)
1067{
1068 switch (op->bytes) {
1069 case 1:
1070 op->val = *(u8 *)op->addr.reg;
1071 break;
1072 case 2:
1073 op->val = *(u16 *)op->addr.reg;
1074 break;
1075 case 4:
1076 op->val = *(u32 *)op->addr.reg;
1077 break;
1078 case 8:
1079 op->val = *(u64 *)op->addr.reg;
1080 break;
1081 }
1082}
1083
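/*
 * Make the guest's FPU state resident on this CPU (reloading it if
 * TIF_NEED_FPU_LOAD is set) and keep it there until emulator_put_fpu(),
 * so the emulator can safely touch FPU/SSE/MMX registers.
 */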
1084static void emulator_get_fpu(void)
1085{
1086 fpregs_lock();
1087
1088 fpregs_assert_state_consistent();
1089 if (test_thread_flag(TIF_NEED_FPU_LOAD))
1090 switch_fpu_return();
1091}
1092
1093static void emulator_put_fpu(void)
1094{
1095 fpregs_unlock();
1096}
1097
1098static void read_sse_reg(sse128_t *data, int reg)
1099{
1100 emulator_get_fpu();
1101 switch (reg) {
1102 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1103 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1104 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1105 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1106 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1107 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1108 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1109 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1110#ifdef CONFIG_X86_64
1111 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1112 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1113 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1114 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1115 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1116 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1117 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1118 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1119#endif
1120 default: BUG();
1121 }
1122 emulator_put_fpu();
1123}
1124
1125static void write_sse_reg(sse128_t *data, int reg)
1126{
1127 emulator_get_fpu();
1128 switch (reg) {
1129 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1130 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1131 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1132 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1133 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1134 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1135 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1136 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1137#ifdef CONFIG_X86_64
1138 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1139 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1140 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1141 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1142 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1143 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1144 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1145 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1146#endif
1147 default: BUG();
1148 }
1149 emulator_put_fpu();
1150}
1151
1152static void read_mmx_reg(u64 *data, int reg)
1153{
1154 emulator_get_fpu();
1155 switch (reg) {
1156 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1157 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1158 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1159 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1160 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1161 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1162 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1163 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1164 default: BUG();
1165 }
1166 emulator_put_fpu();
1167}
1168
1169static void write_mmx_reg(u64 *data, int reg)
1170{
1171 emulator_get_fpu();
1172 switch (reg) {
1173 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1174 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1175 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1176 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1177 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1178 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1179 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1180 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1181 default: BUG();
1182 }
1183 emulator_put_fpu();
1184}
1185
1186static int em_fninit(struct x86_emulate_ctxt *ctxt)
1187{
1188 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1189 return emulate_nm(ctxt);
1190
1191 emulator_get_fpu();
1192 asm volatile("fninit");
1193 emulator_put_fpu();
1194 return X86EMUL_CONTINUE;
1195}
1196
1197static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1198{
1199 u16 fcw;
1200
1201 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1202 return emulate_nm(ctxt);
1203
1204 emulator_get_fpu();
1205 asm volatile("fnstcw %0": "+m"(fcw));
1206 emulator_put_fpu();
1207
1208 ctxt->dst.val = fcw;
1209
1210 return X86EMUL_CONTINUE;
1211}
1212
1213static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1214{
1215 u16 fsw;
1216
1217 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1218 return emulate_nm(ctxt);
1219
1220 emulator_get_fpu();
1221 asm volatile("fnstsw %0": "+m"(fsw));
1222 emulator_put_fpu();
1223
1224 ctxt->dst.val = fsw;
1225
1226 return X86EMUL_CONTINUE;
1227}
1228
1229static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1230 struct operand *op)
1231{
1232 unsigned reg = ctxt->modrm_reg;
1233
1234 if (!(ctxt->d & ModRM))
1235 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1236
1237 if (ctxt->d & Sse) {
1238 op->type = OP_XMM;
1239 op->bytes = 16;
1240 op->addr.xmm = reg;
1241 read_sse_reg(&op->vec_val, reg);
1242 return;
1243 }
1244 if (ctxt->d & Mmx) {
1245 reg &= 7;
1246 op->type = OP_MM;
1247 op->bytes = 8;
1248 op->addr.mm = reg;
1249 return;
1250 }
1251
1252 op->type = OP_REG;
1253 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1254 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1255
1256 fetch_register_operand(op);
1257 op->orig_val = op->val;
1258}
1259
1260static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1261{
1262 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1263 ctxt->modrm_seg = VCPU_SREG_SS;
1264}
1265
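/*
 * Decode the ModRM byte (plus SIB and displacement, if present) into either
 * a register operand (mod == 3) or a memory operand with its effective
 * address and default segment.
 */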
1266static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1267 struct operand *op)
1268{
1269 u8 sib;
1270 int index_reg, base_reg, scale;
1271 int rc = X86EMUL_CONTINUE;
1272 ulong modrm_ea = 0;
1273
1274 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1275 index_reg = (ctxt->rex_prefix << 2) & 8;
1276 base_reg = (ctxt->rex_prefix << 3) & 8;
1277
1278 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1279 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1280 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1281 ctxt->modrm_seg = VCPU_SREG_DS;
1282
1283 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1284 op->type = OP_REG;
1285 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1286 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1287 ctxt->d & ByteOp);
1288 if (ctxt->d & Sse) {
1289 op->type = OP_XMM;
1290 op->bytes = 16;
1291 op->addr.xmm = ctxt->modrm_rm;
1292 read_sse_reg(&op->vec_val, ctxt->modrm_rm);
1293 return rc;
1294 }
1295 if (ctxt->d & Mmx) {
1296 op->type = OP_MM;
1297 op->bytes = 8;
1298 op->addr.mm = ctxt->modrm_rm & 7;
1299 return rc;
1300 }
1301 fetch_register_operand(op);
1302 return rc;
1303 }
1304
1305 op->type = OP_MEM;
1306
1307 if (ctxt->ad_bytes == 2) {
1308 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1309 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1310 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1311 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1312
1313
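		/* 16-bit ModR/M decode. */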
1314 switch (ctxt->modrm_mod) {
1315 case 0:
1316 if (ctxt->modrm_rm == 6)
1317 modrm_ea += insn_fetch(u16, ctxt);
1318 break;
1319 case 1:
1320 modrm_ea += insn_fetch(s8, ctxt);
1321 break;
1322 case 2:
1323 modrm_ea += insn_fetch(u16, ctxt);
1324 break;
1325 }
1326 switch (ctxt->modrm_rm) {
1327 case 0:
1328 modrm_ea += bx + si;
1329 break;
1330 case 1:
1331 modrm_ea += bx + di;
1332 break;
1333 case 2:
1334 modrm_ea += bp + si;
1335 break;
1336 case 3:
1337 modrm_ea += bp + di;
1338 break;
1339 case 4:
1340 modrm_ea += si;
1341 break;
1342 case 5:
1343 modrm_ea += di;
1344 break;
1345 case 6:
1346 if (ctxt->modrm_mod != 0)
1347 modrm_ea += bp;
1348 break;
1349 case 7:
1350 modrm_ea += bx;
1351 break;
1352 }
1353 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1354 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1355 ctxt->modrm_seg = VCPU_SREG_SS;
1356 modrm_ea = (u16)modrm_ea;
1357 } else {
1358
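		/* 32/64-bit ModR/M decode. */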
1359 if ((ctxt->modrm_rm & 7) == 4) {
1360 sib = insn_fetch(u8, ctxt);
1361 index_reg |= (sib >> 3) & 7;
1362 base_reg |= sib & 7;
1363 scale = sib >> 6;
1364
1365 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1366 modrm_ea += insn_fetch(s32, ctxt);
1367 else {
1368 modrm_ea += reg_read(ctxt, base_reg);
1369 adjust_modrm_seg(ctxt, base_reg);
1370
1371 if ((ctxt->d & IncSP) &&
1372 base_reg == VCPU_REGS_RSP)
1373 modrm_ea += ctxt->op_bytes;
1374 }
1375 if (index_reg != 4)
1376 modrm_ea += reg_read(ctxt, index_reg) << scale;
1377 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1378 modrm_ea += insn_fetch(s32, ctxt);
1379 if (ctxt->mode == X86EMUL_MODE_PROT64)
1380 ctxt->rip_relative = 1;
1381 } else {
1382 base_reg = ctxt->modrm_rm;
1383 modrm_ea += reg_read(ctxt, base_reg);
1384 adjust_modrm_seg(ctxt, base_reg);
1385 }
1386 switch (ctxt->modrm_mod) {
1387 case 1:
1388 modrm_ea += insn_fetch(s8, ctxt);
1389 break;
1390 case 2:
1391 modrm_ea += insn_fetch(s32, ctxt);
1392 break;
1393 }
1394 }
1395 op->addr.mem.ea = modrm_ea;
1396 if (ctxt->ad_bytes != 8)
1397 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1398
1399done:
1400 return rc;
1401}
1402
1403static int decode_abs(struct x86_emulate_ctxt *ctxt,
1404 struct operand *op)
1405{
1406 int rc = X86EMUL_CONTINUE;
1407
1408 op->type = OP_MEM;
1409 switch (ctxt->ad_bytes) {
1410 case 2:
1411 op->addr.mem.ea = insn_fetch(u16, ctxt);
1412 break;
1413 case 4:
1414 op->addr.mem.ea = insn_fetch(u32, ctxt);
1415 break;
1416 case 8:
1417 op->addr.mem.ea = insn_fetch(u64, ctxt);
1418 break;
1419 }
1420done:
1421 return rc;
1422}
1423
1424static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1425{
1426 long sv = 0, mask;
1427
1428 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1429 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1430
1431 if (ctxt->src.bytes == 2)
1432 sv = (s16)ctxt->src.val & (s16)mask;
1433 else if (ctxt->src.bytes == 4)
1434 sv = (s32)ctxt->src.val & (s32)mask;
1435 else
1436 sv = (s64)ctxt->src.val & (s64)mask;
1437
1438 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1439 ctxt->dst.addr.mem.ea + (sv >> 3));
1440 }
1441
	/* only subword offset */
1443 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1444}
1445
1446static int read_emulated(struct x86_emulate_ctxt *ctxt,
1447 unsigned long addr, void *dest, unsigned size)
1448{
1449 int rc;
1450 struct read_cache *mc = &ctxt->mem_read;
1451
1452 if (mc->pos < mc->end)
1453 goto read_cached;
1454
1455 WARN_ON((mc->end + size) >= sizeof(mc->data));
1456
1457 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1458 &ctxt->exception);
1459 if (rc != X86EMUL_CONTINUE)
1460 return rc;
1461
1462 mc->end += size;
1463
1464read_cached:
1465 memcpy(dest, mc->data + mc->pos, size);
1466 mc->pos += size;
1467 return X86EMUL_CONTINUE;
1468}
1469
1470static int segmented_read(struct x86_emulate_ctxt *ctxt,
1471 struct segmented_address addr,
1472 void *data,
1473 unsigned size)
1474{
1475 int rc;
1476 ulong linear;
1477
1478 rc = linearize(ctxt, addr, size, false, &linear);
1479 if (rc != X86EMUL_CONTINUE)
1480 return rc;
1481 return read_emulated(ctxt, linear, data, size);
1482}
1483
1484static int segmented_write(struct x86_emulate_ctxt *ctxt,
1485 struct segmented_address addr,
1486 const void *data,
1487 unsigned size)
1488{
1489 int rc;
1490 ulong linear;
1491
1492 rc = linearize(ctxt, addr, size, true, &linear);
1493 if (rc != X86EMUL_CONTINUE)
1494 return rc;
1495 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1496 &ctxt->exception);
1497}
1498
1499static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1500 struct segmented_address addr,
1501 const void *orig_data, const void *data,
1502 unsigned size)
1503{
1504 int rc;
1505 ulong linear;
1506
1507 rc = linearize(ctxt, addr, size, true, &linear);
1508 if (rc != X86EMUL_CONTINUE)
1509 return rc;
1510 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1511 size, &ctxt->exception);
1512}
1513
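/*
 * Emulated IN/INS: for REP INS, batch port reads through ctxt->io_read so
 * the host callback is invoked once per chunk of elements rather than once
 * per iteration.
 */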
1514static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1515 unsigned int size, unsigned short port,
1516 void *dest)
1517{
1518 struct read_cache *rc = &ctxt->io_read;
1519
1520 if (rc->pos == rc->end) {
1521 unsigned int in_page, n;
1522 unsigned int count = ctxt->rep_prefix ?
1523 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1524 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1525 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1526 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1527 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1528 if (n == 0)
1529 n = 1;
1530 rc->pos = rc->end = 0;
1531 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1532 return 0;
1533 rc->end = n * size;
1534 }
1535
1536 if (ctxt->rep_prefix && (ctxt->d & String) &&
1537 !(ctxt->eflags & X86_EFLAGS_DF)) {
1538 ctxt->dst.data = rc->data + rc->pos;
1539 ctxt->dst.type = OP_MEM_STR;
1540 ctxt->dst.count = (rc->end - rc->pos) / size;
1541 rc->pos = rc->end;
1542 } else {
1543 memcpy(dest, rc->data + rc->pos, size);
1544 rc->pos += size;
1545 }
1546 return 1;
1547}
1548
1549static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1550 u16 index, struct desc_struct *desc)
1551{
1552 struct desc_ptr dt;
1553 ulong addr;
1554
1555 ctxt->ops->get_idt(ctxt, &dt);
1556
1557 if (dt.size < index * 8 + 7)
1558 return emulate_gp(ctxt, index << 3 | 0x2);
1559
1560 addr = dt.address + index * 8;
1561 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1562}
1563
1564static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1565 u16 selector, struct desc_ptr *dt)
1566{
1567 const struct x86_emulate_ops *ops = ctxt->ops;
1568 u32 base3 = 0;
1569
1570 if (selector & 1 << 2) {
1571 struct desc_struct desc;
1572 u16 sel;
1573
1574 memset(dt, 0, sizeof(*dt));
1575 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1576 VCPU_SREG_LDTR))
1577 return;
1578
1579 dt->size = desc_limit_scaled(&desc);
1580 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1581 } else
1582 ops->get_gdt(ctxt, dt);
1583}
1584
1585static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1586 u16 selector, ulong *desc_addr_p)
1587{
1588 struct desc_ptr dt;
1589 u16 index = selector >> 3;
1590 ulong addr;
1591
1592 get_descriptor_table_ptr(ctxt, selector, &dt);
1593
1594 if (dt.size < index * 8 + 7)
1595 return emulate_gp(ctxt, selector & 0xfffc);
1596
1597 addr = dt.address + index * 8;
1598
1599#ifdef CONFIG_X86_64
1600 if (addr >> 32 != 0) {
1601 u64 efer = 0;
1602
1603 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1604 if (!(efer & EFER_LMA))
1605 addr &= (u32)-1;
1606 }
1607#endif
1608
1609 *desc_addr_p = addr;
1610 return X86EMUL_CONTINUE;
1611}
1612
/* allowed just for 8 bytes segments */
1614static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1615 u16 selector, struct desc_struct *desc,
1616 ulong *desc_addr_p)
1617{
1618 int rc;
1619
1620 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1621 if (rc != X86EMUL_CONTINUE)
1622 return rc;
1623
1624 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1625}
1626
/* allowed just for 8 bytes segments */
1628static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1629 u16 selector, struct desc_struct *desc)
1630{
1631 int rc;
1632 ulong addr;
1633
1634 rc = get_descriptor_ptr(ctxt, selector, &addr);
1635 if (rc != X86EMUL_CONTINUE)
1636 return rc;
1637
1638 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1639}
1640
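/*
 * Load a segment register: fetch and validate the descriptor for @selector
 * according to the segment type and privilege rules, mark it accessed if
 * needed, and commit the result via ->set_segment().
 */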
1641static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1642 u16 selector, int seg, u8 cpl,
1643 enum x86_transfer_type transfer,
1644 struct desc_struct *desc)
1645{
1646 struct desc_struct seg_desc, old_desc;
1647 u8 dpl, rpl;
1648 unsigned err_vec = GP_VECTOR;
1649 u32 err_code = 0;
1650 bool null_selector = !(selector & ~0x3);
1651 ulong desc_addr;
1652 int ret;
1653 u16 dummy;
1654 u32 base3 = 0;
1655
1656 memset(&seg_desc, 0, sizeof(seg_desc));
1657
1658 if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
1661 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1662 set_desc_base(&seg_desc, selector << 4);
1663 goto load;
1664 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
1666 set_desc_base(&seg_desc, selector << 4);
1667 set_desc_limit(&seg_desc, 0xffff);
1668 seg_desc.type = 3;
1669 seg_desc.p = 1;
1670 seg_desc.s = 1;
1671 seg_desc.dpl = 3;
1672 goto load;
1673 }
1674
1675 rpl = selector & 3;
1676
	/* TR should be in GDT only */
1678 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1679 goto exception;
1680
	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
1682 if (null_selector) {
1683 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1684 goto exception;
1685
1686 if (seg == VCPU_SREG_SS) {
1687 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1688 goto exception;
1689
			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
1694 seg_desc.type = 3;
1695 seg_desc.p = 1;
1696 seg_desc.s = 1;
1697 seg_desc.dpl = cpl;
1698 seg_desc.d = 1;
1699 seg_desc.g = 1;
1700 }
1701
		/* Skip all following checks */
1703 goto load;
1704 }
1705
1706 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1707 if (ret != X86EMUL_CONTINUE)
1708 return ret;
1709
1710 err_code = selector & 0xfffc;
1711 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1712 GP_VECTOR;
1713
	/* can't load system descriptor into segment selector */
1715 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1716 if (transfer == X86_TRANSFER_CALL_JMP)
1717 return X86EMUL_UNHANDLEABLE;
1718 goto exception;
1719 }
1720
1721 if (!seg_desc.p) {
1722 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1723 goto exception;
1724 }
1725
1726 dpl = seg_desc.dpl;
1727
1728 switch (seg) {
1729 case VCPU_SREG_SS:
		/*
		 * SS must be a writable data segment, and both its RPL
		 * and DPL must equal CPL.
		 */
1734 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1735 goto exception;
1736 break;
1737 case VCPU_SREG_CS:
1738 if (!(seg_desc.type & 8))
1739 goto exception;
1740
1741 if (seg_desc.type & 4) {
1742
1743 if (dpl > cpl)
1744 goto exception;
1745 } else {
1746
1747 if (rpl > cpl || dpl != cpl)
1748 goto exception;
1749 }
1750
1751 if (seg_desc.d && seg_desc.l) {
1752 u64 efer = 0;
1753
1754 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1755 if (efer & EFER_LMA)
1756 goto exception;
1757 }
1758
		/* CS(RPL) <- CPL */
1760 selector = (selector & 0xfffc) | cpl;
1761 break;
1762 case VCPU_SREG_TR:
1763 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1764 goto exception;
1765 old_desc = seg_desc;
1766 seg_desc.type |= 2;
1767 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1768 sizeof(seg_desc), &ctxt->exception);
1769 if (ret != X86EMUL_CONTINUE)
1770 return ret;
1771 break;
1772 case VCPU_SREG_LDTR:
1773 if (seg_desc.s || seg_desc.type != 2)
1774 goto exception;
1775 break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
1782 if ((seg_desc.type & 0xa) == 0x8 ||
1783 (((seg_desc.type & 0xc) != 0xc) &&
1784 (rpl > dpl && cpl > dpl)))
1785 goto exception;
1786 break;
1787 }
1788
1789 if (seg_desc.s) {
		/* mark segment as accessed */
1791 if (!(seg_desc.type & 1)) {
1792 seg_desc.type |= 1;
1793 ret = write_segment_descriptor(ctxt, selector,
1794 &seg_desc);
1795 if (ret != X86EMUL_CONTINUE)
1796 return ret;
1797 }
1798 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1799 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1800 if (ret != X86EMUL_CONTINUE)
1801 return ret;
1802 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1803 ((u64)base3 << 32), ctxt))
1804 return emulate_gp(ctxt, 0);
1805 }
1806load:
1807 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1808 if (desc)
1809 *desc = seg_desc;
1810 return X86EMUL_CONTINUE;
1811exception:
1812 return emulate_exception(ctxt, err_vec, err_code, true);
1813}
1814
1815static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1816 u16 selector, int seg)
1817{
1818 u8 cpl = ctxt->ops->cpl(ctxt);
1819
	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor()
	 * and only forbid it here.
	 */
1830 if (seg == VCPU_SREG_SS && selector == 3 &&
1831 ctxt->mode == X86EMUL_MODE_PROT64)
1832 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1833
1834 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1835 X86_TRANSFER_NONE, NULL);
1836}
1837
1838static void write_register_operand(struct operand *op)
1839{
1840 return assign_register(op->addr.reg, op->val, op->bytes);
1841}
1842
1843static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1844{
1845 switch (op->type) {
1846 case OP_REG:
1847 write_register_operand(op);
1848 break;
1849 case OP_MEM:
1850 if (ctxt->lock_prefix)
1851 return segmented_cmpxchg(ctxt,
1852 op->addr.mem,
1853 &op->orig_val,
1854 &op->val,
1855 op->bytes);
1856 else
1857 return segmented_write(ctxt,
1858 op->addr.mem,
1859 &op->val,
1860 op->bytes);
1861 break;
1862 case OP_MEM_STR:
1863 return segmented_write(ctxt,
1864 op->addr.mem,
1865 op->data,
1866 op->bytes * op->count);
1867 break;
1868 case OP_XMM:
1869 write_sse_reg(&op->vec_val, op->addr.xmm);
1870 break;
1871 case OP_MM:
1872 write_mmx_reg(&op->mm_val, op->addr.mm);
1873 break;
1874 case OP_NONE:
		/* no writeback */
1876 break;
1877 default:
1878 break;
1879 }
1880 return X86EMUL_CONTINUE;
1881}
1882
1883static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1884{
1885 struct segmented_address addr;
1886
1887 rsp_increment(ctxt, -bytes);
1888 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1889 addr.seg = VCPU_SREG_SS;
1890
1891 return segmented_write(ctxt, addr, data, bytes);
1892}
1893
1894static int em_push(struct x86_emulate_ctxt *ctxt)
1895{
	/* Disable writeback. */
1897 ctxt->dst.type = OP_NONE;
1898 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1899}
1900
1901static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1902 void *dest, int len)
1903{
1904 int rc;
1905 struct segmented_address addr;
1906
1907 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1908 addr.seg = VCPU_SREG_SS;
1909 rc = segmented_read(ctxt, addr, dest, len);
1910 if (rc != X86EMUL_CONTINUE)
1911 return rc;
1912
1913 rsp_increment(ctxt, len);
1914 return rc;
1915}
1916
1917static int em_pop(struct x86_emulate_ctxt *ctxt)
1918{
1919 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1920}
1921
1922static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1923 void *dest, int len)
1924{
1925 int rc;
1926 unsigned long val, change_mask;
1927 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1928 int cpl = ctxt->ops->cpl(ctxt);
1929
1930 rc = emulate_pop(ctxt, &val, len);
1931 if (rc != X86EMUL_CONTINUE)
1932 return rc;
1933
1934 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1935 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1936 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1937 X86_EFLAGS_AC | X86_EFLAGS_ID;
1938
1939 switch(ctxt->mode) {
1940 case X86EMUL_MODE_PROT64:
1941 case X86EMUL_MODE_PROT32:
1942 case X86EMUL_MODE_PROT16:
1943 if (cpl == 0)
1944 change_mask |= X86_EFLAGS_IOPL;
1945 if (cpl <= iopl)
1946 change_mask |= X86_EFLAGS_IF;
1947 break;
1948 case X86EMUL_MODE_VM86:
1949 if (iopl < 3)
1950 return emulate_gp(ctxt, 0);
1951 change_mask |= X86_EFLAGS_IF;
1952 break;
1953 default:
1954 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1955 break;
1956 }
1957
1958 *(unsigned long *)dest =
1959 (ctxt->eflags & ~change_mask) | (val & change_mask);
1960
1961 return rc;
1962}
1963
1964static int em_popf(struct x86_emulate_ctxt *ctxt)
1965{
1966 ctxt->dst.type = OP_REG;
1967 ctxt->dst.addr.reg = &ctxt->eflags;
1968 ctxt->dst.bytes = ctxt->op_bytes;
1969 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1970}
1971
1972static int em_enter(struct x86_emulate_ctxt *ctxt)
1973{
1974 int rc;
1975 unsigned frame_size = ctxt->src.val;
1976 unsigned nesting_level = ctxt->src2.val & 31;
1977 ulong rbp;
1978
1979 if (nesting_level)
1980 return X86EMUL_UNHANDLEABLE;
1981
1982 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1983 rc = push(ctxt, &rbp, stack_size(ctxt));
1984 if (rc != X86EMUL_CONTINUE)
1985 return rc;
1986 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1987 stack_mask(ctxt));
1988 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1989 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1990 stack_mask(ctxt));
1991 return X86EMUL_CONTINUE;
1992}
1993
1994static int em_leave(struct x86_emulate_ctxt *ctxt)
1995{
1996 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1997 stack_mask(ctxt));
1998 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1999}
2000
2001static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
2002{
2003 int seg = ctxt->src2.val;
2004
2005 ctxt->src.val = get_segment_selector(ctxt, seg);
2006 if (ctxt->op_bytes == 4) {
2007 rsp_increment(ctxt, -2);
2008 ctxt->op_bytes = 2;
2009 }
2010
2011 return em_push(ctxt);
2012}
2013
2014static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
2015{
2016 int seg = ctxt->src2.val;
2017 unsigned long selector;
2018 int rc;
2019
2020 rc = emulate_pop(ctxt, &selector, 2);
2021 if (rc != X86EMUL_CONTINUE)
2022 return rc;
2023
2024 if (ctxt->modrm_reg == VCPU_SREG_SS)
2025 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2026 if (ctxt->op_bytes > 2)
2027 rsp_increment(ctxt, ctxt->op_bytes - 2);
2028
2029 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
2030 return rc;
2031}
2032
2033static int em_pusha(struct x86_emulate_ctxt *ctxt)
2034{
2035 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
2036 int rc = X86EMUL_CONTINUE;
2037 int reg = VCPU_REGS_RAX;
2038
2039 while (reg <= VCPU_REGS_RDI) {
2040 (reg == VCPU_REGS_RSP) ?
2041 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
2042
2043 rc = em_push(ctxt);
2044 if (rc != X86EMUL_CONTINUE)
2045 return rc;
2046
2047 ++reg;
2048 }
2049
2050 return rc;
2051}
2052
2053static int em_pushf(struct x86_emulate_ctxt *ctxt)
2054{
2055 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2056 return em_push(ctxt);
2057}
2058
2059static int em_popa(struct x86_emulate_ctxt *ctxt)
2060{
2061 int rc = X86EMUL_CONTINUE;
2062 int reg = VCPU_REGS_RDI;
2063 u32 val;
2064
2065 while (reg >= VCPU_REGS_RAX) {
2066 if (reg == VCPU_REGS_RSP) {
2067 rsp_increment(ctxt, ctxt->op_bytes);
2068 --reg;
2069 }
2070
2071 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2072 if (rc != X86EMUL_CONTINUE)
2073 break;
2074 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2075 --reg;
2076 }
2077 return rc;
2078}
2079
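/*
 * Emulate a real-mode software interrupt: push FLAGS, CS and IP, clear
 * IF/TF/AC, then load CS:IP from the IVT entry for @irq.
 */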
2080static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2081{
2082 const struct x86_emulate_ops *ops = ctxt->ops;
2083 int rc;
2084 struct desc_ptr dt;
2085 gva_t cs_addr;
2086 gva_t eip_addr;
2087 u16 cs, eip;
2088
	/* TODO: Add limit checks */
2090 ctxt->src.val = ctxt->eflags;
2091 rc = em_push(ctxt);
2092 if (rc != X86EMUL_CONTINUE)
2093 return rc;
2094
2095 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2096
2097 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2098 rc = em_push(ctxt);
2099 if (rc != X86EMUL_CONTINUE)
2100 return rc;
2101
2102 ctxt->src.val = ctxt->_eip;
2103 rc = em_push(ctxt);
2104 if (rc != X86EMUL_CONTINUE)
2105 return rc;
2106
2107 ops->get_idt(ctxt, &dt);
2108
2109 eip_addr = dt.address + (irq << 2);
2110 cs_addr = dt.address + (irq << 2) + 2;
2111
2112 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2113 if (rc != X86EMUL_CONTINUE)
2114 return rc;
2115
2116 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2117 if (rc != X86EMUL_CONTINUE)
2118 return rc;
2119
2120 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2121 if (rc != X86EMUL_CONTINUE)
2122 return rc;
2123
2124 ctxt->_eip = eip;
2125
2126 return rc;
2127}
2128
2129int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2130{
2131 int rc;
2132
2133 invalidate_registers(ctxt);
2134 rc = __emulate_int_real(ctxt, irq);
2135 if (rc == X86EMUL_CONTINUE)
2136 writeback_registers(ctxt);
2137 return rc;
2138}
2139
2140static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2141{
2142 switch(ctxt->mode) {
2143 case X86EMUL_MODE_REAL:
2144 return __emulate_int_real(ctxt, irq);
2145 case X86EMUL_MODE_VM86:
2146 case X86EMUL_MODE_PROT16:
2147 case X86EMUL_MODE_PROT32:
2148 case X86EMUL_MODE_PROT64:
2149 default:
		/* Protected mode interrupts unimplemented yet */
2151 return X86EMUL_UNHANDLEABLE;
2152 }
2153}
2154
2155static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2156{
2157 int rc = X86EMUL_CONTINUE;
2158 unsigned long temp_eip = 0;
2159 unsigned long temp_eflags = 0;
2160 unsigned long cs = 0;
2161 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2162 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2163 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2164 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2165 X86_EFLAGS_AC | X86_EFLAGS_ID |
2166 X86_EFLAGS_FIXED;
2167 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2168 X86_EFLAGS_VIP;
2169
	/* TODO: Add stack limit check */
2171
2172 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2173
2174 if (rc != X86EMUL_CONTINUE)
2175 return rc;
2176
2177 if (temp_eip & ~0xffff)
2178 return emulate_gp(ctxt, 0);
2179
2180 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2181
2182 if (rc != X86EMUL_CONTINUE)
2183 return rc;
2184
2185 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2186
2187 if (rc != X86EMUL_CONTINUE)
2188 return rc;
2189
2190 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2191
2192 if (rc != X86EMUL_CONTINUE)
2193 return rc;
2194
2195 ctxt->_eip = temp_eip;
2196
2197 if (ctxt->op_bytes == 4)
2198 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2199 else if (ctxt->op_bytes == 2) {
2200 ctxt->eflags &= ~0xffff;
2201 ctxt->eflags |= temp_eflags;
2202 }
2203
2204 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2205 ctxt->eflags |= X86_EFLAGS_FIXED;
2206 ctxt->ops->set_nmi_mask(ctxt, false);
2207
2208 return rc;
2209}
2210
2211static int em_iret(struct x86_emulate_ctxt *ctxt)
2212{
2213 switch(ctxt->mode) {
2214 case X86EMUL_MODE_REAL:
2215 return emulate_iret_real(ctxt);
2216 case X86EMUL_MODE_VM86:
2217 case X86EMUL_MODE_PROT16:
2218 case X86EMUL_MODE_PROT32:
2219 case X86EMUL_MODE_PROT64:
2220 default:
		/* iret from protected mode unimplemented yet */
2222 return X86EMUL_UNHANDLEABLE;
2223 }
2224}
2225
2226static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2227{
2228 int rc;
2229 unsigned short sel;
2230 struct desc_struct new_desc;
2231 u8 cpl = ctxt->ops->cpl(ctxt);
2232
2233 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2234
2235 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2236 X86_TRANSFER_CALL_JMP,
2237 &new_desc);
2238 if (rc != X86EMUL_CONTINUE)
2239 return rc;
2240
2241 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
2243 if (rc != X86EMUL_CONTINUE)
2244 return X86EMUL_UNHANDLEABLE;
2245
2246 return rc;
2247}
2248
2249static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2250{
2251 return assign_eip_near(ctxt, ctxt->src.val);
2252}
2253
2254static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2255{
2256 int rc;
2257 long int old_eip;
2258
2259 old_eip = ctxt->_eip;
2260 rc = assign_eip_near(ctxt, ctxt->src.val);
2261 if (rc != X86EMUL_CONTINUE)
2262 return rc;
2263 ctxt->src.val = old_eip;
2264 rc = em_push(ctxt);
2265 return rc;
2266}
2267
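/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on mismatch the
 * destination value is loaded into EDX:EAX and ZF is cleared, otherwise
 * ECX:EBX is written back and ZF is set.
 */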
2268static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2269{
2270 u64 old = ctxt->dst.orig_val64;
2271
2272 if (ctxt->dst.bytes == 16)
2273 return X86EMUL_UNHANDLEABLE;
2274
2275 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2276 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2277 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2278 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2279 ctxt->eflags &= ~X86_EFLAGS_ZF;
2280 } else {
2281 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2282 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2283
2284 ctxt->eflags |= X86_EFLAGS_ZF;
2285 }
2286 return X86EMUL_CONTINUE;
2287}
2288
2289static int em_ret(struct x86_emulate_ctxt *ctxt)
2290{
2291 int rc;
2292 unsigned long eip;
2293
2294 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2295 if (rc != X86EMUL_CONTINUE)
2296 return rc;
2297
2298 return assign_eip_near(ctxt, eip);
2299}
2300
2301static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2302{
2303 int rc;
2304 unsigned long eip, cs;
2305 int cpl = ctxt->ops->cpl(ctxt);
2306 struct desc_struct new_desc;
2307
2308 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2309 if (rc != X86EMUL_CONTINUE)
2310 return rc;
2311 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2312 if (rc != X86EMUL_CONTINUE)
2313 return rc;
2314 /* Outer-privilege level return is not implemented */
2315 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2316 return X86EMUL_UNHANDLEABLE;
2317 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2318 X86_TRANSFER_RET,
2319 &new_desc);
2320 if (rc != X86EMUL_CONTINUE)
2321 return rc;
2322 rc = assign_eip_far(ctxt, eip, &new_desc);
2323 /* Error handling (rolling back the CS load) is not implemented. */
2324 if (rc != X86EMUL_CONTINUE)
2325 return X86EMUL_UNHANDLEABLE;
2326
2327 return rc;
2328}
2329
2330static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2331{
2332 int rc;
2333
2334 rc = em_ret_far(ctxt);
2335 if (rc != X86EMUL_CONTINUE)
2336 return rc;
2337 rsp_increment(ctxt, ctxt->src.val);
2338 return X86EMUL_CONTINUE;
2339}
2340
2341static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2342{
2343 /* Save real source value, then compare EAX against destination. */
2344 ctxt->dst.orig_val = ctxt->dst.val;
2345 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2346 ctxt->src.orig_val = ctxt->src.val;
2347 ctxt->src.val = ctxt->dst.orig_val;
2348 fastop(ctxt, em_cmp);
2349
2350 if (ctxt->eflags & X86_EFLAGS_ZF) {
2351 /* Success: write back to memory; no need to write EAX */
2352 ctxt->src.type = OP_NONE;
2353 ctxt->dst.val = ctxt->src.orig_val;
2354 } else {
2355 /* Failure: write the value we saw to EAX. */
2356 ctxt->src.type = OP_REG;
2357 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2358 ctxt->src.val = ctxt->dst.orig_val;
2359 /* Create write-cycle to dest by writing the same value */
2360 ctxt->dst.val = ctxt->dst.orig_val;
2361 }
2362 return X86EMUL_CONTINUE;
2363}
2364
2365static int em_lseg(struct x86_emulate_ctxt *ctxt)
2366{
2367 int seg = ctxt->src2.val;
2368 unsigned short sel;
2369 int rc;
2370
2371 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2372
2373 rc = load_segment_descriptor(ctxt, sel, seg);
2374 if (rc != X86EMUL_CONTINUE)
2375 return rc;
2376
2377 ctxt->dst.val = ctxt->src.val;
2378 return rc;
2379}
2380
2381static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2382{
2383#ifdef CONFIG_X86_64
2384 return ctxt->ops->guest_has_long_mode(ctxt);
2385#else
2386 return false;
2387#endif
2388}
2389
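/*
 * Unpack the segment attribute word kept in the SMM state-save area into a
 * struct desc_struct. The bit positions match the attribute bits of a
 * descriptor's high dword: type at bits 8-11, then S, DPL, P, and AVL, L,
 * D/B, G at bits 20-23.
 */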
2390static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2391{
2392 desc->g = (flags >> 23) & 1;
2393 desc->d = (flags >> 22) & 1;
2394 desc->l = (flags >> 21) & 1;
2395 desc->avl = (flags >> 20) & 1;
2396 desc->p = (flags >> 15) & 1;
2397 desc->dpl = (flags >> 13) & 3;
2398 desc->s = (flags >> 12) & 1;
2399 desc->type = (flags >> 8) & 15;
2400}
2401
2402static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2403 int n)
2404{
2405 struct desc_struct desc;
2406 int offset;
2407 u16 selector;
2408
2409 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2410
2411 if (n < 3)
2412 offset = 0x7f84 + n * 12;
2413 else
2414 offset = 0x7f2c + (n - 3) * 12;
2415
2416 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2417 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2418 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2419 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2420 return X86EMUL_CONTINUE;
2421}
2422
2423#ifdef CONFIG_X86_64
2424static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2425 int n)
2426{
2427 struct desc_struct desc;
2428 int offset;
2429 u16 selector;
2430 u32 base3;
2431
2432 offset = 0x7e00 + n * 16;
2433
2434 selector = GET_SMSTATE(u16, smstate, offset);
2435 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2436 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2437 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2438 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2439
2440 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2441 return X86EMUL_CONTINUE;
2442}
2443#endif
2444
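/*
 * Reload CR3, CR4 and CR0 from the SMM state-save area in an order that
 * keeps them architecturally consistent: CR3 is written (without a PCID)
 * before paging can come back on, CR4 is first written with PCIDE clear,
 * and only after CR0 is restored are CR4.PCIDE and the PCID in CR3 put
 * back.
 */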
2445static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2446 u64 cr0, u64 cr3, u64 cr4)
2447{
2448 int bad;
2449 u64 pcid;
2450
2451 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2452 pcid = 0;
2453 if (cr4 & X86_CR4_PCIDE) {
2454 pcid = cr3 & 0xfff;
2455 cr3 &= ~0xfff;
2456 }
2457
2458 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2459 if (bad)
2460 return X86EMUL_UNHANDLEABLE;
2461
2462 /*
2463 * First enable PAE; long mode needs it before CR0.PG = 1 is set.
2464 * Then enable protected mode. However, CR4.PCIDE cannot be set while
2465 * EFER.LMA = 0, so it is restored separately once paging is back on.
2466 */
2467 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2468 if (bad)
2469 return X86EMUL_UNHANDLEABLE;
2470
2471 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2472 if (bad)
2473 return X86EMUL_UNHANDLEABLE;
2474
2475 if (cr4 & X86_CR4_PCIDE) {
2476 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2477 if (bad)
2478 return X86EMUL_UNHANDLEABLE;
2479 if (pcid) {
2480 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2481 if (bad)
2482 return X86EMUL_UNHANDLEABLE;
2483 }
2484
2485 }
2486
2487 return X86EMUL_CONTINUE;
2488}
2489
2490static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2491 const char *smstate)
2492{
2493 struct desc_struct desc;
2494 struct desc_ptr dt;
2495 u16 selector;
2496 u32 val, cr0, cr3, cr4;
2497 int i;
2498
2499 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2500 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2501 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2502 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2503
2504 for (i = 0; i < 8; i++)
2505 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2506
2507 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2508 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2509 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2510 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2511
2512 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2513 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2514 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2515 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2516 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2517
2518 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2519 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2520 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2521 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2522 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2523
2524 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2525 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2526 ctxt->ops->set_gdt(ctxt, &dt);
2527
2528 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2529 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2530 ctxt->ops->set_idt(ctxt, &dt);
2531
2532 for (i = 0; i < 6; i++) {
2533 int r = rsm_load_seg_32(ctxt, smstate, i);
2534 if (r != X86EMUL_CONTINUE)
2535 return r;
2536 }
2537
2538 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2539
2540 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2541
2542 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2543}
2544
2545#ifdef CONFIG_X86_64
2546static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2547 const char *smstate)
2548{
2549 struct desc_struct desc;
2550 struct desc_ptr dt;
2551 u64 val, cr0, cr3, cr4;
2552 u32 base3;
2553 u16 selector;
2554 int i, r;
2555
2556 for (i = 0; i < 16; i++)
2557 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2558
2559 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2560 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2561
2562 val = GET_SMSTATE(u32, smstate, 0x7f68);
2563 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2564 val = GET_SMSTATE(u32, smstate, 0x7f60);
2565 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2566
2567 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2568 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2569 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2570 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2571 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2572 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2573
2574 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2575 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2576 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2577 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2578 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2579 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2580
2581 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2582 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2583 ctxt->ops->set_idt(ctxt, &dt);
2584
2585 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2586 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2587 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2588 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2589 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2590 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2591
2592 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2593 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2594 ctxt->ops->set_gdt(ctxt, &dt);
2595
2596 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2597 if (r != X86EMUL_CONTINUE)
2598 return r;
2599
2600 for (i = 0; i < 6; i++) {
2601 r = rsm_load_seg_64(ctxt, smstate, i);
2602 if (r != X86EMUL_CONTINUE)
2603 return r;
2604 }
2605
2606 return X86EMUL_CONTINUE;
2607}
2608#endif
2609
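/*
 * RSM: leave System Management Mode (#UD if we are not in SMM). The vCPU
 * is first dropped to a clean real-mode-like state (CR4.PCIDE/PAE, CR0.PG/PE
 * and EFER cleared), pre_leave_smm() lets vendor code adjust state, and the
 * saved registers are then reloaded from the SMRAM state-save area at
 * SMBASE + 0xfe00.
 */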
2610static int em_rsm(struct x86_emulate_ctxt *ctxt)
2611{
2612 unsigned long cr0, cr4, efer;
2613 char buf[512];
2614 u64 smbase;
2615 int ret;
2616
2617 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2618 return emulate_ud(ctxt);
2619
2620 smbase = ctxt->ops->get_smbase(ctxt);
2621
2622 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2623 if (ret != X86EMUL_CONTINUE)
2624 return X86EMUL_UNHANDLEABLE;
2625
2626 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2627 ctxt->ops->set_nmi_mask(ctxt, false);
2628
2629 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2630 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2631
2632 /*
2633 * Get back to real mode, to prepare a safe state in which to load
2634 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2635 * supports long mode.
2636 */
2637 if (emulator_has_longmode(ctxt)) {
2638 struct desc_struct cs_desc;
2639
2640 /* Zero CR4.PCIDE before CR0.PG. */
2641 cr4 = ctxt->ops->get_cr(ctxt, 4);
2642 if (cr4 & X86_CR4_PCIDE)
2643 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2644
2645 /* A 32-bit code segment is required to clear EFER.LMA. */
2646 memset(&cs_desc, 0, sizeof(cs_desc));
2647 cs_desc.type = 0xb;
2648 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2649 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2650 }
2651
2652 /* For the 64-bit case, clearing CR0.PG also clears EFER.LMA. */
2653 cr0 = ctxt->ops->get_cr(ctxt, 0);
2654 if (cr0 & X86_CR0_PE)
2655 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2656
2657 if (emulator_has_longmode(ctxt)) {
2658 /* Clear CR4.PAE before clearing EFER.LME. */
2659 cr4 = ctxt->ops->get_cr(ctxt, 4);
2660 if (cr4 & X86_CR4_PAE)
2661 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2662
2663 /* And finally go back to 32-bit mode. */
2664 efer = 0;
2665 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2666 }
2667
2668 /*
2669 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2670 * vCPU state (e.g. enter guest mode) before loading state from the
2671 * SMM state-save area.
2672 */
2673 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2674 return X86EMUL_UNHANDLEABLE;
2675
2676#ifdef CONFIG_X86_64
2677 if (emulator_has_longmode(ctxt))
2678 ret = rsm_load_state_64(ctxt, buf);
2679 else
2680#endif
2681 ret = rsm_load_state_32(ctxt, buf);
2682
2683 if (ret != X86EMUL_CONTINUE) {
2684 /* FIXME: should triple fault */
2685 return X86EMUL_UNHANDLEABLE;
2686 }
2687
2688 ctxt->ops->post_leave_smm(ctxt);
2689
2690 return X86EMUL_CONTINUE;
2691}
2692
2693static void
2694setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2695 struct desc_struct *cs, struct desc_struct *ss)
2696{
2697 cs->l = 0; /* will be adjusted later */
2698 set_desc_base(cs, 0); /* flat segment */
2699 cs->g = 1; /* 4kb granularity */
2700 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2701 cs->type = 0x0b; /* Read, Execute, Accessed */
2702 cs->s = 1;
2703 cs->dpl = 0; /* will be adjusted later */
2704 cs->p = 1;
2705 cs->d = 1;
2706 cs->avl = 0;
2707
2708 set_desc_base(ss, 0); /* flat segment */
2709 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2710 ss->g = 1; /* 4kb granularity */
2711 ss->s = 1;
2712 ss->type = 0x03; /* Read/Write, Accessed */
2713 ss->d = 1; /* 32bit stack segment */
2714 ss->dpl = 0;
2715 ss->p = 1;
2716 ss->l = 0;
2717 ss->avl = 0;
2718}
2719
2720static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2721{
2722 u32 eax, ebx, ecx, edx;
2723
2724 eax = ecx = 0;
2725 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2726 return is_guest_vendor_intel(ebx, ecx, edx);
2727}
2728
2729static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2730{
2731 const struct x86_emulate_ops *ops = ctxt->ops;
2732 u32 eax, ebx, ecx, edx;
2733
2734 /*
2735 * SYSCALL is always considered enabled in 64-bit long mode, so the
2736 * vendor-specific CPUID check below only matters for other modes.
2737 */
2738 if (ctxt->mode == X86EMUL_MODE_PROT64)
2739 return true;
2740
2741 eax = 0x00000000;
2742 ecx = 0x00000000;
2743 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2744 /*
2745 * Intel CPUs only support SYSCALL in 64-bit long mode; a 32-bit
2746 * compat task on such a guest takes #UD instead. AMD (and Hygon)
2747 * CPUs also accept SYSCALL in legacy and compat mode, so the answer
2748 * depends on the guest's CPUID vendor string.
2749 */
2750 if (is_guest_vendor_intel(ebx, ecx, edx))
2751 return false;
2752
2753 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2754 is_guest_vendor_hygon(ebx, ecx, edx))
2755 return true;
2756
2757 /*
2758 * Default (neither Intel, AMD nor Hygon): apply Intel's stricter
2759 * behaviour and refuse SYSCALL outside of 64-bit long mode.
2760 */
2761 return false;
2762}
2763
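/*
 * SYSCALL: the new CS/SS selectors come from the high half of MSR_STAR.
 * In long mode, RIP is loaded from MSR_LSTAR (MSR_CSTAR for compat-mode
 * callers), RCX/R11 receive the return RIP/RFLAGS, and RFLAGS is masked
 * with MSR_SYSCALL_MASK. Legacy mode takes EIP from the low half of
 * MSR_STAR and only clears VM and IF.
 */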
2764static int em_syscall(struct x86_emulate_ctxt *ctxt)
2765{
2766 const struct x86_emulate_ops *ops = ctxt->ops;
2767 struct desc_struct cs, ss;
2768 u64 msr_data;
2769 u16 cs_sel, ss_sel;
2770 u64 efer = 0;
2771
2772 /* SYSCALL is not available in real mode or VM86 mode */
2773 if (ctxt->mode == X86EMUL_MODE_REAL ||
2774 ctxt->mode == X86EMUL_MODE_VM86)
2775 return emulate_ud(ctxt);
2776
2777 if (!(em_syscall_is_enabled(ctxt)))
2778 return emulate_ud(ctxt);
2779
2780 ops->get_msr(ctxt, MSR_EFER, &efer);
2781 if (!(efer & EFER_SCE))
2782 return emulate_ud(ctxt);
2783
2784 setup_syscalls_segments(ctxt, &cs, &ss);
2785 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2786 msr_data >>= 32;
2787 cs_sel = (u16)(msr_data & 0xfffc);
2788 ss_sel = (u16)(msr_data + 8);
2789
2790 if (efer & EFER_LMA) {
2791 cs.d = 0;
2792 cs.l = 1;
2793 }
2794 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2795 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2796
2797 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2798 if (efer & EFER_LMA) {
2799#ifdef CONFIG_X86_64
2800 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2801
2802 ops->get_msr(ctxt,
2803 ctxt->mode == X86EMUL_MODE_PROT64 ?
2804 MSR_LSTAR : MSR_CSTAR, &msr_data);
2805 ctxt->_eip = msr_data;
2806
2807 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2808 ctxt->eflags &= ~msr_data;
2809 ctxt->eflags |= X86_EFLAGS_FIXED;
2810#endif
2811 } else {
2812 /* legacy mode */
2813 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2814 ctxt->_eip = (u32)msr_data;
2815
2816 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2817 }
2818
2819 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2820 return X86EMUL_CONTINUE;
2821}
2822
2823static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2824{
2825 const struct x86_emulate_ops *ops = ctxt->ops;
2826 struct desc_struct cs, ss;
2827 u64 msr_data;
2828 u16 cs_sel, ss_sel;
2829 u64 efer = 0;
2830
2831 ops->get_msr(ctxt, MSR_EFER, &efer);
2832
2833 if (ctxt->mode == X86EMUL_MODE_REAL)
2834 return emulate_gp(ctxt, 0);
2835
2836 /*
2837 * SYSENTER is not recognized on AMD in compat mode (but it is
2838 * recognized in legacy mode).
2839 */
2840 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2841 && !vendor_intel(ctxt))
2842 return emulate_ud(ctxt);
2843
2844 /* sysenter/sysexit are not emulated in 64-bit mode */
2845 if (ctxt->mode == X86EMUL_MODE_PROT64)
2846 return X86EMUL_UNHANDLEABLE;
2847
2848 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2849 if ((msr_data & 0xfffc) == 0x0)
2850 return emulate_gp(ctxt, 0);
2851
2852 setup_syscalls_segments(ctxt, &cs, &ss);
2853 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2854 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2855 ss_sel = cs_sel + 8;
2856 if (efer & EFER_LMA) {
2857 cs.d = 0;
2858 cs.l = 1;
2859 }
2860
2861 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2862 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2863
2864 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2865 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2866
2867 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2868 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2869 (u32)msr_data;
2870
2871 return X86EMUL_CONTINUE;
2872}
2873
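/*
 * SYSEXIT: the return CS/SS are derived from MSR_IA32_SYSENTER_CS (+16/+24
 * for a 32-bit return, +32/+40 for a 64-bit return), RIP comes from RDX and
 * RSP from RCX; both selectors are loaded with RPL 3.
 */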
2874static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2875{
2876 const struct x86_emulate_ops *ops = ctxt->ops;
2877 struct desc_struct cs, ss;
2878 u64 msr_data, rcx, rdx;
2879 int usermode;
2880 u16 cs_sel = 0, ss_sel = 0;
2881
2882 /* inject #GP if in real mode or Virtual 8086 mode */
2883 if (ctxt->mode == X86EMUL_MODE_REAL ||
2884 ctxt->mode == X86EMUL_MODE_VM86)
2885 return emulate_gp(ctxt, 0);
2886
2887 setup_syscalls_segments(ctxt, &cs, &ss);
2888
2889 if ((ctxt->rex_prefix & 0x8) != 0x0)
2890 usermode = X86EMUL_MODE_PROT64;
2891 else
2892 usermode = X86EMUL_MODE_PROT32;
2893
2894 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2895 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2896
2897 cs.dpl = 3;
2898 ss.dpl = 3;
2899 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2900 switch (usermode) {
2901 case X86EMUL_MODE_PROT32:
2902 cs_sel = (u16)(msr_data + 16);
2903 if ((msr_data & 0xfffc) == 0x0)
2904 return emulate_gp(ctxt, 0);
2905 ss_sel = (u16)(msr_data + 24);
2906 rcx = (u32)rcx;
2907 rdx = (u32)rdx;
2908 break;
2909 case X86EMUL_MODE_PROT64:
2910 cs_sel = (u16)(msr_data + 32);
2911 if (msr_data == 0x0)
2912 return emulate_gp(ctxt, 0);
2913 ss_sel = cs_sel + 8;
2914 cs.d = 0;
2915 cs.l = 1;
2916 if (emul_is_noncanonical_address(rcx, ctxt) ||
2917 emul_is_noncanonical_address(rdx, ctxt))
2918 return emulate_gp(ctxt, 0);
2919 break;
2920 }
2921 cs_sel |= SEGMENT_RPL_MASK;
2922 ss_sel |= SEGMENT_RPL_MASK;
2923
2924 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2925 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2926
2927 ctxt->_eip = rdx;
2928 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2929
2930 return X86EMUL_CONTINUE;
2931}
2932
2933static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2934{
2935 int iopl;
2936 if (ctxt->mode == X86EMUL_MODE_REAL)
2937 return false;
2938 if (ctxt->mode == X86EMUL_MODE_VM86)
2939 return true;
2940 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2941 return ctxt->ops->cpl(ctxt) > iopl;
2942}
2943
2944#define VMWARE_PORT_VMPORT (0x5658)
2945#define VMWARE_PORT_VMRPC (0x5659)
2946
2947static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2948 u16 port, u16 len)
2949{
2950 const struct x86_emulate_ops *ops = ctxt->ops;
2951 struct desc_struct tr_seg;
2952 u32 base3;
2953 int r;
2954 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2955 unsigned mask = (1 << len) - 1;
2956 unsigned long base;
2957
2958 /*
2959 * VMware allows access to these ports even if denied
2960 * by TSS I/O permission bitmap. Mimic behavior.
2961 */
2962 if (enable_vmware_backdoor &&
2963 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2964 return true;
2965
2966 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2967 if (!tr_seg.p)
2968 return false;
2969 if (desc_limit_scaled(&tr_seg) < 103)
2970 return false;
2971 base = get_desc_base(&tr_seg);
2972#ifdef CONFIG_X86_64
2973 base |= ((u64)base3) << 32;
2974#endif
2975 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2976 if (r != X86EMUL_CONTINUE)
2977 return false;
2978 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2979 return false;
2980 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2981 if (r != X86EMUL_CONTINUE)
2982 return false;
2983 if ((perm >> bit_idx) & mask)
2984 return false;
2985 return true;
2986}
2987
2988static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2989 u16 port, u16 len)
2990{
2991 if (ctxt->perm_ok)
2992 return true;
2993
2994 if (emulator_bad_iopl(ctxt))
2995 if (!emulator_io_port_access_allowed(ctxt, port, len))
2996 return false;
2997
2998 ctxt->perm_ok = true;
2999
3000 return true;
3001}
3002
3003static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
3004{
3005 /*
3006 * Intel CPUs mask the counter and pointers in quite strange
3007 * manner when ECX is zero due to REP-string optimizations.
3008 */
3009#ifdef CONFIG_X86_64
3010 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
3011 return;
3012
3013 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
3014
3015 switch (ctxt->b) {
3016 case 0xa4: /* movsb */
3017 case 0xa5: /* movsd/w */
3018 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
3019 /* fall through */
3020 case 0xaa: /* stosb */
3021 case 0xab: /* stosd/w */
3022 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
3023 }
3024#endif
3025}
3026
3027static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
3028 struct tss_segment_16 *tss)
3029{
3030 tss->ip = ctxt->_eip;
3031 tss->flag = ctxt->eflags;
3032 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3033 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3034 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3035 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3036 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3037 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3038 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3039 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3040
3041 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3042 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3043 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3044 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3045 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3046}
3047
3048static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3049 struct tss_segment_16 *tss)
3050{
3051 int ret;
3052 u8 cpl;
3053
3054 ctxt->_eip = tss->ip;
3055 ctxt->eflags = tss->flag | 2;
3056 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3057 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3058 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3059 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3060 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3061 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3062 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3063 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3064
3065 /*
3066 * SDM says that segment selectors are loaded before segment
3067 * descriptors.
3068 */
3069 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3070 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3071 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3072 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3073 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3074
3075 cpl = tss->cs & 3;
3076
3077 /*
3078 * Now load segment descriptors. If a fault happens at this stage
3079 * it is handled in the context of the new task.
3080 */
3081 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3082 X86_TRANSFER_TASK_SWITCH, NULL);
3083 if (ret != X86EMUL_CONTINUE)
3084 return ret;
3085 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3086 X86_TRANSFER_TASK_SWITCH, NULL);
3087 if (ret != X86EMUL_CONTINUE)
3088 return ret;
3089 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3090 X86_TRANSFER_TASK_SWITCH, NULL);
3091 if (ret != X86EMUL_CONTINUE)
3092 return ret;
3093 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3094 X86_TRANSFER_TASK_SWITCH, NULL);
3095 if (ret != X86EMUL_CONTINUE)
3096 return ret;
3097 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3098 X86_TRANSFER_TASK_SWITCH, NULL);
3099 if (ret != X86EMUL_CONTINUE)
3100 return ret;
3101
3102 return X86EMUL_CONTINUE;
3103}
3104
3105static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3106 u16 tss_selector, u16 old_tss_sel,
3107 ulong old_tss_base, struct desc_struct *new_desc)
3108{
3109 struct tss_segment_16 tss_seg;
3110 int ret;
3111 u32 new_tss_base = get_desc_base(new_desc);
3112
3113 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3114 if (ret != X86EMUL_CONTINUE)
3115 return ret;
3116
3117 save_state_to_tss16(ctxt, &tss_seg);
3118
3119 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3120 if (ret != X86EMUL_CONTINUE)
3121 return ret;
3122
3123 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3124 if (ret != X86EMUL_CONTINUE)
3125 return ret;
3126
3127 if (old_tss_sel != 0xffff) {
3128 tss_seg.prev_task_link = old_tss_sel;
3129
3130 ret = linear_write_system(ctxt, new_tss_base,
3131 &tss_seg.prev_task_link,
3132 sizeof(tss_seg.prev_task_link));
3133 if (ret != X86EMUL_CONTINUE)
3134 return ret;
3135 }
3136
3137 return load_state_from_tss16(ctxt, &tss_seg);
3138}
3139
3140static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3141 struct tss_segment_32 *tss)
3142{
3143 /* CR3 and ldt selector are not saved intentionally */
3144 tss->eip = ctxt->_eip;
3145 tss->eflags = ctxt->eflags;
3146 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3147 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3148 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3149 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3150 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3151 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3152 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3153 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3154
3155 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3156 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3157 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3158 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3159 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3160 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3161}
3162
3163static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3164 struct tss_segment_32 *tss)
3165{
3166 int ret;
3167 u8 cpl;
3168
3169 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3170 return emulate_gp(ctxt, 0);
3171 ctxt->_eip = tss->eip;
3172 ctxt->eflags = tss->eflags | 2;
3173
3174 /* General purpose registers */
3175 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3176 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3177 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3178 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3179 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3180 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3181 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3182 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3183
3184 /*
3185 * SDM says that segment selectors are loaded before segment
3186 * descriptors. This is important because CPL checks will
3187 * use CS.RPL.
3188 */
3189 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3190 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3191 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3192 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3193 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3194 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3195 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3196
3197 /*
3198 * If we're switching between Protected Mode and VM86, we need to make
3199 * sure to update the mode before loading the segment descriptors so
3200 * that the selectors are interpreted correctly.
3201 */
3202 if (ctxt->eflags & X86_EFLAGS_VM) {
3203 ctxt->mode = X86EMUL_MODE_VM86;
3204 cpl = 3;
3205 } else {
3206 ctxt->mode = X86EMUL_MODE_PROT32;
3207 cpl = tss->cs & 3;
3208 }
3209
3210 /*
3211 * Now load segment descriptors. If a fault happens at this stage
3212 * it is handled in the context of the new task.
3213 */
3214 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3215 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3216 if (ret != X86EMUL_CONTINUE)
3217 return ret;
3218 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3219 X86_TRANSFER_TASK_SWITCH, NULL);
3220 if (ret != X86EMUL_CONTINUE)
3221 return ret;
3222 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3223 X86_TRANSFER_TASK_SWITCH, NULL);
3224 if (ret != X86EMUL_CONTINUE)
3225 return ret;
3226 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3227 X86_TRANSFER_TASK_SWITCH, NULL);
3228 if (ret != X86EMUL_CONTINUE)
3229 return ret;
3230 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3231 X86_TRANSFER_TASK_SWITCH, NULL);
3232 if (ret != X86EMUL_CONTINUE)
3233 return ret;
3234 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3235 X86_TRANSFER_TASK_SWITCH, NULL);
3236 if (ret != X86EMUL_CONTINUE)
3237 return ret;
3238 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3239 X86_TRANSFER_TASK_SWITCH, NULL);
3240
3241 return ret;
3242}
3243
3244static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3245 u16 tss_selector, u16 old_tss_sel,
3246 ulong old_tss_base, struct desc_struct *new_desc)
3247{
3248 struct tss_segment_32 tss_seg;
3249 int ret;
3250 u32 new_tss_base = get_desc_base(new_desc);
3251 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3252 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3253
3254 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3255 if (ret != X86EMUL_CONTINUE)
3256 return ret;
3257
3258 save_state_to_tss32(ctxt, &tss_seg);
3259
3260 /* Only GP registers and segment selectors are saved */
3261 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3262 ldt_sel_offset - eip_offset);
3263 if (ret != X86EMUL_CONTINUE)
3264 return ret;
3265
3266 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3267 if (ret != X86EMUL_CONTINUE)
3268 return ret;
3269
3270 if (old_tss_sel != 0xffff) {
3271 tss_seg.prev_task_link = old_tss_sel;
3272
3273 ret = linear_write_system(ctxt, new_tss_base,
3274 &tss_seg.prev_task_link,
3275 sizeof(tss_seg.prev_task_link));
3276 if (ret != X86EMUL_CONTINUE)
3277 return ret;
3278 }
3279
3280 return load_state_from_tss32(ctxt, &tss_seg);
3281}
3282
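/*
 * Common hardware task-switch emulation: validate the target TSS
 * descriptor, save the outgoing context into the old TSS, load the new
 * context (16- or 32-bit TSS format), maintain the busy bits, the NT flag
 * and the back link, push an error code if required, and finally set
 * CR0.TS.
 */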
3283static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3284 u16 tss_selector, int idt_index, int reason,
3285 bool has_error_code, u32 error_code)
3286{
3287 const struct x86_emulate_ops *ops = ctxt->ops;
3288 struct desc_struct curr_tss_desc, next_tss_desc;
3289 int ret;
3290 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3291 ulong old_tss_base =
3292 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3293 u32 desc_limit;
3294 ulong desc_addr, dr7;
3295
3296 /* FIXME: old_tss_base == ~0 ? */
3297
3298 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3299 if (ret != X86EMUL_CONTINUE)
3300 return ret;
3301 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3302 if (ret != X86EMUL_CONTINUE)
3303 return ret;
3304
3305 /* FIXME: check that next_tss_desc is a TSS descriptor */
3306
3307 /*
3308 * Check privileges. The three cases are task switch caused by
3309 * 1. jmp/call/int to task gate: check against DPL of the task gate
3310 * 2. Exception/IRQ/iret: no check is performed
3311 * 3. jmp/call to TSS/task-gate: no check is performed since the
3312 * hardware checks it before exiting
3313 */
3314
3315 if (reason == TASK_SWITCH_GATE) {
3316 if (idt_index != -1) {
3317
3318 struct desc_struct task_gate_desc;
3319 int dpl;
3320
3321 ret = read_interrupt_descriptor(ctxt, idt_index,
3322 &task_gate_desc);
3323 if (ret != X86EMUL_CONTINUE)
3324 return ret;
3325
3326 dpl = task_gate_desc.dpl;
3327 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3328 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3329 }
3330 }
3331
3332 desc_limit = desc_limit_scaled(&next_tss_desc);
3333 if (!next_tss_desc.p ||
3334 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3335 desc_limit < 0x2b)) {
3336 return emulate_ts(ctxt, tss_selector & 0xfffc);
3337 }
3338
3339 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3340 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3341 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3342 }
3343
3344 if (reason == TASK_SWITCH_IRET)
3345 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3346
3347 /* Only CALL and task-gate task switches record a back link to the
3348 previous task; old_tss_sel is not used after this point. */
3349 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3350 old_tss_sel = 0xffff;
3351
3352 if (next_tss_desc.type & 8)
3353 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3354 old_tss_base, &next_tss_desc);
3355 else
3356 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3357 old_tss_base, &next_tss_desc);
3358 if (ret != X86EMUL_CONTINUE)
3359 return ret;
3360
3361 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3362 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3363
3364 if (reason != TASK_SWITCH_IRET) {
3365 next_tss_desc.type |= (1 << 1); /* set busy flag */
3366 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3367 }
3368
3369 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3370 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3371
3372 if (has_error_code) {
3373 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3374 ctxt->lock_prefix = 0;
3375 ctxt->src.val = (unsigned long) error_code;
3376 ret = em_push(ctxt);
3377 }
3378
3379 ops->get_dr(ctxt, 7, &dr7);
3380 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3381
3382 return ret;
3383}
3384
3385int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3386 u16 tss_selector, int idt_index, int reason,
3387 bool has_error_code, u32 error_code)
3388{
3389 int rc;
3390
3391 invalidate_registers(ctxt);
3392 ctxt->_eip = ctxt->eip;
3393 ctxt->dst.type = OP_NONE;
3394
3395 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3396 has_error_code, error_code);
3397
3398 if (rc == X86EMUL_CONTINUE) {
3399 ctxt->eip = ctxt->_eip;
3400 writeback_registers(ctxt);
3401 }
3402
3403 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3404}
3405
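/*
 * Advance a string-operation pointer register (SI/DI) past the elements
 * just processed: count * element size, moving backwards when EFLAGS.DF
 * is set, then refresh the operand's effective address.
 */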
3406static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3407 struct operand *op)
3408{
3409 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3410
3411 register_address_increment(ctxt, reg, df * op->bytes);
3412 op->addr.mem.ea = register_address(ctxt, reg);
3413}
3414
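/*
 * DAS: decimal adjust AL after subtraction. Subtract 6 if the low nibble
 * is above 9 or AF was set, subtract 0x60 if AL was above 0x99 or CF was
 * set, recompute CF/AF, and let the OR fastop below set PF/ZF/SF.
 */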
3415static int em_das(struct x86_emulate_ctxt *ctxt)
3416{
3417 u8 al, old_al;
3418 bool af, cf, old_cf;
3419
3420 cf = ctxt->eflags & X86_EFLAGS_CF;
3421 al = ctxt->dst.val;
3422
3423 old_al = al;
3424 old_cf = cf;
3425 cf = false;
3426 af = ctxt->eflags & X86_EFLAGS_AF;
3427 if ((al & 0x0f) > 9 || af) {
3428 al -= 6;
3429 cf = old_cf | (al >= 250);
3430 af = true;
3431 } else {
3432 af = false;
3433 }
3434 if (old_al > 0x99 || old_cf) {
3435 al -= 0x60;
3436 cf = true;
3437 }
3438
3439 ctxt->dst.val = al;
3440 /* Set PF, ZF, SF */
3441 ctxt->src.type = OP_IMM;
3442 ctxt->src.val = 0;
3443 ctxt->src.bytes = 1;
3444 fastop(ctxt, em_or);
3445 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3446 if (cf)
3447 ctxt->eflags |= X86_EFLAGS_CF;
3448 if (af)
3449 ctxt->eflags |= X86_EFLAGS_AF;
3450 return X86EMUL_CONTINUE;
3451}
3452
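/*
 * AAM: divide AL by the immediate (#DE if it is zero), leaving the
 * quotient in AH and the remainder in AL.
 */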
3453static int em_aam(struct x86_emulate_ctxt *ctxt)
3454{
3455 u8 al, ah;
3456
3457 if (ctxt->src.val == 0)
3458 return emulate_de(ctxt);
3459
3460 al = ctxt->dst.val & 0xff;
3461 ah = al / ctxt->src.val;
3462 al %= ctxt->src.val;
3463
3464 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3465
3466 /* Set PF, ZF, SF */
3467 ctxt->src.type = OP_IMM;
3468 ctxt->src.val = 0;
3469 ctxt->src.bytes = 1;
3470 fastop(ctxt, em_or);
3471
3472 return X86EMUL_CONTINUE;
3473}
3474
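/*
 * AAD: AL = AL + AH * imm8, then AH is cleared; only the resulting AL
 * survives in AX.
 */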
3475static int em_aad(struct x86_emulate_ctxt *ctxt)
3476{
3477 u8 al = ctxt->dst.val & 0xff;
3478 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3479
3480 al = (al + (ah * ctxt->src.val)) & 0xff;
3481
3482 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3483
3484 /* Set PF, ZF, SF */
3485 ctxt->src.type = OP_IMM;
3486 ctxt->src.val = 0;
3487 ctxt->src.bytes = 1;
3488 fastop(ctxt, em_or);
3489
3490 return X86EMUL_CONTINUE;
3491}
3492
3493static int em_call(struct x86_emulate_ctxt *ctxt)
3494{
3495 int rc;
3496 long rel = ctxt->src.val;
3497
3498 ctxt->src.val = (unsigned long)ctxt->_eip;
3499 rc = jmp_rel(ctxt, rel);
3500 if (rc != X86EMUL_CONTINUE)
3501 return rc;
3502 return em_push(ctxt);
3503}
3504
3505static int em_call_far(struct x86_emulate_ctxt *ctxt)
3506{
3507 u16 sel, old_cs;
3508 ulong old_eip;
3509 int rc;
3510 struct desc_struct old_desc, new_desc;
3511 const struct x86_emulate_ops *ops = ctxt->ops;
3512 int cpl = ctxt->ops->cpl(ctxt);
3513 enum x86emul_mode prev_mode = ctxt->mode;
3514
3515 old_eip = ctxt->_eip;
3516 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3517
3518 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3519 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3520 X86_TRANSFER_CALL_JMP, &new_desc);
3521 if (rc != X86EMUL_CONTINUE)
3522 return rc;
3523
3524 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3525 if (rc != X86EMUL_CONTINUE)
3526 goto fail;
3527
3528 ctxt->src.val = old_cs;
3529 rc = em_push(ctxt);
3530 if (rc != X86EMUL_CONTINUE)
3531 goto fail;
3532
3533 ctxt->src.val = old_eip;
3534 rc = em_push(ctxt);
3535 /* If we failed, we tainted the memory, but the very least we should
3536 restore cs */
3537 if (rc != X86EMUL_CONTINUE) {
3538 pr_warn_once("faulting far call emulation tainted memory\n");
3539 goto fail;
3540 }
3541 return rc;
3542fail:
3543 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3544 ctxt->mode = prev_mode;
3545 return rc;
3546
3547}
3548
3549static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3550{
3551 int rc;
3552 unsigned long eip;
3553
3554 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3555 if (rc != X86EMUL_CONTINUE)
3556 return rc;
3557 rc = assign_eip_near(ctxt, eip);
3558 if (rc != X86EMUL_CONTINUE)
3559 return rc;
3560 rsp_increment(ctxt, ctxt->src.val);
3561 return X86EMUL_CONTINUE;
3562}
3563
3564static int em_xchg(struct x86_emulate_ctxt *ctxt)
3565{
3566 /* Write back the register source. */
3567 ctxt->src.val = ctxt->dst.val;
3568 write_register_operand(&ctxt->src);
3569
3570 /* Write back the memory destination with implicit LOCK prefix. */
3571 ctxt->dst.val = ctxt->src.orig_val;
3572 ctxt->lock_prefix = 1;
3573 return X86EMUL_CONTINUE;
3574}
3575
3576static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3577{
3578 ctxt->dst.val = ctxt->src2.val;
3579 return fastop(ctxt, em_imul);
3580}
3581
3582static int em_cwd(struct x86_emulate_ctxt *ctxt)
3583{
3584 ctxt->dst.type = OP_REG;
3585 ctxt->dst.bytes = ctxt->src.bytes;
3586 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3587 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3588
3589 return X86EMUL_CONTINUE;
3590}
3591
3592static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3593{
3594 u64 tsc_aux = 0;
3595
3596 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3597 return emulate_gp(ctxt, 0);
3598 ctxt->dst.val = tsc_aux;
3599 return X86EMUL_CONTINUE;
3600}
3601
3602static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3603{
3604 u64 tsc = 0;
3605
3606 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3607 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3608 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3609 return X86EMUL_CONTINUE;
3610}
3611
3612static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3613{
3614 u64 pmc;
3615
3616 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3617 return emulate_gp(ctxt, 0);
3618 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3619 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3620 return X86EMUL_CONTINUE;
3621}
3622
3623static int em_mov(struct x86_emulate_ctxt *ctxt)
3624{
3625 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3626 return X86EMUL_CONTINUE;
3627}
3628
3629static int em_movbe(struct x86_emulate_ctxt *ctxt)
3630{
3631 u16 tmp;
3632
3633 if (!ctxt->ops->guest_has_movbe(ctxt))
3634 return emulate_ud(ctxt);
3635
3636 switch (ctxt->op_bytes) {
3637 case 2:
3638 /*
3639 * From the MOVBE definition: "...When the operand size is 16 bits,
3640 * the upper word of the destination register remains unchanged
3641 * ..."
3642 *
3643 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3644 * rules so we have to do the operation almost per hand.
3645 */
3646 tmp = (u16)ctxt->src.val;
3647 ctxt->dst.val &= ~0xffffUL;
3648 ctxt->dst.val |= (unsigned long)swab16(tmp);
3649 break;
3650 case 4:
3651 ctxt->dst.val = swab32((u32)ctxt->src.val);
3652 break;
3653 case 8:
3654 ctxt->dst.val = swab64(ctxt->src.val);
3655 break;
3656 default:
3657 BUG();
3658 }
3659 return X86EMUL_CONTINUE;
3660}
3661
3662static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3663{
3664 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3665 return emulate_gp(ctxt, 0);
3666
3667 /* Disable writeback. */
3668 ctxt->dst.type = OP_NONE;
3669 return X86EMUL_CONTINUE;
3670}
3671
3672static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3673{
3674 unsigned long val;
3675
3676 if (ctxt->mode == X86EMUL_MODE_PROT64)
3677 val = ctxt->src.val & ~0ULL;
3678 else
3679 val = ctxt->src.val & ~0U;
3680
3681 /* #UD condition is already handled. */
3682 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3683 return emulate_gp(ctxt, 0);
3684
3685 /* Disable writeback. */
3686 ctxt->dst.type = OP_NONE;
3687 return X86EMUL_CONTINUE;
3688}
3689
3690static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3691{
3692 u64 msr_data;
3693
3694 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3695 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3696 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3697 return emulate_gp(ctxt, 0);
3698
3699 return X86EMUL_CONTINUE;
3700}
3701
3702static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3703{
3704 u64 msr_data;
3705
3706 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3707 return emulate_gp(ctxt, 0);
3708
3709 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3710 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3711 return X86EMUL_CONTINUE;
3712}
3713
3714static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3715{
3716 if (segment > VCPU_SREG_GS &&
3717 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3718 ctxt->ops->cpl(ctxt) > 0)
3719 return emulate_gp(ctxt, 0);
3720
3721 ctxt->dst.val = get_segment_selector(ctxt, segment);
3722 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3723 ctxt->dst.bytes = 2;
3724 return X86EMUL_CONTINUE;
3725}
3726
3727static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3728{
3729 if (ctxt->modrm_reg > VCPU_SREG_GS)
3730 return emulate_ud(ctxt);
3731
3732 return em_store_sreg(ctxt, ctxt->modrm_reg);
3733}
3734
3735static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3736{
3737 u16 sel = ctxt->src.val;
3738
3739 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3740 return emulate_ud(ctxt);
3741
3742 if (ctxt->modrm_reg == VCPU_SREG_SS)
3743 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3744
3745 /* Disable writeback. */
3746 ctxt->dst.type = OP_NONE;
3747 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3748}
3749
3750static int em_sldt(struct x86_emulate_ctxt *ctxt)
3751{
3752 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3753}
3754
3755static int em_lldt(struct x86_emulate_ctxt *ctxt)
3756{
3757 u16 sel = ctxt->src.val;
3758
3759 /* Disable writeback. */
3760 ctxt->dst.type = OP_NONE;
3761 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3762}
3763
3764static int em_str(struct x86_emulate_ctxt *ctxt)
3765{
3766 return em_store_sreg(ctxt, VCPU_SREG_TR);
3767}
3768
3769static int em_ltr(struct x86_emulate_ctxt *ctxt)
3770{
3771 u16 sel = ctxt->src.val;
3772
3773 /* Disable writeback. */
3774 ctxt->dst.type = OP_NONE;
3775 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3776}
3777
3778static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3779{
3780 int rc;
3781 ulong linear;
3782
3783 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3784 if (rc == X86EMUL_CONTINUE)
3785 ctxt->ops->invlpg(ctxt, linear);
3786 /* Disable writeback. */
3787 ctxt->dst.type = OP_NONE;
3788 return X86EMUL_CONTINUE;
3789}
3790
3791static int em_clts(struct x86_emulate_ctxt *ctxt)
3792{
3793 ulong cr0;
3794
3795 cr0 = ctxt->ops->get_cr(ctxt, 0);
3796 cr0 &= ~X86_CR0_TS;
3797 ctxt->ops->set_cr(ctxt, 0, cr0);
3798 return X86EMUL_CONTINUE;
3799}
3800
3801static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3802{
3803 int rc = ctxt->ops->fix_hypercall(ctxt);
3804
3805 if (rc != X86EMUL_CONTINUE)
3806 return rc;
3807
3808 /* Let the processor re-execute the fixed hypercall */
3809 ctxt->_eip = ctxt->eip;
3810 /* Disable writeback. */
3811 ctxt->dst.type = OP_NONE;
3812 return X86EMUL_CONTINUE;
3813}
3814
3815static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3816 void (*get)(struct x86_emulate_ctxt *ctxt,
3817 struct desc_ptr *ptr))
3818{
3819 struct desc_ptr desc_ptr;
3820
3821 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3822 ctxt->ops->cpl(ctxt) > 0)
3823 return emulate_gp(ctxt, 0);
3824
3825 if (ctxt->mode == X86EMUL_MODE_PROT64)
3826 ctxt->op_bytes = 8;
3827 get(ctxt, &desc_ptr);
3828 if (ctxt->op_bytes == 2) {
3829 ctxt->op_bytes = 4;
3830 desc_ptr.address &= 0x00ffffff;
3831 }
3832 /* Disable writeback */
3833 ctxt->dst.type = OP_NONE;
3834 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3835 &desc_ptr, 2 + ctxt->op_bytes);
3836}
3837
3838static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3839{
3840 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3841}
3842
3843static int em_sidt(struct x86_emulate_ctxt *ctxt)
3844{
3845 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3846}
3847
3848static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3849{
3850 struct desc_ptr desc_ptr;
3851 int rc;
3852
3853 if (ctxt->mode == X86EMUL_MODE_PROT64)
3854 ctxt->op_bytes = 8;
3855 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3856 &desc_ptr.size, &desc_ptr.address,
3857 ctxt->op_bytes);
3858 if (rc != X86EMUL_CONTINUE)
3859 return rc;
3860 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3861 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3862 return emulate_gp(ctxt, 0);
3863 if (lgdt)
3864 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3865 else
3866 ctxt->ops->set_idt(ctxt, &desc_ptr);
3867 /* Disable writeback */
3868 ctxt->dst.type = OP_NONE;
3869 return X86EMUL_CONTINUE;
3870}
3871
3872static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3873{
3874 return em_lgdt_lidt(ctxt, true);
3875}
3876
3877static int em_lidt(struct x86_emulate_ctxt *ctxt)
3878{
3879 return em_lgdt_lidt(ctxt, false);
3880}
3881
3882static int em_smsw(struct x86_emulate_ctxt *ctxt)
3883{
3884 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3885 ctxt->ops->cpl(ctxt) > 0)
3886 return emulate_gp(ctxt, 0);
3887
3888 if (ctxt->dst.type == OP_MEM)
3889 ctxt->dst.bytes = 2;
3890 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3891 return X86EMUL_CONTINUE;
3892}
3893
3894static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3895{
3896 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3897 | (ctxt->src.val & 0x0f));
3898 ctxt->dst.type = OP_NONE;
3899 return X86EMUL_CONTINUE;
3900}
3901
3902static int em_loop(struct x86_emulate_ctxt *ctxt)
3903{
3904 int rc = X86EMUL_CONTINUE;
3905
3906 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3907 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3908 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3909 rc = jmp_rel(ctxt, ctxt->src.val);
3910
3911 return rc;
3912}
3913
3914static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3915{
3916 int rc = X86EMUL_CONTINUE;
3917
3918 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3919 rc = jmp_rel(ctxt, ctxt->src.val);
3920
3921 return rc;
3922}
3923
3924static int em_in(struct x86_emulate_ctxt *ctxt)
3925{
3926 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3927 &ctxt->dst.val))
3928 return X86EMUL_IO_NEEDED;
3929
3930 return X86EMUL_CONTINUE;
3931}
3932
3933static int em_out(struct x86_emulate_ctxt *ctxt)
3934{
3935 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3936 &ctxt->src.val, 1);
3937 /* Disable writeback */
3938 ctxt->dst.type = OP_NONE;
3939 return X86EMUL_CONTINUE;
3940}
3941
3942static int em_cli(struct x86_emulate_ctxt *ctxt)
3943{
3944 if (emulator_bad_iopl(ctxt))
3945 return emulate_gp(ctxt, 0);
3946
3947 ctxt->eflags &= ~X86_EFLAGS_IF;
3948 return X86EMUL_CONTINUE;
3949}
3950
3951static int em_sti(struct x86_emulate_ctxt *ctxt)
3952{
3953 if (emulator_bad_iopl(ctxt))
3954 return emulate_gp(ctxt, 0);
3955
3956 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3957 ctxt->eflags |= X86_EFLAGS_IF;
3958 return X86EMUL_CONTINUE;
3959}
3960
3961static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3962{
3963 u32 eax, ebx, ecx, edx;
3964 u64 msr = 0;
3965
3966 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3967 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3968 ctxt->ops->cpl(ctxt)) {
3969 return emulate_gp(ctxt, 0);
3970 }
3971
3972 eax = reg_read(ctxt, VCPU_REGS_RAX);
3973 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3974 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3975 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3976 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3977 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3978 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3979 return X86EMUL_CONTINUE;
3980}
3981
3982static int em_sahf(struct x86_emulate_ctxt *ctxt)
3983{
3984 u32 flags;
3985
3986 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3987 X86_EFLAGS_SF;
3988 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3989
3990 ctxt->eflags &= ~0xffUL;
3991 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3992 return X86EMUL_CONTINUE;
3993}
3994
3995static int em_lahf(struct x86_emulate_ctxt *ctxt)
3996{
3997 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3998 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3999 return X86EMUL_CONTINUE;
4000}
4001
4002static int em_bswap(struct x86_emulate_ctxt *ctxt)
4003{
4004 switch (ctxt->op_bytes) {
4005#ifdef CONFIG_X86_64
4006 case 8:
4007 asm("bswap %0" : "+r"(ctxt->dst.val));
4008 break;
4009#endif
4010 default:
4011 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4012 break;
4013 }
4014 return X86EMUL_CONTINUE;
4015}
4016
4017static int em_clflush(struct x86_emulate_ctxt *ctxt)
4018{
4019 /* emulating clflush regardless of cpuid */
4020 return X86EMUL_CONTINUE;
4021}
4022
4023static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4024{
4025 ctxt->dst.val = (s32) ctxt->src.val;
4026 return X86EMUL_CONTINUE;
4027}
4028
4029static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4030{
4031 if (!ctxt->ops->guest_has_fxsr(ctxt))
4032 return emulate_ud(ctxt);
4033
4034 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4035 return emulate_nm(ctxt);
4036
4037 /*
4038 * Don't emulate a case that should never be hit, instead of working
4039 * around a lack of fxsave64/fxrstor64 on old compilers.
4040 */
4041 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4042 return X86EMUL_UNHANDLEABLE;
4043
4044 return X86EMUL_CONTINUE;
4045}
4046
4047/*
4048 * Size of the guest-visible FXSAVE image: legacy area plus 16 bytes per
4049 * visible XMM register (16 in 64-bit mode, else 8 with CR4.OSFXSR, else 0).
4050 */
4051static size_t __fxstate_size(int nregs)
4052{
4053 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4054}
4055
4056static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4057{
4058 bool cr4_osfxsr;
4059 if (ctxt->mode == X86EMUL_MODE_PROT64)
4060 return __fxstate_size(16);
4061
4062 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4063 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4064}
4065
4066
4067
4068
4069/*
4070 * FXSAVE and FXRSTOR image layouts differ with the execution mode:
4071 *  - 16/32 bit guests observe the legacy area plus XMM0-7, and only
4072 *    when CR4.OSFXSR is set,
4073 *  - 64-bit mode additionally covers XMM8-15.
4074 *
4075 * The emulation below always runs the host's FXSAVE/FXRSTOR, which use
4076 * the full layout, but exposes only the first fxstate_size() bytes to
4077 * the guest. em_fxrstor() reconstructs the remainder of the image from
4078 * the current hardware state (see fxregs_fixup()) so that register
4079 * state the guest cannot see is preserved across the emulated
4080 * instruction.
4081 *
4082 * 64-bit guest mode is rejected by check_fxsr().
4083 */
4084static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4085{
4086 struct fxregs_state fx_state;
4087 int rc;
4088
4089 rc = check_fxsr(ctxt);
4090 if (rc != X86EMUL_CONTINUE)
4091 return rc;
4092
4093 emulator_get_fpu();
4094
4095 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4096
4097 emulator_put_fpu();
4098
4099 if (rc != X86EMUL_CONTINUE)
4100 return rc;
4101
4102 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4103 fxstate_size(ctxt));
4104}
4105
4106/*
4107 * Fill in the part of the FXSAVE image that the guest did not provide:
4108 * run FXSAVE into a temporary buffer and copy everything beyond
4109 * used_size from it into fx_state, so that the subsequent FXRSTOR only
4110 * changes the portion the guest was able to supply and leaves the
4111 * remaining register state untouched.
4112 */
4113static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4114 const size_t used_size)
4115{
4116 struct fxregs_state fx_tmp;
4117 int rc;
4118
4119 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4120 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4121 __fxstate_size(16) - used_size);
4122
4123 return rc;
4124}
4125
4126static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4127{
4128 struct fxregs_state fx_state;
4129 int rc;
4130 size_t size;
4131
4132 rc = check_fxsr(ctxt);
4133 if (rc != X86EMUL_CONTINUE)
4134 return rc;
4135
4136 size = fxstate_size(ctxt);
4137 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4138 if (rc != X86EMUL_CONTINUE)
4139 return rc;
4140
4141 emulator_get_fpu();
4142
4143 if (size < __fxstate_size(16)) {
4144 rc = fxregs_fixup(&fx_state, size);
4145 if (rc != X86EMUL_CONTINUE)
4146 goto out;
4147 }
4148
4149 if (fx_state.mxcsr >> 16) {
4150 rc = emulate_gp(ctxt, 0);
4151 goto out;
4152 }
4153
4154 if (rc == X86EMUL_CONTINUE)
4155 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4156
4157out:
4158 emulator_put_fpu();
4159
4160 return rc;
4161}
4162
4163static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4164{
4165 u32 eax, ecx, edx;
4166
4167 eax = reg_read(ctxt, VCPU_REGS_RAX);
4168 edx = reg_read(ctxt, VCPU_REGS_RDX);
4169 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4170
4171 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4172 return emulate_gp(ctxt, 0);
4173
4174 return X86EMUL_CONTINUE;
4175}
4176
4177static bool valid_cr(int nr)
4178{
4179 switch (nr) {
4180 case 0:
4181 case 2 ... 4:
4182 case 8:
4183 return true;
4184 default:
4185 return false;
4186 }
4187}
4188
4189static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4190{
4191 if (!valid_cr(ctxt->modrm_reg))
4192 return emulate_ud(ctxt);
4193
4194 return X86EMUL_CONTINUE;
4195}
4196
4197static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4198{
4199 u64 new_val = ctxt->src.val64;
4200 int cr = ctxt->modrm_reg;
4201 u64 efer = 0;
4202
4203 static u64 cr_reserved_bits[] = {
4204 0xffffffff00000000ULL,
4205 0, 0, 0,
4206 CR4_RESERVED_BITS,
4207 0, 0, 0,
4208 CR8_RESERVED_BITS,
4209 };
4210
4211 if (!valid_cr(cr))
4212 return emulate_ud(ctxt);
4213
4214 if (new_val & cr_reserved_bits[cr])
4215 return emulate_gp(ctxt, 0);
4216
4217 switch (cr) {
4218 case 0: {
4219 u64 cr4;
4220 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4221 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4222 return emulate_gp(ctxt, 0);
4223
4224 cr4 = ctxt->ops->get_cr(ctxt, 4);
4225 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4226
4227 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4228 !(cr4 & X86_CR4_PAE))
4229 return emulate_gp(ctxt, 0);
4230
4231 break;
4232 }
4233 case 3: {
4234 u64 rsvd = 0;
4235
4236 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4237 if (efer & EFER_LMA) {
4238 u64 maxphyaddr;
4239 u32 eax, ebx, ecx, edx;
4240
4241 eax = 0x80000008;
4242 ecx = 0;
4243 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4244 &edx, true))
4245 maxphyaddr = eax & 0xff;
4246 else
4247 maxphyaddr = 36;
4248 rsvd = rsvd_bits(maxphyaddr, 63);
4249 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4250 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4251 }
4252
4253 if (new_val & rsvd)
4254 return emulate_gp(ctxt, 0);
4255
4256 break;
4257 }
4258 case 4: {
4259 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4260
4261 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4262 return emulate_gp(ctxt, 0);
4263
4264 break;
4265 }
4266 }
4267
4268 return X86EMUL_CONTINUE;
4269}
4270
4271static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4272{
4273 unsigned long dr7;
4274
4275 ctxt->ops->get_dr(ctxt, 7, &dr7);
4276
4277 /* Check whether DR7.GD (general detect, bit 13) is set */
4278 return dr7 & (1 << 13);
4279}
4280
4281static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4282{
4283 int dr = ctxt->modrm_reg;
4284 u64 cr4;
4285
4286 if (dr > 7)
4287 return emulate_ud(ctxt);
4288
4289 cr4 = ctxt->ops->get_cr(ctxt, 4);
4290 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4291 return emulate_ud(ctxt);
4292
4293 if (check_dr7_gd(ctxt)) {
4294 ulong dr6;
4295
4296 ctxt->ops->get_dr(ctxt, 6, &dr6);
4297 dr6 &= ~DR_TRAP_BITS;
4298 dr6 |= DR6_BD | DR6_RTM;
4299 ctxt->ops->set_dr(ctxt, 6, dr6);
4300 return emulate_db(ctxt);
4301 }
4302
4303 return X86EMUL_CONTINUE;
4304}
4305
4306static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4307{
4308 u64 new_val = ctxt->src.val64;
4309 int dr = ctxt->modrm_reg;
4310
4311 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4312 return emulate_gp(ctxt, 0);
4313
4314 return check_dr_read(ctxt);
4315}
4316
4317static int check_svme(struct x86_emulate_ctxt *ctxt)
4318{
4319 u64 efer = 0;
4320
4321 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4322
4323 if (!(efer & EFER_SVME))
4324 return emulate_ud(ctxt);
4325
4326 return X86EMUL_CONTINUE;
4327}
4328
4329static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4330{
4331 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4332
4333 /* Valid physical address? */
4334 if (rax & 0xffff000000000000ULL)
4335 return emulate_gp(ctxt, 0);
4336
4337 return check_svme(ctxt);
4338}
4339
4340static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4341{
4342 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4343
4344 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4345 return emulate_ud(ctxt);
4346
4347 return X86EMUL_CONTINUE;
4348}
4349
4350static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4351{
4352 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4353 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4354
4355 /*
4356 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4357 * in Ring3 when CR4.PCE=0.
4358 */
4359 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4360 return X86EMUL_CONTINUE;
4361
4362 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4363 ctxt->ops->check_pmc(ctxt, rcx))
4364 return emulate_gp(ctxt, 0);
4365
4366 return X86EMUL_CONTINUE;
4367}
4368
4369static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4370{
4371 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4372 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4373 return emulate_gp(ctxt, 0);
4374
4375 return X86EMUL_CONTINUE;
4376}
4377
4378static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4379{
4380 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4381 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4382 return emulate_gp(ctxt, 0);
4383
4384 return X86EMUL_CONTINUE;
4385}
4386
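/*
 * Shorthand macros for the opcode tables below: D() declares an entry by
 * decode flags only, N marks a not-implemented slot, I()/F() attach an
 * emulation callback or a fastop, DI()/II() additionally record an
 * intercept id and the *IP variants a permission-check hook, while
 * G/GD/E/GP/EXT/ID/MD point at group, group-dual, escape, prefix,
 * ModRM-extension, instruction-dual and mode-dual sub-tables. The 2bv
 * helpers expand into the usual byte/word-sized opcode pairs and F6ALU
 * into the six standard ALU operand encodings.
 */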
4387#define D(_y) { .flags = (_y) }
4388#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4389#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4390 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4391#define N D(NotImpl)
4392#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4393#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4394#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4395#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4396#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4397#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4398#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4399#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4400#define II(_f, _e, _i) \
4401 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4402#define IIP(_f, _e, _i, _p) \
4403 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4404 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4405#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4406
4407#define D2bv(_f) D((_f) | ByteOp), D(_f)
4408#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4409#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4410#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4411#define I2bvIP(_f, _e, _i, _p) \
4412 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4413
4414#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4415 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4416 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4417
4418static const struct opcode group7_rm0[] = {
4419 N,
4420 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4421 N, N, N, N, N, N,
4422};
4423
4424static const struct opcode group7_rm1[] = {
4425 DI(SrcNone | Priv, monitor),
4426 DI(SrcNone | Priv, mwait),
4427 N, N, N, N, N, N,
4428};
4429
4430static const struct opcode group7_rm2[] = {
4431 N,
4432 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4433 N, N, N, N, N, N,
4434};
4435
4436static const struct opcode group7_rm3[] = {
4437 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4438 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4439 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4440 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4441 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4442 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4443 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4444 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4445};
4446
4447static const struct opcode group7_rm7[] = {
4448 N,
4449 DIP(SrcNone, rdtscp, check_rdtsc),
4450 N, N, N, N, N, N,
4451};
4452
4453static const struct opcode group1[] = {
4454 F(Lock, em_add),
4455 F(Lock | PageTable, em_or),
4456 F(Lock, em_adc),
4457 F(Lock, em_sbb),
4458 F(Lock | PageTable, em_and),
4459 F(Lock, em_sub),
4460 F(Lock, em_xor),
4461 F(NoWrite, em_cmp),
4462};
4463
4464static const struct opcode group1A[] = {
4465 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4466};
4467
4468static const struct opcode group2[] = {
4469 F(DstMem | ModRM, em_rol),
4470 F(DstMem | ModRM, em_ror),
4471 F(DstMem | ModRM, em_rcl),
4472 F(DstMem | ModRM, em_rcr),
4473 F(DstMem | ModRM, em_shl),
4474 F(DstMem | ModRM, em_shr),
4475 F(DstMem | ModRM, em_shl),
4476 F(DstMem | ModRM, em_sar),
4477};
4478
4479static const struct opcode group3[] = {
4480 F(DstMem | SrcImm | NoWrite, em_test),
4481 F(DstMem | SrcImm | NoWrite, em_test),
4482 F(DstMem | SrcNone | Lock, em_not),
4483 F(DstMem | SrcNone | Lock, em_neg),
4484 F(DstXacc | Src2Mem, em_mul_ex),
4485 F(DstXacc | Src2Mem, em_imul_ex),
4486 F(DstXacc | Src2Mem, em_div_ex),
4487 F(DstXacc | Src2Mem, em_idiv_ex),
4488};
4489
4490static const struct opcode group4[] = {
4491 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4492 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4493 N, N, N, N, N, N,
4494};
4495
4496static const struct opcode group5[] = {
4497 F(DstMem | SrcNone | Lock, em_inc),
4498 F(DstMem | SrcNone | Lock, em_dec),
4499 I(SrcMem | NearBranch, em_call_near_abs),
4500 I(SrcMemFAddr | ImplicitOps, em_call_far),
4501 I(SrcMem | NearBranch, em_jmp_abs),
4502 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4503 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4504};
4505
4506static const struct opcode group6[] = {
4507 II(Prot | DstMem, em_sldt, sldt),
4508 II(Prot | DstMem, em_str, str),
4509 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4510 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4511 N, N, N, N,
4512};
4513
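/*
 * Group 7 (0F 01): the mod != 3 forms are the descriptor-table and MSW
 * instructions; the mod == 3 forms fan out further by the rm field via the
 * group7_rm* extension tables above.
 */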
4514static const struct group_dual group7 = { {
4515 II(Mov | DstMem, em_sgdt, sgdt),
4516 II(Mov | DstMem, em_sidt, sidt),
4517 II(SrcMem | Priv, em_lgdt, lgdt),
4518 II(SrcMem | Priv, em_lidt, lidt),
4519 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4520 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4521 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4522}, {
4523 EXT(0, group7_rm0),
4524 EXT(0, group7_rm1),
4525 EXT(0, group7_rm2),
4526 EXT(0, group7_rm3),
4527 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4528 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4529 EXT(0, group7_rm7),
4530} };
4531
4532static const struct opcode group8[] = {
4533 N, N, N, N,
4534 F(DstMem | SrcImmByte | NoWrite, em_bt),
4535 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4536 F(DstMem | SrcImmByte | Lock, em_btr),
4537 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4538};
4539
/*
 * The "memory" destination of RDPID is really always a register: this entry
 * is only reached from the register (mod == 3) row of group9.
 */
4544static const struct gprefix pfx_0f_c7_7 = {
4545 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4546};
4547
4548
4549static const struct group_dual group9 = { {
4550 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4551}, {
4552 N, N, N, N, N, N, N,
4553 GP(0, &pfx_0f_c7_7),
4554} };
4555
4556static const struct opcode group11[] = {
4557 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4558 X7(D(Undefined)),
4559};
4560
4561static const struct gprefix pfx_0f_ae_7 = {
4562 I(SrcMem | ByteOp, em_clflush), N, N, N,
4563};
4564
4565static const struct group_dual group15 = { {
4566 I(ModRM | Aligned16, em_fxsave),
4567 I(ModRM | Aligned16, em_fxrstor),
4568 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4569}, {
4570 N, N, N, N, N, N, N, N,
4571} };
4572
4573static const struct gprefix pfx_0f_6f_0f_7f = {
4574 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4575};
4576
4577static const struct instr_dual instr_dual_0f_2b = {
4578 I(0, em_mov), N
4579};
4580
4581static const struct gprefix pfx_0f_2b = {
4582 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4583};
4584
4585static const struct gprefix pfx_0f_10_0f_11 = {
4586 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4587};
4588
4589static const struct gprefix pfx_0f_28_0f_29 = {
4590 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4591};
4592
4593static const struct gprefix pfx_0f_e7 = {
4594 N, I(Sse, em_mov), N, N,
4595};
4596
4597static const struct escape escape_d9 = { {
4598 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4599}, {
4600
4601 N, N, N, N, N, N, N, N,
4602
4603 N, N, N, N, N, N, N, N,
4604
4605 N, N, N, N, N, N, N, N,
4606
4607 N, N, N, N, N, N, N, N,
4608
4609 N, N, N, N, N, N, N, N,
4610
4611 N, N, N, N, N, N, N, N,
4612
4613 N, N, N, N, N, N, N, N,
4614
4615 N, N, N, N, N, N, N, N,
4616} };
4617
4618static const struct escape escape_db = { {
4619 N, N, N, N, N, N, N, N,
4620}, {
4621
4622 N, N, N, N, N, N, N, N,
4623
4624 N, N, N, N, N, N, N, N,
4625
4626 N, N, N, N, N, N, N, N,
4627
4628 N, N, N, N, N, N, N, N,
4629
4630 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4631
4632 N, N, N, N, N, N, N, N,
4633
4634 N, N, N, N, N, N, N, N,
4635
4636 N, N, N, N, N, N, N, N,
4637} };
4638
4639static const struct escape escape_dd = { {
4640 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4641}, {
4642
4643 N, N, N, N, N, N, N, N,
4644
4645 N, N, N, N, N, N, N, N,
4646
4647 N, N, N, N, N, N, N, N,
4648
4649 N, N, N, N, N, N, N, N,
4650
4651 N, N, N, N, N, N, N, N,
4652
4653 N, N, N, N, N, N, N, N,
4654
4655 N, N, N, N, N, N, N, N,
4656
4657 N, N, N, N, N, N, N, N,
4658} };
4659
4660static const struct instr_dual instr_dual_0f_c3 = {
4661 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4662};
4663
4664static const struct mode_dual mode_dual_63 = {
4665 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4666};
4667
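/*
 * Primary (one-byte) opcode map, indexed by the opcode byte.  Each source
 * row covers the next 8 or 16 opcodes of the map; N marks an opcode the
 * emulator does not implement.
 */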
4668static const struct opcode opcode_table[256] = {
4669
4670 F6ALU(Lock, em_add),
4671 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4672 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4673
4674 F6ALU(Lock | PageTable, em_or),
4675 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4676 N,
4677
4678 F6ALU(Lock, em_adc),
4679 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4680 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4681
4682 F6ALU(Lock, em_sbb),
4683 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4684 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4685
4686 F6ALU(Lock | PageTable, em_and), N, N,
4687
4688 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4689
4690 F6ALU(Lock, em_xor), N, N,
4691
4692 F6ALU(NoWrite, em_cmp), N, N,
4693
4694 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4695
4696 X8(I(SrcReg | Stack, em_push)),
4697
4698 X8(I(DstReg | Stack, em_pop)),
4699
4700 I(ImplicitOps | Stack | No64, em_pusha),
4701 I(ImplicitOps | Stack | No64, em_popa),
4702 N, MD(ModRM, &mode_dual_63),
4703 N, N, N, N,
4704
4705 I(SrcImm | Mov | Stack, em_push),
4706 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4707 I(SrcImmByte | Mov | Stack, em_push),
4708 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4709 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4710 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
4711
4712 X16(D(SrcImmByte | NearBranch)),
4713
4714 G(ByteOp | DstMem | SrcImm, group1),
4715 G(DstMem | SrcImm, group1),
4716 G(ByteOp | DstMem | SrcImm | No64, group1),
4717 G(DstMem | SrcImmByte, group1),
4718 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4719 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4720
4721 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4722 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4723 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4724 D(ModRM | SrcMem | NoAccess | DstReg),
4725 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4726 G(0, group1A),
4727
4728 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4729
4730 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4731 I(SrcImmFAddr | No64, em_call_far), N,
4732 II(ImplicitOps | Stack, em_pushf, pushf),
4733 II(ImplicitOps | Stack, em_popf, popf),
4734 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4735
4736 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4737 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4738 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4739 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4740
4741 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4742 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4743 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4744 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4745
4746 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4747
4748 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4749
4750 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4751 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4752 I(ImplicitOps | NearBranch, em_ret),
4753 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4754 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4755 G(ByteOp, group11), G(0, group11),
4756
4757 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4758 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4759 I(ImplicitOps, em_ret_far),
4760 D(ImplicitOps), DI(SrcImmByte, intn),
4761 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4762
4763 G(Src2One | ByteOp, group2), G(Src2One, group2),
4764 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4765 I(DstAcc | SrcImmUByte | No64, em_aam),
4766 I(DstAcc | SrcImmUByte | No64, em_aad),
4767 F(DstAcc | ByteOp | No64, em_salc),
4768 I(DstAcc | SrcXLat | ByteOp, em_mov),
4769
4770 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4771
4772 X3(I(SrcImmByte | NearBranch, em_loop)),
4773 I(SrcImmByte | NearBranch, em_jcxz),
4774 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4775 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4776
4777 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4778 I(SrcImmFAddr | No64, em_jmp_far),
4779 D(SrcImmByte | ImplicitOps | NearBranch),
4780 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4781 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4782
4783 N, DI(ImplicitOps, icebp), N, N,
4784 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4785 G(ByteOp, group3), G(0, group3),
4786
4787 D(ImplicitOps), D(ImplicitOps),
4788 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4789 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4790};
4791
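/* Two-byte (0x0F-prefixed) opcode map. */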
4792static const struct opcode twobyte_table[256] = {
4793
4794 G(0, group6), GD(0, &group7), N, N,
4795 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4796 II(ImplicitOps | Priv, em_clts, clts), N,
4797 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4798 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4799
4800 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4801 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4802 N, N, N, N, N, N,
4803 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4804 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4805 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4806 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4807 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4808 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4809
4810 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4811 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4812 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4813 check_cr_write),
4814 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4815 check_dr_write),
4816 N, N, N, N,
4817 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4818 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4819 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4820 N, N, N, N,
4821
4822 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4823 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4824 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4825 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4826 I(ImplicitOps | EmulateOnUD, em_sysenter),
4827 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4828 N, N,
4829 N, N, N, N, N, N, N, N,
4830
4831 X16(D(DstReg | SrcMem | ModRM)),
4832
4833 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4834
4835 N, N, N, N,
4836 N, N, N, N,
4837 N, N, N, N,
4838 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4839
4840 N, N, N, N,
4841 N, N, N, N,
4842 N, N, N, N,
4843 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4844
4845 X16(D(SrcImm | NearBranch)),
4846
4847 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4848
4849 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4850 II(ImplicitOps, em_cpuid, cpuid),
4851 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4852 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4853 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4854
4855 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4856 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4857 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4858 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4859 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4860 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4861
4862 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4863 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4864 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4865 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4866 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4867 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4868
4869 N, N,
4870 G(BitOp, group8),
4871 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4872 I(DstReg | SrcMem | ModRM, em_bsf_c),
4873 I(DstReg | SrcMem | ModRM, em_bsr_c),
4874 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4875
4876 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4877 N, ID(0, &instr_dual_0f_c3),
4878 N, N, N, GD(0, &group9),
4879
4880 X8(I(DstReg, em_bswap)),
4881
4882 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4883
4884 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4885 N, N, N, N, N, N, N, N,
4886
4887 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4888};
4889
4890static const struct instr_dual instr_dual_0f_38_f0 = {
4891 I(DstReg | SrcMem | Mov, em_movbe), N
4892};
4893
4894static const struct instr_dual instr_dual_0f_38_f1 = {
4895 I(DstMem | SrcReg | Mov, em_movbe), N
4896};
4897
4898static const struct gprefix three_byte_0f_38_f0 = {
4899 ID(0, &instr_dual_0f_38_f0), N, N, N
4900};
4901
4902static const struct gprefix three_byte_0f_38_f1 = {
4903 ID(0, &instr_dual_0f_38_f1), N, N, N
4904};
4905
4906
4907
4908
4909
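/*
 * Three-byte (0x0F 0x38) opcode map: only MOVBE (0xf0/0xf1, no mandatory
 * prefix) is implemented, via the gprefix/instr_dual tables above.
 */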
4910static const struct opcode opcode_map_0f_38[256] = {
4911
4912 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4913
4914 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4915
4916 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4917 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4918
4919 N, N, X4(N), X8(N)
4920};
4921
4922#undef D
4923#undef N
4924#undef G
4925#undef GD
4926#undef I
4927#undef GP
4928#undef EXT
4929#undef MD
4930#undef ID
4931
4932#undef D2bv
4933#undef D2bvIP
4934#undef I2bv
4935#undef I2bvIP
#undef F6ALU
4937
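/*
 * Size of an immediate operand: byte ops take a 1-byte immediate, otherwise
 * the operand size applies, except that 64-bit operand size uses a
 * sign-extended 32-bit immediate (the MOV reg, imm64 form goes through
 * OpImm64 and bypasses this helper).
 */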
4938static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4939{
4940 unsigned size;
4941
4942 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4943 if (size == 8)
4944 size = 4;
4945 return size;
4946}
4947
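/*
 * Fetch an immediate operand of 'size' bytes from the instruction stream at
 * the current _eip, sign-extending it into op->val; when sign_extension is
 * false the value is masked back down to its natural width.
 */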
4948static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4949 unsigned size, bool sign_extension)
4950{
4951 int rc = X86EMUL_CONTINUE;
4952
4953 op->type = OP_IMM;
4954 op->bytes = size;
4955 op->addr.mem.ea = ctxt->_eip;
4956
4957 switch (op->bytes) {
4958 case 1:
4959 op->val = insn_fetch(s8, ctxt);
4960 break;
4961 case 2:
4962 op->val = insn_fetch(s16, ctxt);
4963 break;
4964 case 4:
4965 op->val = insn_fetch(s32, ctxt);
4966 break;
4967 case 8:
4968 op->val = insn_fetch(s64, ctxt);
4969 break;
4970 }
4971 if (!sign_extension) {
4972 switch (op->bytes) {
4973 case 1:
4974 op->val &= 0xff;
4975 break;
4976 case 2:
4977 op->val &= 0xffff;
4978 break;
4979 case 4:
4980 op->val &= 0xffffffff;
4981 break;
4982 }
4983 }
4984done:
4985 return rc;
4986}
4987
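/*
 * Decode a single operand according to one of the Op* selectors packed into
 * ctxt->d (the Dst, Src and Src2 fields of the opcode flags).
 */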
4988static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4989 unsigned d)
4990{
4991 int rc = X86EMUL_CONTINUE;
4992
4993 switch (d) {
4994 case OpReg:
4995 decode_register_operand(ctxt, op);
4996 break;
4997 case OpImmUByte:
4998 rc = decode_imm(ctxt, op, 1, false);
4999 break;
5000 case OpMem:
5001 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5002 mem_common:
5003 *op = ctxt->memop;
5004 ctxt->memopp = op;
5005 if (ctxt->d & BitOp)
5006 fetch_bit_operand(ctxt);
5007 op->orig_val = op->val;
5008 break;
5009 case OpMem64:
5010 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
5011 goto mem_common;
5012 case OpAcc:
5013 op->type = OP_REG;
5014 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5015 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5016 fetch_register_operand(op);
5017 op->orig_val = op->val;
5018 break;
5019 case OpAccLo:
5020 op->type = OP_REG;
5021 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
5022 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5023 fetch_register_operand(op);
5024 op->orig_val = op->val;
5025 break;
5026 case OpAccHi:
5027 if (ctxt->d & ByteOp) {
5028 op->type = OP_NONE;
5029 break;
5030 }
5031 op->type = OP_REG;
5032 op->bytes = ctxt->op_bytes;
5033 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5034 fetch_register_operand(op);
5035 op->orig_val = op->val;
5036 break;
5037 case OpDI:
5038 op->type = OP_MEM;
5039 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5040 op->addr.mem.ea =
5041 register_address(ctxt, VCPU_REGS_RDI);
5042 op->addr.mem.seg = VCPU_SREG_ES;
5043 op->val = 0;
5044 op->count = 1;
5045 break;
5046 case OpDX:
5047 op->type = OP_REG;
5048 op->bytes = 2;
5049 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5050 fetch_register_operand(op);
5051 break;
5052 case OpCL:
5053 op->type = OP_IMM;
5054 op->bytes = 1;
5055 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5056 break;
5057 case OpImmByte:
5058 rc = decode_imm(ctxt, op, 1, true);
5059 break;
5060 case OpOne:
5061 op->type = OP_IMM;
5062 op->bytes = 1;
5063 op->val = 1;
5064 break;
5065 case OpImm:
5066 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5067 break;
5068 case OpImm64:
5069 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5070 break;
5071 case OpMem8:
5072 ctxt->memop.bytes = 1;
5073 if (ctxt->memop.type == OP_REG) {
5074 ctxt->memop.addr.reg = decode_register(ctxt,
5075 ctxt->modrm_rm, true);
5076 fetch_register_operand(&ctxt->memop);
5077 }
5078 goto mem_common;
5079 case OpMem16:
5080 ctxt->memop.bytes = 2;
5081 goto mem_common;
5082 case OpMem32:
5083 ctxt->memop.bytes = 4;
5084 goto mem_common;
5085 case OpImmU16:
5086 rc = decode_imm(ctxt, op, 2, false);
5087 break;
5088 case OpImmU:
5089 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5090 break;
5091 case OpSI:
5092 op->type = OP_MEM;
5093 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5094 op->addr.mem.ea =
5095 register_address(ctxt, VCPU_REGS_RSI);
5096 op->addr.mem.seg = ctxt->seg_override;
5097 op->val = 0;
5098 op->count = 1;
5099 break;
5100 case OpXLat:
5101 op->type = OP_MEM;
5102 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5103 op->addr.mem.ea =
5104 address_mask(ctxt,
5105 reg_read(ctxt, VCPU_REGS_RBX) +
5106 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5107 op->addr.mem.seg = ctxt->seg_override;
5108 op->val = 0;
5109 break;
5110 case OpImmFAddr:
5111 op->type = OP_IMM;
5112 op->addr.mem.ea = ctxt->_eip;
5113 op->bytes = ctxt->op_bytes + 2;
5114 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5115 break;
5116 case OpMemFAddr:
5117 ctxt->memop.bytes = ctxt->op_bytes + 2;
5118 goto mem_common;
5119 case OpES:
5120 op->type = OP_IMM;
5121 op->val = VCPU_SREG_ES;
5122 break;
5123 case OpCS:
5124 op->type = OP_IMM;
5125 op->val = VCPU_SREG_CS;
5126 break;
5127 case OpSS:
5128 op->type = OP_IMM;
5129 op->val = VCPU_SREG_SS;
5130 break;
5131 case OpDS:
5132 op->type = OP_IMM;
5133 op->val = VCPU_SREG_DS;
5134 break;
5135 case OpFS:
5136 op->type = OP_IMM;
5137 op->val = VCPU_SREG_FS;
5138 break;
5139 case OpGS:
5140 op->type = OP_IMM;
5141 op->val = VCPU_SREG_GS;
5142 break;
5143 case OpImplicit:
 /* implicit operands are decoded by the handlers themselves; fall through */
5145 default:
5146 op->type = OP_NONE;
5147 break;
5148 }
5149
5150done:
5151 return rc;
5152}
5153
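/*
 * Decode the instruction at ctxt->eip: consume prefixes, walk the opcode
 * tables, then decode ModRM/SIB and all operands.  Returns EMULATION_OK on
 * success or EMULATION_FAILED if the instruction cannot be handled.
 */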
5154int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5155{
5156 int rc = X86EMUL_CONTINUE;
5157 int mode = ctxt->mode;
5158 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5159 bool op_prefix = false;
5160 bool has_seg_override = false;
5161 struct opcode opcode;
5162 u16 dummy;
5163 struct desc_struct desc;
5164
5165 ctxt->memop.type = OP_NONE;
5166 ctxt->memopp = NULL;
5167 ctxt->_eip = ctxt->eip;
5168 ctxt->fetch.ptr = ctxt->fetch.data;
5169 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5170 ctxt->opcode_len = 1;
5171 ctxt->intercept = x86_intercept_none;
5172 if (insn_len > 0)
5173 memcpy(ctxt->fetch.data, insn, insn_len);
5174 else {
5175 rc = __do_insn_fetch_bytes(ctxt, 1);
5176 if (rc != X86EMUL_CONTINUE)
5177 goto done;
5178 }
5179
5180 switch (mode) {
5181 case X86EMUL_MODE_REAL:
5182 case X86EMUL_MODE_VM86:
5183 def_op_bytes = def_ad_bytes = 2;
5184 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5185 if (desc.d)
5186 def_op_bytes = def_ad_bytes = 4;
5187 break;
5188 case X86EMUL_MODE_PROT16:
5189 def_op_bytes = def_ad_bytes = 2;
5190 break;
5191 case X86EMUL_MODE_PROT32:
5192 def_op_bytes = def_ad_bytes = 4;
5193 break;
5194#ifdef CONFIG_X86_64
5195 case X86EMUL_MODE_PROT64:
5196 def_op_bytes = 4;
5197 def_ad_bytes = 8;
5198 break;
5199#endif
5200 default:
5201 return EMULATION_FAILED;
5202 }
5203
5204 ctxt->op_bytes = def_op_bytes;
5205 ctxt->ad_bytes = def_ad_bytes;
5206
5207
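 /* Legacy prefixes: operand/address size, segment overrides, LOCK, REP. */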
5208 for (;;) {
5209 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5210 case 0x66:
5211 op_prefix = true;
5212
5213 ctxt->op_bytes = def_op_bytes ^ 6;
5214 break;
5215 case 0x67:
5216 if (mode == X86EMUL_MODE_PROT64)
5217
5218 ctxt->ad_bytes = def_ad_bytes ^ 12;
5219 else
5220
5221 ctxt->ad_bytes = def_ad_bytes ^ 6;
5222 break;
5223 case 0x26:
5224 has_seg_override = true;
5225 ctxt->seg_override = VCPU_SREG_ES;
5226 break;
5227 case 0x2e:
5228 has_seg_override = true;
5229 ctxt->seg_override = VCPU_SREG_CS;
5230 break;
5231 case 0x36:
5232 has_seg_override = true;
5233 ctxt->seg_override = VCPU_SREG_SS;
5234 break;
5235 case 0x3e:
5236 has_seg_override = true;
5237 ctxt->seg_override = VCPU_SREG_DS;
5238 break;
5239 case 0x64:
5240 has_seg_override = true;
5241 ctxt->seg_override = VCPU_SREG_FS;
5242 break;
5243 case 0x65:
5244 has_seg_override = true;
5245 ctxt->seg_override = VCPU_SREG_GS;
5246 break;
5247 case 0x40 ... 0x4f:
5248 if (mode != X86EMUL_MODE_PROT64)
5249 goto done_prefixes;
5250 ctxt->rex_prefix = ctxt->b;
5251 continue;
5252 case 0xf0:
5253 ctxt->lock_prefix = 1;
5254 break;
5255 case 0xf2:
5256 case 0xf3:
5257 ctxt->rep_prefix = ctxt->b;
5258 break;
5259 default:
5260 goto done_prefixes;
5261 }
5262
5263
5264
5265 ctxt->rex_prefix = 0;
5266 }
5267
5268done_prefixes:
5269
5270
5271 if (ctxt->rex_prefix & 8)
5272 ctxt->op_bytes = 8;
5273
5274
5275 opcode = opcode_table[ctxt->b];
5276
5277 if (ctxt->b == 0x0f) {
5278 ctxt->opcode_len = 2;
5279 ctxt->b = insn_fetch(u8, ctxt);
5280 opcode = twobyte_table[ctxt->b];
5281
5282
5283 if (ctxt->b == 0x38) {
5284 ctxt->opcode_len = 3;
5285 ctxt->b = insn_fetch(u8, ctxt);
5286 opcode = opcode_map_0f_38[ctxt->b];
5287 }
5288 }
5289 ctxt->d = opcode.flags;
5290
5291 if (ctxt->d & ModRM)
5292 ctxt->modrm = insn_fetch(u8, ctxt);
5293
5294
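 /* 0xc4/0xc5 are VEX prefixes here rather than LES/LDS; VEX-encoded instructions are not implemented. */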
5295 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5296 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5297 ctxt->d = NotImpl;
5298 }
5299
5300 while (ctxt->d & GroupMask) {
5301 switch (ctxt->d & GroupMask) {
5302 case Group:
5303 goffset = (ctxt->modrm >> 3) & 7;
5304 opcode = opcode.u.group[goffset];
5305 break;
5306 case GroupDual:
5307 goffset = (ctxt->modrm >> 3) & 7;
5308 if ((ctxt->modrm >> 6) == 3)
5309 opcode = opcode.u.gdual->mod3[goffset];
5310 else
5311 opcode = opcode.u.gdual->mod012[goffset];
5312 break;
5313 case RMExt:
5314 goffset = ctxt->modrm & 7;
5315 opcode = opcode.u.group[goffset];
5316 break;
5317 case Prefix:
5318 if (ctxt->rep_prefix && op_prefix)
5319 return EMULATION_FAILED;
5320 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5321 switch (simd_prefix) {
5322 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5323 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5324 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5325 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5326 }
5327 break;
5328 case Escape:
5329 if (ctxt->modrm > 0xbf) {
5330 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5331 u32 index = array_index_nospec(
5332 ctxt->modrm - 0xc0, size);
5333
5334 opcode = opcode.u.esc->high[index];
5335 } else {
5336 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5337 }
5338 break;
5339 case InstrDual:
5340 if ((ctxt->modrm >> 6) == 3)
5341 opcode = opcode.u.idual->mod3;
5342 else
5343 opcode = opcode.u.idual->mod012;
5344 break;
5345 case ModeDual:
5346 if (ctxt->mode == X86EMUL_MODE_PROT64)
5347 opcode = opcode.u.mdual->mode64;
5348 else
5349 opcode = opcode.u.mdual->mode32;
5350 break;
5351 default:
5352 return EMULATION_FAILED;
5353 }
5354
5355 ctxt->d &= ~(u64)GroupMask;
5356 ctxt->d |= opcode.flags;
5357 }
5358
5359
5360 if (ctxt->d == 0)
5361 return EMULATION_FAILED;
5362
5363 ctxt->execute = opcode.u.execute;
5364
5365 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5366 return EMULATION_FAILED;
5367
5368 if (unlikely(ctxt->d &
5369 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5370 No16))) {
 /*
  * Stash the per-opcode permission-check and intercept hooks; they are
  * only consulted later when the CheckPerm/Intercept flags are set.
  */
5375 ctxt->check_perm = opcode.check_perm;
5376 ctxt->intercept = opcode.intercept;
5377
5378 if (ctxt->d & NotImpl)
5379 return EMULATION_FAILED;
5380
5381 if (mode == X86EMUL_MODE_PROT64) {
5382 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5383 ctxt->op_bytes = 8;
5384 else if (ctxt->d & NearBranch)
5385 ctxt->op_bytes = 8;
5386 }
5387
5388 if (ctxt->d & Op3264) {
5389 if (mode == X86EMUL_MODE_PROT64)
5390 ctxt->op_bytes = 8;
5391 else
5392 ctxt->op_bytes = 4;
5393 }
5394
5395 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5396 ctxt->op_bytes = 4;
5397
5398 if (ctxt->d & Sse)
5399 ctxt->op_bytes = 16;
5400 else if (ctxt->d & Mmx)
5401 ctxt->op_bytes = 8;
5402 }
5403
5404
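 /* Decode the ModRM and SIB bytes, or an absolute memory offset (MemAbs). */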
5405 if (ctxt->d & ModRM) {
5406 rc = decode_modrm(ctxt, &ctxt->memop);
5407 if (!has_seg_override) {
5408 has_seg_override = true;
5409 ctxt->seg_override = ctxt->modrm_seg;
5410 }
5411 } else if (ctxt->d & MemAbs)
5412 rc = decode_abs(ctxt, &ctxt->memop);
5413 if (rc != X86EMUL_CONTINUE)
5414 goto done;
5415
5416 if (!has_seg_override)
5417 ctxt->seg_override = VCPU_SREG_DS;
5418
5419 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5420
 /*
  * Decode and fetch the source operand: register, memory
  * or immediate.
  */
5425 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5426 if (rc != X86EMUL_CONTINUE)
5427 goto done;
5428
 /*
  * Decode and fetch the second source operand: register, memory
  * or immediate.
  */
5433 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5434 if (rc != X86EMUL_CONTINUE)
5435 goto done;
5436
 /* Decode and fetch the destination operand: register or memory. */
5438 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5439
5440 if (ctxt->rip_relative && likely(ctxt->memopp))
5441 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5442 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5443
5444done:
5445 if (rc == X86EMUL_PROPAGATE_FAULT)
5446 ctxt->have_exception = true;
5447 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5448}
5449
5450bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5451{
5452 return ctxt->d & PageTable;
5453}
5454
5455static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5456{
 /*
  * String compares (CMPS, SCAS) have a second termination condition when
  * prefixed with REPE/REPZ or REPNE/REPNZ:
  *  - REPE/REPZ terminates when ZF = 0,
  *  - REPNE/REPNZ terminates when ZF = 1.
  */
5464 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5465 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5466 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5467 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5468 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5469 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5470 return true;
5471
5472 return false;
5473}
5474
5475static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5476{
5477 int rc;
5478
5479 emulator_get_fpu();
5480 rc = asm_safe("fwait");
5481 emulator_put_fpu();
5482
5483 if (unlikely(rc != X86EMUL_CONTINUE))
5484 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5485
5486 return X86EMUL_CONTINUE;
5487}
5488
5489static void fetch_possible_mmx_operand(struct operand *op)
5490{
5491 if (op->type == OP_MM)
5492 read_mmx_reg(&op->mm_val, op->addr.mm);
5493}
5494
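/*
 * Invoke a fastop stub: the stubs for the different operand sizes are laid
 * out FASTOP_SIZE bytes apart, operands are passed in RAX/RDX/RCX, and the
 * resulting arithmetic flags are folded back into ctxt->eflags.  A stub
 * that faults (the divide helpers) clears the target pointer, which is
 * reported here as #DE.
 */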
5495static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5496{
5497 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5498
5499 if (!(ctxt->d & ByteOp))
5500 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5501
5502 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5503 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5504 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5505 : "c"(ctxt->src2.val));
5506
5507 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5508 if (!fop)
5509 return emulate_de(ctxt);
5510 return X86EMUL_CONTINUE;
5511}
5512
5513void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5514{
5515 memset(&ctxt->rip_relative, 0,
5516 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5517
5518 ctxt->io_read.pos = 0;
5519 ctxt->io_read.end = 0;
5520 ctxt->mem_read.end = 0;
5521}
5522
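/*
 * Execute a previously decoded instruction: perform privilege, intercept
 * and permission checks, read the memory operands, dispatch to the bound
 * handler or the open-coded cases below, then write back the results and
 * advance RIP.
 */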
5523int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5524{
5525 const struct x86_emulate_ops *ops = ctxt->ops;
5526 int rc = X86EMUL_CONTINUE;
5527 int saved_dst_type = ctxt->dst.type;
5528 unsigned emul_flags;
5529
5530 ctxt->mem_read.pos = 0;
5531
5532
5533 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5534 rc = emulate_ud(ctxt);
5535 goto done;
5536 }
5537
5538 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5539 rc = emulate_ud(ctxt);
5540 goto done;
5541 }
5542
5543 emul_flags = ctxt->ops->get_hflags(ctxt);
5544 if (unlikely(ctxt->d &
5545 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5546 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5547 (ctxt->d & Undefined)) {
5548 rc = emulate_ud(ctxt);
5549 goto done;
5550 }
5551
5552 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5553 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5554 rc = emulate_ud(ctxt);
5555 goto done;
5556 }
5557
5558 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5559 rc = emulate_nm(ctxt);
5560 goto done;
5561 }
5562
5563 if (ctxt->d & Mmx) {
5564 rc = flush_pending_x87_faults(ctxt);
5565 if (rc != X86EMUL_CONTINUE)
5566 goto done;
5567
5568
5569
5570
5571 fetch_possible_mmx_operand(&ctxt->src);
5572 fetch_possible_mmx_operand(&ctxt->src2);
5573 if (!(ctxt->d & Mov))
5574 fetch_possible_mmx_operand(&ctxt->dst);
5575 }
5576
5577 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5578 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5579 X86_ICPT_PRE_EXCEPT);
5580 if (rc != X86EMUL_CONTINUE)
5581 goto done;
5582 }
5583
5584
5585 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5586 rc = emulate_ud(ctxt);
5587 goto done;
5588 }
5589
5590
5591 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5592 if (ctxt->d & PrivUD)
5593 rc = emulate_ud(ctxt);
5594 else
5595 rc = emulate_gp(ctxt, 0);
5596 goto done;
5597 }
5598
5599
5600 if (ctxt->d & CheckPerm) {
5601 rc = ctxt->check_perm(ctxt);
5602 if (rc != X86EMUL_CONTINUE)
5603 goto done;
5604 }
5605
5606 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5607 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5608 X86_ICPT_POST_EXCEPT);
5609 if (rc != X86EMUL_CONTINUE)
5610 goto done;
5611 }
5612
5613 if (ctxt->rep_prefix && (ctxt->d & String)) {
5614
5615 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5616 string_registers_quirk(ctxt);
5617 ctxt->eip = ctxt->_eip;
5618 ctxt->eflags &= ~X86_EFLAGS_RF;
5619 goto done;
5620 }
5621 }
5622 }
5623
5624 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5625 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5626 ctxt->src.valptr, ctxt->src.bytes);
5627 if (rc != X86EMUL_CONTINUE)
5628 goto done;
5629 ctxt->src.orig_val64 = ctxt->src.val64;
5630 }
5631
5632 if (ctxt->src2.type == OP_MEM) {
5633 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5634 &ctxt->src2.val, ctxt->src2.bytes);
5635 if (rc != X86EMUL_CONTINUE)
5636 goto done;
5637 }
5638
5639 if ((ctxt->d & DstMask) == ImplicitOps)
5640 goto special_insn;
5641
5642
5643 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5644
5645 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5646 &ctxt->dst.val, ctxt->dst.bytes);
5647 if (rc != X86EMUL_CONTINUE) {
5648 if (!(ctxt->d & NoWrite) &&
5649 rc == X86EMUL_PROPAGATE_FAULT &&
5650 ctxt->exception.vector == PF_VECTOR)
5651 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5652 goto done;
5653 }
5654 }
5655
5656 ctxt->dst.orig_val64 = ctxt->dst.val64;
5657
5658special_insn:
5659
5660 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5661 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5662 X86_ICPT_POST_MEMACCESS);
5663 if (rc != X86EMUL_CONTINUE)
5664 goto done;
5665 }
5666
5667 if (ctxt->rep_prefix && (ctxt->d & String))
5668 ctxt->eflags |= X86_EFLAGS_RF;
5669 else
5670 ctxt->eflags &= ~X86_EFLAGS_RF;
5671
5672 if (ctxt->execute) {
5673 if (ctxt->d & Fastop)
5674 rc = fastop(ctxt, ctxt->fop);
5675 else
5676 rc = ctxt->execute(ctxt);
5677 if (rc != X86EMUL_CONTINUE)
5678 goto done;
5679 goto writeback;
5680 }
5681
5682 if (ctxt->opcode_len == 2)
5683 goto twobyte_insn;
5684 else if (ctxt->opcode_len == 3)
5685 goto threebyte_insn;
5686
5687 switch (ctxt->b) {
5688 case 0x70 ... 0x7f:
5689 if (test_cc(ctxt->b, ctxt->eflags))
5690 rc = jmp_rel(ctxt, ctxt->src.val);
5691 break;
5692 case 0x8d:
5693 ctxt->dst.val = ctxt->src.addr.mem.ea;
5694 break;
5695 case 0x90 ... 0x97:
5696 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5697 ctxt->dst.type = OP_NONE;
5698 else
5699 rc = em_xchg(ctxt);
5700 break;
5701 case 0x98:
5702 switch (ctxt->op_bytes) {
5703 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5704 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5705 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5706 }
5707 break;
5708 case 0xcc:
5709 rc = emulate_int(ctxt, 3);
5710 break;
5711 case 0xcd:
5712 rc = emulate_int(ctxt, ctxt->src.val);
5713 break;
5714 case 0xce:
5715 if (ctxt->eflags & X86_EFLAGS_OF)
5716 rc = emulate_int(ctxt, 4);
5717 break;
5718 case 0xe9:
5719 case 0xeb:
5720 rc = jmp_rel(ctxt, ctxt->src.val);
5721 ctxt->dst.type = OP_NONE;
5722 break;
5723 case 0xf4:
5724 ctxt->ops->halt(ctxt);
5725 break;
5726 case 0xf5:
5727
5728 ctxt->eflags ^= X86_EFLAGS_CF;
5729 break;
5730 case 0xf8:
5731 ctxt->eflags &= ~X86_EFLAGS_CF;
5732 break;
5733 case 0xf9:
5734 ctxt->eflags |= X86_EFLAGS_CF;
5735 break;
5736 case 0xfc:
5737 ctxt->eflags &= ~X86_EFLAGS_DF;
5738 break;
5739 case 0xfd:
5740 ctxt->eflags |= X86_EFLAGS_DF;
5741 break;
5742 default:
5743 goto cannot_emulate;
5744 }
5745
5746 if (rc != X86EMUL_CONTINUE)
5747 goto done;
5748
5749writeback:
5750 if (ctxt->d & SrcWrite) {
5751 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5752 rc = writeback(ctxt, &ctxt->src);
5753 if (rc != X86EMUL_CONTINUE)
5754 goto done;
5755 }
5756 if (!(ctxt->d & NoWrite)) {
5757 rc = writeback(ctxt, &ctxt->dst);
5758 if (rc != X86EMUL_CONTINUE)
5759 goto done;
5760 }
5761
 /*
  * Restore the destination operand type, since the decode results are
  * reused when a string instruction is restarted.
  */
5766 ctxt->dst.type = saved_dst_type;
5767
5768 if ((ctxt->d & SrcMask) == SrcSI)
5769 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5770
5771 if ((ctxt->d & DstMask) == DstDI)
5772 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5773
5774 if (ctxt->rep_prefix && (ctxt->d & String)) {
5775 unsigned int count;
5776 struct read_cache *r = &ctxt->io_read;
5777 if ((ctxt->d & SrcMask) == SrcSI)
5778 count = ctxt->src.count;
5779 else
5780 count = ctxt->dst.count;
5781 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5782
5783 if (!string_insn_completed(ctxt)) {
5784
5785
5786
5787
5788 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5789 (r->end == 0 || r->end != r->pos)) {
5790
5791
5792
5793
5794
5795 ctxt->mem_read.end = 0;
5796 writeback_registers(ctxt);
5797 return EMULATION_RESTART;
5798 }
5799 goto done;
5800 }
5801 ctxt->eflags &= ~X86_EFLAGS_RF;
5802 }
5803
5804 ctxt->eip = ctxt->_eip;
5805 if (ctxt->mode != X86EMUL_MODE_PROT64)
5806 ctxt->eip = (u32)ctxt->_eip;
5807
5808done:
5809 if (rc == X86EMUL_PROPAGATE_FAULT) {
5810 WARN_ON(ctxt->exception.vector > 0x1f);
5811 ctxt->have_exception = true;
5812 }
5813 if (rc == X86EMUL_INTERCEPTED)
5814 return EMULATION_INTERCEPTED;
5815
5816 if (rc == X86EMUL_CONTINUE)
5817 writeback_registers(ctxt);
5818
5819 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5820
5821twobyte_insn:
5822 switch (ctxt->b) {
5823 case 0x09:
5824 (ctxt->ops->wbinvd)(ctxt);
5825 break;
5826 case 0x08:
5827 case 0x0d:
5828 case 0x18:
5829 case 0x1f:
5830 break;
5831 case 0x20:
5832 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5833 break;
5834 case 0x21:
5835 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5836 break;
5837 case 0x40 ... 0x4f:
5838 if (test_cc(ctxt->b, ctxt->eflags))
5839 ctxt->dst.val = ctxt->src.val;
5840 else if (ctxt->op_bytes != 4)
5841 ctxt->dst.type = OP_NONE;
5842 break;
5843 case 0x80 ... 0x8f:
5844 if (test_cc(ctxt->b, ctxt->eflags))
5845 rc = jmp_rel(ctxt, ctxt->src.val);
5846 break;
5847 case 0x90 ... 0x9f:
5848 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5849 break;
5850 case 0xb6 ... 0xb7:
5851 ctxt->dst.bytes = ctxt->op_bytes;
5852 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5853 : (u16) ctxt->src.val;
5854 break;
5855 case 0xbe ... 0xbf:
5856 ctxt->dst.bytes = ctxt->op_bytes;
5857 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5858 (s16) ctxt->src.val;
5859 break;
5860 default:
5861 goto cannot_emulate;
5862 }
5863
5864threebyte_insn:
5865
5866 if (rc != X86EMUL_CONTINUE)
5867 goto done;
5868
5869 goto writeback;
5870
5871cannot_emulate:
5872 return EMULATION_FAILED;
5873}
5874
5875void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5876{
5877 invalidate_registers(ctxt);
5878}
5879
5880void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5881{
5882 writeback_registers(ctxt);
5883}
5884
5885bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5886{
5887 if (ctxt->rep_prefix && (ctxt->d & String))
5888 return false;
5889
5890 if (ctxt->d & TwoMemOp)
5891 return false;
5892
5893 return true;
5894}
5895