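/*
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator,
 * used by KVM when a guest instruction has to be emulated in software
 * (for example for MMIO accesses or real-mode code).
 */
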
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"
33
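/*
 * Operand types, used in the 5-bit Dst/Src/Src2 fields of the opcode
 * decode flags below.
 */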
#define OpNone             0ull
#define OpImplicit         1ull
#define OpReg              2ull
#define OpMem              3ull
#define OpAcc              4ull
#define OpDI               5ull
#define OpMem64            6ull
#define OpImmUByte         7ull
#define OpDX               8ull
#define OpCL               9ull
#define OpImmByte         10ull
#define OpOne             11ull
#define OpImm             12ull
#define OpMem16           13ull
#define OpMem32           14ull
#define OpImmU            15ull
#define OpSI              16ull
#define OpImmFAddr        17ull
#define OpMemFAddr        18ull
#define OpImmU16          19ull
#define OpES              20ull
#define OpCS              21ull
#define OpSS              22ull
#define OpDS              23ull
#define OpFS              24ull
#define OpGS              25ull
#define OpMem8            26ull
#define OpImm64           27ull
#define OpXLat            28ull
#define OpAccLo           29ull
#define OpAccHi           30ull

#define OpBits             5
#define OpMask             ((1ull << OpBits) - 1)
71
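/*
 * Opcode decode flags.  The destination operand type lives in bits 1-5 and
 * the source operand type in bits 6-10 (both using the Op* values above);
 * the remaining bits describe instruction properties, privilege and
 * alignment requirements, and the decode-group mechanism.
 */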
82#define ByteOp (1<<0)
83
84#define DstShift 1
85#define ImplicitOps (OpImplicit << DstShift)
86#define DstReg (OpReg << DstShift)
87#define DstMem (OpMem << DstShift)
88#define DstAcc (OpAcc << DstShift)
89#define DstDI (OpDI << DstShift)
90#define DstMem64 (OpMem64 << DstShift)
91#define DstMem16 (OpMem16 << DstShift)
92#define DstImmUByte (OpImmUByte << DstShift)
93#define DstDX (OpDX << DstShift)
94#define DstAccLo (OpAccLo << DstShift)
95#define DstMask (OpMask << DstShift)
96
97#define SrcShift 6
98#define SrcNone (OpNone << SrcShift)
99#define SrcReg (OpReg << SrcShift)
100#define SrcMem (OpMem << SrcShift)
101#define SrcMem16 (OpMem16 << SrcShift)
102#define SrcMem32 (OpMem32 << SrcShift)
103#define SrcImm (OpImm << SrcShift)
104#define SrcImmByte (OpImmByte << SrcShift)
105#define SrcOne (OpOne << SrcShift)
106#define SrcImmUByte (OpImmUByte << SrcShift)
107#define SrcImmU (OpImmU << SrcShift)
108#define SrcSI (OpSI << SrcShift)
109#define SrcXLat (OpXLat << SrcShift)
110#define SrcImmFAddr (OpImmFAddr << SrcShift)
111#define SrcMemFAddr (OpMemFAddr << SrcShift)
112#define SrcAcc (OpAcc << SrcShift)
113#define SrcImmU16 (OpImmU16 << SrcShift)
114#define SrcImm64 (OpImm64 << SrcShift)
115#define SrcDX (OpDX << SrcShift)
116#define SrcMem8 (OpMem8 << SrcShift)
117#define SrcAccHi (OpAccHi << SrcShift)
118#define SrcMask (OpMask << SrcShift)
119#define BitOp (1<<11)
120#define MemAbs (1<<12)
121#define String (1<<13)
122#define Stack (1<<14)
123#define GroupMask (7<<15)
124#define Group (1<<15)
125#define GroupDual (2<<15)
126#define Prefix (3<<15)
127#define RMExt (4<<15)
128#define Escape (5<<15)
129#define InstrDual (6<<15)
130#define ModeDual (7<<15)
131#define Sse (1<<18)
132
133#define ModRM (1<<19)
134
135#define Mov (1<<20)
136
137#define Prot (1<<21)
138#define EmulateOnUD (1<<22)
139#define NoAccess (1<<23)
140#define Op3264 (1<<24)
141#define Undefined (1<<25)
142#define Lock (1<<26)
143#define Priv (1<<27)
144#define No64 (1<<28)
145#define PageTable (1 << 29)
146#define NotImpl (1 << 30)
147
148#define Src2Shift (31)
149#define Src2None (OpNone << Src2Shift)
150#define Src2Mem (OpMem << Src2Shift)
151#define Src2CL (OpCL << Src2Shift)
152#define Src2ImmByte (OpImmByte << Src2Shift)
153#define Src2One (OpOne << Src2Shift)
154#define Src2Imm (OpImm << Src2Shift)
155#define Src2ES (OpES << Src2Shift)
156#define Src2CS (OpCS << Src2Shift)
157#define Src2SS (OpSS << Src2Shift)
158#define Src2DS (OpDS << Src2Shift)
159#define Src2FS (OpFS << Src2Shift)
160#define Src2GS (OpGS << Src2Shift)
161#define Src2Mask (OpMask << Src2Shift)
162#define Mmx ((u64)1 << 40)
163#define AlignMask ((u64)7 << 41)
164#define Aligned ((u64)1 << 41)
165#define Unaligned ((u64)2 << 41)
166#define Avx ((u64)3 << 41)
167#define Aligned16 ((u64)4 << 41)
168#define Fastop ((u64)1 << 44)
169#define NoWrite ((u64)1 << 45)
170#define SrcWrite ((u64)1 << 46)
171#define NoMod ((u64)1 << 47)
172#define Intercept ((u64)1 << 48)
173#define CheckPerm ((u64)1 << 49)
174#define PrivUD ((u64)1 << 51)
175#define NearBranch ((u64)1 << 52)
176#define No16 ((u64)1 << 53)
177#define IncSP ((u64)1 << 54)
178#define TwoMemOp ((u64)1 << 55)
179
180#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
181
182#define X2(x...) x, x
183#define X3(x...) X2(x), x
184#define X4(x...) X2(x), X2(x)
185#define X5(x...) X4(x), x
186#define X6(x...) X4(x), X2(x)
187#define X7(x...) X4(x), X3(x)
188#define X8(x...) X4(x), X4(x)
189#define X16(x...) X8(x), X8(x)
190
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};
236
237#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
238
239enum x86_transfer_type {
240 X86_TRANSFER_NONE,
241 X86_TRANSFER_CALL_JMP,
242 X86_TRANSFER_RET,
243 X86_TRANSFER_TASK_SWITCH,
244};
245
246static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
247{
248 if (!(ctxt->regs_valid & (1 << nr))) {
249 ctxt->regs_valid |= 1 << nr;
250 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
251 }
252 return ctxt->_regs[nr];
253}
254
255static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
256{
257 ctxt->regs_valid |= 1 << nr;
258 ctxt->regs_dirty |= 1 << nr;
259 return &ctxt->_regs[nr];
260}
261
262static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
263{
264 reg_read(ctxt, nr);
265 return reg_write(ctxt, nr);
266}
267
268static void writeback_registers(struct x86_emulate_ctxt *ctxt)
269{
270 unsigned reg;
271
272 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
273 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
274}
275
276static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
277{
278 ctxt->regs_dirty = 0;
279 ctxt->regs_valid = 0;
280}
281
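/*
 * Arithmetic status flags that the emulator passes into, and reads back
 * from, the fastop stubs via the host EFLAGS register.
 */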
286#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
287 X86_EFLAGS_PF|X86_EFLAGS_CF)
288
289#ifdef CONFIG_X86_64
290#define ON64(x) x
291#else
292#define ON64(x)
293#endif
294
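/*
 * fastop functions are small assembly stubs, each padded to FASTOP_SIZE
 * bytes so that the variant for a given operand size can be located by
 * pointer arithmetic rather than a jump table.  By convention the
 * destination operand is passed in RAX, the source in RDX, a second
 * source (such as a shift count) in CL, and the arithmetic flags travel
 * in and out through EFLAGS; kvm_fastop_exception reports a fault by
 * clearing ESI.
 */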
308static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
309
310#define __FOP_FUNC(name) \
311 ".align " __stringify(FASTOP_SIZE) " \n\t" \
312 ".type " name ", @function \n\t" \
313 name ":\n\t"
314
315#define FOP_FUNC(name) \
316 __FOP_FUNC(#name)
317
318#define __FOP_RET(name) \
319 "ret \n\t" \
320 ".size " name ", .-" name "\n\t"
321
322#define FOP_RET(name) \
323 __FOP_RET(#name)
324
325#define FOP_START(op) \
326 extern void em_##op(struct fastop *fake); \
327 asm(".pushsection .text, \"ax\" \n\t" \
328 ".global em_" #op " \n\t" \
329 ".align " __stringify(FASTOP_SIZE) " \n\t" \
330 "em_" #op ":\n\t"
331
332#define FOP_END \
333 ".popsection")
334
335#define __FOPNOP(name) \
336 __FOP_FUNC(name) \
337 __FOP_RET(name)
338
339#define FOPNOP() \
340 __FOPNOP(__stringify(__UNIQUE_ID(nop)))
341
342#define FOP1E(op, dst) \
343 __FOP_FUNC(#op "_" #dst) \
344 "10: " #op " %" #dst " \n\t" \
345 __FOP_RET(#op "_" #dst)
346
347#define FOP1EEX(op, dst) \
348 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
349
350#define FASTOP1(op) \
351 FOP_START(op) \
352 FOP1E(op##b, al) \
353 FOP1E(op##w, ax) \
354 FOP1E(op##l, eax) \
355 ON64(FOP1E(op##q, rax)) \
356 FOP_END
357
358
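/* 1-operand fastop with the operand in the src2 register (CL/CX/ECX/RCX). */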
359#define FASTOP1SRC2(op, name) \
360 FOP_START(name) \
361 FOP1E(op, cl) \
362 FOP1E(op, cx) \
363 FOP1E(op, ecx) \
364 ON64(FOP1E(op, rcx)) \
365 FOP_END
366
367
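/* As FASTOP1SRC2, but with an exception-table entry for faulting ops (DIV/IDIV). */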
368#define FASTOP1SRC2EX(op, name) \
369 FOP_START(name) \
370 FOP1EEX(op, cl) \
371 FOP1EEX(op, cx) \
372 FOP1EEX(op, ecx) \
373 ON64(FOP1EEX(op, rcx)) \
374 FOP_END
375
376#define FOP2E(op, dst, src) \
377 __FOP_FUNC(#op "_" #dst "_" #src) \
378 #op " %" #src ", %" #dst " \n\t" \
379 __FOP_RET(#op "_" #dst "_" #src)
380
381#define FASTOP2(op) \
382 FOP_START(op) \
383 FOP2E(op##b, al, dl) \
384 FOP2E(op##w, ax, dx) \
385 FOP2E(op##l, eax, edx) \
386 ON64(FOP2E(op##q, rax, rdx)) \
387 FOP_END
388
389
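/* 2-operand fastop, word-sized and larger operands only (no byte form). */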
390#define FASTOP2W(op) \
391 FOP_START(op) \
392 FOPNOP() \
393 FOP2E(op##w, ax, dx) \
394 FOP2E(op##l, eax, edx) \
395 ON64(FOP2E(op##q, rax, rdx)) \
396 FOP_END
397
398
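/* 2-operand fastop with the shift/rotate count in CL. */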
399#define FASTOP2CL(op) \
400 FOP_START(op) \
401 FOP2E(op##b, al, cl) \
402 FOP2E(op##w, ax, cl) \
403 FOP2E(op##l, eax, cl) \
404 ON64(FOP2E(op##q, rax, cl)) \
405 FOP_END
406
407
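/* 2-operand fastop with source and destination operands swapped (reversed CMP). */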
408#define FASTOP2R(op, name) \
409 FOP_START(name) \
410 FOP2E(op##b, dl, al) \
411 FOP2E(op##w, dx, ax) \
412 FOP2E(op##l, edx, eax) \
413 ON64(FOP2E(op##q, rdx, rax)) \
414 FOP_END
415
416#define FOP3E(op, dst, src, src2) \
417 __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
418 #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
419 __FOP_RET(#op "_" #dst "_" #src "_" #src2)
420
421
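/* 3-operand fastop, word-sized and larger, with the shift count in CL (SHLD/SHRD). */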
422#define FASTOP3WCL(op) \
423 FOP_START(op) \
424 FOPNOP() \
425 FOP3E(op##w, ax, dx, cl) \
426 FOP3E(op##l, eax, edx, cl) \
427 ON64(FOP3E(op##q, rax, rdx, cl)) \
428 FOP_END
429
430
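/*
 * SETcc stubs: one 4-byte-aligned stub per condition code, so that
 * test_cc() can index them directly from the condition encoding.
 */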
431#define FOP_SETCC(op) \
432 ".align 4 \n\t" \
433 ".type " #op ", @function \n\t" \
434 #op ": \n\t" \
435 #op " %al \n\t" \
436 __FOP_RET(#op)
437
438asm(".pushsection .fixup, \"ax\"\n"
439 ".global kvm_fastop_exception \n"
440 "kvm_fastop_exception: xor %esi, %esi; ret\n"
441 ".popsection");
442
443FOP_START(setcc)
444FOP_SETCC(seto)
445FOP_SETCC(setno)
446FOP_SETCC(setc)
447FOP_SETCC(setnc)
448FOP_SETCC(setz)
449FOP_SETCC(setnz)
450FOP_SETCC(setbe)
451FOP_SETCC(setnbe)
452FOP_SETCC(sets)
453FOP_SETCC(setns)
454FOP_SETCC(setp)
455FOP_SETCC(setnp)
456FOP_SETCC(setl)
457FOP_SETCC(setnl)
458FOP_SETCC(setle)
459FOP_SETCC(setnle)
460FOP_END;
461
462FOP_START(salc)
463FOP_FUNC(salc)
464"pushf; sbb %al, %al; popf \n\t"
465FOP_RET(salc)
466FOP_END;
467
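/*
 * Run a single host instruction with an exception fixup attached; returns
 * X86EMUL_UNHANDLEABLE if the instruction faults, X86EMUL_CONTINUE
 * otherwise.  The caller supplies asm operands and clobbers via
 * 'inoutclob'.
 */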
472#define asm_safe(insn, inoutclob...) \
473({ \
474 int _fault = 0; \
475 \
476 asm volatile("1:" insn "\n" \
477 "2:\n" \
478 ".pushsection .fixup, \"ax\"\n" \
479 "3: movl $1, %[_fault]\n" \
480 " jmp 2b\n" \
481 ".popsection\n" \
482 _ASM_EXTABLE(1b, 3b) \
483 : [_fault] "+qm"(_fault) inoutclob ); \
484 \
485 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
486})
487
488static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
489 enum x86_intercept intercept,
490 enum x86_intercept_stage stage)
491{
492 struct x86_instruction_info info = {
493 .intercept = intercept,
494 .rep_prefix = ctxt->rep_prefix,
495 .modrm_mod = ctxt->modrm_mod,
496 .modrm_reg = ctxt->modrm_reg,
497 .modrm_rm = ctxt->modrm_rm,
498 .src_val = ctxt->src.val64,
499 .dst_val = ctxt->dst.val64,
500 .src_bytes = ctxt->src.bytes,
501 .dst_bytes = ctxt->dst.bytes,
502 .ad_bytes = ctxt->ad_bytes,
503 .next_rip = ctxt->eip,
504 };
505
506 return ctxt->ops->intercept(ctxt, &info, stage);
507}
508
509static void assign_masked(ulong *dest, ulong src, ulong mask)
510{
511 *dest = (*dest & ~mask) | (src & mask);
512}
513
514static void assign_register(unsigned long *reg, u64 val, int bytes)
515{
516
517 switch (bytes) {
518 case 1:
519 *(u8 *)reg = (u8)val;
520 break;
521 case 2:
522 *(u16 *)reg = (u16)val;
523 break;
524 case 4:
525 *reg = (u32)val;
526 break;
527 case 8:
528 *reg = val;
529 break;
530 }
531}
532
533static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
534{
535 return (1UL << (ctxt->ad_bytes << 3)) - 1;
536}
537
538static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
539{
540 u16 sel;
541 struct desc_struct ss;
542
543 if (ctxt->mode == X86EMUL_MODE_PROT64)
544 return ~0UL;
545 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
546 return ~0U >> ((ss.d ^ 1) * 16);
547}
548
549static int stack_size(struct x86_emulate_ctxt *ctxt)
550{
551 return (__fls(stack_mask(ctxt)) + 1) >> 3;
552}
553
554
555static inline unsigned long
556address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
557{
558 if (ctxt->ad_bytes == sizeof(unsigned long))
559 return reg;
560 else
561 return reg & ad_mask(ctxt);
562}
563
564static inline unsigned long
565register_address(struct x86_emulate_ctxt *ctxt, int reg)
566{
567 return address_mask(ctxt, reg_read(ctxt, reg));
568}
569
570static void masked_increment(ulong *reg, ulong mask, int inc)
571{
572 assign_masked(reg, *reg + inc, mask);
573}
574
575static inline void
576register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
577{
578 ulong *preg = reg_rmw(ctxt, reg);
579
580 assign_register(preg, *preg + inc, ctxt->ad_bytes);
581}
582
583static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
584{
585 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
586}
587
588static u32 desc_limit_scaled(struct desc_struct *desc)
589{
590 u32 limit = get_desc_limit(desc);
591
592 return desc->g ? (limit << 12) | 0xfff : limit;
593}
594
595static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
596{
597 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
598 return 0;
599
600 return ctxt->ops->get_cached_segment_base(ctxt, seg);
601}
602
603static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
604 u32 error, bool valid)
605{
606 WARN_ON(vec > 0x1f);
607 ctxt->exception.vector = vec;
608 ctxt->exception.error_code = error;
609 ctxt->exception.error_code_valid = valid;
610 return X86EMUL_PROPAGATE_FAULT;
611}
612
613static int emulate_db(struct x86_emulate_ctxt *ctxt)
614{
615 return emulate_exception(ctxt, DB_VECTOR, 0, false);
616}
617
618static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
619{
620 return emulate_exception(ctxt, GP_VECTOR, err, true);
621}
622
623static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
624{
625 return emulate_exception(ctxt, SS_VECTOR, err, true);
626}
627
628static int emulate_ud(struct x86_emulate_ctxt *ctxt)
629{
630 return emulate_exception(ctxt, UD_VECTOR, 0, false);
631}
632
633static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
634{
635 return emulate_exception(ctxt, TS_VECTOR, err, true);
636}
637
638static int emulate_de(struct x86_emulate_ctxt *ctxt)
639{
640 return emulate_exception(ctxt, DE_VECTOR, 0, false);
641}
642
643static int emulate_nm(struct x86_emulate_ctxt *ctxt)
644{
645 return emulate_exception(ctxt, NM_VECTOR, 0, false);
646}
647
648static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
649{
650 u16 selector;
651 struct desc_struct desc;
652
653 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
654 return selector;
655}
656
657static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
658 unsigned seg)
659{
660 u16 dummy;
661 u32 base3;
662 struct desc_struct desc;
663
664 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
665 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
666}
667
668static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
669{
670 return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
671}
672
673static inline bool emul_is_noncanonical_address(u64 la,
674 struct x86_emulate_ctxt *ctxt)
675{
676 return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
677}
678
679
680
681
682
683
684
685
686
687
688static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
689{
690 u64 alignment = ctxt->d & AlignMask;
691
692 if (likely(size < 16))
693 return 1;
694
695 switch (alignment) {
696 case Unaligned:
697 case Avx:
698 return 1;
699 case Aligned16:
700 return 16;
701 case Aligned:
702 default:
703 return size;
704 }
705}
706
707static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
708 struct segmented_address addr,
709 unsigned *max_size, unsigned size,
710 bool write, bool fetch,
711 enum x86emul_mode mode, ulong *linear)
712{
713 struct desc_struct desc;
714 bool usable;
715 ulong la;
716 u32 lim;
717 u16 sel;
718 u8 va_bits;
719
720 la = seg_base(ctxt, addr.seg) + addr.ea;
721 *max_size = 0;
722 switch (mode) {
723 case X86EMUL_MODE_PROT64:
724 *linear = la;
725 va_bits = ctxt_virt_addr_bits(ctxt);
726 if (get_canonical(la, va_bits) != la)
727 goto bad;
728
729 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
730 if (size > *max_size)
731 goto bad;
732 break;
733 default:
734 *linear = la = (u32)la;
735 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
736 addr.seg);
737 if (!usable)
738 goto bad;
739
740 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
741 || !(desc.type & 2)) && write)
742 goto bad;
743
744 if (!fetch && (desc.type & 8) && !(desc.type & 2))
745 goto bad;
746 lim = desc_limit_scaled(&desc);
747 if (!(desc.type & 8) && (desc.type & 4)) {
748
749 if (addr.ea <= lim)
750 goto bad;
751 lim = desc.d ? 0xffffffff : 0xffff;
752 }
753 if (addr.ea > lim)
754 goto bad;
755 if (lim == 0xffffffff)
756 *max_size = ~0u;
757 else {
758 *max_size = (u64)lim + 1 - addr.ea;
759 if (size > *max_size)
760 goto bad;
761 }
762 break;
763 }
764 if (la & (insn_alignment(ctxt, size) - 1))
765 return emulate_gp(ctxt, 0);
766 return X86EMUL_CONTINUE;
767bad:
768 if (addr.seg == VCPU_SREG_SS)
769 return emulate_ss(ctxt, 0);
770 else
771 return emulate_gp(ctxt, 0);
772}
773
774static int linearize(struct x86_emulate_ctxt *ctxt,
775 struct segmented_address addr,
776 unsigned size, bool write,
777 ulong *linear)
778{
779 unsigned max_size;
780 return __linearize(ctxt, addr, &max_size, size, write, false,
781 ctxt->mode, linear);
782}
783
784static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
785 enum x86emul_mode mode)
786{
787 ulong linear;
788 int rc;
789 unsigned max_size;
790 struct segmented_address addr = { .seg = VCPU_SREG_CS,
791 .ea = dst };
792
793 if (ctxt->op_bytes != sizeof(unsigned long))
794 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
795 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
796 if (rc == X86EMUL_CONTINUE)
797 ctxt->_eip = addr.ea;
798 return rc;
799}
800
801static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
802{
803 return assign_eip(ctxt, dst, ctxt->mode);
804}
805
806static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
807 const struct desc_struct *cs_desc)
808{
809 enum x86emul_mode mode = ctxt->mode;
810 int rc;
811
812#ifdef CONFIG_X86_64
813 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
814 if (cs_desc->l) {
815 u64 efer = 0;
816
817 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
818 if (efer & EFER_LMA)
819 mode = X86EMUL_MODE_PROT64;
820 } else
821 mode = X86EMUL_MODE_PROT32;
822 }
823#endif
824 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
825 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
826 rc = assign_eip(ctxt, dst, mode);
827 if (rc == X86EMUL_CONTINUE)
828 ctxt->mode = mode;
829 return rc;
830}
831
832static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
833{
834 return assign_eip_near(ctxt, ctxt->_eip + rel);
835}
836
837static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
838 void *data, unsigned size)
839{
840 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
841}
842
843static int linear_write_system(struct x86_emulate_ctxt *ctxt,
844 ulong linear, void *data,
845 unsigned int size)
846{
847 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
848}
849
850static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
851 struct segmented_address addr,
852 void *data,
853 unsigned size)
854{
855 int rc;
856 ulong linear;
857
858 rc = linearize(ctxt, addr, size, false, &linear);
859 if (rc != X86EMUL_CONTINUE)
860 return rc;
861 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
862}
863
864static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
865 struct segmented_address addr,
866 void *data,
867 unsigned int size)
868{
869 int rc;
870 ulong linear;
871
872 rc = linearize(ctxt, addr, size, true, &linear);
873 if (rc != X86EMUL_CONTINUE)
874 return rc;
875 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
876}
877
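/*
 * Refill the instruction fetch cache so that at least 'op_size' more
 * bytes are available, without crossing a page boundary.
 */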
882static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
883{
884 int rc;
885 unsigned size, max_size;
886 unsigned long linear;
887 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
888 struct segmented_address addr = { .seg = VCPU_SREG_CS,
889 .ea = ctxt->eip + cur_size };
890
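	/*
	 * The exact number of bytes needed is not known yet, so fetch as
	 * much as possible up to the 15-byte instruction-length limit, the
	 * segment limit and the end of the page.  __linearize() is called
	 * with size 0 so it performs no length check itself; max_size is
	 * checked against op_size below instead.
	 */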
901 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
902 &linear);
903 if (unlikely(rc != X86EMUL_CONTINUE))
904 return rc;
905
906 size = min_t(unsigned, 15UL ^ cur_size, max_size);
907 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
908
909
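	/*
	 * An instruction can straddle at most two pages, and the first chunk
	 * was fetched when decoding started, so running out of bytes here
	 * means the 15-byte instruction-length limit has been hit.
	 */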
915 if (unlikely(size < op_size))
916 return emulate_gp(ctxt, 0);
917
918 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
919 size, &ctxt->exception);
920 if (unlikely(rc != X86EMUL_CONTINUE))
921 return rc;
922 ctxt->fetch.end += size;
923 return X86EMUL_CONTINUE;
924}
925
926static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
927 unsigned size)
928{
929 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
930
931 if (unlikely(done_size < size))
932 return __do_insn_fetch_bytes(ctxt, size - done_size);
933 else
934 return X86EMUL_CONTINUE;
935}
936
937
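/* Fetch the next object of the given type from the instruction stream. */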
938#define insn_fetch(_type, _ctxt) \
939({ _type _x; \
940 \
941 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
942 if (rc != X86EMUL_CONTINUE) \
943 goto done; \
944 ctxt->_eip += sizeof(_type); \
945 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
946 ctxt->fetch.ptr += sizeof(_type); \
947 _x; \
948})
949
950#define insn_fetch_arr(_arr, _size, _ctxt) \
951({ \
952 rc = do_insn_fetch_bytes(_ctxt, _size); \
953 if (rc != X86EMUL_CONTINUE) \
954 goto done; \
955 ctxt->_eip += (_size); \
956 memcpy(_arr, ctxt->fetch.ptr, _size); \
957 ctxt->fetch.ptr += (_size); \
958})
959
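/*
 * Return a pointer to the GPR selected by a ModRM register number (or the
 * low opcode bits); for byte operands without a REX prefix, values 4-7
 * select the legacy high-byte registers AH/CH/DH/BH.
 */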
965static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
966 int byteop)
967{
968 void *p;
969 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
970
971 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
972 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
973 else
974 p = reg_rmw(ctxt, modrm_reg);
975 return p;
976}
977
978static int read_descriptor(struct x86_emulate_ctxt *ctxt,
979 struct segmented_address addr,
980 u16 *size, unsigned long *address, int op_bytes)
981{
982 int rc;
983
984 if (op_bytes == 2)
985 op_bytes = 3;
986 *address = 0;
987 rc = segmented_read_std(ctxt, addr, size, 2);
988 if (rc != X86EMUL_CONTINUE)
989 return rc;
990 addr.ea += 2;
991 rc = segmented_read_std(ctxt, addr, address, op_bytes);
992 return rc;
993}
994
995FASTOP2(add);
996FASTOP2(or);
997FASTOP2(adc);
998FASTOP2(sbb);
999FASTOP2(and);
1000FASTOP2(sub);
1001FASTOP2(xor);
1002FASTOP2(cmp);
1003FASTOP2(test);
1004
1005FASTOP1SRC2(mul, mul_ex);
1006FASTOP1SRC2(imul, imul_ex);
1007FASTOP1SRC2EX(div, div_ex);
1008FASTOP1SRC2EX(idiv, idiv_ex);
1009
1010FASTOP3WCL(shld);
1011FASTOP3WCL(shrd);
1012
1013FASTOP2W(imul);
1014
1015FASTOP1(not);
1016FASTOP1(neg);
1017FASTOP1(inc);
1018FASTOP1(dec);
1019
1020FASTOP2CL(rol);
1021FASTOP2CL(ror);
1022FASTOP2CL(rcl);
1023FASTOP2CL(rcr);
1024FASTOP2CL(shl);
1025FASTOP2CL(shr);
1026FASTOP2CL(sar);
1027
1028FASTOP2W(bsf);
1029FASTOP2W(bsr);
1030FASTOP2W(bt);
1031FASTOP2W(bts);
1032FASTOP2W(btr);
1033FASTOP2W(btc);
1034
1035FASTOP2(xadd);
1036
1037FASTOP2R(cmp, cmp_r);
1038
1039static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1040{
1041
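	/* If the source is zero, skip the writeback but still update EFLAGS. */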
1042 if (ctxt->src.val == 0)
1043 ctxt->dst.type = OP_NONE;
1044 return fastop(ctxt, em_bsf);
1045}
1046
1047static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1048{
1049
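	/* If the source is zero, skip the writeback but still update EFLAGS. */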
1050 if (ctxt->src.val == 0)
1051 ctxt->dst.type = OP_NONE;
1052 return fastop(ctxt, em_bsr);
1053}
1054
1055static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1056{
1057 u8 rc;
1058 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1059
1060 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1061 asm("push %[flags]; popf; " CALL_NOSPEC
1062 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1063 return rc;
1064}
1065
1066static void fetch_register_operand(struct operand *op)
1067{
1068 switch (op->bytes) {
1069 case 1:
1070 op->val = *(u8 *)op->addr.reg;
1071 break;
1072 case 2:
1073 op->val = *(u16 *)op->addr.reg;
1074 break;
1075 case 4:
1076 op->val = *(u32 *)op->addr.reg;
1077 break;
1078 case 8:
1079 op->val = *(u64 *)op->addr.reg;
1080 break;
1081 }
1082}
1083
1084static void emulator_get_fpu(void)
1085{
1086 fpregs_lock();
1087
1088 fpregs_assert_state_consistent();
1089 if (test_thread_flag(TIF_NEED_FPU_LOAD))
1090 switch_fpu_return();
1091}
1092
1093static void emulator_put_fpu(void)
1094{
1095 fpregs_unlock();
1096}
1097
1098static void read_sse_reg(sse128_t *data, int reg)
1099{
1100 emulator_get_fpu();
1101 switch (reg) {
1102 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1103 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1104 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1105 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1106 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1107 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1108 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1109 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1110#ifdef CONFIG_X86_64
1111 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1112 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1113 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1114 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1115 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1116 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1117 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1118 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1119#endif
1120 default: BUG();
1121 }
1122 emulator_put_fpu();
1123}
1124
1125static void write_sse_reg(sse128_t *data, int reg)
1126{
1127 emulator_get_fpu();
1128 switch (reg) {
1129 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1130 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1131 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1132 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1133 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1134 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1135 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1136 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1137#ifdef CONFIG_X86_64
1138 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1139 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1140 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1141 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1142 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1143 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1144 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1145 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1146#endif
1147 default: BUG();
1148 }
1149 emulator_put_fpu();
1150}
1151
1152static void read_mmx_reg(u64 *data, int reg)
1153{
1154 emulator_get_fpu();
1155 switch (reg) {
1156 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1157 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1158 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1159 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1160 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1161 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1162 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1163 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1164 default: BUG();
1165 }
1166 emulator_put_fpu();
1167}
1168
1169static void write_mmx_reg(u64 *data, int reg)
1170{
1171 emulator_get_fpu();
1172 switch (reg) {
1173 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1174 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1175 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1176 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1177 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1178 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1179 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1180 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1181 default: BUG();
1182 }
1183 emulator_put_fpu();
1184}
1185
1186static int em_fninit(struct x86_emulate_ctxt *ctxt)
1187{
1188 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1189 return emulate_nm(ctxt);
1190
1191 emulator_get_fpu();
1192 asm volatile("fninit");
1193 emulator_put_fpu();
1194 return X86EMUL_CONTINUE;
1195}
1196
1197static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1198{
1199 u16 fcw;
1200
1201 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1202 return emulate_nm(ctxt);
1203
1204 emulator_get_fpu();
1205 asm volatile("fnstcw %0": "+m"(fcw));
1206 emulator_put_fpu();
1207
1208 ctxt->dst.val = fcw;
1209
1210 return X86EMUL_CONTINUE;
1211}
1212
1213static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1214{
1215 u16 fsw;
1216
1217 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1218 return emulate_nm(ctxt);
1219
1220 emulator_get_fpu();
1221 asm volatile("fnstsw %0": "+m"(fsw));
1222 emulator_put_fpu();
1223
1224 ctxt->dst.val = fsw;
1225
1226 return X86EMUL_CONTINUE;
1227}
1228
1229static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1230 struct operand *op)
1231{
1232 unsigned reg = ctxt->modrm_reg;
1233
1234 if (!(ctxt->d & ModRM))
1235 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1236
1237 if (ctxt->d & Sse) {
1238 op->type = OP_XMM;
1239 op->bytes = 16;
1240 op->addr.xmm = reg;
1241 read_sse_reg(&op->vec_val, reg);
1242 return;
1243 }
1244 if (ctxt->d & Mmx) {
1245 reg &= 7;
1246 op->type = OP_MM;
1247 op->bytes = 8;
1248 op->addr.mm = reg;
1249 return;
1250 }
1251
1252 op->type = OP_REG;
1253 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1254 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1255
1256 fetch_register_operand(op);
1257 op->orig_val = op->val;
1258}
1259
1260static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1261{
1262 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1263 ctxt->modrm_seg = VCPU_SREG_SS;
1264}
1265
1266static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1267 struct operand *op)
1268{
1269 u8 sib;
1270 int index_reg, base_reg, scale;
1271 int rc = X86EMUL_CONTINUE;
1272 ulong modrm_ea = 0;
1273
1274 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1275 index_reg = (ctxt->rex_prefix << 2) & 8;
1276 base_reg = (ctxt->rex_prefix << 3) & 8;
1277
1278 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1279 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1280 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1281 ctxt->modrm_seg = VCPU_SREG_DS;
1282
1283 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1284 op->type = OP_REG;
1285 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1286 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1287 ctxt->d & ByteOp);
1288 if (ctxt->d & Sse) {
1289 op->type = OP_XMM;
1290 op->bytes = 16;
1291 op->addr.xmm = ctxt->modrm_rm;
1292 read_sse_reg(&op->vec_val, ctxt->modrm_rm);
1293 return rc;
1294 }
1295 if (ctxt->d & Mmx) {
1296 op->type = OP_MM;
1297 op->bytes = 8;
1298 op->addr.mm = ctxt->modrm_rm & 7;
1299 return rc;
1300 }
1301 fetch_register_operand(op);
1302 return rc;
1303 }
1304
1305 op->type = OP_MEM;
1306
1307 if (ctxt->ad_bytes == 2) {
1308 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1309 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1310 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1311 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1312
1313
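		/* 16-bit ModR/M: the effective address is built from BX/BP/SI/DI. */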
1314 switch (ctxt->modrm_mod) {
1315 case 0:
1316 if (ctxt->modrm_rm == 6)
1317 modrm_ea += insn_fetch(u16, ctxt);
1318 break;
1319 case 1:
1320 modrm_ea += insn_fetch(s8, ctxt);
1321 break;
1322 case 2:
1323 modrm_ea += insn_fetch(u16, ctxt);
1324 break;
1325 }
1326 switch (ctxt->modrm_rm) {
1327 case 0:
1328 modrm_ea += bx + si;
1329 break;
1330 case 1:
1331 modrm_ea += bx + di;
1332 break;
1333 case 2:
1334 modrm_ea += bp + si;
1335 break;
1336 case 3:
1337 modrm_ea += bp + di;
1338 break;
1339 case 4:
1340 modrm_ea += si;
1341 break;
1342 case 5:
1343 modrm_ea += di;
1344 break;
1345 case 6:
1346 if (ctxt->modrm_mod != 0)
1347 modrm_ea += bp;
1348 break;
1349 case 7:
1350 modrm_ea += bx;
1351 break;
1352 }
1353 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1354 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1355 ctxt->modrm_seg = VCPU_SREG_SS;
1356 modrm_ea = (u16)modrm_ea;
1357 } else {
1358
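		/* 32/64-bit ModR/M decode, possibly with a SIB byte. */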
1359 if ((ctxt->modrm_rm & 7) == 4) {
1360 sib = insn_fetch(u8, ctxt);
1361 index_reg |= (sib >> 3) & 7;
1362 base_reg |= sib & 7;
1363 scale = sib >> 6;
1364
1365 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1366 modrm_ea += insn_fetch(s32, ctxt);
1367 else {
1368 modrm_ea += reg_read(ctxt, base_reg);
1369 adjust_modrm_seg(ctxt, base_reg);
1370
1371 if ((ctxt->d & IncSP) &&
1372 base_reg == VCPU_REGS_RSP)
1373 modrm_ea += ctxt->op_bytes;
1374 }
1375 if (index_reg != 4)
1376 modrm_ea += reg_read(ctxt, index_reg) << scale;
1377 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1378 modrm_ea += insn_fetch(s32, ctxt);
1379 if (ctxt->mode == X86EMUL_MODE_PROT64)
1380 ctxt->rip_relative = 1;
1381 } else {
1382 base_reg = ctxt->modrm_rm;
1383 modrm_ea += reg_read(ctxt, base_reg);
1384 adjust_modrm_seg(ctxt, base_reg);
1385 }
1386 switch (ctxt->modrm_mod) {
1387 case 1:
1388 modrm_ea += insn_fetch(s8, ctxt);
1389 break;
1390 case 2:
1391 modrm_ea += insn_fetch(s32, ctxt);
1392 break;
1393 }
1394 }
1395 op->addr.mem.ea = modrm_ea;
1396 if (ctxt->ad_bytes != 8)
1397 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1398
1399done:
1400 return rc;
1401}
1402
1403static int decode_abs(struct x86_emulate_ctxt *ctxt,
1404 struct operand *op)
1405{
1406 int rc = X86EMUL_CONTINUE;
1407
1408 op->type = OP_MEM;
1409 switch (ctxt->ad_bytes) {
1410 case 2:
1411 op->addr.mem.ea = insn_fetch(u16, ctxt);
1412 break;
1413 case 4:
1414 op->addr.mem.ea = insn_fetch(u32, ctxt);
1415 break;
1416 case 8:
1417 op->addr.mem.ea = insn_fetch(u64, ctxt);
1418 break;
1419 }
1420done:
1421 return rc;
1422}
1423
1424static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1425{
1426 long sv = 0, mask;
1427
1428 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1429 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1430
1431 if (ctxt->src.bytes == 2)
1432 sv = (s16)ctxt->src.val & (s16)mask;
1433 else if (ctxt->src.bytes == 4)
1434 sv = (s32)ctxt->src.val & (s32)mask;
1435 else
1436 sv = (s64)ctxt->src.val & (s64)mask;
1437
1438 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1439 ctxt->dst.addr.mem.ea + (sv >> 3));
1440 }
1441
1442
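	/* Only the bit offset within the destination operand remains relevant. */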
1443 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1444}
1445
1446static int read_emulated(struct x86_emulate_ctxt *ctxt,
1447 unsigned long addr, void *dest, unsigned size)
1448{
1449 int rc;
1450 struct read_cache *mc = &ctxt->mem_read;
1451
1452 if (mc->pos < mc->end)
1453 goto read_cached;
1454
1455 WARN_ON((mc->end + size) >= sizeof(mc->data));
1456
1457 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1458 &ctxt->exception);
1459 if (rc != X86EMUL_CONTINUE)
1460 return rc;
1461
1462 mc->end += size;
1463
1464read_cached:
1465 memcpy(dest, mc->data + mc->pos, size);
1466 mc->pos += size;
1467 return X86EMUL_CONTINUE;
1468}
1469
1470static int segmented_read(struct x86_emulate_ctxt *ctxt,
1471 struct segmented_address addr,
1472 void *data,
1473 unsigned size)
1474{
1475 int rc;
1476 ulong linear;
1477
1478 rc = linearize(ctxt, addr, size, false, &linear);
1479 if (rc != X86EMUL_CONTINUE)
1480 return rc;
1481 return read_emulated(ctxt, linear, data, size);
1482}
1483
1484static int segmented_write(struct x86_emulate_ctxt *ctxt,
1485 struct segmented_address addr,
1486 const void *data,
1487 unsigned size)
1488{
1489 int rc;
1490 ulong linear;
1491
1492 rc = linearize(ctxt, addr, size, true, &linear);
1493 if (rc != X86EMUL_CONTINUE)
1494 return rc;
1495 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1496 &ctxt->exception);
1497}
1498
1499static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1500 struct segmented_address addr,
1501 const void *orig_data, const void *data,
1502 unsigned size)
1503{
1504 int rc;
1505 ulong linear;
1506
1507 rc = linearize(ctxt, addr, size, true, &linear);
1508 if (rc != X86EMUL_CONTINUE)
1509 return rc;
1510 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1511 size, &ctxt->exception);
1512}
1513
1514static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1515 unsigned int size, unsigned short port,
1516 void *dest)
1517{
1518 struct read_cache *rc = &ctxt->io_read;
1519
1520 if (rc->pos == rc->end) {
1521 unsigned int in_page, n;
1522 unsigned int count = ctxt->rep_prefix ?
1523 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1524 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1525 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1526 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1527 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1528 if (n == 0)
1529 n = 1;
1530 rc->pos = rc->end = 0;
1531 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1532 return 0;
1533 rc->end = n * size;
1534 }
1535
1536 if (ctxt->rep_prefix && (ctxt->d & String) &&
1537 !(ctxt->eflags & X86_EFLAGS_DF)) {
1538 ctxt->dst.data = rc->data + rc->pos;
1539 ctxt->dst.type = OP_MEM_STR;
1540 ctxt->dst.count = (rc->end - rc->pos) / size;
1541 rc->pos = rc->end;
1542 } else {
1543 memcpy(dest, rc->data + rc->pos, size);
1544 rc->pos += size;
1545 }
1546 return 1;
1547}
1548
1549static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1550 u16 index, struct desc_struct *desc)
1551{
1552 struct desc_ptr dt;
1553 ulong addr;
1554
1555 ctxt->ops->get_idt(ctxt, &dt);
1556
1557 if (dt.size < index * 8 + 7)
1558 return emulate_gp(ctxt, index << 3 | 0x2);
1559
1560 addr = dt.address + index * 8;
1561 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1562}
1563
1564static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1565 u16 selector, struct desc_ptr *dt)
1566{
1567 const struct x86_emulate_ops *ops = ctxt->ops;
1568 u32 base3 = 0;
1569
1570 if (selector & 1 << 2) {
1571 struct desc_struct desc;
1572 u16 sel;
1573
1574 memset(dt, 0, sizeof(*dt));
1575 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1576 VCPU_SREG_LDTR))
1577 return;
1578
1579 dt->size = desc_limit_scaled(&desc);
1580 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1581 } else
1582 ops->get_gdt(ctxt, dt);
1583}
1584
1585static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1586 u16 selector, ulong *desc_addr_p)
1587{
1588 struct desc_ptr dt;
1589 u16 index = selector >> 3;
1590 ulong addr;
1591
1592 get_descriptor_table_ptr(ctxt, selector, &dt);
1593
1594 if (dt.size < index * 8 + 7)
1595 return emulate_gp(ctxt, selector & 0xfffc);
1596
1597 addr = dt.address + index * 8;
1598
1599#ifdef CONFIG_X86_64
1600 if (addr >> 32 != 0) {
1601 u64 efer = 0;
1602
1603 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1604 if (!(efer & EFER_LMA))
1605 addr &= (u32)-1;
1606 }
1607#endif
1608
1609 *desc_addr_p = addr;
1610 return X86EMUL_CONTINUE;
1611}
1612
1613
1614static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1615 u16 selector, struct desc_struct *desc,
1616 ulong *desc_addr_p)
1617{
1618 int rc;
1619
1620 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1621 if (rc != X86EMUL_CONTINUE)
1622 return rc;
1623
1624 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1625}
1626
1627
1628static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1629 u16 selector, struct desc_struct *desc)
1630{
1631 int rc;
1632 ulong addr;
1633
1634 rc = get_descriptor_ptr(ctxt, selector, &addr);
1635 if (rc != X86EMUL_CONTINUE)
1636 return rc;
1637
1638 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1639}
1640
1641static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1642 u16 selector, int seg, u8 cpl,
1643 enum x86_transfer_type transfer,
1644 struct desc_struct *desc)
1645{
1646 struct desc_struct seg_desc, old_desc;
1647 u8 dpl, rpl;
1648 unsigned err_vec = GP_VECTOR;
1649 u32 err_code = 0;
1650 bool null_selector = !(selector & ~0x3);
1651 ulong desc_addr;
1652 int ret;
1653 u16 dummy;
1654 u32 base3 = 0;
1655
1656 memset(&seg_desc, 0, sizeof(seg_desc));
1657
1658 if (ctxt->mode == X86EMUL_MODE_REAL) {
1659
1660
1661 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1662 set_desc_base(&seg_desc, selector << 4);
1663 goto load;
1664 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1665
1666 set_desc_base(&seg_desc, selector << 4);
1667 set_desc_limit(&seg_desc, 0xffff);
1668 seg_desc.type = 3;
1669 seg_desc.p = 1;
1670 seg_desc.s = 1;
1671 seg_desc.dpl = 3;
1672 goto load;
1673 }
1674
1675 rpl = selector & 3;
1676
1677
1678 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1679 goto exception;
1680
1681
1682 if (null_selector) {
1683 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1684 goto exception;
1685
1686 if (seg == VCPU_SREG_SS) {
1687 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1688 goto exception;
1689
1690
1691
1692
1693
1694 seg_desc.type = 3;
1695 seg_desc.p = 1;
1696 seg_desc.s = 1;
1697 seg_desc.dpl = cpl;
1698 seg_desc.d = 1;
1699 seg_desc.g = 1;
1700 }
1701
1702
1703 goto load;
1704 }
1705
1706 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1707 if (ret != X86EMUL_CONTINUE)
1708 return ret;
1709
1710 err_code = selector & 0xfffc;
1711 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1712 GP_VECTOR;
1713
1714
1715 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1716 if (transfer == X86_TRANSFER_CALL_JMP)
1717 return X86EMUL_UNHANDLEABLE;
1718 goto exception;
1719 }
1720
1721 if (!seg_desc.p) {
1722 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1723 goto exception;
1724 }
1725
1726 dpl = seg_desc.dpl;
1727
1728 switch (seg) {
1729 case VCPU_SREG_SS:
1730
1731
1732
1733
1734 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1735 goto exception;
1736 break;
1737 case VCPU_SREG_CS:
1738 if (!(seg_desc.type & 8))
1739 goto exception;
1740
1741 if (seg_desc.type & 4) {
1742
1743 if (dpl > cpl)
1744 goto exception;
1745 } else {
1746
1747 if (rpl > cpl || dpl != cpl)
1748 goto exception;
1749 }
1750
1751 if (seg_desc.d && seg_desc.l) {
1752 u64 efer = 0;
1753
1754 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1755 if (efer & EFER_LMA)
1756 goto exception;
1757 }
1758
1759
1760 selector = (selector & 0xfffc) | cpl;
1761 break;
1762 case VCPU_SREG_TR:
1763 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1764 goto exception;
1765 old_desc = seg_desc;
1766 seg_desc.type |= 2;
1767 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1768 sizeof(seg_desc), &ctxt->exception);
1769 if (ret != X86EMUL_CONTINUE)
1770 return ret;
1771 break;
1772 case VCPU_SREG_LDTR:
1773 if (seg_desc.s || seg_desc.type != 2)
1774 goto exception;
1775 break;
1776 default:
1777
1778
1779
1780
1781
1782 if ((seg_desc.type & 0xa) == 0x8 ||
1783 (((seg_desc.type & 0xc) != 0xc) &&
1784 (rpl > dpl && cpl > dpl)))
1785 goto exception;
1786 break;
1787 }
1788
1789 if (seg_desc.s) {
1790
1791 if (!(seg_desc.type & 1)) {
1792 seg_desc.type |= 1;
1793 ret = write_segment_descriptor(ctxt, selector,
1794 &seg_desc);
1795 if (ret != X86EMUL_CONTINUE)
1796 return ret;
1797 }
1798 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1799 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1800 if (ret != X86EMUL_CONTINUE)
1801 return ret;
1802 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1803 ((u64)base3 << 32), ctxt))
1804 return emulate_gp(ctxt, 0);
1805 }
1806load:
1807 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1808 if (desc)
1809 *desc = seg_desc;
1810 return X86EMUL_CONTINUE;
1811exception:
1812 return emulate_exception(ctxt, err_vec, err_code, true);
1813}
1814
1815static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1816 u16 selector, int seg)
1817{
1818 u8 cpl = ctxt->ops->cpl(ctxt);
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830 if (seg == VCPU_SREG_SS && selector == 3 &&
1831 ctxt->mode == X86EMUL_MODE_PROT64)
1832 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1833
1834 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1835 X86_TRANSFER_NONE, NULL);
1836}
1837
1838static void write_register_operand(struct operand *op)
1839{
1840 return assign_register(op->addr.reg, op->val, op->bytes);
1841}
1842
1843static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1844{
1845 switch (op->type) {
1846 case OP_REG:
1847 write_register_operand(op);
1848 break;
1849 case OP_MEM:
1850 if (ctxt->lock_prefix)
1851 return segmented_cmpxchg(ctxt,
1852 op->addr.mem,
1853 &op->orig_val,
1854 &op->val,
1855 op->bytes);
1856 else
1857 return segmented_write(ctxt,
1858 op->addr.mem,
1859 &op->val,
1860 op->bytes);
1861 break;
1862 case OP_MEM_STR:
1863 return segmented_write(ctxt,
1864 op->addr.mem,
1865 op->data,
1866 op->bytes * op->count);
1867 break;
1868 case OP_XMM:
1869 write_sse_reg(&op->vec_val, op->addr.xmm);
1870 break;
1871 case OP_MM:
1872 write_mmx_reg(&op->mm_val, op->addr.mm);
1873 break;
1874 case OP_NONE:
1875
1876 break;
1877 default:
1878 break;
1879 }
1880 return X86EMUL_CONTINUE;
1881}
1882
1883static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1884{
1885 struct segmented_address addr;
1886
1887 rsp_increment(ctxt, -bytes);
1888 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1889 addr.seg = VCPU_SREG_SS;
1890
1891 return segmented_write(ctxt, addr, data, bytes);
1892}
1893
1894static int em_push(struct x86_emulate_ctxt *ctxt)
1895{
1896
1897 ctxt->dst.type = OP_NONE;
1898 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1899}
1900
1901static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1902 void *dest, int len)
1903{
1904 int rc;
1905 struct segmented_address addr;
1906
1907 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1908 addr.seg = VCPU_SREG_SS;
1909 rc = segmented_read(ctxt, addr, dest, len);
1910 if (rc != X86EMUL_CONTINUE)
1911 return rc;
1912
1913 rsp_increment(ctxt, len);
1914 return rc;
1915}
1916
1917static int em_pop(struct x86_emulate_ctxt *ctxt)
1918{
1919 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1920}
1921
1922static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1923 void *dest, int len)
1924{
1925 int rc;
1926 unsigned long val, change_mask;
1927 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1928 int cpl = ctxt->ops->cpl(ctxt);
1929
1930 rc = emulate_pop(ctxt, &val, len);
1931 if (rc != X86EMUL_CONTINUE)
1932 return rc;
1933
1934 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1935 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1936 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1937 X86_EFLAGS_AC | X86_EFLAGS_ID;
1938
1939 switch(ctxt->mode) {
1940 case X86EMUL_MODE_PROT64:
1941 case X86EMUL_MODE_PROT32:
1942 case X86EMUL_MODE_PROT16:
1943 if (cpl == 0)
1944 change_mask |= X86_EFLAGS_IOPL;
1945 if (cpl <= iopl)
1946 change_mask |= X86_EFLAGS_IF;
1947 break;
1948 case X86EMUL_MODE_VM86:
1949 if (iopl < 3)
1950 return emulate_gp(ctxt, 0);
1951 change_mask |= X86_EFLAGS_IF;
1952 break;
1953 default:
1954 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1955 break;
1956 }
1957
1958 *(unsigned long *)dest =
1959 (ctxt->eflags & ~change_mask) | (val & change_mask);
1960
1961 return rc;
1962}
1963
1964static int em_popf(struct x86_emulate_ctxt *ctxt)
1965{
1966 ctxt->dst.type = OP_REG;
1967 ctxt->dst.addr.reg = &ctxt->eflags;
1968 ctxt->dst.bytes = ctxt->op_bytes;
1969 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1970}
1971
1972static int em_enter(struct x86_emulate_ctxt *ctxt)
1973{
1974 int rc;
1975 unsigned frame_size = ctxt->src.val;
1976 unsigned nesting_level = ctxt->src2.val & 31;
1977 ulong rbp;
1978
1979 if (nesting_level)
1980 return X86EMUL_UNHANDLEABLE;
1981
1982 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1983 rc = push(ctxt, &rbp, stack_size(ctxt));
1984 if (rc != X86EMUL_CONTINUE)
1985 return rc;
1986 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1987 stack_mask(ctxt));
1988 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1989 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1990 stack_mask(ctxt));
1991 return X86EMUL_CONTINUE;
1992}
1993
1994static int em_leave(struct x86_emulate_ctxt *ctxt)
1995{
1996 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1997 stack_mask(ctxt));
1998 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1999}
2000
2001static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
2002{
2003 int seg = ctxt->src2.val;
2004
2005 ctxt->src.val = get_segment_selector(ctxt, seg);
2006 if (ctxt->op_bytes == 4) {
2007 rsp_increment(ctxt, -2);
2008 ctxt->op_bytes = 2;
2009 }
2010
2011 return em_push(ctxt);
2012}
2013
2014static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
2015{
2016 int seg = ctxt->src2.val;
2017 unsigned long selector;
2018 int rc;
2019
2020 rc = emulate_pop(ctxt, &selector, 2);
2021 if (rc != X86EMUL_CONTINUE)
2022 return rc;
2023
2024 if (ctxt->modrm_reg == VCPU_SREG_SS)
2025 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2026 if (ctxt->op_bytes > 2)
2027 rsp_increment(ctxt, ctxt->op_bytes - 2);
2028
2029 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
2030 return rc;
2031}
2032
2033static int em_pusha(struct x86_emulate_ctxt *ctxt)
2034{
2035 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
2036 int rc = X86EMUL_CONTINUE;
2037 int reg = VCPU_REGS_RAX;
2038
2039 while (reg <= VCPU_REGS_RDI) {
2040 (reg == VCPU_REGS_RSP) ?
2041 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
2042
2043 rc = em_push(ctxt);
2044 if (rc != X86EMUL_CONTINUE)
2045 return rc;
2046
2047 ++reg;
2048 }
2049
2050 return rc;
2051}
2052
2053static int em_pushf(struct x86_emulate_ctxt *ctxt)
2054{
2055 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2056 return em_push(ctxt);
2057}
2058
2059static int em_popa(struct x86_emulate_ctxt *ctxt)
2060{
2061 int rc = X86EMUL_CONTINUE;
2062 int reg = VCPU_REGS_RDI;
2063 u32 val;
2064
2065 while (reg >= VCPU_REGS_RAX) {
2066 if (reg == VCPU_REGS_RSP) {
2067 rsp_increment(ctxt, ctxt->op_bytes);
2068 --reg;
2069 }
2070
2071 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2072 if (rc != X86EMUL_CONTINUE)
2073 break;
2074 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2075 --reg;
2076 }
2077 return rc;
2078}
2079
2080static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2081{
2082 const struct x86_emulate_ops *ops = ctxt->ops;
2083 int rc;
2084 struct desc_ptr dt;
2085 gva_t cs_addr;
2086 gva_t eip_addr;
2087 u16 cs, eip;
2088
2089
2090 ctxt->src.val = ctxt->eflags;
2091 rc = em_push(ctxt);
2092 if (rc != X86EMUL_CONTINUE)
2093 return rc;
2094
2095 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2096
2097 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2098 rc = em_push(ctxt);
2099 if (rc != X86EMUL_CONTINUE)
2100 return rc;
2101
2102 ctxt->src.val = ctxt->_eip;
2103 rc = em_push(ctxt);
2104 if (rc != X86EMUL_CONTINUE)
2105 return rc;
2106
2107 ops->get_idt(ctxt, &dt);
2108
2109 eip_addr = dt.address + (irq << 2);
2110 cs_addr = dt.address + (irq << 2) + 2;
2111
2112 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2113 if (rc != X86EMUL_CONTINUE)
2114 return rc;
2115
2116 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2117 if (rc != X86EMUL_CONTINUE)
2118 return rc;
2119
2120 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2121 if (rc != X86EMUL_CONTINUE)
2122 return rc;
2123
2124 ctxt->_eip = eip;
2125
2126 return rc;
2127}
2128
2129int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2130{
2131 int rc;
2132
2133 invalidate_registers(ctxt);
2134 rc = __emulate_int_real(ctxt, irq);
2135 if (rc == X86EMUL_CONTINUE)
2136 writeback_registers(ctxt);
2137 return rc;
2138}
2139
2140static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2141{
2142 switch(ctxt->mode) {
2143 case X86EMUL_MODE_REAL:
2144 return __emulate_int_real(ctxt, irq);
2145 case X86EMUL_MODE_VM86:
2146 case X86EMUL_MODE_PROT16:
2147 case X86EMUL_MODE_PROT32:
2148 case X86EMUL_MODE_PROT64:
2149 default:
2150
2151 return X86EMUL_UNHANDLEABLE;
2152 }
2153}
2154
2155static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2156{
2157 int rc = X86EMUL_CONTINUE;
2158 unsigned long temp_eip = 0;
2159 unsigned long temp_eflags = 0;
2160 unsigned long cs = 0;
2161 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2162 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2163 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2164 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2165 X86_EFLAGS_AC | X86_EFLAGS_ID |
2166 X86_EFLAGS_FIXED;
2167 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2168 X86_EFLAGS_VIP;
2169
2170
2171
2172 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2173
2174 if (rc != X86EMUL_CONTINUE)
2175 return rc;
2176
2177 if (temp_eip & ~0xffff)
2178 return emulate_gp(ctxt, 0);
2179
2180 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2181
2182 if (rc != X86EMUL_CONTINUE)
2183 return rc;
2184
2185 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2186
2187 if (rc != X86EMUL_CONTINUE)
2188 return rc;
2189
2190 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2191
2192 if (rc != X86EMUL_CONTINUE)
2193 return rc;
2194
2195 ctxt->_eip = temp_eip;
2196
2197 if (ctxt->op_bytes == 4)
2198 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2199 else if (ctxt->op_bytes == 2) {
2200 ctxt->eflags &= ~0xffff;
2201 ctxt->eflags |= temp_eflags;
2202 }
2203
2204 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2205 ctxt->eflags |= X86_EFLAGS_FIXED;
2206 ctxt->ops->set_nmi_mask(ctxt, false);
2207
2208 return rc;
2209}
2210
2211static int em_iret(struct x86_emulate_ctxt *ctxt)
2212{
2213 switch(ctxt->mode) {
2214 case X86EMUL_MODE_REAL:
2215 return emulate_iret_real(ctxt);
2216 case X86EMUL_MODE_VM86:
2217 case X86EMUL_MODE_PROT16:
2218 case X86EMUL_MODE_PROT32:
2219 case X86EMUL_MODE_PROT64:
2220 default:
2221
2222 return X86EMUL_UNHANDLEABLE;
2223 }
2224}
2225
2226static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2227{
2228 int rc;
2229 unsigned short sel;
2230 struct desc_struct new_desc;
2231 u8 cpl = ctxt->ops->cpl(ctxt);
2232
2233 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2234
2235 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2236 X86_TRANSFER_CALL_JMP,
2237 &new_desc);
2238 if (rc != X86EMUL_CONTINUE)
2239 return rc;
2240
2241 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2242
2243 if (rc != X86EMUL_CONTINUE)
2244 return X86EMUL_UNHANDLEABLE;
2245
2246 return rc;
2247}
2248
2249static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2250{
2251 return assign_eip_near(ctxt, ctxt->src.val);
2252}
2253
2254static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2255{
2256 int rc;
2257 long int old_eip;
2258
2259 old_eip = ctxt->_eip;
2260 rc = assign_eip_near(ctxt, ctxt->src.val);
2261 if (rc != X86EMUL_CONTINUE)
2262 return rc;
2263 ctxt->src.val = old_eip;
2264 rc = em_push(ctxt);
2265 return rc;
2266}
2267
2268static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2269{
2270 u64 old = ctxt->dst.orig_val64;
2271
2272 if (ctxt->dst.bytes == 16)
2273 return X86EMUL_UNHANDLEABLE;
2274
2275 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2276 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2277 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2278 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2279 ctxt->eflags &= ~X86_EFLAGS_ZF;
2280 } else {
2281 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2282 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2283
2284 ctxt->eflags |= X86_EFLAGS_ZF;
2285 }
2286 return X86EMUL_CONTINUE;
2287}
2288
2289static int em_ret(struct x86_emulate_ctxt *ctxt)
2290{
2291 int rc;
2292 unsigned long eip;
2293
2294 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2295 if (rc != X86EMUL_CONTINUE)
2296 return rc;
2297
2298 return assign_eip_near(ctxt, eip);
2299}
2300
2301static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2302{
2303 int rc;
2304 unsigned long eip, cs;
2305 int cpl = ctxt->ops->cpl(ctxt);
2306 struct desc_struct new_desc;
2307
2308 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2309 if (rc != X86EMUL_CONTINUE)
2310 return rc;
2311 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2312 if (rc != X86EMUL_CONTINUE)
2313 return rc;
2314
2315 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2316 return X86EMUL_UNHANDLEABLE;
2317 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2318 X86_TRANSFER_RET,
2319 &new_desc);
2320 if (rc != X86EMUL_CONTINUE)
2321 return rc;
2322 rc = assign_eip_far(ctxt, eip, &new_desc);
2323
2324 if (rc != X86EMUL_CONTINUE)
2325 return X86EMUL_UNHANDLEABLE;
2326
2327 return rc;
2328}
2329
2330static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2331{
2332 int rc;
2333
2334 rc = em_ret_far(ctxt);
2335 if (rc != X86EMUL_CONTINUE)
2336 return rc;
2337 rsp_increment(ctxt, ctxt->src.val);
2338 return X86EMUL_CONTINUE;
2339}
2340
2341static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2342{
2343
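 /* Save real source value, then compare EAX against destination. */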
2344 ctxt->dst.orig_val = ctxt->dst.val;
2345 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2346 ctxt->src.orig_val = ctxt->src.val;
2347 ctxt->src.val = ctxt->dst.orig_val;
2348 fastop(ctxt, em_cmp);
2349
2350 if (ctxt->eflags & X86_EFLAGS_ZF) {
2351
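 /* Success: write back to memory; no update of EAX is needed. */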
2352 ctxt->src.type = OP_NONE;
2353 ctxt->dst.val = ctxt->src.orig_val;
2354 } else {
2355
2356 ctxt->src.type = OP_REG;
2357 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2358 ctxt->src.val = ctxt->dst.orig_val;
2359
2360 ctxt->dst.val = ctxt->dst.orig_val;
2361 }
2362 return X86EMUL_CONTINUE;
2363}
2364
2365static int em_lseg(struct x86_emulate_ctxt *ctxt)
2366{
2367 int seg = ctxt->src2.val;
2368 unsigned short sel;
2369 int rc;
2370
2371 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2372
2373 rc = load_segment_descriptor(ctxt, sel, seg);
2374 if (rc != X86EMUL_CONTINUE)
2375 return rc;
2376
2377 ctxt->dst.val = ctxt->src.val;
2378 return rc;
2379}
2380
2381static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2382{
2383#ifdef CONFIG_X86_64
2384 return ctxt->ops->guest_has_long_mode(ctxt);
2385#else
2386 return false;
2387#endif
2388}
2389
2390static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2391{
2392 desc->g = (flags >> 23) & 1;
2393 desc->d = (flags >> 22) & 1;
2394 desc->l = (flags >> 21) & 1;
2395 desc->avl = (flags >> 20) & 1;
2396 desc->p = (flags >> 15) & 1;
2397 desc->dpl = (flags >> 13) & 3;
2398 desc->s = (flags >> 12) & 1;
2399 desc->type = (flags >> 8) & 15;
2400}
2401
2402static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2403 int n)
2404{
2405 struct desc_struct desc;
2406 int offset;
2407 u16 selector;
2408
2409 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2410
2411 if (n < 3)
2412 offset = 0x7f84 + n * 12;
2413 else
2414 offset = 0x7f2c + (n - 3) * 12;
2415
2416 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2417 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2418 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2419 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2420 return X86EMUL_CONTINUE;
2421}
2422
2423#ifdef CONFIG_X86_64
2424static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2425 int n)
2426{
2427 struct desc_struct desc;
2428 int offset;
2429 u16 selector;
2430 u32 base3;
2431
2432 offset = 0x7e00 + n * 16;
2433
2434 selector = GET_SMSTATE(u16, smstate, offset);
2435 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2436 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2437 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2438 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2439
2440 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2441 return X86EMUL_CONTINUE;
2442}
2443#endif
2444
2445static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2446 u64 cr0, u64 cr3, u64 cr4)
2447{
2448 int bad;
2449 u64 pcid;
2450
2451
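 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero first. */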
2452 pcid = 0;
2453 if (cr4 & X86_CR4_PCIDE) {
2454 pcid = cr3 & 0xfff;
2455 cr3 &= ~0xfff;
2456 }
2457
2458 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2459 if (bad)
2460 return X86EMUL_UNHANDLEABLE;
2461
2462
2463
2464
2465
2466
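 /*
 * First enable PAE, which long mode needs before CR0.PG can be set.
 * Then enable protected mode.  However, PCID cannot be enabled while
 * EFER.LMA = 0, so it is set separately further down.
 */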
2467 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2468 if (bad)
2469 return X86EMUL_UNHANDLEABLE;
2470
2471 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2472 if (bad)
2473 return X86EMUL_UNHANDLEABLE;
2474
2475 if (cr4 & X86_CR4_PCIDE) {
2476 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2477 if (bad)
2478 return X86EMUL_UNHANDLEABLE;
2479 if (pcid) {
2480 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2481 if (bad)
2482 return X86EMUL_UNHANDLEABLE;
2483 }
2484
2485 }
2486
2487 return X86EMUL_CONTINUE;
2488}
2489
2490static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2491 const char *smstate)
2492{
2493 struct desc_struct desc;
2494 struct desc_ptr dt;
2495 u16 selector;
2496 u32 val, cr0, cr3, cr4;
2497 int i;
2498
2499 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2500 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2501 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2502 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2503
2504 for (i = 0; i < 8; i++)
2505 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2506
2507 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2508
2509 if (ctxt->ops->set_dr(ctxt, 6, val))
2510 return X86EMUL_UNHANDLEABLE;
2511
2512 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2513
2514 if (ctxt->ops->set_dr(ctxt, 7, val))
2515 return X86EMUL_UNHANDLEABLE;
2516
2517 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2518 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2519 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2520 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2521 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2522
2523 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2524 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2525 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2526 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2527 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2528
2529 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2530 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2531 ctxt->ops->set_gdt(ctxt, &dt);
2532
2533 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2534 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2535 ctxt->ops->set_idt(ctxt, &dt);
2536
2537 for (i = 0; i < 6; i++) {
2538 int r = rsm_load_seg_32(ctxt, smstate, i);
2539 if (r != X86EMUL_CONTINUE)
2540 return r;
2541 }
2542
2543 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2544
2545 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2546
2547 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2548}
2549
2550#ifdef CONFIG_X86_64
2551static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2552 const char *smstate)
2553{
2554 struct desc_struct desc;
2555 struct desc_ptr dt;
2556 u64 val, cr0, cr3, cr4;
2557 u32 base3;
2558 u16 selector;
2559 int i, r;
2560
2561 for (i = 0; i < 16; i++)
2562 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2563
2564 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2565 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2566
2567 val = GET_SMSTATE(u64, smstate, 0x7f68);
2568
2569 if (ctxt->ops->set_dr(ctxt, 6, val))
2570 return X86EMUL_UNHANDLEABLE;
2571
2572 val = GET_SMSTATE(u64, smstate, 0x7f60);
2573
2574 if (ctxt->ops->set_dr(ctxt, 7, val))
2575 return X86EMUL_UNHANDLEABLE;
2576
2577 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2578 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2579 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2580 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2581 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2582
2583 if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
2584 return X86EMUL_UNHANDLEABLE;
2585
2586 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2587 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2588 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2589 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2590 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2591 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2592
2593 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2594 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2595 ctxt->ops->set_idt(ctxt, &dt);
2596
2597 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2598 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2599 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2600 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2601 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2602 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2603
2604 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2605 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2606 ctxt->ops->set_gdt(ctxt, &dt);
2607
2608 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2609 if (r != X86EMUL_CONTINUE)
2610 return r;
2611
2612 for (i = 0; i < 6; i++) {
2613 r = rsm_load_seg_64(ctxt, smstate, i);
2614 if (r != X86EMUL_CONTINUE)
2615 return r;
2616 }
2617
2618 return X86EMUL_CONTINUE;
2619}
2620#endif
2621
2622static int em_rsm(struct x86_emulate_ctxt *ctxt)
2623{
2624 unsigned long cr0, cr4, efer;
2625 char buf[512];
2626 u64 smbase;
2627 int ret;
2628
2629 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2630 return emulate_ud(ctxt);
2631
2632 smbase = ctxt->ops->get_smbase(ctxt);
2633
2634 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2635 if (ret != X86EMUL_CONTINUE)
2636 return X86EMUL_UNHANDLEABLE;
2637
2638 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2639 ctxt->ops->set_nmi_mask(ctxt, false);
2640
2641 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2642 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2643
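 /*
 * Get back to real mode, to prepare a safe state in which to load
 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
 * supports long mode.
 */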
2649 if (emulator_has_longmode(ctxt)) {
2650 struct desc_struct cs_desc;
2651
2652
2653 cr4 = ctxt->ops->get_cr(ctxt, 4);
2654 if (cr4 & X86_CR4_PCIDE)
2655 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2656
2657
2658 memset(&cs_desc, 0, sizeof(cs_desc));
2659 cs_desc.type = 0xb;
2660 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2661 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2662 }
2663
2664
2665 cr0 = ctxt->ops->get_cr(ctxt, 0);
2666 if (cr0 & X86_CR0_PE)
2667 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2668
2669 if (emulator_has_longmode(ctxt)) {
2670
2671 cr4 = ctxt->ops->get_cr(ctxt, 4);
2672 if (cr4 & X86_CR4_PAE)
2673 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2674
2675
2676 efer = 0;
2677 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2678 }
2679
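 /*
 * Give pre_leave_smm() a chance to make ISA-specific changes to the
 * vCPU state (e.g. enter guest mode) before loading state from the
 * SMM state-save area.
 */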
2685 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2686 return X86EMUL_UNHANDLEABLE;
2687
2688#ifdef CONFIG_X86_64
2689 if (emulator_has_longmode(ctxt))
2690 ret = rsm_load_state_64(ctxt, buf);
2691 else
2692#endif
2693 ret = rsm_load_state_32(ctxt, buf);
2694
2695 if (ret != X86EMUL_CONTINUE) {
2696
2697 return X86EMUL_UNHANDLEABLE;
2698 }
2699
2700 ctxt->ops->post_leave_smm(ctxt);
2701
2702 return X86EMUL_CONTINUE;
2703}
2704
2705static void
2706setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2707 struct desc_struct *cs, struct desc_struct *ss)
2708{
2709 cs->l = 0;
2710 set_desc_base(cs, 0);
2711 cs->g = 1;
2712 set_desc_limit(cs, 0xfffff);
2713 cs->type = 0x0b;
2714 cs->s = 1;
2715 cs->dpl = 0;
2716 cs->p = 1;
2717 cs->d = 1;
2718 cs->avl = 0;
2719
2720 set_desc_base(ss, 0);
2721 set_desc_limit(ss, 0xfffff);
2722 ss->g = 1;
2723 ss->s = 1;
2724 ss->type = 0x03;
2725 ss->d = 1;
2726 ss->dpl = 0;
2727 ss->p = 1;
2728 ss->l = 0;
2729 ss->avl = 0;
2730}
2731
2732static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2733{
2734 u32 eax, ebx, ecx, edx;
2735
2736 eax = ecx = 0;
2737 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2738 return is_guest_vendor_intel(ebx, ecx, edx);
2739}
2740
2741static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2742{
2743 const struct x86_emulate_ops *ops = ctxt->ops;
2744 u32 eax, ebx, ecx, edx;
2745
2746
2747
2748
2749
2750 if (ctxt->mode == X86EMUL_MODE_PROT64)
2751 return true;
2752
2753 eax = 0x00000000;
2754 ecx = 0x00000000;
2755 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2756
2757
2758
2759
2760
2761
2762 if (is_guest_vendor_intel(ebx, ecx, edx))
2763 return false;
2764
2765 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2766 is_guest_vendor_hygon(ebx, ecx, edx))
2767 return true;
2768
2769
2770
2771
2772
2773 return false;
2774}
2775
2776static int em_syscall(struct x86_emulate_ctxt *ctxt)
2777{
2778 const struct x86_emulate_ops *ops = ctxt->ops;
2779 struct desc_struct cs, ss;
2780 u64 msr_data;
2781 u16 cs_sel, ss_sel;
2782 u64 efer = 0;
2783
2784
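 /* SYSCALL is not available in real mode or VM86 mode. */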
2785 if (ctxt->mode == X86EMUL_MODE_REAL ||
2786 ctxt->mode == X86EMUL_MODE_VM86)
2787 return emulate_ud(ctxt);
2788
2789 if (!(em_syscall_is_enabled(ctxt)))
2790 return emulate_ud(ctxt);
2791
2792 ops->get_msr(ctxt, MSR_EFER, &efer);
2793 if (!(efer & EFER_SCE))
2794 return emulate_ud(ctxt);
2795
2796 setup_syscalls_segments(ctxt, &cs, &ss);
2797 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2798 msr_data >>= 32;
2799 cs_sel = (u16)(msr_data & 0xfffc);
2800 ss_sel = (u16)(msr_data + 8);
2801
2802 if (efer & EFER_LMA) {
2803 cs.d = 0;
2804 cs.l = 1;
2805 }
2806 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2807 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2808
2809 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2810 if (efer & EFER_LMA) {
2811#ifdef CONFIG_X86_64
2812 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2813
2814 ops->get_msr(ctxt,
2815 ctxt->mode == X86EMUL_MODE_PROT64 ?
2816 MSR_LSTAR : MSR_CSTAR, &msr_data);
2817 ctxt->_eip = msr_data;
2818
2819 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2820 ctxt->eflags &= ~msr_data;
2821 ctxt->eflags |= X86_EFLAGS_FIXED;
2822#endif
2823 } else {
2824
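 /* Legacy mode: EIP comes from the low 32 bits of MSR_STAR. */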
2825 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2826 ctxt->_eip = (u32)msr_data;
2827
2828 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2829 }
2830
2831 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2832 return X86EMUL_CONTINUE;
2833}
2834
2835static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2836{
2837 const struct x86_emulate_ops *ops = ctxt->ops;
2838 struct desc_struct cs, ss;
2839 u64 msr_data;
2840 u16 cs_sel, ss_sel;
2841 u64 efer = 0;
2842
2843 ops->get_msr(ctxt, MSR_EFER, &efer);
2844
2845 if (ctxt->mode == X86EMUL_MODE_REAL)
2846 return emulate_gp(ctxt, 0);
2847
2848
2849
2850
2851
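 /*
 * SYSENTER is not recognized on AMD in compat mode (but it is
 * recognized in legacy protected mode).
 */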
2852 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2853 && !vendor_intel(ctxt))
2854 return emulate_ud(ctxt);
2855
2856
2857 if (ctxt->mode == X86EMUL_MODE_PROT64)
2858 return X86EMUL_UNHANDLEABLE;
2859
2860 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2861 if ((msr_data & 0xfffc) == 0x0)
2862 return emulate_gp(ctxt, 0);
2863
2864 setup_syscalls_segments(ctxt, &cs, &ss);
2865 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2866 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2867 ss_sel = cs_sel + 8;
2868 if (efer & EFER_LMA) {
2869 cs.d = 0;
2870 cs.l = 1;
2871 }
2872
2873 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2874 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2875
2876 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2877 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2878
2879 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2880 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2881 (u32)msr_data;
2882 if (efer & EFER_LMA)
2883 ctxt->mode = X86EMUL_MODE_PROT64;
2884
2885 return X86EMUL_CONTINUE;
2886}
2887
2888static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2889{
2890 const struct x86_emulate_ops *ops = ctxt->ops;
2891 struct desc_struct cs, ss;
2892 u64 msr_data, rcx, rdx;
2893 int usermode;
2894 u16 cs_sel = 0, ss_sel = 0;
2895
2896
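 /* SYSEXIT raises #GP(0) in real mode and in VM86 mode. */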
2897 if (ctxt->mode == X86EMUL_MODE_REAL ||
2898 ctxt->mode == X86EMUL_MODE_VM86)
2899 return emulate_gp(ctxt, 0);
2900
2901 setup_syscalls_segments(ctxt, &cs, &ss);
2902
2903 if ((ctxt->rex_prefix & 0x8) != 0x0)
2904 usermode = X86EMUL_MODE_PROT64;
2905 else
2906 usermode = X86EMUL_MODE_PROT32;
2907
2908 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2909 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2910
2911 cs.dpl = 3;
2912 ss.dpl = 3;
2913 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2914 switch (usermode) {
2915 case X86EMUL_MODE_PROT32:
2916 cs_sel = (u16)(msr_data + 16);
2917 if ((msr_data & 0xfffc) == 0x0)
2918 return emulate_gp(ctxt, 0);
2919 ss_sel = (u16)(msr_data + 24);
2920 rcx = (u32)rcx;
2921 rdx = (u32)rdx;
2922 break;
2923 case X86EMUL_MODE_PROT64:
2924 cs_sel = (u16)(msr_data + 32);
2925 if (msr_data == 0x0)
2926 return emulate_gp(ctxt, 0);
2927 ss_sel = cs_sel + 8;
2928 cs.d = 0;
2929 cs.l = 1;
2930 if (emul_is_noncanonical_address(rcx, ctxt) ||
2931 emul_is_noncanonical_address(rdx, ctxt))
2932 return emulate_gp(ctxt, 0);
2933 break;
2934 }
2935 cs_sel |= SEGMENT_RPL_MASK;
2936 ss_sel |= SEGMENT_RPL_MASK;
2937
2938 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2939 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2940
2941 ctxt->_eip = rdx;
2942 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2943
2944 return X86EMUL_CONTINUE;
2945}
2946
2947static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2948{
2949 int iopl;
2950 if (ctxt->mode == X86EMUL_MODE_REAL)
2951 return false;
2952 if (ctxt->mode == X86EMUL_MODE_VM86)
2953 return true;
2954 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2955 return ctxt->ops->cpl(ctxt) > iopl;
2956}
2957
2958#define VMWARE_PORT_VMPORT (0x5658)
2959#define VMWARE_PORT_VMRPC (0x5659)
2960
2961static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2962 u16 port, u16 len)
2963{
2964 const struct x86_emulate_ops *ops = ctxt->ops;
2965 struct desc_struct tr_seg;
2966 u32 base3;
2967 int r;
2968 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2969 unsigned mask = (1 << len) - 1;
2970 unsigned long base;
2971
2972
2973
2974
2975
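 /*
 * VMware allows access to these ports even if denied
 * by the TSS I/O permission bitmap.  Mimic that behavior.
 */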
2976 if (enable_vmware_backdoor &&
2977 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2978 return true;
2979
2980 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2981 if (!tr_seg.p)
2982 return false;
2983 if (desc_limit_scaled(&tr_seg) < 103)
2984 return false;
2985 base = get_desc_base(&tr_seg);
2986#ifdef CONFIG_X86_64
2987 base |= ((u64)base3) << 32;
2988#endif
2989 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2990 if (r != X86EMUL_CONTINUE)
2991 return false;
2992 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2993 return false;
2994 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2995 if (r != X86EMUL_CONTINUE)
2996 return false;
2997 if ((perm >> bit_idx) & mask)
2998 return false;
2999 return true;
3000}
3001
3002static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
3003 u16 port, u16 len)
3004{
3005 if (ctxt->perm_ok)
3006 return true;
3007
3008 if (emulator_bad_iopl(ctxt))
3009 if (!emulator_io_port_access_allowed(ctxt, port, len))
3010 return false;
3011
3012 ctxt->perm_ok = true;
3013
3014 return true;
3015}
3016
3017static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
3018{
3019
3020
3021
3022
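 /*
 * Intel CPUs mask the counter and pointers in quite strange
 * manner when ECX is zero due to REP-string optimizations.
 */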
3023#ifdef CONFIG_X86_64
3024 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
3025 return;
3026
3027 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
3028
3029 switch (ctxt->b) {
3030 case 0xa4:
3031 case 0xa5:
3032 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
3033 fallthrough;
3034 case 0xaa:
3035 case 0xab:
3036 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
3037 }
3038#endif
3039}
3040
3041static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
3042 struct tss_segment_16 *tss)
3043{
3044 tss->ip = ctxt->_eip;
3045 tss->flag = ctxt->eflags;
3046 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3047 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3048 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3049 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3050 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3051 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3052 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3053 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3054
3055 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3056 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3057 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3058 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3059 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3060}
3061
3062static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3063 struct tss_segment_16 *tss)
3064{
3065 int ret;
3066 u8 cpl;
3067
3068 ctxt->_eip = tss->ip;
3069 ctxt->eflags = tss->flag | 2;
3070 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3071 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3072 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3073 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3074 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3075 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3076 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3077 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3078
3079
3080
3081
3082
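 /*
 * SDM says that segment selectors are loaded before segment
 * descriptors.
 */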
3083 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3084 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3085 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3086 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3087 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3088
3089 cpl = tss->cs & 3;
3090
3091
3092
3093
3094
3095 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3096 X86_TRANSFER_TASK_SWITCH, NULL);
3097 if (ret != X86EMUL_CONTINUE)
3098 return ret;
3099 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3100 X86_TRANSFER_TASK_SWITCH, NULL);
3101 if (ret != X86EMUL_CONTINUE)
3102 return ret;
3103 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3104 X86_TRANSFER_TASK_SWITCH, NULL);
3105 if (ret != X86EMUL_CONTINUE)
3106 return ret;
3107 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3108 X86_TRANSFER_TASK_SWITCH, NULL);
3109 if (ret != X86EMUL_CONTINUE)
3110 return ret;
3111 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3112 X86_TRANSFER_TASK_SWITCH, NULL);
3113 if (ret != X86EMUL_CONTINUE)
3114 return ret;
3115
3116 return X86EMUL_CONTINUE;
3117}
3118
3119static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3120 u16 tss_selector, u16 old_tss_sel,
3121 ulong old_tss_base, struct desc_struct *new_desc)
3122{
3123 struct tss_segment_16 tss_seg;
3124 int ret;
3125 u32 new_tss_base = get_desc_base(new_desc);
3126
3127 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3128 if (ret != X86EMUL_CONTINUE)
3129 return ret;
3130
3131 save_state_to_tss16(ctxt, &tss_seg);
3132
3133 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3134 if (ret != X86EMUL_CONTINUE)
3135 return ret;
3136
3137 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3138 if (ret != X86EMUL_CONTINUE)
3139 return ret;
3140
3141 if (old_tss_sel != 0xffff) {
3142 tss_seg.prev_task_link = old_tss_sel;
3143
3144 ret = linear_write_system(ctxt, new_tss_base,
3145 &tss_seg.prev_task_link,
3146 sizeof(tss_seg.prev_task_link));
3147 if (ret != X86EMUL_CONTINUE)
3148 return ret;
3149 }
3150
3151 return load_state_from_tss16(ctxt, &tss_seg);
3152}
3153
3154static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3155 struct tss_segment_32 *tss)
3156{
3157
3158 tss->eip = ctxt->_eip;
3159 tss->eflags = ctxt->eflags;
3160 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3161 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3162 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3163 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3164 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3165 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3166 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3167 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3168
3169 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3170 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3171 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3172 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3173 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3174 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3175}
3176
3177static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3178 struct tss_segment_32 *tss)
3179{
3180 int ret;
3181 u8 cpl;
3182
3183 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3184 return emulate_gp(ctxt, 0);
3185 ctxt->_eip = tss->eip;
3186 ctxt->eflags = tss->eflags | 2;
3187
3188
3189 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3190 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3191 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3192 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3193 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3194 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3195 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3196 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3197
3198
3199
3200
3201
3202
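 /*
 * SDM says that segment selectors are loaded before segment
 * descriptors.  This is important because CPL checks will
 * use CS.RPL.
 */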
3203 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3204 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3205 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3206 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3207 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3208 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3209 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3210
3211
3212
3213
3214
3215
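 /*
 * If we're switching between Protected Mode and VM86, we need to make
 * sure to update the mode before loading the segment descriptors so
 * that the selectors are interpreted correctly.
 */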
3216 if (ctxt->eflags & X86_EFLAGS_VM) {
3217 ctxt->mode = X86EMUL_MODE_VM86;
3218 cpl = 3;
3219 } else {
3220 ctxt->mode = X86EMUL_MODE_PROT32;
3221 cpl = tss->cs & 3;
3222 }
3223
3224
3225
3226
3227
3228 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3229 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3230 if (ret != X86EMUL_CONTINUE)
3231 return ret;
3232 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3233 X86_TRANSFER_TASK_SWITCH, NULL);
3234 if (ret != X86EMUL_CONTINUE)
3235 return ret;
3236 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3237 X86_TRANSFER_TASK_SWITCH, NULL);
3238 if (ret != X86EMUL_CONTINUE)
3239 return ret;
3240 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3241 X86_TRANSFER_TASK_SWITCH, NULL);
3242 if (ret != X86EMUL_CONTINUE)
3243 return ret;
3244 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3245 X86_TRANSFER_TASK_SWITCH, NULL);
3246 if (ret != X86EMUL_CONTINUE)
3247 return ret;
3248 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3249 X86_TRANSFER_TASK_SWITCH, NULL);
3250 if (ret != X86EMUL_CONTINUE)
3251 return ret;
3252 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3253 X86_TRANSFER_TASK_SWITCH, NULL);
3254
3255 return ret;
3256}
3257
3258static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3259 u16 tss_selector, u16 old_tss_sel,
3260 ulong old_tss_base, struct desc_struct *new_desc)
3261{
3262 struct tss_segment_32 tss_seg;
3263 int ret;
3264 u32 new_tss_base = get_desc_base(new_desc);
3265 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3266 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3267
3268 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3269 if (ret != X86EMUL_CONTINUE)
3270 return ret;
3271
3272 save_state_to_tss32(ctxt, &tss_seg);
3273
3274
3275 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3276 ldt_sel_offset - eip_offset);
3277 if (ret != X86EMUL_CONTINUE)
3278 return ret;
3279
3280 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3281 if (ret != X86EMUL_CONTINUE)
3282 return ret;
3283
3284 if (old_tss_sel != 0xffff) {
3285 tss_seg.prev_task_link = old_tss_sel;
3286
3287 ret = linear_write_system(ctxt, new_tss_base,
3288 &tss_seg.prev_task_link,
3289 sizeof(tss_seg.prev_task_link));
3290 if (ret != X86EMUL_CONTINUE)
3291 return ret;
3292 }
3293
3294 return load_state_from_tss32(ctxt, &tss_seg);
3295}
3296
3297static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3298 u16 tss_selector, int idt_index, int reason,
3299 bool has_error_code, u32 error_code)
3300{
3301 const struct x86_emulate_ops *ops = ctxt->ops;
3302 struct desc_struct curr_tss_desc, next_tss_desc;
3303 int ret;
3304 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3305 ulong old_tss_base =
3306 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3307 u32 desc_limit;
3308 ulong desc_addr, dr7;
3309
3310
3311
3312 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3313 if (ret != X86EMUL_CONTINUE)
3314 return ret;
3315 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3316 if (ret != X86EMUL_CONTINUE)
3317 return ret;
3318
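 /*
 * Check privileges. The three cases of task switch are:
 * 1. jmp/call/int to task gate: check against DPL of the task gate.
 * 2. Exception/IRQ/IRET: no check is performed.
 * 3. jmp/call to TSS/task-gate: no check is performed since the
 *    hardware checks it before exiting.
 */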
3329 if (reason == TASK_SWITCH_GATE) {
3330 if (idt_index != -1) {
3331
3332 struct desc_struct task_gate_desc;
3333 int dpl;
3334
3335 ret = read_interrupt_descriptor(ctxt, idt_index,
3336 &task_gate_desc);
3337 if (ret != X86EMUL_CONTINUE)
3338 return ret;
3339
3340 dpl = task_gate_desc.dpl;
3341 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3342 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3343 }
3344 }
3345
3346 desc_limit = desc_limit_scaled(&next_tss_desc);
3347 if (!next_tss_desc.p ||
3348 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3349 desc_limit < 0x2b)) {
3350 return emulate_ts(ctxt, tss_selector & 0xfffc);
3351 }
3352
3353 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3354 curr_tss_desc.type &= ~(1 << 1);
3355 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3356 }
3357
3358 if (reason == TASK_SWITCH_IRET)
3359 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3360
3361
3362
3363 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3364 old_tss_sel = 0xffff;
3365
3366 if (next_tss_desc.type & 8)
3367 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3368 old_tss_base, &next_tss_desc);
3369 else
3370 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3371 old_tss_base, &next_tss_desc);
3372 if (ret != X86EMUL_CONTINUE)
3373 return ret;
3374
3375 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3376 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3377
3378 if (reason != TASK_SWITCH_IRET) {
3379 next_tss_desc.type |= (1 << 1);
3380 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3381 }
3382
3383 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3384 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3385
3386 if (has_error_code) {
3387 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3388 ctxt->lock_prefix = 0;
3389 ctxt->src.val = (unsigned long) error_code;
3390 ret = em_push(ctxt);
3391 }
3392
3393 ops->get_dr(ctxt, 7, &dr7);
3394 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3395
3396 return ret;
3397}
3398
3399int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3400 u16 tss_selector, int idt_index, int reason,
3401 bool has_error_code, u32 error_code)
3402{
3403 int rc;
3404
3405 invalidate_registers(ctxt);
3406 ctxt->_eip = ctxt->eip;
3407 ctxt->dst.type = OP_NONE;
3408
3409 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3410 has_error_code, error_code);
3411
3412 if (rc == X86EMUL_CONTINUE) {
3413 ctxt->eip = ctxt->_eip;
3414 writeback_registers(ctxt);
3415 }
3416
3417 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3418}
3419
3420static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3421 struct operand *op)
3422{
3423 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3424
3425 register_address_increment(ctxt, reg, df * op->bytes);
3426 op->addr.mem.ea = register_address(ctxt, reg);
3427}
3428
3429static int em_das(struct x86_emulate_ctxt *ctxt)
3430{
3431 u8 al, old_al;
3432 bool af, cf, old_cf;
3433
3434 cf = ctxt->eflags & X86_EFLAGS_CF;
3435 al = ctxt->dst.val;
3436
3437 old_al = al;
3438 old_cf = cf;
3439 cf = false;
3440 af = ctxt->eflags & X86_EFLAGS_AF;
3441 if ((al & 0x0f) > 9 || af) {
3442 al -= 6;
3443 cf = old_cf | (al >= 250);
3444 af = true;
3445 } else {
3446 af = false;
3447 }
3448 if (old_al > 0x99 || old_cf) {
3449 al -= 0x60;
3450 cf = true;
3451 }
3452
3453 ctxt->dst.val = al;
3454
3455 ctxt->src.type = OP_IMM;
3456 ctxt->src.val = 0;
3457 ctxt->src.bytes = 1;
3458 fastop(ctxt, em_or);
3459 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3460 if (cf)
3461 ctxt->eflags |= X86_EFLAGS_CF;
3462 if (af)
3463 ctxt->eflags |= X86_EFLAGS_AF;
3464 return X86EMUL_CONTINUE;
3465}
3466
3467static int em_aam(struct x86_emulate_ctxt *ctxt)
3468{
3469 u8 al, ah;
3470
3471 if (ctxt->src.val == 0)
3472 return emulate_de(ctxt);
3473
3474 al = ctxt->dst.val & 0xff;
3475 ah = al / ctxt->src.val;
3476 al %= ctxt->src.val;
3477
3478 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3479
3480
3481 ctxt->src.type = OP_IMM;
3482 ctxt->src.val = 0;
3483 ctxt->src.bytes = 1;
3484 fastop(ctxt, em_or);
3485
3486 return X86EMUL_CONTINUE;
3487}
3488
3489static int em_aad(struct x86_emulate_ctxt *ctxt)
3490{
3491 u8 al = ctxt->dst.val & 0xff;
3492 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3493
3494 al = (al + (ah * ctxt->src.val)) & 0xff;
3495
3496 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3497
3498
3499 ctxt->src.type = OP_IMM;
3500 ctxt->src.val = 0;
3501 ctxt->src.bytes = 1;
3502 fastop(ctxt, em_or);
3503
3504 return X86EMUL_CONTINUE;
3505}
3506
3507static int em_call(struct x86_emulate_ctxt *ctxt)
3508{
3509 int rc;
3510 long rel = ctxt->src.val;
3511
3512 ctxt->src.val = (unsigned long)ctxt->_eip;
3513 rc = jmp_rel(ctxt, rel);
3514 if (rc != X86EMUL_CONTINUE)
3515 return rc;
3516 return em_push(ctxt);
3517}
3518
3519static int em_call_far(struct x86_emulate_ctxt *ctxt)
3520{
3521 u16 sel, old_cs;
3522 ulong old_eip;
3523 int rc;
3524 struct desc_struct old_desc, new_desc;
3525 const struct x86_emulate_ops *ops = ctxt->ops;
3526 int cpl = ctxt->ops->cpl(ctxt);
3527 enum x86emul_mode prev_mode = ctxt->mode;
3528
3529 old_eip = ctxt->_eip;
3530 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3531
3532 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3533 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3534 X86_TRANSFER_CALL_JMP, &new_desc);
3535 if (rc != X86EMUL_CONTINUE)
3536 return rc;
3537
3538 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3539 if (rc != X86EMUL_CONTINUE)
3540 goto fail;
3541
3542 ctxt->src.val = old_cs;
3543 rc = em_push(ctxt);
3544 if (rc != X86EMUL_CONTINUE)
3545 goto fail;
3546
3547 ctxt->src.val = old_eip;
3548 rc = em_push(ctxt);
3549
3550
3551 if (rc != X86EMUL_CONTINUE) {
3552 pr_warn_once("faulting far call emulation tainted memory\n");
3553 goto fail;
3554 }
3555 return rc;
3556fail:
3557 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3558 ctxt->mode = prev_mode;
3559 return rc;
3560
3561}
3562
3563static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3564{
3565 int rc;
3566 unsigned long eip;
3567
3568 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3569 if (rc != X86EMUL_CONTINUE)
3570 return rc;
3571 rc = assign_eip_near(ctxt, eip);
3572 if (rc != X86EMUL_CONTINUE)
3573 return rc;
3574 rsp_increment(ctxt, ctxt->src.val);
3575 return X86EMUL_CONTINUE;
3576}
3577
3578static int em_xchg(struct x86_emulate_ctxt *ctxt)
3579{
3580
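 /* Write back the register source. */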
3581 ctxt->src.val = ctxt->dst.val;
3582 write_register_operand(&ctxt->src);
3583
3584
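 /* Write back the memory destination with the implicit LOCK prefix. */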
3585 ctxt->dst.val = ctxt->src.orig_val;
3586 ctxt->lock_prefix = 1;
3587 return X86EMUL_CONTINUE;
3588}
3589
3590static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3591{
3592 ctxt->dst.val = ctxt->src2.val;
3593 return fastop(ctxt, em_imul);
3594}
3595
3596static int em_cwd(struct x86_emulate_ctxt *ctxt)
3597{
3598 ctxt->dst.type = OP_REG;
3599 ctxt->dst.bytes = ctxt->src.bytes;
3600 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3601 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3602
3603 return X86EMUL_CONTINUE;
3604}
3605
3606static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3607{
3608 u64 tsc_aux = 0;
3609
3610 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3611 return emulate_ud(ctxt);
3612 ctxt->dst.val = tsc_aux;
3613 return X86EMUL_CONTINUE;
3614}
3615
3616static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3617{
3618 u64 tsc = 0;
3619
3620 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3621 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3622 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3623 return X86EMUL_CONTINUE;
3624}
3625
3626static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3627{
3628 u64 pmc;
3629
3630 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3631 return emulate_gp(ctxt, 0);
3632 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3633 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3634 return X86EMUL_CONTINUE;
3635}
3636
3637static int em_mov(struct x86_emulate_ctxt *ctxt)
3638{
3639 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3640 return X86EMUL_CONTINUE;
3641}
3642
3643static int em_movbe(struct x86_emulate_ctxt *ctxt)
3644{
3645 u16 tmp;
3646
3647 if (!ctxt->ops->guest_has_movbe(ctxt))
3648 return emulate_ud(ctxt);
3649
3650 switch (ctxt->op_bytes) {
3651 case 2:
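 /*
 * Per the MOVBE definition, when the operand size is 16 bits the
 * upper word of the destination register remains unchanged.
 *
 * Casting ->valptr or ->val straight to u16 would break strict
 * aliasing rules, so the 16-bit byte swap is done by hand on a copy.
 */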
3660 tmp = (u16)ctxt->src.val;
3661 ctxt->dst.val &= ~0xffffUL;
3662 ctxt->dst.val |= (unsigned long)swab16(tmp);
3663 break;
3664 case 4:
3665 ctxt->dst.val = swab32((u32)ctxt->src.val);
3666 break;
3667 case 8:
3668 ctxt->dst.val = swab64(ctxt->src.val);
3669 break;
3670 default:
3671 BUG();
3672 }
3673 return X86EMUL_CONTINUE;
3674}
3675
3676static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3677{
3678 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3679 return emulate_gp(ctxt, 0);
3680
3681
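 /* Disable writeback. */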
3682 ctxt->dst.type = OP_NONE;
3683 return X86EMUL_CONTINUE;
3684}
3685
3686static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3687{
3688 unsigned long val;
3689
3690 if (ctxt->mode == X86EMUL_MODE_PROT64)
3691 val = ctxt->src.val & ~0ULL;
3692 else
3693 val = ctxt->src.val & ~0U;
3694
3695
3696 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3697 return emulate_gp(ctxt, 0);
3698
3699
3700 ctxt->dst.type = OP_NONE;
3701 return X86EMUL_CONTINUE;
3702}
3703
3704static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3705{
3706 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3707 u64 msr_data;
3708 int r;
3709
3710 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3711 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3712 r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
3713
3714 if (r == X86EMUL_IO_NEEDED)
3715 return r;
3716
3717 if (r > 0)
3718 return emulate_gp(ctxt, 0);
3719
3720 return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
3721}
3722
3723static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3724{
3725 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3726 u64 msr_data;
3727 int r;
3728
3729 r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
3730
3731 if (r == X86EMUL_IO_NEEDED)
3732 return r;
3733
3734 if (r)
3735 return emulate_gp(ctxt, 0);
3736
3737 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3738 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3739 return X86EMUL_CONTINUE;
3740}
3741
3742static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3743{
3744 if (segment > VCPU_SREG_GS &&
3745 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3746 ctxt->ops->cpl(ctxt) > 0)
3747 return emulate_gp(ctxt, 0);
3748
3749 ctxt->dst.val = get_segment_selector(ctxt, segment);
3750 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3751 ctxt->dst.bytes = 2;
3752 return X86EMUL_CONTINUE;
3753}
3754
3755static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3756{
3757 if (ctxt->modrm_reg > VCPU_SREG_GS)
3758 return emulate_ud(ctxt);
3759
3760 return em_store_sreg(ctxt, ctxt->modrm_reg);
3761}
3762
3763static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3764{
3765 u16 sel = ctxt->src.val;
3766
3767 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3768 return emulate_ud(ctxt);
3769
3770 if (ctxt->modrm_reg == VCPU_SREG_SS)
3771 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3772
3773
3774 ctxt->dst.type = OP_NONE;
3775 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3776}
3777
3778static int em_sldt(struct x86_emulate_ctxt *ctxt)
3779{
3780 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3781}
3782
3783static int em_lldt(struct x86_emulate_ctxt *ctxt)
3784{
3785 u16 sel = ctxt->src.val;
3786
3787
3788 ctxt->dst.type = OP_NONE;
3789 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3790}
3791
3792static int em_str(struct x86_emulate_ctxt *ctxt)
3793{
3794 return em_store_sreg(ctxt, VCPU_SREG_TR);
3795}
3796
3797static int em_ltr(struct x86_emulate_ctxt *ctxt)
3798{
3799 u16 sel = ctxt->src.val;
3800
3801
3802 ctxt->dst.type = OP_NONE;
3803 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3804}
3805
3806static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3807{
3808 int rc;
3809 ulong linear;
3810
3811 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3812 if (rc == X86EMUL_CONTINUE)
3813 ctxt->ops->invlpg(ctxt, linear);
3814
3815 ctxt->dst.type = OP_NONE;
3816 return X86EMUL_CONTINUE;
3817}
3818
3819static int em_clts(struct x86_emulate_ctxt *ctxt)
3820{
3821 ulong cr0;
3822
3823 cr0 = ctxt->ops->get_cr(ctxt, 0);
3824 cr0 &= ~X86_CR0_TS;
3825 ctxt->ops->set_cr(ctxt, 0, cr0);
3826 return X86EMUL_CONTINUE;
3827}
3828
3829static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3830{
3831 int rc = ctxt->ops->fix_hypercall(ctxt);
3832
3833 if (rc != X86EMUL_CONTINUE)
3834 return rc;
3835
3836
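 /* Let the processor re-execute the fixed hypercall. */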
3837 ctxt->_eip = ctxt->eip;
3838
3839 ctxt->dst.type = OP_NONE;
3840 return X86EMUL_CONTINUE;
3841}
3842
3843static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3844 void (*get)(struct x86_emulate_ctxt *ctxt,
3845 struct desc_ptr *ptr))
3846{
3847 struct desc_ptr desc_ptr;
3848
3849 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3850 ctxt->ops->cpl(ctxt) > 0)
3851 return emulate_gp(ctxt, 0);
3852
3853 if (ctxt->mode == X86EMUL_MODE_PROT64)
3854 ctxt->op_bytes = 8;
3855 get(ctxt, &desc_ptr);
3856 if (ctxt->op_bytes == 2) {
3857 ctxt->op_bytes = 4;
3858 desc_ptr.address &= 0x00ffffff;
3859 }
3860
3861 ctxt->dst.type = OP_NONE;
3862 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3863 &desc_ptr, 2 + ctxt->op_bytes);
3864}
3865
3866static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3867{
3868 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3869}
3870
3871static int em_sidt(struct x86_emulate_ctxt *ctxt)
3872{
3873 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3874}
3875
3876static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3877{
3878 struct desc_ptr desc_ptr;
3879 int rc;
3880
3881 if (ctxt->mode == X86EMUL_MODE_PROT64)
3882 ctxt->op_bytes = 8;
3883 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3884 &desc_ptr.size, &desc_ptr.address,
3885 ctxt->op_bytes);
3886 if (rc != X86EMUL_CONTINUE)
3887 return rc;
3888 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3889 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3890 return emulate_gp(ctxt, 0);
3891 if (lgdt)
3892 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3893 else
3894 ctxt->ops->set_idt(ctxt, &desc_ptr);
3895
3896 ctxt->dst.type = OP_NONE;
3897 return X86EMUL_CONTINUE;
3898}
3899
3900static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3901{
3902 return em_lgdt_lidt(ctxt, true);
3903}
3904
3905static int em_lidt(struct x86_emulate_ctxt *ctxt)
3906{
3907 return em_lgdt_lidt(ctxt, false);
3908}
3909
3910static int em_smsw(struct x86_emulate_ctxt *ctxt)
3911{
3912 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3913 ctxt->ops->cpl(ctxt) > 0)
3914 return emulate_gp(ctxt, 0);
3915
3916 if (ctxt->dst.type == OP_MEM)
3917 ctxt->dst.bytes = 2;
3918 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3919 return X86EMUL_CONTINUE;
3920}
3921
3922static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3923{
3924 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3925 | (ctxt->src.val & 0x0f));
3926 ctxt->dst.type = OP_NONE;
3927 return X86EMUL_CONTINUE;
3928}
3929
3930static int em_loop(struct x86_emulate_ctxt *ctxt)
3931{
3932 int rc = X86EMUL_CONTINUE;
3933
3934 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3935 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3936 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3937 rc = jmp_rel(ctxt, ctxt->src.val);
3938
3939 return rc;
3940}
3941
3942static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3943{
3944 int rc = X86EMUL_CONTINUE;
3945
3946 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3947 rc = jmp_rel(ctxt, ctxt->src.val);
3948
3949 return rc;
3950}
3951
3952static int em_in(struct x86_emulate_ctxt *ctxt)
3953{
3954 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3955 &ctxt->dst.val))
3956 return X86EMUL_IO_NEEDED;
3957
3958 return X86EMUL_CONTINUE;
3959}
3960
3961static int em_out(struct x86_emulate_ctxt *ctxt)
3962{
3963 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3964 &ctxt->src.val, 1);
3965
3966 ctxt->dst.type = OP_NONE;
3967 return X86EMUL_CONTINUE;
3968}
3969
3970static int em_cli(struct x86_emulate_ctxt *ctxt)
3971{
3972 if (emulator_bad_iopl(ctxt))
3973 return emulate_gp(ctxt, 0);
3974
3975 ctxt->eflags &= ~X86_EFLAGS_IF;
3976 return X86EMUL_CONTINUE;
3977}
3978
3979static int em_sti(struct x86_emulate_ctxt *ctxt)
3980{
3981 if (emulator_bad_iopl(ctxt))
3982 return emulate_gp(ctxt, 0);
3983
3984 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3985 ctxt->eflags |= X86_EFLAGS_IF;
3986 return X86EMUL_CONTINUE;
3987}
3988
3989static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3990{
3991 u32 eax, ebx, ecx, edx;
3992 u64 msr = 0;
3993
3994 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3995 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3996 ctxt->ops->cpl(ctxt)) {
3997 return emulate_gp(ctxt, 0);
3998 }
3999
4000 eax = reg_read(ctxt, VCPU_REGS_RAX);
4001 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4002 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4003 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
4004 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
4005 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
4006 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
4007 return X86EMUL_CONTINUE;
4008}
4009
4010static int em_sahf(struct x86_emulate_ctxt *ctxt)
4011{
4012 u32 flags;
4013
4014 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
4015 X86_EFLAGS_SF;
4016 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
4017
4018 ctxt->eflags &= ~0xffUL;
4019 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
4020 return X86EMUL_CONTINUE;
4021}
4022
4023static int em_lahf(struct x86_emulate_ctxt *ctxt)
4024{
4025 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
4026 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
4027 return X86EMUL_CONTINUE;
4028}
4029
4030static int em_bswap(struct x86_emulate_ctxt *ctxt)
4031{
4032 switch (ctxt->op_bytes) {
4033#ifdef CONFIG_X86_64
4034 case 8:
4035 asm("bswap %0" : "+r"(ctxt->dst.val));
4036 break;
4037#endif
4038 default:
4039 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4040 break;
4041 }
4042 return X86EMUL_CONTINUE;
4043}
4044
4045static int em_clflush(struct x86_emulate_ctxt *ctxt)
4046{
4047
4048 return X86EMUL_CONTINUE;
4049}
4050
4051static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
4052{
4053
4054 return X86EMUL_CONTINUE;
4055}
4056
4057static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4058{
4059 ctxt->dst.val = (s32) ctxt->src.val;
4060 return X86EMUL_CONTINUE;
4061}
4062
4063static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4064{
4065 if (!ctxt->ops->guest_has_fxsr(ctxt))
4066 return emulate_ud(ctxt);
4067
4068 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4069 return emulate_nm(ctxt);
4070
4071
4072
4073
4074
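 /*
 * Don't emulate a case that should never be hit: the 64-bit mode
 * variants of FXSAVE/FXRSTOR are not handled here.
 */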
4075 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4076 return X86EMUL_UNHANDLEABLE;
4077
4078 return X86EMUL_CONTINUE;
4079}
4080
4081
4082
4083
4084
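/*
 * Size of the guest-visible FXSAVE image: all 16 XMM registers in 64-bit
 * mode, XMM0-7 when CR4.OSFXSR is set, otherwise only the legacy x87 area
 * (see fxstate_size() below).
 */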
4085static size_t __fxstate_size(int nregs)
4086{
4087 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4088}
4089
4090static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4091{
4092 bool cr4_osfxsr;
4093 if (ctxt->mode == X86EMUL_MODE_PROT64)
4094 return __fxstate_size(16);
4095
4096 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4097 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4098}
4099
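/*
 * The guest-visible FXSAVE/FXRSTOR image size depends on execution mode and
 * CR4.OSFXSR.  Emulation always operates on a full host fxregs_state and only
 * copies the guest-visible portion (fxstate_size() bytes) to or from guest
 * memory; the remaining registers are preserved via fxregs_fixup().
 */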
4118static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4119{
4120 struct fxregs_state fx_state;
4121 int rc;
4122
4123 rc = check_fxsr(ctxt);
4124 if (rc != X86EMUL_CONTINUE)
4125 return rc;
4126
4127 emulator_get_fpu();
4128
4129 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4130
4131 emulator_put_fpu();
4132
4133 if (rc != X86EMUL_CONTINUE)
4134 return rc;
4135
4136 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4137 fxstate_size(ctxt));
4138}
4139
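/*
 * FXRSTOR loads a full fxregs_state, but the guest image may be shorter.
 * Fill the tail beyond @used_size from the current hardware FX state so the
 * registers not covered by the guest image keep their existing values.
 */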
4147static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4148 const size_t used_size)
4149{
4150 struct fxregs_state fx_tmp;
4151 int rc;
4152
4153 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4154 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4155 __fxstate_size(16) - used_size);
4156
4157 return rc;
4158}
4159
4160static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4161{
4162 struct fxregs_state fx_state;
4163 int rc;
4164 size_t size;
4165
4166 rc = check_fxsr(ctxt);
4167 if (rc != X86EMUL_CONTINUE)
4168 return rc;
4169
4170 size = fxstate_size(ctxt);
4171 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4172 if (rc != X86EMUL_CONTINUE)
4173 return rc;
4174
4175 emulator_get_fpu();
4176
4177 if (size < __fxstate_size(16)) {
4178 rc = fxregs_fixup(&fx_state, size);
4179 if (rc != X86EMUL_CONTINUE)
4180 goto out;
4181 }
4182
4183 if (fx_state.mxcsr >> 16) {
4184 rc = emulate_gp(ctxt, 0);
4185 goto out;
4186 }
4187
4188 if (rc == X86EMUL_CONTINUE)
4189 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4190
4191out:
4192 emulator_put_fpu();
4193
4194 return rc;
4195}
4196
4197static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4198{
4199 u32 eax, ecx, edx;
4200
4201 eax = reg_read(ctxt, VCPU_REGS_RAX);
4202 edx = reg_read(ctxt, VCPU_REGS_RDX);
4203 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4204
4205 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4206 return emulate_gp(ctxt, 0);
4207
4208 return X86EMUL_CONTINUE;
4209}
4210
4211static bool valid_cr(int nr)
4212{
4213 switch (nr) {
4214 case 0:
4215 case 2 ... 4:
4216 case 8:
4217 return true;
4218 default:
4219 return false;
4220 }
4221}
4222
4223static int check_cr_access(struct x86_emulate_ctxt *ctxt)
4224{
4225 if (!valid_cr(ctxt->modrm_reg))
4226 return emulate_ud(ctxt);
4227
4228 return X86EMUL_CONTINUE;
4229}
4230
4231static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4232{
4233 unsigned long dr7;
4234
4235 ctxt->ops->get_dr(ctxt, 7, &dr7);
4236
4237
4238 return dr7 & (1 << 13);
4239}
4240
4241static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4242{
4243 int dr = ctxt->modrm_reg;
4244 u64 cr4;
4245
4246 if (dr > 7)
4247 return emulate_ud(ctxt);
4248
4249 cr4 = ctxt->ops->get_cr(ctxt, 4);
4250 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4251 return emulate_ud(ctxt);
4252
4253 if (check_dr7_gd(ctxt)) {
4254 ulong dr6;
4255
4256 ctxt->ops->get_dr(ctxt, 6, &dr6);
4257 dr6 &= ~DR_TRAP_BITS;
4258 dr6 |= DR6_BD | DR6_ACTIVE_LOW;
4259 ctxt->ops->set_dr(ctxt, 6, dr6);
4260 return emulate_db(ctxt);
4261 }
4262
4263 return X86EMUL_CONTINUE;
4264}
4265
4266static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4267{
4268 u64 new_val = ctxt->src.val64;
4269 int dr = ctxt->modrm_reg;
4270
4271 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4272 return emulate_gp(ctxt, 0);
4273
4274 return check_dr_read(ctxt);
4275}
4276
4277static int check_svme(struct x86_emulate_ctxt *ctxt)
4278{
4279 u64 efer = 0;
4280
4281 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4282
4283 if (!(efer & EFER_SVME))
4284 return emulate_ud(ctxt);
4285
4286 return X86EMUL_CONTINUE;
4287}
4288
4289static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4290{
4291 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4292
4293
4294 if (rax & 0xffff000000000000ULL)
4295 return emulate_gp(ctxt, 0);
4296
4297 return check_svme(ctxt);
4298}
4299
4300static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4301{
4302 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4303
4304 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4305 return emulate_ud(ctxt);
4306
4307 return X86EMUL_CONTINUE;
4308}
4309
4310static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4311{
4312 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4313 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4314
4315
4316
4317
4318
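 /*
 * VMware allows access to these pseudo-PMCs even when read via RDPMC
 * in ring 3 with CR4.PCE clear.  Mimic that behavior.
 */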
4319 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4320 return X86EMUL_CONTINUE;
4321
4322 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4323 ctxt->ops->check_pmc(ctxt, rcx))
4324 return emulate_gp(ctxt, 0);
4325
4326 return X86EMUL_CONTINUE;
4327}
4328
4329static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4330{
4331 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4332 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4333 return emulate_gp(ctxt, 0);
4334
4335 return X86EMUL_CONTINUE;
4336}
4337
4338static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4339{
4340 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4341 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4342 return emulate_gp(ctxt, 0);
4343
4344 return X86EMUL_CONTINUE;
4345}
4346
4347#define D(_y) { .flags = (_y) }
4348#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4349#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4350 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4351#define N D(NotImpl)
4352#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4353#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4354#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4355#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4356#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4357#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4358#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4359#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4360#define II(_f, _e, _i) \
4361 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4362#define IIP(_f, _e, _i, _p) \
4363 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4364 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4365#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4366
4367#define D2bv(_f) D((_f) | ByteOp), D(_f)
4368#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4369#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4370#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4371#define I2bvIP(_f, _e, _i, _p) \
4372 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4373
4374#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4375 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4376 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4377
4378static const struct opcode group7_rm0[] = {
4379 N,
4380 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4381 N, N, N, N, N, N,
4382};
4383
4384static const struct opcode group7_rm1[] = {
4385 DI(SrcNone | Priv, monitor),
4386 DI(SrcNone | Priv, mwait),
4387 N, N, N, N, N, N,
4388};
4389
4390static const struct opcode group7_rm2[] = {
4391 N,
4392 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4393 N, N, N, N, N, N,
4394};
4395
4396static const struct opcode group7_rm3[] = {
4397 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4398 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4399 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4400 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4401 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4402 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4403 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4404 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4405};
4406
4407static const struct opcode group7_rm7[] = {
4408 N,
4409 DIP(SrcNone, rdtscp, check_rdtsc),
4410 N, N, N, N, N, N,
4411};
4412
4413static const struct opcode group1[] = {
4414 F(Lock, em_add),
4415 F(Lock | PageTable, em_or),
4416 F(Lock, em_adc),
4417 F(Lock, em_sbb),
4418 F(Lock | PageTable, em_and),
4419 F(Lock, em_sub),
4420 F(Lock, em_xor),
4421 F(NoWrite, em_cmp),
4422};
4423
4424static const struct opcode group1A[] = {
4425 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4426};
4427
4428static const struct opcode group2[] = {
4429 F(DstMem | ModRM, em_rol),
4430 F(DstMem | ModRM, em_ror),
4431 F(DstMem | ModRM, em_rcl),
4432 F(DstMem | ModRM, em_rcr),
4433 F(DstMem | ModRM, em_shl),
4434 F(DstMem | ModRM, em_shr),
4435 F(DstMem | ModRM, em_shl),
4436 F(DstMem | ModRM, em_sar),
4437};
4438
4439static const struct opcode group3[] = {
4440 F(DstMem | SrcImm | NoWrite, em_test),
4441 F(DstMem | SrcImm | NoWrite, em_test),
4442 F(DstMem | SrcNone | Lock, em_not),
4443 F(DstMem | SrcNone | Lock, em_neg),
4444 F(DstXacc | Src2Mem, em_mul_ex),
4445 F(DstXacc | Src2Mem, em_imul_ex),
4446 F(DstXacc | Src2Mem, em_div_ex),
4447 F(DstXacc | Src2Mem, em_idiv_ex),
4448};
4449
4450static const struct opcode group4[] = {
4451 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4452 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4453 N, N, N, N, N, N,
4454};
4455
4456static const struct opcode group5[] = {
4457 F(DstMem | SrcNone | Lock, em_inc),
4458 F(DstMem | SrcNone | Lock, em_dec),
4459 I(SrcMem | NearBranch, em_call_near_abs),
4460 I(SrcMemFAddr | ImplicitOps, em_call_far),
4461 I(SrcMem | NearBranch, em_jmp_abs),
4462 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4463 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4464};
4465
4466static const struct opcode group6[] = {
4467 II(Prot | DstMem, em_sldt, sldt),
4468 II(Prot | DstMem, em_str, str),
4469 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4470 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4471 N, N, N, N,
4472};
4473
4474static const struct group_dual group7 = { {
4475 II(Mov | DstMem, em_sgdt, sgdt),
4476 II(Mov | DstMem, em_sidt, sidt),
4477 II(SrcMem | Priv, em_lgdt, lgdt),
4478 II(SrcMem | Priv, em_lidt, lidt),
4479 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4480 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4481 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4482}, {
4483 EXT(0, group7_rm0),
4484 EXT(0, group7_rm1),
4485 EXT(0, group7_rm2),
4486 EXT(0, group7_rm3),
4487 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4488 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4489 EXT(0, group7_rm7),
4490} };
4491
4492static const struct opcode group8[] = {
4493 N, N, N, N,
4494 F(DstMem | SrcImmByte | NoWrite, em_bt),
4495 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4496 F(DstMem | SrcImmByte | Lock, em_btr),
4497 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4498};
4499
/*
 * The "memory" destination is actually always a register, since we come
 * from the register (mod == 3) case of group9.
 */
4504static const struct gprefix pfx_0f_c7_7 = {
4505 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4506};
4507
4508
4509static const struct group_dual group9 = { {
4510 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4511}, {
4512 N, N, N, N, N, N, N,
4513 GP(0, &pfx_0f_c7_7),
4514} };
4515
4516static const struct opcode group11[] = {
4517 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4518 X7(D(Undefined)),
4519};
4520
4521static const struct gprefix pfx_0f_ae_7 = {
4522 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4523};
4524
4525static const struct group_dual group15 = { {
4526 I(ModRM | Aligned16, em_fxsave),
4527 I(ModRM | Aligned16, em_fxrstor),
4528 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4529}, {
4530 N, N, N, N, N, N, N, N,
4531} };
4532
4533static const struct gprefix pfx_0f_6f_0f_7f = {
4534 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4535};
4536
4537static const struct instr_dual instr_dual_0f_2b = {
4538 I(0, em_mov), N
4539};
4540
4541static const struct gprefix pfx_0f_2b = {
4542 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4543};
4544
4545static const struct gprefix pfx_0f_10_0f_11 = {
4546 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4547};
4548
4549static const struct gprefix pfx_0f_28_0f_29 = {
4550 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4551};
4552
4553static const struct gprefix pfx_0f_e7 = {
4554 N, I(Sse, em_mov), N, N,
4555};
4556
4557static const struct escape escape_d9 = { {
4558 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4559}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
4576} };
4577
4578static const struct escape escape_db = { {
4579 N, N, N, N, N, N, N, N,
4580}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
4597} };
4598
4599static const struct escape escape_dd = { {
4600 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4601}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
4618} };
4619
4620static const struct instr_dual instr_dual_0f_c3 = {
4621 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4622};
4623
4624static const struct mode_dual mode_dual_63 = {
4625 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4626};
4627
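/*
 * Primary one-byte opcode map, indexed by the opcode byte.  Entries the
 * emulator does not handle are N (NotImpl); segment/operand-size/REP/LOCK
 * prefix bytes are also N here because the prefix loop in x86_decode_insn()
 * consumes them before this table is consulted.
 */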
4628static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
4630 F6ALU(Lock, em_add),
4631 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4632 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
4634 F6ALU(Lock | PageTable, em_or),
4635 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4636 N,
	/* 0x10 - 0x17 */
4638 F6ALU(Lock, em_adc),
4639 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4640 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
4642 F6ALU(Lock, em_sbb),
4643 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4644 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
4646 F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
4648 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
4650 F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
4652 F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
4654 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
4656 X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
4658 X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
4660 I(ImplicitOps | Stack | No64, em_pusha),
4661 I(ImplicitOps | Stack | No64, em_popa),
4662 N, MD(ModRM, &mode_dual_63),
4663 N, N, N, N,
	/* 0x68 - 0x6F */
4665 I(SrcImm | Mov | Stack, em_push),
4666 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4667 I(SrcImmByte | Mov | Stack, em_push),
4668 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4669 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4670 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
	/* 0x70 - 0x7F */
4672 X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
4674 G(ByteOp | DstMem | SrcImm, group1),
4675 G(DstMem | SrcImm, group1),
4676 G(ByteOp | DstMem | SrcImm | No64, group1),
4677 G(DstMem | SrcImmByte, group1),
4678 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4679 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
4681 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4682 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4683 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4684 D(ModRM | SrcMem | NoAccess | DstReg),
4685 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4686 G(0, group1A),
	/* 0x90 - 0x97 */
4688 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
4690 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4691 I(SrcImmFAddr | No64, em_call_far), N,
4692 II(ImplicitOps | Stack, em_pushf, pushf),
4693 II(ImplicitOps | Stack, em_popf, popf),
4694 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
4696 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4697 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4698 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4699 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	/* 0xA8 - 0xAF */
4701 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4702 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4703 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4704 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
4706 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
4708 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
4710 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4711 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4712 I(ImplicitOps | NearBranch, em_ret),
4713 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4714 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4715 G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
4717 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4718 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4719 I(ImplicitOps, em_ret_far),
4720 D(ImplicitOps), DI(SrcImmByte, intn),
4721 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
4723 G(Src2One | ByteOp, group2), G(Src2One, group2),
4724 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4725 I(DstAcc | SrcImmUByte | No64, em_aam),
4726 I(DstAcc | SrcImmUByte | No64, em_aad),
4727 F(DstAcc | ByteOp | No64, em_salc),
4728 I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
4730 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
4732 X3(I(SrcImmByte | NearBranch, em_loop)),
4733 I(SrcImmByte | NearBranch, em_jcxz),
4734 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4735 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
4737 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4738 I(SrcImmFAddr | No64, em_jmp_far),
4739 D(SrcImmByte | ImplicitOps | NearBranch),
4740 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4741 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
4743 N, DI(ImplicitOps, icebp), N, N,
4744 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4745 G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
4747 D(ImplicitOps), D(ImplicitOps),
4748 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4749 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4750};
4751
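/*
 * Two-byte (0F xx) opcode map, indexed by the byte that follows the 0F
 * escape.
 */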
4752static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
4754 G(0, group6), GD(0, &group7), N, N,
4755 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4756 II(ImplicitOps | Priv, em_clts, clts), N,
4757 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4758 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
4760 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4761 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4762 N, N, N, N, N, N,
4763 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4764 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4765 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4766 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4767 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4768 D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
4770 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4771 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4772 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4773 check_cr_access),
4774 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4775 check_dr_write),
4776 N, N, N, N,
4777 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4778 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4779 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4780 N, N, N, N,
	/* 0x30 - 0x3F */
4782 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4783 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4784 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4785 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4786 I(ImplicitOps | EmulateOnUD, em_sysenter),
4787 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4788 N, N,
4789 N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
4791 X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
4793 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
4795 N, N, N, N,
4796 N, N, N, N,
4797 N, N, N, N,
4798 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
4800 N, N, N, N,
4801 N, N, N, N,
4802 N, N, N, N,
4803 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
4805 X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
4809 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4810 II(ImplicitOps, em_cpuid, cpuid),
4811 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4812 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4813 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
4815 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4816 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4817 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4818 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4819 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4820 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
4822 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4823 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4824 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4825 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4826 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4827 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
4829 N, N,
4830 G(BitOp, group8),
4831 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4832 I(DstReg | SrcMem | ModRM, em_bsf_c),
4833 I(DstReg | SrcMem | ModRM, em_bsr_c),
4834 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
4836 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4837 N, ID(0, &instr_dual_0f_c3),
4838 N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
4840 X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
4842 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
4844 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4845 N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
4847 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4848};
4849
4850static const struct instr_dual instr_dual_0f_38_f0 = {
4851 I(DstReg | SrcMem | Mov, em_movbe), N
4852};
4853
4854static const struct instr_dual instr_dual_0f_38_f1 = {
4855 I(DstMem | SrcReg | Mov, em_movbe), N
4856};
4857
4858static const struct gprefix three_byte_0f_38_f0 = {
4859 ID(0, &instr_dual_0f_38_f0), N, N, N
4860};
4861
4862static const struct gprefix three_byte_0f_38_f1 = {
4863 ID(0, &instr_dual_0f_38_f1), N, N, N
4864};
4865
4866
4867
4868
4869
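/*
 * Three-byte (0F 38 xx) opcode map.  Only MOVBE (0F 38 F0/F1) is handled,
 * and, given the instr_dual tables above, only in its memory-operand
 * (mod != 3) form without a SIMD prefix.
 */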
4870static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7F */
4872 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xEF */
4874 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xF0 - 0xF1 */
4876 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4877 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xF2 - 0xFF */
4879 N, N, X4(N), X8(N)
4880};
4881
4882#undef D
4883#undef N
4884#undef G
4885#undef GD
4886#undef I
4887#undef GP
4888#undef EXT
4889#undef MD
4890#undef ID
4891
4892#undef D2bv
4893#undef D2bvIP
4894#undef I2bv
4895#undef I2bvIP
#undef F6ALU
4897
4898static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4899{
4900 unsigned size;
4901
4902 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4903 if (size == 8)
4904 size = 4;
4905 return size;
4906}
4907
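/*
 * Fetch an immediate of the given size from the instruction stream at
 * ctxt->_eip into op->val, sign-extending by default and masking the value
 * down when sign_extension is false.  The otherwise unused "done" label is
 * the target of the goto hidden inside the insn_fetch() macro on a fetch
 * failure.
 */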
4908static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4909 unsigned size, bool sign_extension)
4910{
4911 int rc = X86EMUL_CONTINUE;
4912
4913 op->type = OP_IMM;
4914 op->bytes = size;
4915 op->addr.mem.ea = ctxt->_eip;
4916
4917 switch (op->bytes) {
4918 case 1:
4919 op->val = insn_fetch(s8, ctxt);
4920 break;
4921 case 2:
4922 op->val = insn_fetch(s16, ctxt);
4923 break;
4924 case 4:
4925 op->val = insn_fetch(s32, ctxt);
4926 break;
4927 case 8:
4928 op->val = insn_fetch(s64, ctxt);
4929 break;
4930 }
4931 if (!sign_extension) {
4932 switch (op->bytes) {
4933 case 1:
4934 op->val &= 0xff;
4935 break;
4936 case 2:
4937 op->val &= 0xffff;
4938 break;
4939 case 4:
4940 op->val &= 0xffffffff;
4941 break;
4942 }
4943 }
4944done:
4945 return rc;
4946}
4947
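/*
 * Materialize a single operand from the 5-bit Op* selector packed into
 * ctxt->d, e.g.
 *
 *	decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
 *
 * The OpMem* cases share the mem_common tail: they copy the already decoded
 * ctxt->memop and remember it in ctxt->memopp so that a RIP-relative address
 * can be fixed up once the instruction length is known.
 */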
4948static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4949 unsigned d)
4950{
4951 int rc = X86EMUL_CONTINUE;
4952
4953 switch (d) {
4954 case OpReg:
4955 decode_register_operand(ctxt, op);
4956 break;
4957 case OpImmUByte:
4958 rc = decode_imm(ctxt, op, 1, false);
4959 break;
4960 case OpMem:
4961 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4962 mem_common:
4963 *op = ctxt->memop;
4964 ctxt->memopp = op;
4965 if (ctxt->d & BitOp)
4966 fetch_bit_operand(ctxt);
4967 op->orig_val = op->val;
4968 break;
4969 case OpMem64:
4970 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4971 goto mem_common;
4972 case OpAcc:
4973 op->type = OP_REG;
4974 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4975 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4976 fetch_register_operand(op);
4977 op->orig_val = op->val;
4978 break;
4979 case OpAccLo:
4980 op->type = OP_REG;
4981 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4982 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4983 fetch_register_operand(op);
4984 op->orig_val = op->val;
4985 break;
4986 case OpAccHi:
4987 if (ctxt->d & ByteOp) {
4988 op->type = OP_NONE;
4989 break;
4990 }
4991 op->type = OP_REG;
4992 op->bytes = ctxt->op_bytes;
4993 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4994 fetch_register_operand(op);
4995 op->orig_val = op->val;
4996 break;
4997 case OpDI:
4998 op->type = OP_MEM;
4999 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5000 op->addr.mem.ea =
5001 register_address(ctxt, VCPU_REGS_RDI);
5002 op->addr.mem.seg = VCPU_SREG_ES;
5003 op->val = 0;
5004 op->count = 1;
5005 break;
5006 case OpDX:
5007 op->type = OP_REG;
5008 op->bytes = 2;
5009 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5010 fetch_register_operand(op);
5011 break;
5012 case OpCL:
5013 op->type = OP_IMM;
5014 op->bytes = 1;
5015 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5016 break;
5017 case OpImmByte:
5018 rc = decode_imm(ctxt, op, 1, true);
5019 break;
5020 case OpOne:
5021 op->type = OP_IMM;
5022 op->bytes = 1;
5023 op->val = 1;
5024 break;
5025 case OpImm:
5026 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5027 break;
5028 case OpImm64:
5029 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5030 break;
5031 case OpMem8:
5032 ctxt->memop.bytes = 1;
5033 if (ctxt->memop.type == OP_REG) {
5034 ctxt->memop.addr.reg = decode_register(ctxt,
5035 ctxt->modrm_rm, true);
5036 fetch_register_operand(&ctxt->memop);
5037 }
5038 goto mem_common;
5039 case OpMem16:
5040 ctxt->memop.bytes = 2;
5041 goto mem_common;
5042 case OpMem32:
5043 ctxt->memop.bytes = 4;
5044 goto mem_common;
5045 case OpImmU16:
5046 rc = decode_imm(ctxt, op, 2, false);
5047 break;
5048 case OpImmU:
5049 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5050 break;
5051 case OpSI:
5052 op->type = OP_MEM;
5053 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5054 op->addr.mem.ea =
5055 register_address(ctxt, VCPU_REGS_RSI);
5056 op->addr.mem.seg = ctxt->seg_override;
5057 op->val = 0;
5058 op->count = 1;
5059 break;
5060 case OpXLat:
5061 op->type = OP_MEM;
5062 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5063 op->addr.mem.ea =
5064 address_mask(ctxt,
5065 reg_read(ctxt, VCPU_REGS_RBX) +
5066 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5067 op->addr.mem.seg = ctxt->seg_override;
5068 op->val = 0;
5069 break;
5070 case OpImmFAddr:
5071 op->type = OP_IMM;
5072 op->addr.mem.ea = ctxt->_eip;
5073 op->bytes = ctxt->op_bytes + 2;
5074 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5075 break;
5076 case OpMemFAddr:
5077 ctxt->memop.bytes = ctxt->op_bytes + 2;
5078 goto mem_common;
5079 case OpES:
5080 op->type = OP_IMM;
5081 op->val = VCPU_SREG_ES;
5082 break;
5083 case OpCS:
5084 op->type = OP_IMM;
5085 op->val = VCPU_SREG_CS;
5086 break;
5087 case OpSS:
5088 op->type = OP_IMM;
5089 op->val = VCPU_SREG_SS;
5090 break;
5091 case OpDS:
5092 op->type = OP_IMM;
5093 op->val = VCPU_SREG_DS;
5094 break;
5095 case OpFS:
5096 op->type = OP_IMM;
5097 op->val = VCPU_SREG_FS;
5098 break;
5099 case OpGS:
5100 op->type = OP_IMM;
5101 op->val = VCPU_SREG_GS;
5102 break;
5103 case OpImplicit:
		/* Special instructions do their own operand decoding. */
5105 default:
5106 op->type = OP_NONE;
5107 break;
5108 }
5109
5110done:
5111 return rc;
5112}
5113
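/*
 * Decode the instruction at ctxt->eip (or from the buffer supplied by the
 * caller): legacy and REX prefixes, the opcode byte(s), the table lookup
 * including group/prefix/escape indirection, ModRM/SIB, and finally the
 * source, second-source and destination operands.
 */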
5114int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
5115{
5116 int rc = X86EMUL_CONTINUE;
5117 int mode = ctxt->mode;
5118 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5119 bool op_prefix = false;
5120 bool has_seg_override = false;
5121 struct opcode opcode;
5122 u16 dummy;
5123 struct desc_struct desc;
5124
5125 ctxt->memop.type = OP_NONE;
5126 ctxt->memopp = NULL;
5127 ctxt->_eip = ctxt->eip;
5128 ctxt->fetch.ptr = ctxt->fetch.data;
5129 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5130 ctxt->opcode_len = 1;
5131 ctxt->intercept = x86_intercept_none;
5132 if (insn_len > 0)
5133 memcpy(ctxt->fetch.data, insn, insn_len);
5134 else {
5135 rc = __do_insn_fetch_bytes(ctxt, 1);
5136 if (rc != X86EMUL_CONTINUE)
5137 goto done;
5138 }
5139
5140 switch (mode) {
5141 case X86EMUL_MODE_REAL:
5142 case X86EMUL_MODE_VM86:
5143 def_op_bytes = def_ad_bytes = 2;
5144 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5145 if (desc.d)
5146 def_op_bytes = def_ad_bytes = 4;
5147 break;
5148 case X86EMUL_MODE_PROT16:
5149 def_op_bytes = def_ad_bytes = 2;
5150 break;
5151 case X86EMUL_MODE_PROT32:
5152 def_op_bytes = def_ad_bytes = 4;
5153 break;
5154#ifdef CONFIG_X86_64
5155 case X86EMUL_MODE_PROT64:
5156 def_op_bytes = 4;
5157 def_ad_bytes = 8;
5158 break;
5159#endif
5160 default:
5161 return EMULATION_FAILED;
5162 }
5163
5164 ctxt->op_bytes = def_op_bytes;
5165 ctxt->ad_bytes = def_ad_bytes;
5166
	/* Legacy prefixes. */
5168 for (;;) {
5169 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5170 case 0x66:
5171 op_prefix = true;
			/* switch between 2/4 bytes */
5173 ctxt->op_bytes = def_op_bytes ^ 6;
5174 break;
5175 case 0x67:
5176 if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
5178 ctxt->ad_bytes = def_ad_bytes ^ 12;
5179 else
				/* switch between 2/4 bytes */
5181 ctxt->ad_bytes = def_ad_bytes ^ 6;
5182 break;
5183 case 0x26:
5184 has_seg_override = true;
5185 ctxt->seg_override = VCPU_SREG_ES;
5186 break;
5187 case 0x2e:
5188 has_seg_override = true;
5189 ctxt->seg_override = VCPU_SREG_CS;
5190 break;
5191 case 0x36:
5192 has_seg_override = true;
5193 ctxt->seg_override = VCPU_SREG_SS;
5194 break;
5195 case 0x3e:
5196 has_seg_override = true;
5197 ctxt->seg_override = VCPU_SREG_DS;
5198 break;
5199 case 0x64:
5200 has_seg_override = true;
5201 ctxt->seg_override = VCPU_SREG_FS;
5202 break;
5203 case 0x65:
5204 has_seg_override = true;
5205 ctxt->seg_override = VCPU_SREG_GS;
5206 break;
5207 case 0x40 ... 0x4f:
5208 if (mode != X86EMUL_MODE_PROT64)
5209 goto done_prefixes;
5210 ctxt->rex_prefix = ctxt->b;
5211 continue;
5212 case 0xf0:
5213 ctxt->lock_prefix = 1;
5214 break;
5215 case 0xf2:
5216 case 0xf3:
5217 ctxt->rep_prefix = ctxt->b;
5218 break;
5219 default:
5220 goto done_prefixes;
5221 }

		/* Any legacy prefix after a REX prefix nullifies its effect. */

5225 ctxt->rex_prefix = 0;
5226 }
5227
5228done_prefixes:
5229
	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
5233
	/* Opcode byte(s). */
5235 opcode = opcode_table[ctxt->b];
5236
5237 if (ctxt->b == 0x0f) {
5238 ctxt->opcode_len = 2;
5239 ctxt->b = insn_fetch(u8, ctxt);
5240 opcode = twobyte_table[ctxt->b];
5241
		/* 0F 38 three-byte opcode map */
5243 if (ctxt->b == 0x38) {
5244 ctxt->opcode_len = 3;
5245 ctxt->b = insn_fetch(u8, ctxt);
5246 opcode = opcode_map_0f_38[ctxt->b];
5247 }
5248 }
5249 ctxt->d = opcode.flags;
5250
5251 if (ctxt->d & ModRM)
5252 ctxt->modrm = insn_fetch(u8, ctxt);
5253
	/* vex-prefix instructions are not implemented */
5255 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5256 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5257 ctxt->d = NotImpl;
5258 }
5259
5260 while (ctxt->d & GroupMask) {
5261 switch (ctxt->d & GroupMask) {
5262 case Group:
5263 goffset = (ctxt->modrm >> 3) & 7;
5264 opcode = opcode.u.group[goffset];
5265 break;
5266 case GroupDual:
5267 goffset = (ctxt->modrm >> 3) & 7;
5268 if ((ctxt->modrm >> 6) == 3)
5269 opcode = opcode.u.gdual->mod3[goffset];
5270 else
5271 opcode = opcode.u.gdual->mod012[goffset];
5272 break;
5273 case RMExt:
5274 goffset = ctxt->modrm & 7;
5275 opcode = opcode.u.group[goffset];
5276 break;
5277 case Prefix:
5278 if (ctxt->rep_prefix && op_prefix)
5279 return EMULATION_FAILED;
5280 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5281 switch (simd_prefix) {
5282 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5283 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5284 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5285 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5286 }
5287 break;
5288 case Escape:
5289 if (ctxt->modrm > 0xbf) {
5290 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5291 u32 index = array_index_nospec(
5292 ctxt->modrm - 0xc0, size);
5293
5294 opcode = opcode.u.esc->high[index];
5295 } else {
5296 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5297 }
5298 break;
5299 case InstrDual:
5300 if ((ctxt->modrm >> 6) == 3)
5301 opcode = opcode.u.idual->mod3;
5302 else
5303 opcode = opcode.u.idual->mod012;
5304 break;
5305 case ModeDual:
5306 if (ctxt->mode == X86EMUL_MODE_PROT64)
5307 opcode = opcode.u.mdual->mode64;
5308 else
5309 opcode = opcode.u.mdual->mode32;
5310 break;
5311 default:
5312 return EMULATION_FAILED;
5313 }
5314
5315 ctxt->d &= ~(u64)GroupMask;
5316 ctxt->d |= opcode.flags;
5317 }
5318
	/* No flags at all means the opcode is unrecognised. */
5320 if (ctxt->d == 0)
5321 return EMULATION_FAILED;
5322
5323 ctxt->execute = opcode.u.execute;
5324
5325 if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
5326 likely(!(ctxt->d & EmulateOnUD)))
5327 return EMULATION_FAILED;
5328
5329 if (unlikely(ctxt->d &
5330 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5331 No16))) {
		/*
		 * Stash the per-opcode permission check and intercept id now;
		 * x86_emulate_insn() consults them when the CheckPerm and
		 * Intercept flags are processed.
		 */
5336 ctxt->check_perm = opcode.check_perm;
5337 ctxt->intercept = opcode.intercept;
5338
5339 if (ctxt->d & NotImpl)
5340 return EMULATION_FAILED;
5341
5342 if (mode == X86EMUL_MODE_PROT64) {
5343 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5344 ctxt->op_bytes = 8;
5345 else if (ctxt->d & NearBranch)
5346 ctxt->op_bytes = 8;
5347 }
5348
5349 if (ctxt->d & Op3264) {
5350 if (mode == X86EMUL_MODE_PROT64)
5351 ctxt->op_bytes = 8;
5352 else
5353 ctxt->op_bytes = 4;
5354 }
5355
5356 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5357 ctxt->op_bytes = 4;
5358
5359 if (ctxt->d & Sse)
5360 ctxt->op_bytes = 16;
5361 else if (ctxt->d & Mmx)
5362 ctxt->op_bytes = 8;
5363 }
5364
	/* ModRM and SIB bytes. */
5366 if (ctxt->d & ModRM) {
5367 rc = decode_modrm(ctxt, &ctxt->memop);
5368 if (!has_seg_override) {
5369 has_seg_override = true;
5370 ctxt->seg_override = ctxt->modrm_seg;
5371 }
5372 } else if (ctxt->d & MemAbs)
5373 rc = decode_abs(ctxt, &ctxt->memop);
5374 if (rc != X86EMUL_CONTINUE)
5375 goto done;
5376
5377 if (!has_seg_override)
5378 ctxt->seg_override = VCPU_SREG_DS;
5379
5380 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5381
	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
5386 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5387 if (rc != X86EMUL_CONTINUE)
5388 goto done;
5389
	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
5394 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5395 if (rc != X86EMUL_CONTINUE)
5396 goto done;
5397
	/* Decode and fetch the destination operand: register or memory. */
5399 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5400
5401 if (ctxt->rip_relative && likely(ctxt->memopp))
5402 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5403 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5404
5405done:
5406 if (rc == X86EMUL_PROPAGATE_FAULT)
5407 ctxt->have_exception = true;
5408 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5409}
5410
5411bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5412{
5413 return ctxt->d & PageTable;
5414}
5415
5416static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5417{
	/*
	 * The second termination condition only applies to REPE and REPNE:
	 * test whether the repeat prefix is REPE/REPZ or REPNE/REPNZ and, if
	 * so, check the corresponding flag condition:
	 *   - if REPE/REPZ and ZF = 0 then done
	 *   - if REPNE/REPNZ and ZF = 1 then done
	 */
5425 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5426 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5427 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5428 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5429 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5430 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5431 return true;
5432
5433 return false;
5434}
5435
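/*
 * Deliver any pending x87 exception before MMX operands are touched: fwait
 * faults if an exception is pending, and asm_safe() converts that fault into
 * an error return, which is then reported to the guest as #MF.
 */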
5436static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5437{
5438 int rc;
5439
5440 emulator_get_fpu();
5441 rc = asm_safe("fwait");
5442 emulator_put_fpu();
5443
5444 if (unlikely(rc != X86EMUL_CONTINUE))
5445 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5446
5447 return X86EMUL_CONTINUE;
5448}
5449
5450static void fetch_possible_mmx_operand(struct operand *op)
5451{
5452 if (op->type == OP_MM)
5453 read_mmx_reg(&op->mm_val, op->addr.mm);
5454}
5455
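/*
 * Dispatch to a fastop stub.  Per the asm constraints below, dst travels in
 * RAX, src in RDX, src2 in RCX, flags in RDI and the stub address in RSI;
 * for non-byte ops the size-specific entry is selected by adding
 * __ffs(dst.bytes) * FASTOP_SIZE.  A stub that faults (e.g. a division
 * overflow) comes back with a NULL fop, which is turned into #DE.
 */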
5456static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5457{
5458 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5459
5460 if (!(ctxt->d & ByteOp))
5461 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5462
5463 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5464 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5465 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5466 : "c"(ctxt->src2.val));
5467
5468 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5469 if (!fop)
5470 return emulate_de(ctxt);
5471 return X86EMUL_CONTINUE;
5472}
5473
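/*
 * Reset the per-instruction decode state.  The memset relies on the fields
 * from rip_relative up to (but not including) modrm being contiguous in
 * struct x86_emulate_ctxt.
 */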
5474void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5475{
5476 memset(&ctxt->rip_relative, 0,
5477 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5478
5479 ctxt->io_read.pos = 0;
5480 ctxt->io_read.end = 0;
5481 ctxt->mem_read.end = 0;
5482}
5483
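/*
 * Execute a previously decoded instruction: check LOCK, mode, privilege and
 * per-opcode permissions, run intercept hooks, read the memory operands,
 * execute (fastop or ->execute callback, otherwise the opcode switches
 * below), write the results back and advance RIP, with extra bookkeeping for
 * REP string iterations.
 */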
5484int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5485{
5486 const struct x86_emulate_ops *ops = ctxt->ops;
5487 int rc = X86EMUL_CONTINUE;
5488 int saved_dst_type = ctxt->dst.type;
5489 unsigned emul_flags;
5490
5491 ctxt->mem_read.pos = 0;
5492
	/* LOCK prefix is allowed only with some instructions */
5494 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5495 rc = emulate_ud(ctxt);
5496 goto done;
5497 }
5498
5499 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5500 rc = emulate_ud(ctxt);
5501 goto done;
5502 }
5503
5504 emul_flags = ctxt->ops->get_hflags(ctxt);
5505 if (unlikely(ctxt->d &
5506 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5507 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5508 (ctxt->d & Undefined)) {
5509 rc = emulate_ud(ctxt);
5510 goto done;
5511 }
5512
5513 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5514 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5515 rc = emulate_ud(ctxt);
5516 goto done;
5517 }
5518
5519 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5520 rc = emulate_nm(ctxt);
5521 goto done;
5522 }
5523
5524 if (ctxt->d & Mmx) {
5525 rc = flush_pending_x87_faults(ctxt);
5526 if (rc != X86EMUL_CONTINUE)
5527 goto done;
5528
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
5532 fetch_possible_mmx_operand(&ctxt->src);
5533 fetch_possible_mmx_operand(&ctxt->src2);
5534 if (!(ctxt->d & Mov))
5535 fetch_possible_mmx_operand(&ctxt->dst);
5536 }
5537
5538 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5539 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5540 X86_ICPT_PRE_EXCEPT);
5541 if (rc != X86EMUL_CONTINUE)
5542 goto done;
5543 }
5544
		/* Instruction can only be executed in protected mode */
5546 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5547 rc = emulate_ud(ctxt);
5548 goto done;
5549 }
5550
		/* Privileged instruction can be executed only in CPL=0 */
5552 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5553 if (ctxt->d & PrivUD)
5554 rc = emulate_ud(ctxt);
5555 else
5556 rc = emulate_gp(ctxt, 0);
5557 goto done;
5558 }
5559
		/* Do instruction specific permission checks */
5561 if (ctxt->d & CheckPerm) {
5562 rc = ctxt->check_perm(ctxt);
5563 if (rc != X86EMUL_CONTINUE)
5564 goto done;
5565 }
5566
5567 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5568 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5569 X86_ICPT_POST_EXCEPT);
5570 if (rc != X86EMUL_CONTINUE)
5571 goto done;
5572 }
5573
5574 if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
5576 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5577 string_registers_quirk(ctxt);
5578 ctxt->eip = ctxt->_eip;
5579 ctxt->eflags &= ~X86_EFLAGS_RF;
5580 goto done;
5581 }
5582 }
5583 }
5584
5585 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5586 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5587 ctxt->src.valptr, ctxt->src.bytes);
5588 if (rc != X86EMUL_CONTINUE)
5589 goto done;
5590 ctxt->src.orig_val64 = ctxt->src.val64;
5591 }
5592
5593 if (ctxt->src2.type == OP_MEM) {
5594 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5595 &ctxt->src2.val, ctxt->src2.bytes);
5596 if (rc != X86EMUL_CONTINUE)
5597 goto done;
5598 }
5599
5600 if ((ctxt->d & DstMask) == ImplicitOps)
5601 goto special_insn;
5602
5603
5604 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation: avoid the slow emulated read when Mov only overwrites dst */
5606 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5607 &ctxt->dst.val, ctxt->dst.bytes);
5608 if (rc != X86EMUL_CONTINUE) {
5609 if (!(ctxt->d & NoWrite) &&
5610 rc == X86EMUL_PROPAGATE_FAULT &&
5611 ctxt->exception.vector == PF_VECTOR)
5612 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5613 goto done;
5614 }
5615 }
5616
5617 ctxt->dst.orig_val64 = ctxt->dst.val64;
5618
5619special_insn:
5620
5621 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5622 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5623 X86_ICPT_POST_MEMACCESS);
5624 if (rc != X86EMUL_CONTINUE)
5625 goto done;
5626 }
5627
5628 if (ctxt->rep_prefix && (ctxt->d & String))
5629 ctxt->eflags |= X86_EFLAGS_RF;
5630 else
5631 ctxt->eflags &= ~X86_EFLAGS_RF;
5632
5633 if (ctxt->execute) {
5634 if (ctxt->d & Fastop)
5635 rc = fastop(ctxt, ctxt->fop);
5636 else
5637 rc = ctxt->execute(ctxt);
5638 if (rc != X86EMUL_CONTINUE)
5639 goto done;
5640 goto writeback;
5641 }
5642
5643 if (ctxt->opcode_len == 2)
5644 goto twobyte_insn;
5645 else if (ctxt->opcode_len == 3)
5646 goto threebyte_insn;
5647
5648 switch (ctxt->b) {
5649 case 0x70 ... 0x7f:
5650 if (test_cc(ctxt->b, ctxt->eflags))
5651 rc = jmp_rel(ctxt, ctxt->src.val);
5652 break;
5653 case 0x8d:
5654 ctxt->dst.val = ctxt->src.addr.mem.ea;
5655 break;
5656 case 0x90 ... 0x97:
5657 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5658 ctxt->dst.type = OP_NONE;
5659 else
5660 rc = em_xchg(ctxt);
5661 break;
5662 case 0x98:
5663 switch (ctxt->op_bytes) {
5664 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5665 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5666 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5667 }
5668 break;
5669 case 0xcc:
5670 rc = emulate_int(ctxt, 3);
5671 break;
5672 case 0xcd:
5673 rc = emulate_int(ctxt, ctxt->src.val);
5674 break;
5675 case 0xce:
5676 if (ctxt->eflags & X86_EFLAGS_OF)
5677 rc = emulate_int(ctxt, 4);
5678 break;
5679 case 0xe9:
5680 case 0xeb:
5681 rc = jmp_rel(ctxt, ctxt->src.val);
5682 ctxt->dst.type = OP_NONE;
5683 break;
5684 case 0xf4:
5685 ctxt->ops->halt(ctxt);
5686 break;
5687 case 0xf5:
		/* cmc: complement the carry flag */
5689 ctxt->eflags ^= X86_EFLAGS_CF;
5690 break;
5691 case 0xf8:
5692 ctxt->eflags &= ~X86_EFLAGS_CF;
5693 break;
5694 case 0xf9:
5695 ctxt->eflags |= X86_EFLAGS_CF;
5696 break;
5697 case 0xfc:
5698 ctxt->eflags &= ~X86_EFLAGS_DF;
5699 break;
5700 case 0xfd:
5701 ctxt->eflags |= X86_EFLAGS_DF;
5702 break;
5703 default:
5704 goto cannot_emulate;
5705 }
5706
5707 if (rc != X86EMUL_CONTINUE)
5708 goto done;
5709
5710writeback:
5711 if (ctxt->d & SrcWrite) {
5712 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5713 rc = writeback(ctxt, &ctxt->src);
5714 if (rc != X86EMUL_CONTINUE)
5715 goto done;
5716 }
5717 if (!(ctxt->d & NoWrite)) {
5718 rc = writeback(ctxt, &ctxt->dst);
5719 if (rc != X86EMUL_CONTINUE)
5720 goto done;
5721 }
5722
	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
5727 ctxt->dst.type = saved_dst_type;
5728
5729 if ((ctxt->d & SrcMask) == SrcSI)
5730 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5731
5732 if ((ctxt->d & DstMask) == DstDI)
5733 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5734
5735 if (ctxt->rep_prefix && (ctxt->d & String)) {
5736 unsigned int count;
5737 struct read_cache *r = &ctxt->io_read;
5738 if ((ctxt->d & SrcMask) == SrcSI)
5739 count = ctxt->src.count;
5740 else
5741 count = ctxt->dst.count;
5742 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5743
5744 if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
5749 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5750 (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * only after the decoding (to avoid exception
				 * nesting), this can't be done here.
				 */
5756 ctxt->mem_read.end = 0;
5757 writeback_registers(ctxt);
5758 return EMULATION_RESTART;
5759 }
5760 goto done;
5761 }
5762 ctxt->eflags &= ~X86_EFLAGS_RF;
5763 }
5764
5765 ctxt->eip = ctxt->_eip;
5766 if (ctxt->mode != X86EMUL_MODE_PROT64)
5767 ctxt->eip = (u32)ctxt->_eip;
5768
5769done:
5770 if (rc == X86EMUL_PROPAGATE_FAULT) {
5771 WARN_ON(ctxt->exception.vector > 0x1f);
5772 ctxt->have_exception = true;
5773 }
5774 if (rc == X86EMUL_INTERCEPTED)
5775 return EMULATION_INTERCEPTED;
5776
5777 if (rc == X86EMUL_CONTINUE)
5778 writeback_registers(ctxt);
5779
5780 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5781
5782twobyte_insn:
5783 switch (ctxt->b) {
5784 case 0x09:
5785 (ctxt->ops->wbinvd)(ctxt);
5786 break;
5787 case 0x08:
5788 case 0x0d:
5789 case 0x18:
5790 case 0x1f:
5791 break;
5792 case 0x20:
5793 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5794 break;
5795 case 0x21:
5796 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5797 break;
5798 case 0x40 ... 0x4f:
5799 if (test_cc(ctxt->b, ctxt->eflags))
5800 ctxt->dst.val = ctxt->src.val;
5801 else if (ctxt->op_bytes != 4)
5802 ctxt->dst.type = OP_NONE;
5803 break;
5804 case 0x80 ... 0x8f:
5805 if (test_cc(ctxt->b, ctxt->eflags))
5806 rc = jmp_rel(ctxt, ctxt->src.val);
5807 break;
5808 case 0x90 ... 0x9f:
5809 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5810 break;
5811 case 0xb6 ... 0xb7:
5812 ctxt->dst.bytes = ctxt->op_bytes;
5813 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5814 : (u16) ctxt->src.val;
5815 break;
5816 case 0xbe ... 0xbf:
5817 ctxt->dst.bytes = ctxt->op_bytes;
5818 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5819 (s16) ctxt->src.val;
5820 break;
5821 default:
5822 goto cannot_emulate;
5823 }
5824
5825threebyte_insn:
5826
5827 if (rc != X86EMUL_CONTINUE)
5828 goto done;
5829
5830 goto writeback;
5831
5832cannot_emulate:
5833 return EMULATION_FAILED;
5834}
5835
5836void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5837{
5838 invalidate_registers(ctxt);
5839}
5840
5841void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5842{
5843 writeback_registers(ctxt);
5844}
5845
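/*
 * A cached guest physical address may only be reused when the instruction
 * touches a single memory location; REP string ops and TwoMemOp instructions
 * (MOVS/CMPS, memory PUSH/POP) can access more than one.
 */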
5846bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5847{
5848 if (ctxt->rep_prefix && (ctxt->d & String))
5849 return false;
5850
5851 if (ctxt->d & TwoMemOp)
5852 return false;
5853
5854 return true;
5855}
5856