/*
 * KVM x86 instruction emulator: decodes and emulates guest instructions
 * that cannot be executed directly by the CPU, e.g. instructions that
 * access MMIO or I/O ports, or that run while the vCPU is in a state the
 * hardware cannot virtualize.
 */
21#include <linux/kvm_host.h>
22#include "kvm_cache_regs.h"
23#include "kvm_emulate.h"
24#include <linux/stringify.h>
25#include <asm/fpu/api.h>
26#include <asm/debugreg.h>
27#include <asm/nospec-branch.h>
28
29#include "x86.h"
30#include "tss.h"
31#include "mmu.h"
32#include "pmu.h"
33
/*
 * Operand types
 */
37#define OpNone 0ull
38#define OpImplicit 1ull
39#define OpReg 2ull
40#define OpMem 3ull
41#define OpAcc 4ull
42#define OpDI 5ull
43#define OpMem64 6ull
44#define OpImmUByte 7ull
45#define OpDX 8ull
46#define OpCL 9ull
47#define OpImmByte 10ull
48#define OpOne 11ull
49#define OpImm 12ull
50#define OpMem16 13ull
51#define OpMem32 14ull
52#define OpImmU 15ull
53#define OpSI 16ull
54#define OpImmFAddr 17ull
55#define OpMemFAddr 18ull
56#define OpImmU16 19ull
57#define OpES 20ull
58#define OpCS 21ull
59#define OpSS 22ull
60#define OpDS 23ull
61#define OpFS 24ull
62#define OpGS 25ull
63#define OpMem8 26ull
64#define OpImm64 27ull
65#define OpXLat 28ull
66#define OpAccLo 29ull
67#define OpAccHi 30ull
68
69#define OpBits 5
70#define OpMask ((1ull << OpBits) - 1)
71
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require an extra segment override prefix.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
82#define ByteOp (1<<0)
/* Destination operand type. */
84#define DstShift 1
85#define ImplicitOps (OpImplicit << DstShift)
86#define DstReg (OpReg << DstShift)
87#define DstMem (OpMem << DstShift)
88#define DstAcc (OpAcc << DstShift)
89#define DstDI (OpDI << DstShift)
90#define DstMem64 (OpMem64 << DstShift)
91#define DstMem16 (OpMem16 << DstShift)
92#define DstImmUByte (OpImmUByte << DstShift)
93#define DstDX (OpDX << DstShift)
94#define DstAccLo (OpAccLo << DstShift)
95#define DstMask (OpMask << DstShift)
/* Source operand type. */
97#define SrcShift 6
98#define SrcNone (OpNone << SrcShift)
99#define SrcReg (OpReg << SrcShift)
100#define SrcMem (OpMem << SrcShift)
101#define SrcMem16 (OpMem16 << SrcShift)
102#define SrcMem32 (OpMem32 << SrcShift)
103#define SrcImm (OpImm << SrcShift)
104#define SrcImmByte (OpImmByte << SrcShift)
105#define SrcOne (OpOne << SrcShift)
106#define SrcImmUByte (OpImmUByte << SrcShift)
107#define SrcImmU (OpImmU << SrcShift)
108#define SrcSI (OpSI << SrcShift)
109#define SrcXLat (OpXLat << SrcShift)
110#define SrcImmFAddr (OpImmFAddr << SrcShift)
111#define SrcMemFAddr (OpMemFAddr << SrcShift)
112#define SrcAcc (OpAcc << SrcShift)
113#define SrcImmU16 (OpImmU16 << SrcShift)
114#define SrcImm64 (OpImm64 << SrcShift)
115#define SrcDX (OpDX << SrcShift)
116#define SrcMem8 (OpMem8 << SrcShift)
117#define SrcAccHi (OpAccHi << SrcShift)
118#define SrcMask (OpMask << SrcShift)
119#define BitOp (1<<11)
120#define MemAbs (1<<12)
121#define String (1<<13)
122#define Stack (1<<14)
123#define GroupMask (7<<15)
124#define Group (1<<15)
125#define GroupDual (2<<15)
126#define Prefix (3<<15)
127#define RMExt (4<<15)
128#define Escape (5<<15)
129#define InstrDual (6<<15)
130#define ModeDual (7<<15)
131#define Sse (1<<18)
/* Generic ModRM decode. */
133#define ModRM (1<<19)
/* Destination is only written; never read. */
135#define Mov (1<<20)
/* Misc flags */
137#define Prot (1<<21)
138#define EmulateOnUD (1<<22)
139#define NoAccess (1<<23)
140#define Op3264 (1<<24)
141#define Undefined (1<<25)
142#define Lock (1<<26)
143#define Priv (1<<27)
144#define No64 (1<<28)
145#define PageTable (1 << 29)
146#define NotImpl (1 << 30)
/* Source 2 operand type */
148#define Src2Shift (31)
149#define Src2None (OpNone << Src2Shift)
150#define Src2Mem (OpMem << Src2Shift)
151#define Src2CL (OpCL << Src2Shift)
152#define Src2ImmByte (OpImmByte << Src2Shift)
153#define Src2One (OpOne << Src2Shift)
154#define Src2Imm (OpImm << Src2Shift)
155#define Src2ES (OpES << Src2Shift)
156#define Src2CS (OpCS << Src2Shift)
157#define Src2SS (OpSS << Src2Shift)
158#define Src2DS (OpDS << Src2Shift)
159#define Src2FS (OpFS << Src2Shift)
160#define Src2GS (OpGS << Src2Shift)
161#define Src2Mask (OpMask << Src2Shift)
162#define Mmx ((u64)1 << 40)
163#define AlignMask ((u64)7 << 41)
164#define Aligned ((u64)1 << 41)
165#define Unaligned ((u64)2 << 41)
166#define Avx ((u64)3 << 41)
167#define Aligned16 ((u64)4 << 41)
168#define Fastop ((u64)1 << 44)
169#define NoWrite ((u64)1 << 45)
170#define SrcWrite ((u64)1 << 46)
171#define NoMod ((u64)1 << 47)
172#define Intercept ((u64)1 << 48)
173#define CheckPerm ((u64)1 << 49)
174#define PrivUD ((u64)1 << 51)
175#define NearBranch ((u64)1 << 52)
176#define No16 ((u64)1 << 53)
177#define IncSP ((u64)1 << 54)
178#define TwoMemOp ((u64)1 << 55)
179
180#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
181
182#define X2(x...) x, x
183#define X3(x...) X2(x), x
184#define X4(x...) X2(x), X2(x)
185#define X5(x...) X4(x), x
186#define X6(x...) X4(x), X2(x)
187#define X7(x...) X4(x), X3(x)
188#define X8(x...) X4(x), X4(x)
189#define X16(x...) X8(x), X8(x)
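
/*
 * The X2..X16 helpers above simply repeat their argument list, e.g. X4(a)
 * expands to "a, a, a, a".  They are used further down to fill runs of
 * identical entries in the opcode decode tables without spelling every
 * entry out by hand.
 */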
190
191#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
192#define FASTOP_SIZE 8
193
194struct opcode {
195 u64 flags : 56;
196 u64 intercept : 8;
197 union {
198 int (*execute)(struct x86_emulate_ctxt *ctxt);
199 const struct opcode *group;
200 const struct group_dual *gdual;
201 const struct gprefix *gprefix;
202 const struct escape *esc;
203 const struct instr_dual *idual;
204 const struct mode_dual *mdual;
205 void (*fastop)(struct fastop *fake);
206 } u;
207 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
208};
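
/*
 * Illustrative sketch (not an entry from the real decode tables): a table
 * slot for a hypothetical handler em_example() operating on "r/m, reg"
 * with a LOCK-able memory destination could be built as
 *
 *	{ .flags = ByteOp | DstMem | SrcReg | ModRM | Lock,
 *	  .u.execute = em_example }
 *
 * i.e. the operand kinds defined above are packed into the 56-bit 'flags'
 * word via the DstShift/SrcShift/Src2Shift fields.
 */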
209
210struct group_dual {
211 struct opcode mod012[8];
212 struct opcode mod3[8];
213};
214
215struct gprefix {
216 struct opcode pfx_no;
217 struct opcode pfx_66;
218 struct opcode pfx_f2;
219 struct opcode pfx_f3;
220};
221
222struct escape {
223 struct opcode op[8];
224 struct opcode high[64];
225};
226
227struct instr_dual {
228 struct opcode mod012;
229 struct opcode mod3;
230};
231
232struct mode_dual {
233 struct opcode mode32;
234 struct opcode mode64;
235};
236
237#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
238
239enum x86_transfer_type {
240 X86_TRANSFER_NONE,
241 X86_TRANSFER_CALL_JMP,
242 X86_TRANSFER_RET,
243 X86_TRANSFER_TASK_SWITCH,
244};
245
246static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
247{
248 if (!(ctxt->regs_valid & (1 << nr))) {
249 ctxt->regs_valid |= 1 << nr;
250 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
251 }
252 return ctxt->_regs[nr];
253}
254
255static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
256{
257 ctxt->regs_valid |= 1 << nr;
258 ctxt->regs_dirty |= 1 << nr;
259 return &ctxt->_regs[nr];
260}
261
262static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
263{
264 reg_read(ctxt, nr);
265 return reg_write(ctxt, nr);
266}
267
268static void writeback_registers(struct x86_emulate_ctxt *ctxt)
269{
270 unsigned reg;
271
272 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
273 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
274}
275
276static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
277{
278 ctxt->regs_dirty = 0;
279 ctxt->regs_valid = 0;
280}
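
/*
 * The helpers above implement a small GPR cache: reg_read() lazily pulls a
 * register from ops->read_gpr() into _regs[] and marks it valid, reg_write()
 * and reg_rmw() additionally mark it dirty, writeback_registers() flushes
 * only the dirty registers back through ops->write_gpr(), and
 * invalidate_registers() throws the whole cache away.
 */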

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
286#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
287 X86_EFLAGS_PF|X86_EFLAGS_CF)
288
289#ifdef CONFIG_X86_64
290#define ON64(x) x
291#else
292#define ON64(x)
293#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 */
308static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
309
310#define __FOP_FUNC(name) \
311 ".align " __stringify(FASTOP_SIZE) " \n\t" \
312 ".type " name ", @function \n\t" \
313 name ":\n\t"
314
315#define FOP_FUNC(name) \
316 __FOP_FUNC(#name)
317
318#define __FOP_RET(name) \
319 "ret \n\t" \
320 ".size " name ", .-" name "\n\t"
321
322#define FOP_RET(name) \
323 __FOP_RET(#name)
324
325#define FOP_START(op) \
326 extern void em_##op(struct fastop *fake); \
327 asm(".pushsection .text, \"ax\" \n\t" \
328 ".global em_" #op " \n\t" \
329 ".align " __stringify(FASTOP_SIZE) " \n\t" \
330 "em_" #op ":\n\t"
331
332#define FOP_END \
333 ".popsection")
334
335#define __FOPNOP(name) \
336 __FOP_FUNC(name) \
337 __FOP_RET(name)
338
339#define FOPNOP() \
340 __FOPNOP(__stringify(__UNIQUE_ID(nop)))
341
342#define FOP1E(op, dst) \
343 __FOP_FUNC(#op "_" #dst) \
344 "10: " #op " %" #dst " \n\t" \
345 __FOP_RET(#op "_" #dst)
346
347#define FOP1EEX(op, dst) \
348 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
349
350#define FASTOP1(op) \
351 FOP_START(op) \
352 FOP1E(op##b, al) \
353 FOP1E(op##w, ax) \
354 FOP1E(op##l, eax) \
355 ON64(FOP1E(op##q, rax)) \
356 FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
359#define FASTOP1SRC2(op, name) \
360 FOP_START(name) \
361 FOP1E(op, cl) \
362 FOP1E(op, cx) \
363 FOP1E(op, ecx) \
364 ON64(FOP1E(op, rcx)) \
365 FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
368#define FASTOP1SRC2EX(op, name) \
369 FOP_START(name) \
370 FOP1EEX(op, cl) \
371 FOP1EEX(op, cx) \
372 FOP1EEX(op, ecx) \
373 ON64(FOP1EEX(op, rcx)) \
374 FOP_END
375
376#define FOP2E(op, dst, src) \
377 __FOP_FUNC(#op "_" #dst "_" #src) \
378 #op " %" #src ", %" #dst " \n\t" \
379 __FOP_RET(#op "_" #dst "_" #src)
380
381#define FASTOP2(op) \
382 FOP_START(op) \
383 FOP2E(op##b, al, dl) \
384 FOP2E(op##w, ax, dx) \
385 FOP2E(op##l, eax, edx) \
386 ON64(FOP2E(op##q, rax, rdx)) \
387 FOP_END

/* 2 operand, word only */
390#define FASTOP2W(op) \
391 FOP_START(op) \
392 FOPNOP() \
393 FOP2E(op##w, ax, dx) \
394 FOP2E(op##l, eax, edx) \
395 ON64(FOP2E(op##q, rax, rdx)) \
396 FOP_END

/* 2 operand, src is CL */
399#define FASTOP2CL(op) \
400 FOP_START(op) \
401 FOP2E(op##b, al, cl) \
402 FOP2E(op##w, ax, cl) \
403 FOP2E(op##l, eax, cl) \
404 ON64(FOP2E(op##q, rax, cl)) \
405 FOP_END

/* 2 operand, src and dest are reversed */
408#define FASTOP2R(op, name) \
409 FOP_START(name) \
410 FOP2E(op##b, dl, al) \
411 FOP2E(op##w, dx, ax) \
412 FOP2E(op##l, edx, eax) \
413 ON64(FOP2E(op##q, rdx, rax)) \
414 FOP_END
415
416#define FOP3E(op, dst, src, src2) \
417 __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
418 #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
419 __FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
422#define FASTOP3WCL(op) \
423 FOP_START(op) \
424 FOPNOP() \
425 FOP3E(op##w, ax, dx, cl) \
426 FOP3E(op##l, eax, edx, cl) \
427 ON64(FOP3E(op##q, rax, rdx, cl)) \
428 FOP_END

/* Special case for SETcc - 1 instruction per cc */
431#define FOP_SETCC(op) \
432 ".align 4 \n\t" \
433 ".type " #op ", @function \n\t" \
434 #op ": \n\t" \
435 #op " %al \n\t" \
436 __FOP_RET(#op)
437
438asm(".pushsection .fixup, \"ax\"\n"
439 ".global kvm_fastop_exception \n"
440 "kvm_fastop_exception: xor %esi, %esi; ret\n"
441 ".popsection");
442
443FOP_START(setcc)
444FOP_SETCC(seto)
445FOP_SETCC(setno)
446FOP_SETCC(setc)
447FOP_SETCC(setnc)
448FOP_SETCC(setz)
449FOP_SETCC(setnz)
450FOP_SETCC(setbe)
451FOP_SETCC(setnbe)
452FOP_SETCC(sets)
453FOP_SETCC(setns)
454FOP_SETCC(setp)
455FOP_SETCC(setnp)
456FOP_SETCC(setl)
457FOP_SETCC(setnl)
458FOP_SETCC(setle)
459FOP_SETCC(setnle)
460FOP_END;
461
462FOP_START(salc)
463FOP_FUNC(salc)
464"pushf; sbb %al, %al; popf \n\t"
465FOP_RET(salc)
466FOP_END;
467

/*
 * Execute a single asm instruction with an exception fixup: if the
 * instruction faults, _fault is set and X86EMUL_UNHANDLEABLE is returned
 * instead of X86EMUL_CONTINUE.
 */
472#define asm_safe(insn, inoutclob...) \
473({ \
474 int _fault = 0; \
475 \
476 asm volatile("1:" insn "\n" \
477 "2:\n" \
478 ".pushsection .fixup, \"ax\"\n" \
479 "3: movl $1, %[_fault]\n" \
480 " jmp 2b\n" \
481 ".popsection\n" \
482 _ASM_EXTABLE(1b, 3b) \
483 : [_fault] "+qm"(_fault) inoutclob ); \
484 \
485 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
486})
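
/*
 * Minimal usage sketch (illustrative only): running a single instruction
 * that may fault, with no operands, looks like
 *
 *	rc = asm_safe("fwait");
 *
 * Operands and clobbers, when needed, are appended through the variadic
 * inoutclob arguments after the instruction string.
 */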
487
488static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
489 enum x86_intercept intercept,
490 enum x86_intercept_stage stage)
491{
492 struct x86_instruction_info info = {
493 .intercept = intercept,
494 .rep_prefix = ctxt->rep_prefix,
495 .modrm_mod = ctxt->modrm_mod,
496 .modrm_reg = ctxt->modrm_reg,
497 .modrm_rm = ctxt->modrm_rm,
498 .src_val = ctxt->src.val64,
499 .dst_val = ctxt->dst.val64,
500 .src_bytes = ctxt->src.bytes,
501 .dst_bytes = ctxt->dst.bytes,
502 .ad_bytes = ctxt->ad_bytes,
503 .next_rip = ctxt->eip,
504 };
505
506 return ctxt->ops->intercept(ctxt, &info, stage);
507}
508
509static void assign_masked(ulong *dest, ulong src, ulong mask)
510{
511 *dest = (*dest & ~mask) | (src & mask);
512}
513
514static void assign_register(unsigned long *reg, u64 val, int bytes)
515{
516
517 switch (bytes) {
518 case 1:
519 *(u8 *)reg = (u8)val;
520 break;
521 case 2:
522 *(u16 *)reg = (u16)val;
523 break;
524 case 4:
525 *reg = (u32)val;
526 break;
527 case 8:
528 *reg = val;
529 break;
530 }
531}
532
533static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
534{
535 return (1UL << (ctxt->ad_bytes << 3)) - 1;
536}
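
/*
 * Example: ad_bytes == 2 yields a mask of 0xffff, ad_bytes == 4 yields
 * 0xffffffff.  address_mask() below only applies this mask when ad_bytes
 * differs from the native word size, so in practice the shift here stays
 * below the full width of unsigned long.
 */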
537
538static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
539{
540 u16 sel;
541 struct desc_struct ss;
542
543 if (ctxt->mode == X86EMUL_MODE_PROT64)
544 return ~0UL;
545 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
546 return ~0U >> ((ss.d ^ 1) * 16);
547}
548
549static int stack_size(struct x86_emulate_ctxt *ctxt)
550{
551 return (__fls(stack_mask(ctxt)) + 1) >> 3;
552}
553
554
555static inline unsigned long
556address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
557{
558 if (ctxt->ad_bytes == sizeof(unsigned long))
559 return reg;
560 else
561 return reg & ad_mask(ctxt);
562}
563
564static inline unsigned long
565register_address(struct x86_emulate_ctxt *ctxt, int reg)
566{
567 return address_mask(ctxt, reg_read(ctxt, reg));
568}
569
570static void masked_increment(ulong *reg, ulong mask, int inc)
571{
572 assign_masked(reg, *reg + inc, mask);
573}
574
575static inline void
576register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
577{
578 ulong *preg = reg_rmw(ctxt, reg);
579
580 assign_register(preg, *preg + inc, ctxt->ad_bytes);
581}
582
583static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
584{
585 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
586}
587
588static u32 desc_limit_scaled(struct desc_struct *desc)
589{
590 u32 limit = get_desc_limit(desc);
591
592 return desc->g ? (limit << 12) | 0xfff : limit;
593}
594
595static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
596{
597 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
598 return 0;
599
600 return ctxt->ops->get_cached_segment_base(ctxt, seg);
601}
602
603static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
604 u32 error, bool valid)
605{
606 WARN_ON(vec > 0x1f);
607 ctxt->exception.vector = vec;
608 ctxt->exception.error_code = error;
609 ctxt->exception.error_code_valid = valid;
610 return X86EMUL_PROPAGATE_FAULT;
611}
612
613static int emulate_db(struct x86_emulate_ctxt *ctxt)
614{
615 return emulate_exception(ctxt, DB_VECTOR, 0, false);
616}
617
618static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
619{
620 return emulate_exception(ctxt, GP_VECTOR, err, true);
621}
622
623static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
624{
625 return emulate_exception(ctxt, SS_VECTOR, err, true);
626}
627
628static int emulate_ud(struct x86_emulate_ctxt *ctxt)
629{
630 return emulate_exception(ctxt, UD_VECTOR, 0, false);
631}
632
633static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
634{
635 return emulate_exception(ctxt, TS_VECTOR, err, true);
636}
637
638static int emulate_de(struct x86_emulate_ctxt *ctxt)
639{
640 return emulate_exception(ctxt, DE_VECTOR, 0, false);
641}
642
643static int emulate_nm(struct x86_emulate_ctxt *ctxt)
644{
645 return emulate_exception(ctxt, NM_VECTOR, 0, false);
646}
647
648static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
649{
650 u16 selector;
651 struct desc_struct desc;
652
653 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
654 return selector;
655}
656
657static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
658 unsigned seg)
659{
660 u16 dummy;
661 u32 base3;
662 struct desc_struct desc;
663
664 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
665 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
666}
667
668static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
669{
670 return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
671}
672
673static inline bool emul_is_noncanonical_address(u64 la,
674 struct x86_emulate_ctxt *ctxt)
675{
676 return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
677}
678

/*
 * x86 defines three classes of vector instructions: explicitly aligned,
 * explicitly unaligned, and the rest, which change behaviour depending on
 * whether they're AVX encoded or not; the AlignMask field above records
 * which class an opcode belongs to, with Aligned16 forcing a fixed 16-byte
 * alignment check independent of the operand size.
 */
688static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
689{
690 u64 alignment = ctxt->d & AlignMask;
691
692 if (likely(size < 16))
693 return 1;
694
695 switch (alignment) {
696 case Unaligned:
697 case Avx:
698 return 1;
699 case Aligned16:
700 return 16;
701 case Aligned:
702 default:
703 return size;
704 }
705}
706
707static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
708 struct segmented_address addr,
709 unsigned *max_size, unsigned size,
710 bool write, bool fetch,
711 enum x86emul_mode mode, ulong *linear)
712{
713 struct desc_struct desc;
714 bool usable;
715 ulong la;
716 u32 lim;
717 u16 sel;
718 u8 va_bits;
719
720 la = seg_base(ctxt, addr.seg) + addr.ea;
721 *max_size = 0;
722 switch (mode) {
723 case X86EMUL_MODE_PROT64:
724 *linear = la;
725 va_bits = ctxt_virt_addr_bits(ctxt);
726 if (get_canonical(la, va_bits) != la)
727 goto bad;
728
729 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
730 if (size > *max_size)
731 goto bad;
732 break;
733 default:
734 *linear = la = (u32)la;
735 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
736 addr.seg);
737 if (!usable)
738 goto bad;
739
740 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
741 || !(desc.type & 2)) && write)
742 goto bad;
743
744 if (!fetch && (desc.type & 8) && !(desc.type & 2))
745 goto bad;
746 lim = desc_limit_scaled(&desc);
747 if (!(desc.type & 8) && (desc.type & 4)) {
 /* expand-down segment */
749 if (addr.ea <= lim)
750 goto bad;
751 lim = desc.d ? 0xffffffff : 0xffff;
752 }
753 if (addr.ea > lim)
754 goto bad;
755 if (lim == 0xffffffff)
756 *max_size = ~0u;
757 else {
758 *max_size = (u64)lim + 1 - addr.ea;
759 if (size > *max_size)
760 goto bad;
761 }
762 break;
763 }
764 if (la & (insn_alignment(ctxt, size) - 1))
765 return emulate_gp(ctxt, 0);
766 return X86EMUL_CONTINUE;
767bad:
768 if (addr.seg == VCPU_SREG_SS)
769 return emulate_ss(ctxt, 0);
770 else
771 return emulate_gp(ctxt, 0);
772}
773
774static int linearize(struct x86_emulate_ctxt *ctxt,
775 struct segmented_address addr,
776 unsigned size, bool write,
777 ulong *linear)
778{
779 unsigned max_size;
780 return __linearize(ctxt, addr, &max_size, size, write, false,
781 ctxt->mode, linear);
782}
783
784static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
785 enum x86emul_mode mode)
786{
787 ulong linear;
788 int rc;
789 unsigned max_size;
790 struct segmented_address addr = { .seg = VCPU_SREG_CS,
791 .ea = dst };
792
793 if (ctxt->op_bytes != sizeof(unsigned long))
794 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
795 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
796 if (rc == X86EMUL_CONTINUE)
797 ctxt->_eip = addr.ea;
798 return rc;
799}
800
801static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
802{
803 return assign_eip(ctxt, dst, ctxt->mode);
804}
805
806static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
807 const struct desc_struct *cs_desc)
808{
809 enum x86emul_mode mode = ctxt->mode;
810 int rc;
811
812#ifdef CONFIG_X86_64
813 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
814 if (cs_desc->l) {
815 u64 efer = 0;
816
817 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
818 if (efer & EFER_LMA)
819 mode = X86EMUL_MODE_PROT64;
820 } else
821 mode = X86EMUL_MODE_PROT32;
822 }
823#endif
824 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
825 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
826 rc = assign_eip(ctxt, dst, mode);
827 if (rc == X86EMUL_CONTINUE)
828 ctxt->mode = mode;
829 return rc;
830}
831
832static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
833{
834 return assign_eip_near(ctxt, ctxt->_eip + rel);
835}
836
837static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
838 void *data, unsigned size)
839{
840 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
841}
842
843static int linear_write_system(struct x86_emulate_ctxt *ctxt,
844 ulong linear, void *data,
845 unsigned int size)
846{
847 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
848}
849
850static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
851 struct segmented_address addr,
852 void *data,
853 unsigned size)
854{
855 int rc;
856 ulong linear;
857
858 rc = linearize(ctxt, addr, size, false, &linear);
859 if (rc != X86EMUL_CONTINUE)
860 return rc;
861 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
862}
863
864static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
865 struct segmented_address addr,
866 void *data,
867 unsigned int size)
868{
869 int rc;
870 ulong linear;
871
872 rc = linearize(ctxt, addr, size, true, &linear);
873 if (rc != X86EMUL_CONTINUE)
874 return rc;
875 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
876}
877

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
882static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
883{
884 int rc;
885 unsigned size, max_size;
886 unsigned long linear;
887 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
888 struct segmented_address addr = { .seg = VCPU_SREG_CS,
889 .ea = ctxt->eip + cur_size };

 /*
  * We do not know exactly how many bytes will be needed, and
  * __linearize is expensive, so fetch as much as possible.  We
  * just have to avoid going beyond the 15 byte limit, the end
  * of the segment, or the end of the page.
  *
  * __linearize is called with size 0 so that it does not do any
  * boundary check itself.  Instead, we use max_size to check
  * against op_size.
  */
901 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
902 &linear);
903 if (unlikely(rc != X86EMUL_CONTINUE))
904 return rc;
905
906 size = min_t(unsigned, 15UL ^ cur_size, max_size);
907 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

 /*
  * One instruction can only straddle two pages, and a prior fetch has
  * already filled the cache up to the previous page boundary; so if
  * fewer than op_size bytes are available here, the instruction has run
  * into the 15-byte limit or the segment limit and gets #GP.
  */
915 if (unlikely(size < op_size))
916 return emulate_gp(ctxt, 0);
917
918 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
919 size, &ctxt->exception);
920 if (unlikely(rc != X86EMUL_CONTINUE))
921 return rc;
922 ctxt->fetch.end += size;
923 return X86EMUL_CONTINUE;
924}
925
926static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
927 unsigned size)
928{
929 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
930
931 if (unlikely(done_size < size))
932 return __do_insn_fetch_bytes(ctxt, size - done_size);
933 else
934 return X86EMUL_CONTINUE;
935}
936
/* Fetch next part of the instruction being emulated. */
938#define insn_fetch(_type, _ctxt) \
939({ _type _x; \
940 \
941 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
942 if (rc != X86EMUL_CONTINUE) \
943 goto done; \
944 ctxt->_eip += sizeof(_type); \
945 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
946 ctxt->fetch.ptr += sizeof(_type); \
947 _x; \
948})
949
950#define insn_fetch_arr(_arr, _size, _ctxt) \
951({ \
952 rc = do_insn_fetch_bytes(_ctxt, _size); \
953 if (rc != X86EMUL_CONTINUE) \
954 goto done; \
955 ctxt->_eip += (_size); \
956 memcpy(_arr, ctxt->fetch.ptr, _size); \
957 ctxt->fetch.ptr += (_size); \
958})
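
/*
 * Both fetch macros rely on a local "int rc" and a "done:" label in the
 * calling function, as in decode_modrm() and decode_abs() below; a typical
 * use is fetching a single opcode, ModRM or immediate byte, e.g.
 * "sib = insn_fetch(u8, ctxt);".
 */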
959

/*
 * Return a pointer to the guest register addressed by the 'reg' portion of
 * a ModRM byte.  With no REX prefix, byte operations on registers 4-7
 * address the legacy high-byte registers AH, CH, DH and BH instead.
 */
965static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
966 int byteop)
967{
968 void *p;
969 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
970
971 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
972 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
973 else
974 p = reg_rmw(ctxt, modrm_reg);
975 return p;
976}
977
978static int read_descriptor(struct x86_emulate_ctxt *ctxt,
979 struct segmented_address addr,
980 u16 *size, unsigned long *address, int op_bytes)
981{
982 int rc;
983
984 if (op_bytes == 2)
985 op_bytes = 3;
986 *address = 0;
987 rc = segmented_read_std(ctxt, addr, size, 2);
988 if (rc != X86EMUL_CONTINUE)
989 return rc;
990 addr.ea += 2;
991 rc = segmented_read_std(ctxt, addr, address, op_bytes);
992 return rc;
993}
994
995FASTOP2(add);
996FASTOP2(or);
997FASTOP2(adc);
998FASTOP2(sbb);
999FASTOP2(and);
1000FASTOP2(sub);
1001FASTOP2(xor);
1002FASTOP2(cmp);
1003FASTOP2(test);
1004
1005FASTOP1SRC2(mul, mul_ex);
1006FASTOP1SRC2(imul, imul_ex);
1007FASTOP1SRC2EX(div, div_ex);
1008FASTOP1SRC2EX(idiv, idiv_ex);
1009
1010FASTOP3WCL(shld);
1011FASTOP3WCL(shrd);
1012
1013FASTOP2W(imul);
1014
1015FASTOP1(not);
1016FASTOP1(neg);
1017FASTOP1(inc);
1018FASTOP1(dec);
1019
1020FASTOP2CL(rol);
1021FASTOP2CL(ror);
1022FASTOP2CL(rcl);
1023FASTOP2CL(rcr);
1024FASTOP2CL(shl);
1025FASTOP2CL(shr);
1026FASTOP2CL(sar);
1027
1028FASTOP2W(bsf);
1029FASTOP2W(bsr);
1030FASTOP2W(bt);
1031FASTOP2W(bts);
1032FASTOP2W(btr);
1033FASTOP2W(btc);
1034
1035FASTOP2(xadd);
1036
1037FASTOP2R(cmp, cmp_r);
1038
1039static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1040{
 /* If src is zero, do not writeback, but update flags */
1042 if (ctxt->src.val == 0)
1043 ctxt->dst.type = OP_NONE;
1044 return fastop(ctxt, em_bsf);
1045}
1046
1047static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1048{
 /* If src is zero, do not writeback, but update flags */
1050 if (ctxt->src.val == 0)
1051 ctxt->dst.type = OP_NONE;
1052 return fastop(ctxt, em_bsr);
1053}
1054
1055static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1056{
1057 u8 rc;
1058 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1059
1060 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1061 asm("push %[flags]; popf; " CALL_NOSPEC
1062 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1063 return rc;
1064}
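
/*
 * test_cc() loads the guest's arithmetic flags into the host RFLAGS and
 * then jumps into the setcc fastop table: each FOP_SETCC stub is aligned
 * to 4 bytes, so stub number "condition & 0xf" lives at em_setcc + 4 * cc.
 * E.g. condition 0x4 ("e"/"z") selects the setz stub, which returns 1 in
 * %al iff ZF is set.
 */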
1065
1066static void fetch_register_operand(struct operand *op)
1067{
1068 switch (op->bytes) {
1069 case 1:
1070 op->val = *(u8 *)op->addr.reg;
1071 break;
1072 case 2:
1073 op->val = *(u16 *)op->addr.reg;
1074 break;
1075 case 4:
1076 op->val = *(u32 *)op->addr.reg;
1077 break;
1078 case 8:
1079 op->val = *(u64 *)op->addr.reg;
1080 break;
1081 }
1082}
1083
1084static void emulator_get_fpu(void)
1085{
1086 fpregs_lock();
1087
1088 fpregs_assert_state_consistent();
1089 if (test_thread_flag(TIF_NEED_FPU_LOAD))
1090 switch_fpu_return();
1091}
1092
1093static void emulator_put_fpu(void)
1094{
1095 fpregs_unlock();
1096}
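
/*
 * emulator_get_fpu() pins the current task's FPU context: it disables
 * preemption via fpregs_lock() and, if the CPU registers do not currently
 * hold this task's state (TIF_NEED_FPU_LOAD), restores them, so the raw
 * movdqa/movq/x87 instructions below operate on the guest's register
 * contents rather than stale values.
 */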
1097
1098static void read_sse_reg(sse128_t *data, int reg)
1099{
1100 emulator_get_fpu();
1101 switch (reg) {
1102 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1103 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1104 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1105 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1106 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1107 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1108 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1109 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1110#ifdef CONFIG_X86_64
1111 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1112 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1113 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1114 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1115 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1116 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1117 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1118 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1119#endif
1120 default: BUG();
1121 }
1122 emulator_put_fpu();
1123}
1124
1125static void write_sse_reg(sse128_t *data, int reg)
1126{
1127 emulator_get_fpu();
1128 switch (reg) {
1129 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1130 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1131 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1132 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1133 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1134 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1135 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1136 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1137#ifdef CONFIG_X86_64
1138 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1139 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1140 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1141 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1142 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1143 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1144 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1145 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1146#endif
1147 default: BUG();
1148 }
1149 emulator_put_fpu();
1150}
1151
1152static void read_mmx_reg(u64 *data, int reg)
1153{
1154 emulator_get_fpu();
1155 switch (reg) {
1156 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1157 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1158 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1159 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1160 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1161 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1162 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1163 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1164 default: BUG();
1165 }
1166 emulator_put_fpu();
1167}
1168
1169static void write_mmx_reg(u64 *data, int reg)
1170{
1171 emulator_get_fpu();
1172 switch (reg) {
1173 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1174 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1175 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1176 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1177 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1178 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1179 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1180 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1181 default: BUG();
1182 }
1183 emulator_put_fpu();
1184}
1185
1186static int em_fninit(struct x86_emulate_ctxt *ctxt)
1187{
1188 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1189 return emulate_nm(ctxt);
1190
1191 emulator_get_fpu();
1192 asm volatile("fninit");
1193 emulator_put_fpu();
1194 return X86EMUL_CONTINUE;
1195}
1196
1197static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1198{
1199 u16 fcw;
1200
1201 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1202 return emulate_nm(ctxt);
1203
1204 emulator_get_fpu();
1205 asm volatile("fnstcw %0": "+m"(fcw));
1206 emulator_put_fpu();
1207
1208 ctxt->dst.val = fcw;
1209
1210 return X86EMUL_CONTINUE;
1211}
1212
1213static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1214{
1215 u16 fsw;
1216
1217 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1218 return emulate_nm(ctxt);
1219
1220 emulator_get_fpu();
1221 asm volatile("fnstsw %0": "+m"(fsw));
1222 emulator_put_fpu();
1223
1224 ctxt->dst.val = fsw;
1225
1226 return X86EMUL_CONTINUE;
1227}
1228
1229static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1230 struct operand *op)
1231{
1232 unsigned reg = ctxt->modrm_reg;
1233
1234 if (!(ctxt->d & ModRM))
1235 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1236
1237 if (ctxt->d & Sse) {
1238 op->type = OP_XMM;
1239 op->bytes = 16;
1240 op->addr.xmm = reg;
1241 read_sse_reg(&op->vec_val, reg);
1242 return;
1243 }
1244 if (ctxt->d & Mmx) {
1245 reg &= 7;
1246 op->type = OP_MM;
1247 op->bytes = 8;
1248 op->addr.mm = reg;
1249 return;
1250 }
1251
1252 op->type = OP_REG;
1253 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1254 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1255
1256 fetch_register_operand(op);
1257 op->orig_val = op->val;
1258}
1259
1260static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1261{
1262 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1263 ctxt->modrm_seg = VCPU_SREG_SS;
1264}
1265
1266static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1267 struct operand *op)
1268{
1269 u8 sib;
1270 int index_reg, base_reg, scale;
1271 int rc = X86EMUL_CONTINUE;
1272 ulong modrm_ea = 0;
1273
1274 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1275 index_reg = (ctxt->rex_prefix << 2) & 8;
1276 base_reg = (ctxt->rex_prefix << 3) & 8;
1277
1278 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1279 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1280 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1281 ctxt->modrm_seg = VCPU_SREG_DS;
1282
1283 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1284 op->type = OP_REG;
1285 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1286 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1287 ctxt->d & ByteOp);
1288 if (ctxt->d & Sse) {
1289 op->type = OP_XMM;
1290 op->bytes = 16;
1291 op->addr.xmm = ctxt->modrm_rm;
1292 read_sse_reg(&op->vec_val, ctxt->modrm_rm);
1293 return rc;
1294 }
1295 if (ctxt->d & Mmx) {
1296 op->type = OP_MM;
1297 op->bytes = 8;
1298 op->addr.mm = ctxt->modrm_rm & 7;
1299 return rc;
1300 }
1301 fetch_register_operand(op);
1302 return rc;
1303 }
1304
1305 op->type = OP_MEM;
1306
1307 if (ctxt->ad_bytes == 2) {
1308 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1309 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1310 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1311 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1312
 /* 16-bit ModR/M decode. */
1314 switch (ctxt->modrm_mod) {
1315 case 0:
1316 if (ctxt->modrm_rm == 6)
1317 modrm_ea += insn_fetch(u16, ctxt);
1318 break;
1319 case 1:
1320 modrm_ea += insn_fetch(s8, ctxt);
1321 break;
1322 case 2:
1323 modrm_ea += insn_fetch(u16, ctxt);
1324 break;
1325 }
1326 switch (ctxt->modrm_rm) {
1327 case 0:
1328 modrm_ea += bx + si;
1329 break;
1330 case 1:
1331 modrm_ea += bx + di;
1332 break;
1333 case 2:
1334 modrm_ea += bp + si;
1335 break;
1336 case 3:
1337 modrm_ea += bp + di;
1338 break;
1339 case 4:
1340 modrm_ea += si;
1341 break;
1342 case 5:
1343 modrm_ea += di;
1344 break;
1345 case 6:
1346 if (ctxt->modrm_mod != 0)
1347 modrm_ea += bp;
1348 break;
1349 case 7:
1350 modrm_ea += bx;
1351 break;
1352 }
1353 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1354 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1355 ctxt->modrm_seg = VCPU_SREG_SS;
1356 modrm_ea = (u16)modrm_ea;
1357 } else {
 /* 32/64-bit ModR/M decode. */
1359 if ((ctxt->modrm_rm & 7) == 4) {
1360 sib = insn_fetch(u8, ctxt);
1361 index_reg |= (sib >> 3) & 7;
1362 base_reg |= sib & 7;
1363 scale = sib >> 6;
1364
1365 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1366 modrm_ea += insn_fetch(s32, ctxt);
1367 else {
1368 modrm_ea += reg_read(ctxt, base_reg);
1369 adjust_modrm_seg(ctxt, base_reg);
1370
1371 if ((ctxt->d & IncSP) &&
1372 base_reg == VCPU_REGS_RSP)
1373 modrm_ea += ctxt->op_bytes;
1374 }
1375 if (index_reg != 4)
1376 modrm_ea += reg_read(ctxt, index_reg) << scale;
1377 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1378 modrm_ea += insn_fetch(s32, ctxt);
1379 if (ctxt->mode == X86EMUL_MODE_PROT64)
1380 ctxt->rip_relative = 1;
1381 } else {
1382 base_reg = ctxt->modrm_rm;
1383 modrm_ea += reg_read(ctxt, base_reg);
1384 adjust_modrm_seg(ctxt, base_reg);
1385 }
1386 switch (ctxt->modrm_mod) {
1387 case 1:
1388 modrm_ea += insn_fetch(s8, ctxt);
1389 break;
1390 case 2:
1391 modrm_ea += insn_fetch(s32, ctxt);
1392 break;
1393 }
1394 }
1395 op->addr.mem.ea = modrm_ea;
1396 if (ctxt->ad_bytes != 8)
1397 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1398
1399done:
1400 return rc;
1401}
1402
1403static int decode_abs(struct x86_emulate_ctxt *ctxt,
1404 struct operand *op)
1405{
1406 int rc = X86EMUL_CONTINUE;
1407
1408 op->type = OP_MEM;
1409 switch (ctxt->ad_bytes) {
1410 case 2:
1411 op->addr.mem.ea = insn_fetch(u16, ctxt);
1412 break;
1413 case 4:
1414 op->addr.mem.ea = insn_fetch(u32, ctxt);
1415 break;
1416 case 8:
1417 op->addr.mem.ea = insn_fetch(u64, ctxt);
1418 break;
1419 }
1420done:
1421 return rc;
1422}
1423
1424static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1425{
1426 long sv = 0, mask;
1427
1428 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1429 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1430
1431 if (ctxt->src.bytes == 2)
1432 sv = (s16)ctxt->src.val & (s16)mask;
1433 else if (ctxt->src.bytes == 4)
1434 sv = (s32)ctxt->src.val & (s32)mask;
1435 else
1436 sv = (s64)ctxt->src.val & (s64)mask;
1437
1438 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1439 ctxt->dst.addr.mem.ea + (sv >> 3));
1440 }

 /* only subword offset */
1443 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1444}
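
/*
 * Worked example for a bit-test on memory with a register bit offset
 * (e.g. "bt %eax, (mem)"): with a 32-bit destination and src.val == 100,
 * mask is ~31, so the effective address is advanced by (100 & ~31) / 8 =
 * 12 bytes and src.val is reduced to 100 & 31 = 4, the bit index within
 * the addressed dword.
 */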
1445
1446static int read_emulated(struct x86_emulate_ctxt *ctxt,
1447 unsigned long addr, void *dest, unsigned size)
1448{
1449 int rc;
1450 struct read_cache *mc = &ctxt->mem_read;
1451
1452 if (mc->pos < mc->end)
1453 goto read_cached;
1454
1455 WARN_ON((mc->end + size) >= sizeof(mc->data));
1456
1457 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1458 &ctxt->exception);
1459 if (rc != X86EMUL_CONTINUE)
1460 return rc;
1461
1462 mc->end += size;
1463
1464read_cached:
1465 memcpy(dest, mc->data + mc->pos, size);
1466 mc->pos += size;
1467 return X86EMUL_CONTINUE;
1468}
1469
1470static int segmented_read(struct x86_emulate_ctxt *ctxt,
1471 struct segmented_address addr,
1472 void *data,
1473 unsigned size)
1474{
1475 int rc;
1476 ulong linear;
1477
1478 rc = linearize(ctxt, addr, size, false, &linear);
1479 if (rc != X86EMUL_CONTINUE)
1480 return rc;
1481 return read_emulated(ctxt, linear, data, size);
1482}
1483
1484static int segmented_write(struct x86_emulate_ctxt *ctxt,
1485 struct segmented_address addr,
1486 const void *data,
1487 unsigned size)
1488{
1489 int rc;
1490 ulong linear;
1491
1492 rc = linearize(ctxt, addr, size, true, &linear);
1493 if (rc != X86EMUL_CONTINUE)
1494 return rc;
1495 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1496 &ctxt->exception);
1497}
1498
1499static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1500 struct segmented_address addr,
1501 const void *orig_data, const void *data,
1502 unsigned size)
1503{
1504 int rc;
1505 ulong linear;
1506
1507 rc = linearize(ctxt, addr, size, true, &linear);
1508 if (rc != X86EMUL_CONTINUE)
1509 return rc;
1510 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1511 size, &ctxt->exception);
1512}
1513
1514static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1515 unsigned int size, unsigned short port,
1516 void *dest)
1517{
1518 struct read_cache *rc = &ctxt->io_read;
1519
1520 if (rc->pos == rc->end) {
1521 unsigned int in_page, n;
1522 unsigned int count = ctxt->rep_prefix ?
1523 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1524 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1525 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1526 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1527 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1528 if (n == 0)
1529 n = 1;
1530 rc->pos = rc->end = 0;
1531 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1532 return 0;
1533 rc->end = n * size;
1534 }
1535
1536 if (ctxt->rep_prefix && (ctxt->d & String) &&
1537 !(ctxt->eflags & X86_EFLAGS_DF)) {
1538 ctxt->dst.data = rc->data + rc->pos;
1539 ctxt->dst.type = OP_MEM_STR;
1540 ctxt->dst.count = (rc->end - rc->pos) / size;
1541 rc->pos = rc->end;
1542 } else {
1543 memcpy(dest, rc->data + rc->pos, size);
1544 rc->pos += size;
1545 }
1546 return 1;
1547}
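
/*
 * For rep IN/INS the helper above batches the port reads: up to a buffer
 * or page worth of elements is read in one ops->pio_in_emulated() call and
 * cached in ctxt->io_read; forward (DF clear) string variants then hand the
 * cached data straight to the destination as an OP_MEM_STR write.
 */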
1548
1549static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1550 u16 index, struct desc_struct *desc)
1551{
1552 struct desc_ptr dt;
1553 ulong addr;
1554
1555 ctxt->ops->get_idt(ctxt, &dt);
1556
1557 if (dt.size < index * 8 + 7)
1558 return emulate_gp(ctxt, index << 3 | 0x2);
1559
1560 addr = dt.address + index * 8;
1561 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1562}
1563
1564static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1565 u16 selector, struct desc_ptr *dt)
1566{
1567 const struct x86_emulate_ops *ops = ctxt->ops;
1568 u32 base3 = 0;
1569
1570 if (selector & 1 << 2) {
1571 struct desc_struct desc;
1572 u16 sel;
1573
1574 memset(dt, 0, sizeof(*dt));
1575 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1576 VCPU_SREG_LDTR))
1577 return;
1578
1579 dt->size = desc_limit_scaled(&desc);
1580 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1581 } else
1582 ops->get_gdt(ctxt, dt);
1583}
1584
1585static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1586 u16 selector, ulong *desc_addr_p)
1587{
1588 struct desc_ptr dt;
1589 u16 index = selector >> 3;
1590 ulong addr;
1591
1592 get_descriptor_table_ptr(ctxt, selector, &dt);
1593
1594 if (dt.size < index * 8 + 7)
1595 return emulate_gp(ctxt, selector & 0xfffc);
1596
1597 addr = dt.address + index * 8;
1598
1599#ifdef CONFIG_X86_64
1600 if (addr >> 32 != 0) {
1601 u64 efer = 0;
1602
1603 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1604 if (!(efer & EFER_LMA))
1605 addr &= (u32)-1;
1606 }
1607#endif
1608
1609 *desc_addr_p = addr;
1610 return X86EMUL_CONTINUE;
1611}
1612
1613
1614static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1615 u16 selector, struct desc_struct *desc,
1616 ulong *desc_addr_p)
1617{
1618 int rc;
1619
1620 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1621 if (rc != X86EMUL_CONTINUE)
1622 return rc;
1623
1624 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1625}
1626
1627
1628static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1629 u16 selector, struct desc_struct *desc)
1630{
1631 int rc;
1632 ulong addr;
1633
1634 rc = get_descriptor_ptr(ctxt, selector, &addr);
1635 if (rc != X86EMUL_CONTINUE)
1636 return rc;
1637
1638 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1639}
1640
1641static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1642 u16 selector, int seg, u8 cpl,
1643 enum x86_transfer_type transfer,
1644 struct desc_struct *desc)
1645{
1646 struct desc_struct seg_desc, old_desc;
1647 u8 dpl, rpl;
1648 unsigned err_vec = GP_VECTOR;
1649 u32 err_code = 0;
1650 bool null_selector = !(selector & ~0x3);
1651 ulong desc_addr;
1652 int ret;
1653 u16 dummy;
1654 u32 base3 = 0;
1655
1656 memset(&seg_desc, 0, sizeof(seg_desc));
1657
1658 if (ctxt->mode == X86EMUL_MODE_REAL) {
1659
1660
1661 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1662 set_desc_base(&seg_desc, selector << 4);
1663 goto load;
1664 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1665
1666 set_desc_base(&seg_desc, selector << 4);
1667 set_desc_limit(&seg_desc, 0xffff);
1668 seg_desc.type = 3;
1669 seg_desc.p = 1;
1670 seg_desc.s = 1;
1671 seg_desc.dpl = 3;
1672 goto load;
1673 }
1674
1675 rpl = selector & 3;

 /* TR should be in the GDT only */
1678 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1679 goto exception;
1680
1681
1682 if (null_selector) {
1683 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1684 goto exception;
1685
1686 if (seg == VCPU_SREG_SS) {
1687 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1688 goto exception;
 /*
  * ctxt->ops->set_segment expects the CPL to be in SS.DPL, so fake an
  * expand-up 32-bit writable data segment for the NULL SS case.
  */
1694 seg_desc.type = 3;
1695 seg_desc.p = 1;
1696 seg_desc.s = 1;
1697 seg_desc.dpl = cpl;
1698 seg_desc.d = 1;
1699 seg_desc.g = 1;
1700 }

 /* Skip all following checks */
1703 goto load;
1704 }
1705
1706 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1707 if (ret != X86EMUL_CONTINUE)
1708 return ret;
1709
1710 err_code = selector & 0xfffc;
1711 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1712 GP_VECTOR;

 /* can't load system descriptor into segment selector */
1715 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1716 if (transfer == X86_TRANSFER_CALL_JMP)
1717 return X86EMUL_UNHANDLEABLE;
1718 goto exception;
1719 }
1720
1721 if (!seg_desc.p) {
1722 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1723 goto exception;
1724 }
1725
1726 dpl = seg_desc.dpl;
1727
1728 switch (seg) {
1729 case VCPU_SREG_SS:
 /*
  * SS must be a writable data segment, and both the selector's RPL and
  * the descriptor's DPL must equal CPL.
  */
1734 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1735 goto exception;
1736 break;
1737 case VCPU_SREG_CS:
1738 if (!(seg_desc.type & 8))
1739 goto exception;
1740
1741 if (seg_desc.type & 4) {
1742
1743 if (dpl > cpl)
1744 goto exception;
1745 } else {
1746
1747 if (rpl > cpl || dpl != cpl)
1748 goto exception;
1749 }
1750
1751 if (seg_desc.d && seg_desc.l) {
1752 u64 efer = 0;
1753
1754 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1755 if (efer & EFER_LMA)
1756 goto exception;
1757 }

 /* CS(RPL) <- CPL */
1760 selector = (selector & 0xfffc) | cpl;
1761 break;
1762 case VCPU_SREG_TR:
1763 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1764 goto exception;
1765 old_desc = seg_desc;
1766 seg_desc.type |= 2;
1767 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1768 sizeof(seg_desc), &ctxt->exception);
1769 if (ret != X86EMUL_CONTINUE)
1770 return ret;
1771 break;
1772 case VCPU_SREG_LDTR:
1773 if (seg_desc.s || seg_desc.type != 2)
1774 goto exception;
1775 break;
1776 default:
 /*
  * DS, ES, FS, GS: fault on execute-only code segments, and on
  * non-conforming segments when both RPL and CPL are greater (less
  * privileged) than the descriptor's DPL.
  */
1782 if ((seg_desc.type & 0xa) == 0x8 ||
1783 (((seg_desc.type & 0xc) != 0xc) &&
1784 (rpl > dpl && cpl > dpl)))
1785 goto exception;
1786 break;
1787 }
1788
1789 if (seg_desc.s) {
 /* mark segment as accessed */
1791 if (!(seg_desc.type & 1)) {
1792 seg_desc.type |= 1;
1793 ret = write_segment_descriptor(ctxt, selector,
1794 &seg_desc);
1795 if (ret != X86EMUL_CONTINUE)
1796 return ret;
1797 }
1798 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1799 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1800 if (ret != X86EMUL_CONTINUE)
1801 return ret;
1802 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1803 ((u64)base3 << 32), ctxt))
1804 return emulate_gp(ctxt, 0);
1805 }
1806load:
1807 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1808 if (desc)
1809 *desc = seg_desc;
1810 return X86EMUL_CONTINUE;
1811exception:
1812 return emulate_exception(ctxt, err_vec, err_code, true);
1813}
1814
1815static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1816 u16 selector, int seg)
1817{
1818 u8 cpl = ctxt->ops->cpl(ctxt);
1819
 /*
  * A NULL selector can legally be loaded into SS in long mode at CPL < 3,
  * so it is allowed through to __load_segment_descriptor(); a NULL
  * selector with RPL 3, however, can never be legal for SS, so that
  * single case is rejected here with #GP.
  */
1830 if (seg == VCPU_SREG_SS && selector == 3 &&
1831 ctxt->mode == X86EMUL_MODE_PROT64)
1832 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1833
1834 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1835 X86_TRANSFER_NONE, NULL);
1836}
1837
1838static void write_register_operand(struct operand *op)
1839{
1840 return assign_register(op->addr.reg, op->val, op->bytes);
1841}
1842
1843static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1844{
1845 switch (op->type) {
1846 case OP_REG:
1847 write_register_operand(op);
1848 break;
1849 case OP_MEM:
1850 if (ctxt->lock_prefix)
1851 return segmented_cmpxchg(ctxt,
1852 op->addr.mem,
1853 &op->orig_val,
1854 &op->val,
1855 op->bytes);
1856 else
1857 return segmented_write(ctxt,
1858 op->addr.mem,
1859 &op->val,
1860 op->bytes);
1861 break;
1862 case OP_MEM_STR:
1863 return segmented_write(ctxt,
1864 op->addr.mem,
1865 op->data,
1866 op->bytes * op->count);
1867 break;
1868 case OP_XMM:
1869 write_sse_reg(&op->vec_val, op->addr.xmm);
1870 break;
1871 case OP_MM:
1872 write_mmx_reg(&op->mm_val, op->addr.mm);
1873 break;
1874 case OP_NONE:
 /* no writeback */
1876 break;
1877 default:
1878 break;
1879 }
1880 return X86EMUL_CONTINUE;
1881}
1882
1883static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1884{
1885 struct segmented_address addr;
1886
1887 rsp_increment(ctxt, -bytes);
1888 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1889 addr.seg = VCPU_SREG_SS;
1890
1891 return segmented_write(ctxt, addr, data, bytes);
1892}
1893
1894static int em_push(struct x86_emulate_ctxt *ctxt)
1895{
 /* Disable writeback. */
1897 ctxt->dst.type = OP_NONE;
1898 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1899}
1900
1901static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1902 void *dest, int len)
1903{
1904 int rc;
1905 struct segmented_address addr;
1906
1907 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1908 addr.seg = VCPU_SREG_SS;
1909 rc = segmented_read(ctxt, addr, dest, len);
1910 if (rc != X86EMUL_CONTINUE)
1911 return rc;
1912
1913 rsp_increment(ctxt, len);
1914 return rc;
1915}
1916
1917static int em_pop(struct x86_emulate_ctxt *ctxt)
1918{
1919 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1920}
1921
1922static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1923 void *dest, int len)
1924{
1925 int rc;
1926 unsigned long val, change_mask;
1927 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1928 int cpl = ctxt->ops->cpl(ctxt);
1929
1930 rc = emulate_pop(ctxt, &val, len);
1931 if (rc != X86EMUL_CONTINUE)
1932 return rc;
1933
1934 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1935 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1936 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1937 X86_EFLAGS_AC | X86_EFLAGS_ID;
1938
1939 switch(ctxt->mode) {
1940 case X86EMUL_MODE_PROT64:
1941 case X86EMUL_MODE_PROT32:
1942 case X86EMUL_MODE_PROT16:
1943 if (cpl == 0)
1944 change_mask |= X86_EFLAGS_IOPL;
1945 if (cpl <= iopl)
1946 change_mask |= X86_EFLAGS_IF;
1947 break;
1948 case X86EMUL_MODE_VM86:
1949 if (iopl < 3)
1950 return emulate_gp(ctxt, 0);
1951 change_mask |= X86_EFLAGS_IF;
1952 break;
1953 default:
1954 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1955 break;
1956 }
1957
1958 *(unsigned long *)dest =
1959 (ctxt->eflags & ~change_mask) | (val & change_mask);
1960
1961 return rc;
1962}
1963
1964static int em_popf(struct x86_emulate_ctxt *ctxt)
1965{
1966 ctxt->dst.type = OP_REG;
1967 ctxt->dst.addr.reg = &ctxt->eflags;
1968 ctxt->dst.bytes = ctxt->op_bytes;
1969 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1970}
1971
1972static int em_enter(struct x86_emulate_ctxt *ctxt)
1973{
1974 int rc;
1975 unsigned frame_size = ctxt->src.val;
1976 unsigned nesting_level = ctxt->src2.val & 31;
1977 ulong rbp;
1978
1979 if (nesting_level)
1980 return X86EMUL_UNHANDLEABLE;
1981
1982 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1983 rc = push(ctxt, &rbp, stack_size(ctxt));
1984 if (rc != X86EMUL_CONTINUE)
1985 return rc;
1986 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1987 stack_mask(ctxt));
1988 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1989 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1990 stack_mask(ctxt));
1991 return X86EMUL_CONTINUE;
1992}
1993
1994static int em_leave(struct x86_emulate_ctxt *ctxt)
1995{
1996 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1997 stack_mask(ctxt));
1998 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1999}
2000
2001static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
2002{
2003 int seg = ctxt->src2.val;
2004
2005 ctxt->src.val = get_segment_selector(ctxt, seg);
2006 if (ctxt->op_bytes == 4) {
2007 rsp_increment(ctxt, -2);
2008 ctxt->op_bytes = 2;
2009 }
2010
2011 return em_push(ctxt);
2012}
2013
2014static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
2015{
2016 int seg = ctxt->src2.val;
2017 unsigned long selector;
2018 int rc;
2019
2020 rc = emulate_pop(ctxt, &selector, 2);
2021 if (rc != X86EMUL_CONTINUE)
2022 return rc;
2023
2024 if (ctxt->modrm_reg == VCPU_SREG_SS)
2025 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2026 if (ctxt->op_bytes > 2)
2027 rsp_increment(ctxt, ctxt->op_bytes - 2);
2028
2029 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
2030 return rc;
2031}
2032
2033static int em_pusha(struct x86_emulate_ctxt *ctxt)
2034{
2035 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
2036 int rc = X86EMUL_CONTINUE;
2037 int reg = VCPU_REGS_RAX;
2038
2039 while (reg <= VCPU_REGS_RDI) {
2040 (reg == VCPU_REGS_RSP) ?
2041 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
2042
2043 rc = em_push(ctxt);
2044 if (rc != X86EMUL_CONTINUE)
2045 return rc;
2046
2047 ++reg;
2048 }
2049
2050 return rc;
2051}
2052
2053static int em_pushf(struct x86_emulate_ctxt *ctxt)
2054{
2055 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2056 return em_push(ctxt);
2057}
2058
2059static int em_popa(struct x86_emulate_ctxt *ctxt)
2060{
2061 int rc = X86EMUL_CONTINUE;
2062 int reg = VCPU_REGS_RDI;
2063 u32 val;
2064
2065 while (reg >= VCPU_REGS_RAX) {
2066 if (reg == VCPU_REGS_RSP) {
2067 rsp_increment(ctxt, ctxt->op_bytes);
2068 --reg;
2069 }
2070
2071 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2072 if (rc != X86EMUL_CONTINUE)
2073 break;
2074 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2075 --reg;
2076 }
2077 return rc;
2078}
2079
2080static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2081{
2082 const struct x86_emulate_ops *ops = ctxt->ops;
2083 int rc;
2084 struct desc_ptr dt;
2085 gva_t cs_addr;
2086 gva_t eip_addr;
2087 u16 cs, eip;
2088
2089
2090 ctxt->src.val = ctxt->eflags;
2091 rc = em_push(ctxt);
2092 if (rc != X86EMUL_CONTINUE)
2093 return rc;
2094
2095 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2096
2097 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2098 rc = em_push(ctxt);
2099 if (rc != X86EMUL_CONTINUE)
2100 return rc;
2101
2102 ctxt->src.val = ctxt->_eip;
2103 rc = em_push(ctxt);
2104 if (rc != X86EMUL_CONTINUE)
2105 return rc;
2106
2107 ops->get_idt(ctxt, &dt);
2108
2109 eip_addr = dt.address + (irq << 2);
2110 cs_addr = dt.address + (irq << 2) + 2;
2111
2112 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2113 if (rc != X86EMUL_CONTINUE)
2114 return rc;
2115
2116 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2117 if (rc != X86EMUL_CONTINUE)
2118 return rc;
2119
2120 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2121 if (rc != X86EMUL_CONTINUE)
2122 return rc;
2123
2124 ctxt->_eip = eip;
2125
2126 return rc;
2127}
2128
2129int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2130{
2131 int rc;
2132
2133 invalidate_registers(ctxt);
2134 rc = __emulate_int_real(ctxt, irq);
2135 if (rc == X86EMUL_CONTINUE)
2136 writeback_registers(ctxt);
2137 return rc;
2138}
2139
2140static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2141{
2142 switch(ctxt->mode) {
2143 case X86EMUL_MODE_REAL:
2144 return __emulate_int_real(ctxt, irq);
2145 case X86EMUL_MODE_VM86:
2146 case X86EMUL_MODE_PROT16:
2147 case X86EMUL_MODE_PROT32:
2148 case X86EMUL_MODE_PROT64:
2149 default:
2150
2151 return X86EMUL_UNHANDLEABLE;
2152 }
2153}
2154
2155static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2156{
2157 int rc = X86EMUL_CONTINUE;
2158 unsigned long temp_eip = 0;
2159 unsigned long temp_eflags = 0;
2160 unsigned long cs = 0;
2161 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2162 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2163 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2164 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2165 X86_EFLAGS_AC | X86_EFLAGS_ID |
2166 X86_EFLAGS_FIXED;
2167 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2168 X86_EFLAGS_VIP;
2169
2170
2171
2172 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2173
2174 if (rc != X86EMUL_CONTINUE)
2175 return rc;
2176
2177 if (temp_eip & ~0xffff)
2178 return emulate_gp(ctxt, 0);
2179
2180 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2181
2182 if (rc != X86EMUL_CONTINUE)
2183 return rc;
2184
2185 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2186
2187 if (rc != X86EMUL_CONTINUE)
2188 return rc;
2189
2190 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2191
2192 if (rc != X86EMUL_CONTINUE)
2193 return rc;
2194
2195 ctxt->_eip = temp_eip;
2196
2197 if (ctxt->op_bytes == 4)
2198 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2199 else if (ctxt->op_bytes == 2) {
2200 ctxt->eflags &= ~0xffff;
2201 ctxt->eflags |= temp_eflags;
2202 }
2203
2204 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2205 ctxt->eflags |= X86_EFLAGS_FIXED;
2206 ctxt->ops->set_nmi_mask(ctxt, false);
2207
2208 return rc;
2209}
2210
2211static int em_iret(struct x86_emulate_ctxt *ctxt)
2212{
2213 switch(ctxt->mode) {
2214 case X86EMUL_MODE_REAL:
2215 return emulate_iret_real(ctxt);
2216 case X86EMUL_MODE_VM86:
2217 case X86EMUL_MODE_PROT16:
2218 case X86EMUL_MODE_PROT32:
2219 case X86EMUL_MODE_PROT64:
2220 default:
2221
2222 return X86EMUL_UNHANDLEABLE;
2223 }
2224}
2225
2226static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2227{
2228 int rc;
2229 unsigned short sel;
2230 struct desc_struct new_desc;
2231 u8 cpl = ctxt->ops->cpl(ctxt);
2232
2233 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2234
2235 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2236 X86_TRANSFER_CALL_JMP,
2237 &new_desc);
2238 if (rc != X86EMUL_CONTINUE)
2239 return rc;
2240
2241 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2242
2243 if (rc != X86EMUL_CONTINUE)
2244 return X86EMUL_UNHANDLEABLE;
2245
2246 return rc;
2247}
2248
2249static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2250{
2251 return assign_eip_near(ctxt, ctxt->src.val);
2252}
2253
2254static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2255{
2256 int rc;
2257 long int old_eip;
2258
2259 old_eip = ctxt->_eip;
2260 rc = assign_eip_near(ctxt, ctxt->src.val);
2261 if (rc != X86EMUL_CONTINUE)
2262 return rc;
2263 ctxt->src.val = old_eip;
2264 rc = em_push(ctxt);
2265 return rc;
2266}
2267
2268static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2269{
2270 u64 old = ctxt->dst.orig_val64;
2271
2272 if (ctxt->dst.bytes == 16)
2273 return X86EMUL_UNHANDLEABLE;
2274
2275 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2276 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2277 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2278 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2279 ctxt->eflags &= ~X86_EFLAGS_ZF;
2280 } else {
2281 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2282 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2283
2284 ctxt->eflags |= X86_EFLAGS_ZF;
2285 }
2286 return X86EMUL_CONTINUE;
2287}
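
/*
 * CMPXCHG8B: the 64-bit destination was read into dst.orig_val64 before
 * this handler runs.  On a mismatch with EDX:EAX the old value is loaded
 * into EDX:EAX and ZF is cleared; on a match ZF is set and dst.val64 is
 * set to ECX:EBX so the normal writeback stores it.  A 16-byte operand
 * (CMPXCHG16B) is punted back as unhandleable here.
 */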
2288
2289static int em_ret(struct x86_emulate_ctxt *ctxt)
2290{
2291 int rc;
2292 unsigned long eip;
2293
2294 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2295 if (rc != X86EMUL_CONTINUE)
2296 return rc;
2297
2298 return assign_eip_near(ctxt, eip);
2299}
2300
2301static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2302{
2303 int rc;
2304 unsigned long eip, cs;
2305 int cpl = ctxt->ops->cpl(ctxt);
2306 struct desc_struct new_desc;
2307
2308 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2309 if (rc != X86EMUL_CONTINUE)
2310 return rc;
2311 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2312 if (rc != X86EMUL_CONTINUE)
2313 return rc;
2314
2315 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2316 return X86EMUL_UNHANDLEABLE;
2317 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2318 X86_TRANSFER_RET,
2319 &new_desc);
2320 if (rc != X86EMUL_CONTINUE)
2321 return rc;
2322 rc = assign_eip_far(ctxt, eip, &new_desc);
2323
2324 if (rc != X86EMUL_CONTINUE)
2325 return X86EMUL_UNHANDLEABLE;
2326
2327 return rc;
2328}
2329
2330static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2331{
2332 int rc;
2333
2334 rc = em_ret_far(ctxt);
2335 if (rc != X86EMUL_CONTINUE)
2336 return rc;
2337 rsp_increment(ctxt, ctxt->src.val);
2338 return X86EMUL_CONTINUE;
2339}
2340
2341static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2342{
2343 /* Save real source value, then compare EAX against destination. */
2344 ctxt->dst.orig_val = ctxt->dst.val;
2345 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2346 ctxt->src.orig_val = ctxt->src.val;
2347 ctxt->src.val = ctxt->dst.orig_val;
2348 fastop(ctxt, em_cmp);
2349
2350 if (ctxt->eflags & X86_EFLAGS_ZF) {
2351 /* Success: write back to memory; no update of EAX */
2352 ctxt->src.type = OP_NONE;
2353 ctxt->dst.val = ctxt->src.orig_val;
2354 } else {
2355 /* Failure: write the value we saw to EAX. */
2356 ctxt->src.type = OP_REG;
2357 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2358 ctxt->src.val = ctxt->dst.orig_val;
2359 /* Create write-cycle to dest by writing the same value */
2360 ctxt->dst.val = ctxt->dst.orig_val;
2361 }
2362 return X86EMUL_CONTINUE;
2363}
2364
2365static int em_lseg(struct x86_emulate_ctxt *ctxt)
2366{
2367 int seg = ctxt->src2.val;
2368 unsigned short sel;
2369 int rc;
2370
2371 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2372
2373 rc = load_segment_descriptor(ctxt, sel, seg);
2374 if (rc != X86EMUL_CONTINUE)
2375 return rc;
2376
2377 ctxt->dst.val = ctxt->src.val;
2378 return rc;
2379}
2380
2381static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2382{
2383#ifdef CONFIG_X86_64
2384 return ctxt->ops->guest_has_long_mode(ctxt);
2385#else
2386 return false;
2387#endif
2388}
2389
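/*
 * Unpack the descriptor "access rights" word from the SMM state-save area
 * (laid out like bits 8-23 of a descriptor's high dword) into the
 * corresponding desc_struct fields.
 */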
2390static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2391{
2392 desc->g = (flags >> 23) & 1;
2393 desc->d = (flags >> 22) & 1;
2394 desc->l = (flags >> 21) & 1;
2395 desc->avl = (flags >> 20) & 1;
2396 desc->p = (flags >> 15) & 1;
2397 desc->dpl = (flags >> 13) & 3;
2398 desc->s = (flags >> 12) & 1;
2399 desc->type = (flags >> 8) & 15;
2400}
2401
2402static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2403 int n)
2404{
2405 struct desc_struct desc;
2406 int offset;
2407 u16 selector;
2408
2409 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2410
2411 if (n < 3)
2412 offset = 0x7f84 + n * 12;
2413 else
2414 offset = 0x7f2c + (n - 3) * 12;
2415
2416 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2417 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2418 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2419 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2420 return X86EMUL_CONTINUE;
2421}
2422
2423#ifdef CONFIG_X86_64
2424static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2425 int n)
2426{
2427 struct desc_struct desc;
2428 int offset;
2429 u16 selector;
2430 u32 base3;
2431
2432 offset = 0x7e00 + n * 16;
2433
2434 selector = GET_SMSTATE(u16, smstate, offset);
2435 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2436 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2437 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2438 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2439
2440 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2441 return X86EMUL_CONTINUE;
2442}
2443#endif
2444
2445static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2446 u64 cr0, u64 cr3, u64 cr4)
2447{
2448 int bad;
2449 u64 pcid;
2450
2451 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2452 pcid = 0;
2453 if (cr4 & X86_CR4_PCIDE) {
2454 pcid = cr3 & 0xfff;
2455 cr3 &= ~0xfff;
2456 }
2457
2458 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2459 if (bad)
2460 return X86EMUL_UNHANDLEABLE;
2461
2462 /*
2463  * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2464  * Then enable protected mode.  However, PCID cannot be enabled
2465  * if EFER.LMA=0, so set it separately.
2466  */
2467 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2468 if (bad)
2469 return X86EMUL_UNHANDLEABLE;
2470
2471 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2472 if (bad)
2473 return X86EMUL_UNHANDLEABLE;
2474
2475 if (cr4 & X86_CR4_PCIDE) {
2476 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2477 if (bad)
2478 return X86EMUL_UNHANDLEABLE;
2479 if (pcid) {
2480 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2481 if (bad)
2482 return X86EMUL_UNHANDLEABLE;
2483 }
2484
2485 }
2486
2487 return X86EMUL_CONTINUE;
2488}
2489
2490static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2491 const char *smstate)
2492{
2493 struct desc_struct desc;
2494 struct desc_ptr dt;
2495 u16 selector;
2496 u32 val, cr0, cr3, cr4;
2497 int i;
2498
2499 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2500 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2501 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2502 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2503
2504 for (i = 0; i < 8; i++)
2505 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2506
2507 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2508
2509 if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
2510 return X86EMUL_UNHANDLEABLE;
2511
2512 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2513
2514 if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
2515 return X86EMUL_UNHANDLEABLE;
2516
2517 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2518 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2519 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2520 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2521 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2522
2523 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2524 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2525 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2526 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2527 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2528
2529 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2530 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2531 ctxt->ops->set_gdt(ctxt, &dt);
2532
2533 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2534 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2535 ctxt->ops->set_idt(ctxt, &dt);
2536
2537 for (i = 0; i < 6; i++) {
2538 int r = rsm_load_seg_32(ctxt, smstate, i);
2539 if (r != X86EMUL_CONTINUE)
2540 return r;
2541 }
2542
2543 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2544
2545 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2546
2547 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2548}
2549
2550#ifdef CONFIG_X86_64
2551static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2552 const char *smstate)
2553{
2554 struct desc_struct desc;
2555 struct desc_ptr dt;
2556 u64 val, cr0, cr3, cr4;
2557 u32 base3;
2558 u16 selector;
2559 int i, r;
2560
2561 for (i = 0; i < 16; i++)
2562 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2563
2564 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2565 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2566
2567 val = GET_SMSTATE(u32, smstate, 0x7f68);
2568
2569 if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
2570 return X86EMUL_UNHANDLEABLE;
2571
2572 val = GET_SMSTATE(u32, smstate, 0x7f60);
2573
2574 if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
2575 return X86EMUL_UNHANDLEABLE;
2576
2577 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2578 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2579 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2580 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2581 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2582
2583 if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
2584 return X86EMUL_UNHANDLEABLE;
2585
2586 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2587 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2588 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2589 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2590 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2591 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2592
2593 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2594 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2595 ctxt->ops->set_idt(ctxt, &dt);
2596
2597 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2598 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2599 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2600 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2601 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2602 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2603
2604 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2605 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2606 ctxt->ops->set_gdt(ctxt, &dt);
2607
2608 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2609 if (r != X86EMUL_CONTINUE)
2610 return r;
2611
2612 for (i = 0; i < 6; i++) {
2613 r = rsm_load_seg_64(ctxt, smstate, i);
2614 if (r != X86EMUL_CONTINUE)
2615 return r;
2616 }
2617
2618 return X86EMUL_CONTINUE;
2619}
2620#endif
2621
2622static int em_rsm(struct x86_emulate_ctxt *ctxt)
2623{
2624 unsigned long cr0, cr4, efer;
2625 char buf[512];
2626 u64 smbase;
2627 int ret;
2628
2629 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2630 return emulate_ud(ctxt);
2631
2632 smbase = ctxt->ops->get_smbase(ctxt);
2633
2634 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2635 if (ret != X86EMUL_CONTINUE)
2636 return X86EMUL_UNHANDLEABLE;
2637
2638 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2639 ctxt->ops->set_nmi_mask(ctxt, false);
2640
2641 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2642 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2643
2644 /*
2645  * Get back to real mode, to prepare a safe state in which to load
2646  * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2647  * supports long mode.
2648  */
2649 if (emulator_has_longmode(ctxt)) {
2650 struct desc_struct cs_desc;
2651
2652 /* Zero CR4.PCIDE before CR0.PG. */
2653 cr4 = ctxt->ops->get_cr(ctxt, 4);
2654 if (cr4 & X86_CR4_PCIDE)
2655 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2656
2657 /* A 32-bit code segment is required to clear EFER.LMA. */
2658 memset(&cs_desc, 0, sizeof(cs_desc));
2659 cs_desc.type = 0xb;
2660 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2661 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2662 }
2663
2664 /* For the 64-bit case, this will clear EFER.LMA. */
2665 cr0 = ctxt->ops->get_cr(ctxt, 0);
2666 if (cr0 & X86_CR0_PE)
2667 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2668
2669 if (emulator_has_longmode(ctxt)) {
2670 /* Clear CR4.PAE before clearing EFER.LME. */
2671 cr4 = ctxt->ops->get_cr(ctxt, 4);
2672 if (cr4 & X86_CR4_PAE)
2673 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2674
2675 /* And finally go back to 32-bit mode. */
2676 efer = 0;
2677 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2678 }
2679
2680 /*
2681  * Give pre_leave_smm() a chance to make ISA-specific changes to the
2682  * vCPU state (e.g. enter guest mode) before loading state from the
2683  * SMM state-save area.
2684  */
2685 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2686 return X86EMUL_UNHANDLEABLE;
2687
2688#ifdef CONFIG_X86_64
2689 if (emulator_has_longmode(ctxt))
2690 ret = rsm_load_state_64(ctxt, buf);
2691 else
2692#endif
2693 ret = rsm_load_state_32(ctxt, buf);
2694
2695 if (ret != X86EMUL_CONTINUE) {
2696 /* FIXME: should triple fault */
2697 return X86EMUL_UNHANDLEABLE;
2698 }
2699
2700 ctxt->ops->post_leave_smm(ctxt);
2701
2702 return X86EMUL_CONTINUE;
2703}
2704
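/*
 * Build the flat code and stack segment descriptors used by
 * SYSCALL/SYSENTER/SYSEXIT: base 0, 4GB limit, DPL 0, CS executable/readable
 * and SS writable.  Callers adjust cs.l/cs.d and the DPLs as needed.
 */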
2705static void
2706setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2707 struct desc_struct *cs, struct desc_struct *ss)
2708{
2709 cs->l = 0;
2710 set_desc_base(cs, 0);
2711 cs->g = 1;
2712 set_desc_limit(cs, 0xfffff);
2713 cs->type = 0x0b;
2714 cs->s = 1;
2715 cs->dpl = 0;
2716 cs->p = 1;
2717 cs->d = 1;
2718 cs->avl = 0;
2719
2720 set_desc_base(ss, 0);
2721 set_desc_limit(ss, 0xfffff);
2722 ss->g = 1;
2723 ss->s = 1;
2724 ss->type = 0x03;
2725 ss->d = 1;
2726 ss->dpl = 0;
2727 ss->p = 1;
2728 ss->l = 0;
2729 ss->avl = 0;
2730}
2731
2732static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2733{
2734 u32 eax, ebx, ecx, edx;
2735
2736 eax = ecx = 0;
2737 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2738 return is_guest_vendor_intel(ebx, ecx, edx);
2739}
2740
2741static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2742{
2743 const struct x86_emulate_ops *ops = ctxt->ops;
2744 u32 eax, ebx, ecx, edx;
2745
2746 /*
2747  * syscall should always be enabled in longmode - so only become
2748  * vendor specific (cpuid) if other modes are active...
2749  */
2750 if (ctxt->mode == X86EMUL_MODE_PROT64)
2751 return true;
2752
2753 eax = 0x00000000;
2754 ecx = 0x00000000;
2755 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2756
2757 /*
2758  * Intel CPUs only support SYSCALL in 64-bit long mode; a 64-bit guest
2759  * running a 32-bit compat application will get #UD.  AMD and Hygon also
2760  * allow SYSCALL in legacy/compat modes, so check the CPUID vendor string.
2761  */
2762 if (is_guest_vendor_intel(ebx, ecx, edx))
2763 return false;
2764
2765 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2766 is_guest_vendor_hygon(ebx, ecx, edx))
2767 return true;
2768
2769 /*
2770  * default: (not Intel, not AMD, not Hygon), apply Intel's
2771  * stricter rules...
2772  */
2773 return false;
2774}
2775
2776static int em_syscall(struct x86_emulate_ctxt *ctxt)
2777{
2778 const struct x86_emulate_ops *ops = ctxt->ops;
2779 struct desc_struct cs, ss;
2780 u64 msr_data;
2781 u16 cs_sel, ss_sel;
2782 u64 efer = 0;
2783
2784 /* syscall is not available in real mode */
2785 if (ctxt->mode == X86EMUL_MODE_REAL ||
2786 ctxt->mode == X86EMUL_MODE_VM86)
2787 return emulate_ud(ctxt);
2788
2789 if (!(em_syscall_is_enabled(ctxt)))
2790 return emulate_ud(ctxt);
2791
2792 ops->get_msr(ctxt, MSR_EFER, &efer);
2793 if (!(efer & EFER_SCE))
2794 return emulate_ud(ctxt);
2795
2796 setup_syscalls_segments(ctxt, &cs, &ss);
2797 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2798 msr_data >>= 32;
2799 cs_sel = (u16)(msr_data & 0xfffc);
2800 ss_sel = (u16)(msr_data + 8);
2801
2802 if (efer & EFER_LMA) {
2803 cs.d = 0;
2804 cs.l = 1;
2805 }
2806 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2807 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2808
2809 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2810 if (efer & EFER_LMA) {
2811#ifdef CONFIG_X86_64
2812 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2813
2814 ops->get_msr(ctxt,
2815 ctxt->mode == X86EMUL_MODE_PROT64 ?
2816 MSR_LSTAR : MSR_CSTAR, &msr_data);
2817 ctxt->_eip = msr_data;
2818
2819 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2820 ctxt->eflags &= ~msr_data;
2821 ctxt->eflags |= X86_EFLAGS_FIXED;
2822#endif
2823 } else {
2824 /* legacy mode */
2825 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2826 ctxt->_eip = (u32)msr_data;
2827
2828 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2829 }
2830
2831 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2832 return X86EMUL_CONTINUE;
2833}
2834
2835static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2836{
2837 const struct x86_emulate_ops *ops = ctxt->ops;
2838 struct desc_struct cs, ss;
2839 u64 msr_data;
2840 u16 cs_sel, ss_sel;
2841 u64 efer = 0;
2842
2843 ops->get_msr(ctxt, MSR_EFER, &efer);
2844
2845 if (ctxt->mode == X86EMUL_MODE_REAL)
2846 return emulate_gp(ctxt, 0);
2847
2848 /*
2849  * Not recognized on AMD in compat mode (but is recognized in legacy
2850  * mode).
2851  */
2852 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2853 && !vendor_intel(ctxt))
2854 return emulate_ud(ctxt);
2855
2856 /* sysenter/sysexit have not been emulated in 64bit mode */
2857 if (ctxt->mode == X86EMUL_MODE_PROT64)
2858 return X86EMUL_UNHANDLEABLE;
2859
2860 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2861 if ((msr_data & 0xfffc) == 0x0)
2862 return emulate_gp(ctxt, 0);
2863
2864 setup_syscalls_segments(ctxt, &cs, &ss);
2865 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2866 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2867 ss_sel = cs_sel + 8;
2868 if (efer & EFER_LMA) {
2869 cs.d = 0;
2870 cs.l = 1;
2871 }
2872
2873 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2874 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2875
2876 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2877 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2878
2879 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2880 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2881 (u32)msr_data;
2882
2883 return X86EMUL_CONTINUE;
2884}
2885
2886static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2887{
2888 const struct x86_emulate_ops *ops = ctxt->ops;
2889 struct desc_struct cs, ss;
2890 u64 msr_data, rcx, rdx;
2891 int usermode;
2892 u16 cs_sel = 0, ss_sel = 0;
2893
2894 /* inject #GP if in real mode or Virtual 8086 mode */
2895 if (ctxt->mode == X86EMUL_MODE_REAL ||
2896 ctxt->mode == X86EMUL_MODE_VM86)
2897 return emulate_gp(ctxt, 0);
2898
2899 setup_syscalls_segments(ctxt, &cs, &ss);
2900
2901 if ((ctxt->rex_prefix & 0x8) != 0x0)
2902 usermode = X86EMUL_MODE_PROT64;
2903 else
2904 usermode = X86EMUL_MODE_PROT32;
2905
2906 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2907 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2908
2909 cs.dpl = 3;
2910 ss.dpl = 3;
2911 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2912 switch (usermode) {
2913 case X86EMUL_MODE_PROT32:
2914 cs_sel = (u16)(msr_data + 16);
2915 if ((msr_data & 0xfffc) == 0x0)
2916 return emulate_gp(ctxt, 0);
2917 ss_sel = (u16)(msr_data + 24);
2918 rcx = (u32)rcx;
2919 rdx = (u32)rdx;
2920 break;
2921 case X86EMUL_MODE_PROT64:
2922 cs_sel = (u16)(msr_data + 32);
2923 if (msr_data == 0x0)
2924 return emulate_gp(ctxt, 0);
2925 ss_sel = cs_sel + 8;
2926 cs.d = 0;
2927 cs.l = 1;
2928 if (emul_is_noncanonical_address(rcx, ctxt) ||
2929 emul_is_noncanonical_address(rdx, ctxt))
2930 return emulate_gp(ctxt, 0);
2931 break;
2932 }
2933 cs_sel |= SEGMENT_RPL_MASK;
2934 ss_sel |= SEGMENT_RPL_MASK;
2935
2936 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2937 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2938
2939 ctxt->_eip = rdx;
2940 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2941
2942 return X86EMUL_CONTINUE;
2943}
2944
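/*
 * Returns true when direct I/O is not allowed by EFLAGS.IOPL: always allowed
 * in real mode, never in VM86 mode (the TSS I/O bitmap must be consulted),
 * and otherwise only when CPL <= IOPL.
 */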
2945static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2946{
2947 int iopl;
2948 if (ctxt->mode == X86EMUL_MODE_REAL)
2949 return false;
2950 if (ctxt->mode == X86EMUL_MODE_VM86)
2951 return true;
2952 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2953 return ctxt->ops->cpl(ctxt) > iopl;
2954}
2955
2956#define VMWARE_PORT_VMPORT (0x5658)
2957#define VMWARE_PORT_VMRPC (0x5659)
2958
2959static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2960 u16 port, u16 len)
2961{
2962 const struct x86_emulate_ops *ops = ctxt->ops;
2963 struct desc_struct tr_seg;
2964 u32 base3;
2965 int r;
2966 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2967 unsigned mask = (1 << len) - 1;
2968 unsigned long base;
2969
2970 /*
2971  * VMware allows access to these ports even if denied
2972  * by TSS I/O permission bitmap. Mimic behavior.
2973  */
2974 if (enable_vmware_backdoor &&
2975 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2976 return true;
2977
2978 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2979 if (!tr_seg.p)
2980 return false;
2981 if (desc_limit_scaled(&tr_seg) < 103)
2982 return false;
2983 base = get_desc_base(&tr_seg);
2984#ifdef CONFIG_X86_64
2985 base |= ((u64)base3) << 32;
2986#endif
2987 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2988 if (r != X86EMUL_CONTINUE)
2989 return false;
2990 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2991 return false;
2992 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2993 if (r != X86EMUL_CONTINUE)
2994 return false;
2995 if ((perm >> bit_idx) & mask)
2996 return false;
2997 return true;
2998}
2999
3000static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
3001 u16 port, u16 len)
3002{
3003 if (ctxt->perm_ok)
3004 return true;
3005
3006 if (emulator_bad_iopl(ctxt))
3007 if (!emulator_io_port_access_allowed(ctxt, port, len))
3008 return false;
3009
3010 ctxt->perm_ok = true;
3011
3012 return true;
3013}
3014
3015static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
3016{
3017 /*
3018  * Intel CPUs mask the counter and pointers in quite strange
3019  * manner when ECX is zero due to REP-string optimizations.
3020  */
3021#ifdef CONFIG_X86_64
3022 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
3023 return;
3024
3025 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
3026
3027 switch (ctxt->b) {
3028 case 0xa4:
3029 case 0xa5:
3030 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
3031 fallthrough;
3032 case 0xaa:
3033 case 0xab:
3034 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
3035 }
3036#endif
3037}
3038
3039static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
3040 struct tss_segment_16 *tss)
3041{
3042 tss->ip = ctxt->_eip;
3043 tss->flag = ctxt->eflags;
3044 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3045 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3046 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3047 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3048 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3049 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3050 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3051 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3052
3053 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3054 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3055 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3056 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3057 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3058}
3059
3060static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3061 struct tss_segment_16 *tss)
3062{
3063 int ret;
3064 u8 cpl;
3065
3066 ctxt->_eip = tss->ip;
3067 ctxt->eflags = tss->flag | 2;
3068 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3069 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3070 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3071 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3072 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3073 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3074 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3075 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3076
3077 /*
3078  * SDM says that segment selectors are loaded before segment
3079  * descriptors
3080  */
3081 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3082 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3083 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3084 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3085 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3086
3087 cpl = tss->cs & 3;
3088
3089 /*
3090  * Now load segment descriptors. If fault happens at this stage
3091  * it is handled in a context of new task
3092  */
3093 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3094 X86_TRANSFER_TASK_SWITCH, NULL);
3095 if (ret != X86EMUL_CONTINUE)
3096 return ret;
3097 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3098 X86_TRANSFER_TASK_SWITCH, NULL);
3099 if (ret != X86EMUL_CONTINUE)
3100 return ret;
3101 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3102 X86_TRANSFER_TASK_SWITCH, NULL);
3103 if (ret != X86EMUL_CONTINUE)
3104 return ret;
3105 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3106 X86_TRANSFER_TASK_SWITCH, NULL);
3107 if (ret != X86EMUL_CONTINUE)
3108 return ret;
3109 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3110 X86_TRANSFER_TASK_SWITCH, NULL);
3111 if (ret != X86EMUL_CONTINUE)
3112 return ret;
3113
3114 return X86EMUL_CONTINUE;
3115}
3116
3117static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3118 u16 tss_selector, u16 old_tss_sel,
3119 ulong old_tss_base, struct desc_struct *new_desc)
3120{
3121 struct tss_segment_16 tss_seg;
3122 int ret;
3123 u32 new_tss_base = get_desc_base(new_desc);
3124
3125 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3126 if (ret != X86EMUL_CONTINUE)
3127 return ret;
3128
3129 save_state_to_tss16(ctxt, &tss_seg);
3130
3131 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3132 if (ret != X86EMUL_CONTINUE)
3133 return ret;
3134
3135 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3136 if (ret != X86EMUL_CONTINUE)
3137 return ret;
3138
3139 if (old_tss_sel != 0xffff) {
3140 tss_seg.prev_task_link = old_tss_sel;
3141
3142 ret = linear_write_system(ctxt, new_tss_base,
3143 &tss_seg.prev_task_link,
3144 sizeof(tss_seg.prev_task_link));
3145 if (ret != X86EMUL_CONTINUE)
3146 return ret;
3147 }
3148
3149 return load_state_from_tss16(ctxt, &tss_seg);
3150}
3151
3152static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3153 struct tss_segment_32 *tss)
3154{
3155 /* CR3 and ldt selector are not saved intentionally */
3156 tss->eip = ctxt->_eip;
3157 tss->eflags = ctxt->eflags;
3158 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3159 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3160 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3161 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3162 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3163 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3164 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3165 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3166
3167 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3168 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3169 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3170 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3171 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3172 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3173}
3174
3175static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3176 struct tss_segment_32 *tss)
3177{
3178 int ret;
3179 u8 cpl;
3180
3181 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3182 return emulate_gp(ctxt, 0);
3183 ctxt->_eip = tss->eip;
3184 ctxt->eflags = tss->eflags | 2;
3185
3186 /* General purpose registers */
3187 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3188 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3189 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3190 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3191 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3192 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3193 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3194 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3195
3196 /*
3197  * SDM says that segment selectors are loaded before segment
3198  * descriptors.  This is important because CPL checks will
3199  * use CS.RPL.
3200  */
3201 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3202 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3203 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3204 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3205 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3206 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3207 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3208
3209 /*
3210  * If we're switching between Protected Mode and VM86, we need to make
3211  * sure to update the mode before loading the segment descriptors so
3212  * that the selectors are interpreted correctly.
3213  */
3214 if (ctxt->eflags & X86_EFLAGS_VM) {
3215 ctxt->mode = X86EMUL_MODE_VM86;
3216 cpl = 3;
3217 } else {
3218 ctxt->mode = X86EMUL_MODE_PROT32;
3219 cpl = tss->cs & 3;
3220 }
3221
3222 /*
3223  * Now load segment descriptors. If fault happens at this stage
3224  * it is handled in a context of new task
3225  */
3226 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3227 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3228 if (ret != X86EMUL_CONTINUE)
3229 return ret;
3230 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3231 X86_TRANSFER_TASK_SWITCH, NULL);
3232 if (ret != X86EMUL_CONTINUE)
3233 return ret;
3234 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3235 X86_TRANSFER_TASK_SWITCH, NULL);
3236 if (ret != X86EMUL_CONTINUE)
3237 return ret;
3238 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3239 X86_TRANSFER_TASK_SWITCH, NULL);
3240 if (ret != X86EMUL_CONTINUE)
3241 return ret;
3242 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3243 X86_TRANSFER_TASK_SWITCH, NULL);
3244 if (ret != X86EMUL_CONTINUE)
3245 return ret;
3246 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3247 X86_TRANSFER_TASK_SWITCH, NULL);
3248 if (ret != X86EMUL_CONTINUE)
3249 return ret;
3250 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3251 X86_TRANSFER_TASK_SWITCH, NULL);
3252
3253 return ret;
3254}
3255
3256static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3257 u16 tss_selector, u16 old_tss_sel,
3258 ulong old_tss_base, struct desc_struct *new_desc)
3259{
3260 struct tss_segment_32 tss_seg;
3261 int ret;
3262 u32 new_tss_base = get_desc_base(new_desc);
3263 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3264 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3265
3266 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3267 if (ret != X86EMUL_CONTINUE)
3268 return ret;
3269
3270 save_state_to_tss32(ctxt, &tss_seg);
3271
3272 /* Only GP registers and segment selectors are saved */
3273 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3274 ldt_sel_offset - eip_offset);
3275 if (ret != X86EMUL_CONTINUE)
3276 return ret;
3277
3278 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3279 if (ret != X86EMUL_CONTINUE)
3280 return ret;
3281
3282 if (old_tss_sel != 0xffff) {
3283 tss_seg.prev_task_link = old_tss_sel;
3284
3285 ret = linear_write_system(ctxt, new_tss_base,
3286 &tss_seg.prev_task_link,
3287 sizeof(tss_seg.prev_task_link));
3288 if (ret != X86EMUL_CONTINUE)
3289 return ret;
3290 }
3291
3292 return load_state_from_tss32(ctxt, &tss_seg);
3293}
3294
3295static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3296 u16 tss_selector, int idt_index, int reason,
3297 bool has_error_code, u32 error_code)
3298{
3299 const struct x86_emulate_ops *ops = ctxt->ops;
3300 struct desc_struct curr_tss_desc, next_tss_desc;
3301 int ret;
3302 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3303 ulong old_tss_base =
3304 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3305 u32 desc_limit;
3306 ulong desc_addr, dr7;
3307
3308 /* FIXME: old_tss_base == ~0 ? */
3309
3310 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3311 if (ret != X86EMUL_CONTINUE)
3312 return ret;
3313 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3314 if (ret != X86EMUL_CONTINUE)
3315 return ret;
3316
3317 /* FIXME: check that next_tss_desc is tss */
3318 /*
3319  * Check privileges. The three relevant cases are:
3320  *
3321  * 1. jmp/call/int through a task gate: the task gate's DPL must not be
3322  *    below either the gate selector's RPL or the current CPL.
3323  * 2. Exception/IRQ/iret: no check is performed.
3324  * 3. jmp/call directly to a TSS descriptor: no check is performed here,
3325  *    the hardware has already checked it before exiting.
3326  */
3327 if (reason == TASK_SWITCH_GATE) {
3328 if (idt_index != -1) {
3329 /* Software interrupts */
3330 struct desc_struct task_gate_desc;
3331 int dpl;
3332
3333 ret = read_interrupt_descriptor(ctxt, idt_index,
3334 &task_gate_desc);
3335 if (ret != X86EMUL_CONTINUE)
3336 return ret;
3337
3338 dpl = task_gate_desc.dpl;
3339 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3340 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3341 }
3342 }
3343
3344 desc_limit = desc_limit_scaled(&next_tss_desc);
3345 if (!next_tss_desc.p ||
3346 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3347 desc_limit < 0x2b)) {
3348 return emulate_ts(ctxt, tss_selector & 0xfffc);
3349 }
3350
3351 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3352 curr_tss_desc.type &= ~(1 << 1);
3353 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3354 }
3355
3356 if (reason == TASK_SWITCH_IRET)
3357 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3358
3359 /* set back link to prev task only if NT bit is set in eflags
3360    note that old_tss_sel is not used after this point */
3361 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3362 old_tss_sel = 0xffff;
3363
3364 if (next_tss_desc.type & 8)
3365 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3366 old_tss_base, &next_tss_desc);
3367 else
3368 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3369 old_tss_base, &next_tss_desc);
3370 if (ret != X86EMUL_CONTINUE)
3371 return ret;
3372
3373 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3374 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3375
3376 if (reason != TASK_SWITCH_IRET) {
3377 next_tss_desc.type |= (1 << 1);
3378 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3379 }
3380
3381 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3382 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3383
3384 if (has_error_code) {
3385 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3386 ctxt->lock_prefix = 0;
3387 ctxt->src.val = (unsigned long) error_code;
3388 ret = em_push(ctxt);
3389 }
3390
3391 ops->get_dr(ctxt, 7, &dr7);
3392 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3393
3394 return ret;
3395}
3396
3397int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3398 u16 tss_selector, int idt_index, int reason,
3399 bool has_error_code, u32 error_code)
3400{
3401 int rc;
3402
3403 invalidate_registers(ctxt);
3404 ctxt->_eip = ctxt->eip;
3405 ctxt->dst.type = OP_NONE;
3406
3407 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3408 has_error_code, error_code);
3409
3410 if (rc == X86EMUL_CONTINUE) {
3411 ctxt->eip = ctxt->_eip;
3412 writeback_registers(ctxt);
3413 }
3414
3415 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3416}
3417
3418static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3419 struct operand *op)
3420{
3421 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3422
3423 register_address_increment(ctxt, reg, df * op->bytes);
3424 op->addr.mem.ea = register_address(ctxt, reg);
3425}
3426
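/*
 * DAS: decimal adjust AL after subtraction.  Subtract 6 if the low nibble
 * is above 9 or AF is set, and 0x60 if AL was above 0x99 or CF was set,
 * updating AF/CF; PF/ZF/SF are then recomputed via a dummy OR.
 */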
3427static int em_das(struct x86_emulate_ctxt *ctxt)
3428{
3429 u8 al, old_al;
3430 bool af, cf, old_cf;
3431
3432 cf = ctxt->eflags & X86_EFLAGS_CF;
3433 al = ctxt->dst.val;
3434
3435 old_al = al;
3436 old_cf = cf;
3437 cf = false;
3438 af = ctxt->eflags & X86_EFLAGS_AF;
3439 if ((al & 0x0f) > 9 || af) {
3440 al -= 6;
3441 cf = old_cf | (al >= 250);
3442 af = true;
3443 } else {
3444 af = false;
3445 }
3446 if (old_al > 0x99 || old_cf) {
3447 al -= 0x60;
3448 cf = true;
3449 }
3450
3451 ctxt->dst.val = al;
3452 /* Set PF, ZF, SF */
3453 ctxt->src.type = OP_IMM;
3454 ctxt->src.val = 0;
3455 ctxt->src.bytes = 1;
3456 fastop(ctxt, em_or);
3457 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3458 if (cf)
3459 ctxt->eflags |= X86_EFLAGS_CF;
3460 if (af)
3461 ctxt->eflags |= X86_EFLAGS_AF;
3462 return X86EMUL_CONTINUE;
3463}
3464
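/*
 * AAM imm8: AH = AL / imm8 and AL = AL % imm8, with #DE raised for a zero
 * immediate.  PF/ZF/SF are recomputed from AL via a dummy OR.
 */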
3465static int em_aam(struct x86_emulate_ctxt *ctxt)
3466{
3467 u8 al, ah;
3468
3469 if (ctxt->src.val == 0)
3470 return emulate_de(ctxt);
3471
3472 al = ctxt->dst.val & 0xff;
3473 ah = al / ctxt->src.val;
3474 al %= ctxt->src.val;
3475
3476 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3477
3478 /* Set PF, ZF, SF */
3479 ctxt->src.type = OP_IMM;
3480 ctxt->src.val = 0;
3481 ctxt->src.bytes = 1;
3482 fastop(ctxt, em_or);
3483
3484 return X86EMUL_CONTINUE;
3485}
3486
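/*
 * AAD imm8: AL = AL + AH * imm8 and AH = 0.  PF/ZF/SF are recomputed from
 * AL via a dummy OR.
 */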
3487static int em_aad(struct x86_emulate_ctxt *ctxt)
3488{
3489 u8 al = ctxt->dst.val & 0xff;
3490 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3491
3492 al = (al + (ah * ctxt->src.val)) & 0xff;
3493
3494 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3495
3496 /* Set PF, ZF, SF */
3497 ctxt->src.type = OP_IMM;
3498 ctxt->src.val = 0;
3499 ctxt->src.bytes = 1;
3500 fastop(ctxt, em_or);
3501
3502 return X86EMUL_CONTINUE;
3503}
3504
3505static int em_call(struct x86_emulate_ctxt *ctxt)
3506{
3507 int rc;
3508 long rel = ctxt->src.val;
3509
3510 ctxt->src.val = (unsigned long)ctxt->_eip;
3511 rc = jmp_rel(ctxt, rel);
3512 if (rc != X86EMUL_CONTINUE)
3513 return rc;
3514 return em_push(ctxt);
3515}
3516
3517static int em_call_far(struct x86_emulate_ctxt *ctxt)
3518{
3519 u16 sel, old_cs;
3520 ulong old_eip;
3521 int rc;
3522 struct desc_struct old_desc, new_desc;
3523 const struct x86_emulate_ops *ops = ctxt->ops;
3524 int cpl = ctxt->ops->cpl(ctxt);
3525 enum x86emul_mode prev_mode = ctxt->mode;
3526
3527 old_eip = ctxt->_eip;
3528 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3529
3530 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3531 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3532 X86_TRANSFER_CALL_JMP, &new_desc);
3533 if (rc != X86EMUL_CONTINUE)
3534 return rc;
3535
3536 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3537 if (rc != X86EMUL_CONTINUE)
3538 goto fail;
3539
3540 ctxt->src.val = old_cs;
3541 rc = em_push(ctxt);
3542 if (rc != X86EMUL_CONTINUE)
3543 goto fail;
3544
3545 ctxt->src.val = old_eip;
3546 rc = em_push(ctxt);
3547 /* If we failed, we tainted the memory, but the very least we should
3548    restore cs */
3549 if (rc != X86EMUL_CONTINUE) {
3550 pr_warn_once("faulting far call emulation tainted memory\n");
3551 goto fail;
3552 }
3553 return rc;
3554fail:
3555 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3556 ctxt->mode = prev_mode;
3557 return rc;
3558
3559}
3560
3561static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3562{
3563 int rc;
3564 unsigned long eip;
3565
3566 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3567 if (rc != X86EMUL_CONTINUE)
3568 return rc;
3569 rc = assign_eip_near(ctxt, eip);
3570 if (rc != X86EMUL_CONTINUE)
3571 return rc;
3572 rsp_increment(ctxt, ctxt->src.val);
3573 return X86EMUL_CONTINUE;
3574}
3575
3576static int em_xchg(struct x86_emulate_ctxt *ctxt)
3577{
3578 /* Write back the register source. */
3579 ctxt->src.val = ctxt->dst.val;
3580 write_register_operand(&ctxt->src);
3581
3582 /* Write back the memory destination with implicit LOCK prefix. */
3583 ctxt->dst.val = ctxt->src.orig_val;
3584 ctxt->lock_prefix = 1;
3585 return X86EMUL_CONTINUE;
3586}
3587
3588static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3589{
3590 ctxt->dst.val = ctxt->src2.val;
3591 return fastop(ctxt, em_imul);
3592}
3593
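/*
 * CWD/CDQ/CQO: sign-extend the accumulator into rDX by replicating its sign
 * bit (all ones when negative, zero otherwise).
 */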
3594static int em_cwd(struct x86_emulate_ctxt *ctxt)
3595{
3596 ctxt->dst.type = OP_REG;
3597 ctxt->dst.bytes = ctxt->src.bytes;
3598 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3599 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3600
3601 return X86EMUL_CONTINUE;
3602}
3603
3604static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3605{
3606 u64 tsc_aux = 0;
3607
3608 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3609 return emulate_ud(ctxt);
3610 ctxt->dst.val = tsc_aux;
3611 return X86EMUL_CONTINUE;
3612}
3613
3614static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3615{
3616 u64 tsc = 0;
3617
3618 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3619 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3620 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3621 return X86EMUL_CONTINUE;
3622}
3623
3624static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3625{
3626 u64 pmc;
3627
3628 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3629 return emulate_gp(ctxt, 0);
3630 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3631 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3632 return X86EMUL_CONTINUE;
3633}
3634
3635static int em_mov(struct x86_emulate_ctxt *ctxt)
3636{
3637 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3638 return X86EMUL_CONTINUE;
3639}
3640
3641static int em_movbe(struct x86_emulate_ctxt *ctxt)
3642{
3643 u16 tmp;
3644
3645 if (!ctxt->ops->guest_has_movbe(ctxt))
3646 return emulate_ud(ctxt);
3647
3648 switch (ctxt->op_bytes) {
3649 case 2:
3650 /*
3651  * From MOVBE definition: "...When the operand size is 16 bits,
3652  * the upper word of the destination register remains unchanged
3653  * ..."
3654  *
3655  * Both casting ->valptr and ->val to u16 breaks strict aliasing
3656  * rules so we have to do the operation almost per hand.
3657  */
3658 tmp = (u16)ctxt->src.val;
3659 ctxt->dst.val &= ~0xffffUL;
3660 ctxt->dst.val |= (unsigned long)swab16(tmp);
3661 break;
3662 case 4:
3663 ctxt->dst.val = swab32((u32)ctxt->src.val);
3664 break;
3665 case 8:
3666 ctxt->dst.val = swab64(ctxt->src.val);
3667 break;
3668 default:
3669 BUG();
3670 }
3671 return X86EMUL_CONTINUE;
3672}
3673
3674static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3675{
3676 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3677 return emulate_gp(ctxt, 0);
3678
3679 /* Disable writeback. */
3680 ctxt->dst.type = OP_NONE;
3681 return X86EMUL_CONTINUE;
3682}
3683
3684static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3685{
3686 unsigned long val;
3687
3688 if (ctxt->mode == X86EMUL_MODE_PROT64)
3689 val = ctxt->src.val & ~0ULL;
3690 else
3691 val = ctxt->src.val & ~0U;
3692
3693 /* #UD condition is already handled. */
3694 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3695 return emulate_gp(ctxt, 0);
3696
3697 /* Disable writeback. */
3698 ctxt->dst.type = OP_NONE;
3699 return X86EMUL_CONTINUE;
3700}
3701
3702static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3703{
3704 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3705 u64 msr_data;
3706 int r;
3707
3708 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3709 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3710 r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
3711
3712 if (r == X86EMUL_IO_NEEDED)
3713 return r;
3714
3715 if (r > 0)
3716 return emulate_gp(ctxt, 0);
3717
3718 return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
3719}
3720
3721static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3722{
3723 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3724 u64 msr_data;
3725 int r;
3726
3727 r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
3728
3729 if (r == X86EMUL_IO_NEEDED)
3730 return r;
3731
3732 if (r)
3733 return emulate_gp(ctxt, 0);
3734
3735 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3736 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3737 return X86EMUL_CONTINUE;
3738}
3739
3740static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3741{
3742 if (segment > VCPU_SREG_GS &&
3743 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3744 ctxt->ops->cpl(ctxt) > 0)
3745 return emulate_gp(ctxt, 0);
3746
3747 ctxt->dst.val = get_segment_selector(ctxt, segment);
3748 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3749 ctxt->dst.bytes = 2;
3750 return X86EMUL_CONTINUE;
3751}
3752
3753static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3754{
3755 if (ctxt->modrm_reg > VCPU_SREG_GS)
3756 return emulate_ud(ctxt);
3757
3758 return em_store_sreg(ctxt, ctxt->modrm_reg);
3759}
3760
3761static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3762{
3763 u16 sel = ctxt->src.val;
3764
3765 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3766 return emulate_ud(ctxt);
3767
3768 if (ctxt->modrm_reg == VCPU_SREG_SS)
3769 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3770
3771 /* Disable writeback. */
3772 ctxt->dst.type = OP_NONE;
3773 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3774}
3775
3776static int em_sldt(struct x86_emulate_ctxt *ctxt)
3777{
3778 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3779}
3780
3781static int em_lldt(struct x86_emulate_ctxt *ctxt)
3782{
3783 u16 sel = ctxt->src.val;
3784
3785
3786 ctxt->dst.type = OP_NONE;
3787 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3788}
3789
3790static int em_str(struct x86_emulate_ctxt *ctxt)
3791{
3792 return em_store_sreg(ctxt, VCPU_SREG_TR);
3793}
3794
3795static int em_ltr(struct x86_emulate_ctxt *ctxt)
3796{
3797 u16 sel = ctxt->src.val;
3798
3799
3800 ctxt->dst.type = OP_NONE;
3801 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3802}
3803
3804static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3805{
3806 int rc;
3807 ulong linear;
3808
3809 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3810 if (rc == X86EMUL_CONTINUE)
3811 ctxt->ops->invlpg(ctxt, linear);
3812
3813 ctxt->dst.type = OP_NONE;
3814 return X86EMUL_CONTINUE;
3815}
3816
3817static int em_clts(struct x86_emulate_ctxt *ctxt)
3818{
3819 ulong cr0;
3820
3821 cr0 = ctxt->ops->get_cr(ctxt, 0);
3822 cr0 &= ~X86_CR0_TS;
3823 ctxt->ops->set_cr(ctxt, 0, cr0);
3824 return X86EMUL_CONTINUE;
3825}
3826
3827static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3828{
3829 int rc = ctxt->ops->fix_hypercall(ctxt);
3830
3831 if (rc != X86EMUL_CONTINUE)
3832 return rc;
3833
3834 /* Let the processor re-execute the fixed hypercall */
3835 ctxt->_eip = ctxt->eip;
3836 /* Disable writeback. */
3837 ctxt->dst.type = OP_NONE;
3838 return X86EMUL_CONTINUE;
3839}
3840
3841static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3842 void (*get)(struct x86_emulate_ctxt *ctxt,
3843 struct desc_ptr *ptr))
3844{
3845 struct desc_ptr desc_ptr;
3846
3847 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3848 ctxt->ops->cpl(ctxt) > 0)
3849 return emulate_gp(ctxt, 0);
3850
3851 if (ctxt->mode == X86EMUL_MODE_PROT64)
3852 ctxt->op_bytes = 8;
3853 get(ctxt, &desc_ptr);
3854 if (ctxt->op_bytes == 2) {
3855 ctxt->op_bytes = 4;
3856 desc_ptr.address &= 0x00ffffff;
3857 }
3858
3859 ctxt->dst.type = OP_NONE;
3860 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3861 &desc_ptr, 2 + ctxt->op_bytes);
3862}
3863
3864static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3865{
3866 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3867}
3868
3869static int em_sidt(struct x86_emulate_ctxt *ctxt)
3870{
3871 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3872}
3873
3874static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3875{
3876 struct desc_ptr desc_ptr;
3877 int rc;
3878
3879 if (ctxt->mode == X86EMUL_MODE_PROT64)
3880 ctxt->op_bytes = 8;
3881 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3882 &desc_ptr.size, &desc_ptr.address,
3883 ctxt->op_bytes);
3884 if (rc != X86EMUL_CONTINUE)
3885 return rc;
3886 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3887 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3888 return emulate_gp(ctxt, 0);
3889 if (lgdt)
3890 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3891 else
3892 ctxt->ops->set_idt(ctxt, &desc_ptr);
3893
3894 ctxt->dst.type = OP_NONE;
3895 return X86EMUL_CONTINUE;
3896}
3897
3898static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3899{
3900 return em_lgdt_lidt(ctxt, true);
3901}
3902
3903static int em_lidt(struct x86_emulate_ctxt *ctxt)
3904{
3905 return em_lgdt_lidt(ctxt, false);
3906}
3907
3908static int em_smsw(struct x86_emulate_ctxt *ctxt)
3909{
3910 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3911 ctxt->ops->cpl(ctxt) > 0)
3912 return emulate_gp(ctxt, 0);
3913
3914 if (ctxt->dst.type == OP_MEM)
3915 ctxt->dst.bytes = 2;
3916 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3917 return X86EMUL_CONTINUE;
3918}
3919
3920static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3921{
3922 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3923 | (ctxt->src.val & 0x0f));
3924 ctxt->dst.type = OP_NONE;
3925 return X86EMUL_CONTINUE;
3926}
3927
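/*
 * LOOP/LOOPE/LOOPNE: decrement rCX (masked by the address size) and take the
 * relative branch if the result is non-zero and, for LOOPE/LOOPNE, the ZF
 * condition encoded in the low opcode bits matches.
 */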
3928static int em_loop(struct x86_emulate_ctxt *ctxt)
3929{
3930 int rc = X86EMUL_CONTINUE;
3931
3932 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3933 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3934 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3935 rc = jmp_rel(ctxt, ctxt->src.val);
3936
3937 return rc;
3938}
3939
3940static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3941{
3942 int rc = X86EMUL_CONTINUE;
3943
3944 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3945 rc = jmp_rel(ctxt, ctxt->src.val);
3946
3947 return rc;
3948}
3949
3950static int em_in(struct x86_emulate_ctxt *ctxt)
3951{
3952 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3953 &ctxt->dst.val))
3954 return X86EMUL_IO_NEEDED;
3955
3956 return X86EMUL_CONTINUE;
3957}
3958
3959static int em_out(struct x86_emulate_ctxt *ctxt)
3960{
3961 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3962 &ctxt->src.val, 1);
3963
3964 ctxt->dst.type = OP_NONE;
3965 return X86EMUL_CONTINUE;
3966}
3967
3968static int em_cli(struct x86_emulate_ctxt *ctxt)
3969{
3970 if (emulator_bad_iopl(ctxt))
3971 return emulate_gp(ctxt, 0);
3972
3973 ctxt->eflags &= ~X86_EFLAGS_IF;
3974 return X86EMUL_CONTINUE;
3975}
3976
3977static int em_sti(struct x86_emulate_ctxt *ctxt)
3978{
3979 if (emulator_bad_iopl(ctxt))
3980 return emulate_gp(ctxt, 0);
3981
3982 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3983 ctxt->eflags |= X86_EFLAGS_IF;
3984 return X86EMUL_CONTINUE;
3985}
3986
3987static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3988{
3989 u32 eax, ebx, ecx, edx;
3990 u64 msr = 0;
3991
3992 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3993 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3994 ctxt->ops->cpl(ctxt)) {
3995 return emulate_gp(ctxt, 0);
3996 }
3997
3998 eax = reg_read(ctxt, VCPU_REGS_RAX);
3999 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4000 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4001 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
4002 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
4003 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
4004 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
4005 return X86EMUL_CONTINUE;
4006}
4007
4008static int em_sahf(struct x86_emulate_ctxt *ctxt)
4009{
4010 u32 flags;
4011
4012 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
4013 X86_EFLAGS_SF;
4014 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
4015
4016 ctxt->eflags &= ~0xffUL;
4017 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
4018 return X86EMUL_CONTINUE;
4019}
4020
4021static int em_lahf(struct x86_emulate_ctxt *ctxt)
4022{
4023 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
4024 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
4025 return X86EMUL_CONTINUE;
4026}
4027
4028static int em_bswap(struct x86_emulate_ctxt *ctxt)
4029{
4030 switch (ctxt->op_bytes) {
4031#ifdef CONFIG_X86_64
4032 case 8:
4033 asm("bswap %0" : "+r"(ctxt->dst.val));
4034 break;
4035#endif
4036 default:
4037 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4038 break;
4039 }
4040 return X86EMUL_CONTINUE;
4041}
4042
4043static int em_clflush(struct x86_emulate_ctxt *ctxt)
4044{
4045 /* emulating clflush regardless of cpuid */
4046 return X86EMUL_CONTINUE;
4047}
4048
4049static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
4050{
4051 /* emulating clflushopt regardless of cpuid */
4052 return X86EMUL_CONTINUE;
4053}
4054
4055static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4056{
4057 ctxt->dst.val = (s32) ctxt->src.val;
4058 return X86EMUL_CONTINUE;
4059}
4060
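/*
 * Common checks for FXSAVE/FXRSTOR: #UD if the guest does not have FXSR,
 * #NM if CR0.TS or CR0.EM is set.  64-bit mode is rejected; see the format
 * comment above em_fxsave().
 */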
4061static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4062{
4063 if (!ctxt->ops->guest_has_fxsr(ctxt))
4064 return emulate_ud(ctxt);
4065
4066 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4067 return emulate_nm(ctxt);
4068
4069 /*
4070  * Don't emulate a case that should never be hit, instead of working
4071  * around a lack of fxsave64/fxrstor64 in the emulator code.
4072  */
4073 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4074 return X86EMUL_UNHANDLEABLE;
4075
4076 return X86EMUL_CONTINUE;
4077}
4078
4079 /*
4080  * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does
4081  * save and restore MXCSR.
4082  */
4083static size_t __fxstate_size(int nregs)
4084{
4085 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4086}
4087
4088static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4089{
4090 bool cr4_osfxsr;
4091 if (ctxt->mode == X86EMUL_MODE_PROT64)
4092 return __fxstate_size(16);
4093
4094 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4095 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4096}
4097
4098 /*
4099  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode:
4100  *  1) 16-bit mode
4101  *  2) 32-bit mode
4102  *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
4103  *       preserve the whole 32-bit values, though, so (1) and (2) are the
4104  *       same wrt. FXSAVE.
4105  *  3) 64-bit mode without REX.W prefix
4106  *     - like (2), but XMM8-15 are saved and restored as well
4107  *  4) 64-bit mode with REX.W prefix
4108  *     - like (3), but FIP and FDP are 64 bit
4109  *
4110  * Emulation uses (3) for (1) and (2) and preserves XMM8-15 to reach the
4111  * desired result.  (4) is not emulated; see check_fxsr().
4112  *
4113  * Note: guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4114  * and FPU DS) should match.
4115  */
4116static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4117{
4118 struct fxregs_state fx_state;
4119 int rc;
4120
4121 rc = check_fxsr(ctxt);
4122 if (rc != X86EMUL_CONTINUE)
4123 return rc;
4124
4125 emulator_get_fpu();
4126
4127 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4128
4129 emulator_put_fpu();
4130
4131 if (rc != X86EMUL_CONTINUE)
4132 return rc;
4133
4134 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4135 fxstate_size(ctxt));
4136}
4137
4138 /*
4139  * FXRSTOR might restore XMM registers not provided by the guest. Fill
4140  * in the host registers (via FXSAVE) instead, so they won't be modified.
4141  * (preemption has to stay disabled until FXRSTOR).
4142  *
4143  * Use noinline to keep the stack for other functions called by callers small.
4144  */
4145static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4146 const size_t used_size)
4147{
4148 struct fxregs_state fx_tmp;
4149 int rc;
4150
4151 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4152 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4153 __fxstate_size(16) - used_size);
4154
4155 return rc;
4156}
4157
4158static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4159{
4160 struct fxregs_state fx_state;
4161 int rc;
4162 size_t size;
4163
4164 rc = check_fxsr(ctxt);
4165 if (rc != X86EMUL_CONTINUE)
4166 return rc;
4167
4168 size = fxstate_size(ctxt);
4169 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4170 if (rc != X86EMUL_CONTINUE)
4171 return rc;
4172
4173 emulator_get_fpu();
4174
4175 if (size < __fxstate_size(16)) {
4176 rc = fxregs_fixup(&fx_state, size);
4177 if (rc != X86EMUL_CONTINUE)
4178 goto out;
4179 }
4180
4181 if (fx_state.mxcsr >> 16) {
4182 rc = emulate_gp(ctxt, 0);
4183 goto out;
4184 }
4185
4186 if (rc == X86EMUL_CONTINUE)
4187 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4188
4189out:
4190 emulator_put_fpu();
4191
4192 return rc;
4193}
4194
4195static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4196{
4197 u32 eax, ecx, edx;
4198
4199 eax = reg_read(ctxt, VCPU_REGS_RAX);
4200 edx = reg_read(ctxt, VCPU_REGS_RDX);
4201 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4202
4203 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4204 return emulate_gp(ctxt, 0);
4205
4206 return X86EMUL_CONTINUE;
4207}
4208
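/* Only CR0, CR2-CR4 and CR8 exist; any other encoding raises #UD. */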
4209static bool valid_cr(int nr)
4210{
4211 switch (nr) {
4212 case 0:
4213 case 2 ... 4:
4214 case 8:
4215 return true;
4216 default:
4217 return false;
4218 }
4219}
4220
4221static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4222{
4223 if (!valid_cr(ctxt->modrm_reg))
4224 return emulate_ud(ctxt);
4225
4226 return X86EMUL_CONTINUE;
4227}
4228
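/*
 * Pre-check a mov-to-CR: reject values with reserved bits set and enforce
 * the architectural consistency rules (CR0.PG needs CR0.PE, long mode needs
 * CR4.PAE, CR3 must fit within the guest's physical address width).
 */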
4229static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4230{
4231 u64 new_val = ctxt->src.val64;
4232 int cr = ctxt->modrm_reg;
4233 u64 efer = 0;
4234
4235 static u64 cr_reserved_bits[] = {
4236 0xffffffff00000000ULL,
4237 0, 0, 0,
4238 CR4_RESERVED_BITS,
4239 0, 0, 0,
4240 CR8_RESERVED_BITS,
4241 };
4242
4243 if (!valid_cr(cr))
4244 return emulate_ud(ctxt);
4245
4246 if (new_val & cr_reserved_bits[cr])
4247 return emulate_gp(ctxt, 0);
4248
4249 switch (cr) {
4250 case 0: {
4251 u64 cr4;
4252 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4253 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4254 return emulate_gp(ctxt, 0);
4255
4256 cr4 = ctxt->ops->get_cr(ctxt, 4);
4257 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4258
4259 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4260 !(cr4 & X86_CR4_PAE))
4261 return emulate_gp(ctxt, 0);
4262
4263 break;
4264 }
4265 case 3: {
4266 u64 rsvd = 0;
4267
4268 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4269 if (efer & EFER_LMA) {
4270 u64 maxphyaddr;
4271 u32 eax, ebx, ecx, edx;
4272
4273 eax = 0x80000008;
4274 ecx = 0;
4275 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4276 &edx, true))
4277 maxphyaddr = eax & 0xff;
4278 else
4279 maxphyaddr = 36;
4280 rsvd = rsvd_bits(maxphyaddr, 63);
4281 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4282 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4283 }
4284
4285 if (new_val & rsvd)
4286 return emulate_gp(ctxt, 0);
4287
4288 break;
4289 }
4290 case 4: {
4291 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4292
4293 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4294 return emulate_gp(ctxt, 0);
4295
4296 break;
4297 }
4298 }
4299
4300 return X86EMUL_CONTINUE;
4301}
4302
4303static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4304{
4305 unsigned long dr7;
4306
4307 ctxt->ops->get_dr(ctxt, 7, &dr7);
4308
4309 /* Check if DR7.Global_Enable is set */
4310 return dr7 & (1 << 13);
4311}
4312
4313static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4314{
4315 int dr = ctxt->modrm_reg;
4316 u64 cr4;
4317
4318 if (dr > 7)
4319 return emulate_ud(ctxt);
4320
4321 cr4 = ctxt->ops->get_cr(ctxt, 4);
4322 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4323 return emulate_ud(ctxt);
4324
4325 if (check_dr7_gd(ctxt)) {
4326 ulong dr6;
4327
4328 ctxt->ops->get_dr(ctxt, 6, &dr6);
4329 dr6 &= ~DR_TRAP_BITS;
4330 dr6 |= DR6_BD | DR6_RTM;
4331 ctxt->ops->set_dr(ctxt, 6, dr6);
4332 return emulate_db(ctxt);
4333 }
4334
4335 return X86EMUL_CONTINUE;
4336}
4337
4338static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4339{
4340 u64 new_val = ctxt->src.val64;
4341 int dr = ctxt->modrm_reg;
4342
4343 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4344 return emulate_gp(ctxt, 0);
4345
4346 return check_dr_read(ctxt);
4347}
4348
4349static int check_svme(struct x86_emulate_ctxt *ctxt)
4350{
4351 u64 efer = 0;
4352
4353 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4354
4355 if (!(efer & EFER_SVME))
4356 return emulate_ud(ctxt);
4357
4358 return X86EMUL_CONTINUE;
4359}
4360
4361static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4362{
4363 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4364
4365 /* Valid physical address? */
4366 if (rax & 0xffff000000000000ULL)
4367 return emulate_gp(ctxt, 0);
4368
4369 return check_svme(ctxt);
4370}
4371
4372static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4373{
4374 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4375
4376 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4377 return emulate_ud(ctxt);
4378
4379 return X86EMUL_CONTINUE;
4380}
4381
4382static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4383{
4384 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4385 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4386
4387 /*
4388  * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4389  * in Ring3 when CR4.PCE=0.
4390  */
4391 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4392 return X86EMUL_CONTINUE;
4393
4394 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4395 ctxt->ops->check_pmc(ctxt, rcx))
4396 return emulate_gp(ctxt, 0);
4397
4398 return X86EMUL_CONTINUE;
4399}
4400
4401static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4402{
4403 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4404 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4405 return emulate_gp(ctxt, 0);
4406
4407 return X86EMUL_CONTINUE;
4408}
4409
4410static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4411{
4412 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4413 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4414 return emulate_gp(ctxt, 0);
4415
4416 return X86EMUL_CONTINUE;
4417}
4418
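/*
 * Shorthand for the opcode tables below: D() is a decode-only entry, N an
 * unimplemented one, I()/F() bind an emulation callback or fastop, G()/GD()/
 * E()/GP() redirect through group, group-dual, escape and prefix tables, and
 * the *2bv/F6ALU variants expand into the usual byte/larger operand pairs.
 */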
4419#define D(_y) { .flags = (_y) }
4420#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4421#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4422 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4423#define N D(NotImpl)
4424#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4425#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4426#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4427#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4428#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4429#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4430#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4431#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4432#define II(_f, _e, _i) \
4433 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4434#define IIP(_f, _e, _i, _p) \
4435 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4436 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4437#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4438
4439#define D2bv(_f) D((_f) | ByteOp), D(_f)
4440#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4441#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4442#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4443#define I2bvIP(_f, _e, _i, _p) \
4444 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4445
4446#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4447 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4448 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4449
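/*
 * group7_rm0..group7_rm7 are reached through the RMExt entries in group7's
 * mod==3 half below and are indexed by the ModRM.rm field.
 */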
4450static const struct opcode group7_rm0[] = {
4451 N,
4452 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4453 N, N, N, N, N, N,
4454};
4455
4456static const struct opcode group7_rm1[] = {
4457 DI(SrcNone | Priv, monitor),
4458 DI(SrcNone | Priv, mwait),
4459 N, N, N, N, N, N,
4460};
4461
4462static const struct opcode group7_rm2[] = {
4463 N,
4464 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4465 N, N, N, N, N, N,
4466};
4467
4468static const struct opcode group7_rm3[] = {
4469 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4470 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4471 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4472 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4473 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4474 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4475 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4476 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4477};
4478
4479static const struct opcode group7_rm7[] = {
4480 N,
4481 DIP(SrcNone, rdtscp, check_rdtsc),
4482 N, N, N, N, N, N,
4483};
4484
4485static const struct opcode group1[] = {
4486 F(Lock, em_add),
4487 F(Lock | PageTable, em_or),
4488 F(Lock, em_adc),
4489 F(Lock, em_sbb),
4490 F(Lock | PageTable, em_and),
4491 F(Lock, em_sub),
4492 F(Lock, em_xor),
4493 F(NoWrite, em_cmp),
4494};
4495
4496static const struct opcode group1A[] = {
4497 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4498};
4499
4500static const struct opcode group2[] = {
4501 F(DstMem | ModRM, em_rol),
4502 F(DstMem | ModRM, em_ror),
4503 F(DstMem | ModRM, em_rcl),
4504 F(DstMem | ModRM, em_rcr),
4505 F(DstMem | ModRM, em_shl),
4506 F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),	/* the /6 encoding is an alias of SHL */
4508 F(DstMem | ModRM, em_sar),
4509};
4510
4511static const struct opcode group3[] = {
4512 F(DstMem | SrcImm | NoWrite, em_test),
4513 F(DstMem | SrcImm | NoWrite, em_test),
4514 F(DstMem | SrcNone | Lock, em_not),
4515 F(DstMem | SrcNone | Lock, em_neg),
4516 F(DstXacc | Src2Mem, em_mul_ex),
4517 F(DstXacc | Src2Mem, em_imul_ex),
4518 F(DstXacc | Src2Mem, em_div_ex),
4519 F(DstXacc | Src2Mem, em_idiv_ex),
4520};
4521
4522static const struct opcode group4[] = {
4523 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4524 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4525 N, N, N, N, N, N,
4526};
4527
4528static const struct opcode group5[] = {
4529 F(DstMem | SrcNone | Lock, em_inc),
4530 F(DstMem | SrcNone | Lock, em_dec),
4531 I(SrcMem | NearBranch, em_call_near_abs),
4532 I(SrcMemFAddr | ImplicitOps, em_call_far),
4533 I(SrcMem | NearBranch, em_jmp_abs),
4534 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4535 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4536};
4537
4538static const struct opcode group6[] = {
4539 II(Prot | DstMem, em_sldt, sldt),
4540 II(Prot | DstMem, em_str, str),
4541 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4542 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4543 N, N, N, N,
4544};
4545
4546static const struct group_dual group7 = { {
4547 II(Mov | DstMem, em_sgdt, sgdt),
4548 II(Mov | DstMem, em_sidt, sidt),
4549 II(SrcMem | Priv, em_lgdt, lgdt),
4550 II(SrcMem | Priv, em_lidt, lidt),
4551 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4552 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4553 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4554}, {
4555 EXT(0, group7_rm0),
4556 EXT(0, group7_rm1),
4557 EXT(0, group7_rm2),
4558 EXT(0, group7_rm3),
4559 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4560 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4561 EXT(0, group7_rm7),
4562} };
4563
4564static const struct opcode group8[] = {
4565 N, N, N, N,
4566 F(DstMem | SrcImmByte | NoWrite, em_bt),
4567 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4568 F(DstMem | SrcImmByte | Lock, em_btr),
4569 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 = {
4577 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4578};
4579
4580
4581static const struct group_dual group9 = { {
4582 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4583}, {
4584 N, N, N, N, N, N, N,
4585 GP(0, &pfx_0f_c7_7),
4586} };
4587
4588static const struct opcode group11[] = {
4589 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4590 X7(D(Undefined)),
4591};
4592
4593static const struct gprefix pfx_0f_ae_7 = {
4594 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4595};
4596
4597static const struct group_dual group15 = { {
4598 I(ModRM | Aligned16, em_fxsave),
4599 I(ModRM | Aligned16, em_fxrstor),
4600 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4601}, {
4602 N, N, N, N, N, N, N, N,
4603} };
4604
4605static const struct gprefix pfx_0f_6f_0f_7f = {
4606 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4607};
4608
4609static const struct instr_dual instr_dual_0f_2b = {
4610 I(0, em_mov), N
4611};
4612
4613static const struct gprefix pfx_0f_2b = {
4614 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4615};
4616
4617static const struct gprefix pfx_0f_10_0f_11 = {
4618 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4619};
4620
4621static const struct gprefix pfx_0f_28_0f_29 = {
4622 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4623};
4624
4625static const struct gprefix pfx_0f_e7 = {
4626 N, I(Sse, em_mov), N, N,
4627};
4628
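/*
 * x87 escape tables (0xd9/0xdb/0xdd): the first half is indexed by ModRM.reg
 * for the memory forms, the second half by the full ModRM byte (0xc0-0xff)
 * for the register forms.
 */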
4629static const struct escape escape_d9 = { {
4630 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4631}, {
4632
4633 N, N, N, N, N, N, N, N,
4634
4635 N, N, N, N, N, N, N, N,
4636
4637 N, N, N, N, N, N, N, N,
4638
4639 N, N, N, N, N, N, N, N,
4640
4641 N, N, N, N, N, N, N, N,
4642
4643 N, N, N, N, N, N, N, N,
4644
4645 N, N, N, N, N, N, N, N,
4646
4647 N, N, N, N, N, N, N, N,
4648} };
4649
4650static const struct escape escape_db = { {
4651 N, N, N, N, N, N, N, N,
4652}, {
4653
4654 N, N, N, N, N, N, N, N,
4655
4656 N, N, N, N, N, N, N, N,
4657
4658 N, N, N, N, N, N, N, N,
4659
4660 N, N, N, N, N, N, N, N,
4661
4662 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4663
4664 N, N, N, N, N, N, N, N,
4665
4666 N, N, N, N, N, N, N, N,
4667
4668 N, N, N, N, N, N, N, N,
4669} };
4670
4671static const struct escape escape_dd = { {
4672 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4673}, {
4674
4675 N, N, N, N, N, N, N, N,
4676
4677 N, N, N, N, N, N, N, N,
4678
4679 N, N, N, N, N, N, N, N,
4680
4681 N, N, N, N, N, N, N, N,
4682
4683 N, N, N, N, N, N, N, N,
4684
4685 N, N, N, N, N, N, N, N,
4686
4687 N, N, N, N, N, N, N, N,
4688
4689 N, N, N, N, N, N, N, N,
4690} };
4691
4692static const struct instr_dual instr_dual_0f_c3 = {
4693 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4694};
4695
4696static const struct mode_dual mode_dual_63 = {
4697 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4698};
4699
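/*
 * Primary one-byte opcode map, indexed by the opcode byte.  Two-byte (0x0f)
 * and three-byte (0x0f 0x38) opcodes are looked up in twobyte_table and
 * opcode_map_0f_38 further down.
 */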
4700static const struct opcode opcode_table[256] = {
4701
4702 F6ALU(Lock, em_add),
4703 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4704 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4705
4706 F6ALU(Lock | PageTable, em_or),
4707 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4708 N,
4709
4710 F6ALU(Lock, em_adc),
4711 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4712 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4713
4714 F6ALU(Lock, em_sbb),
4715 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4716 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4717
4718 F6ALU(Lock | PageTable, em_and), N, N,
4719
4720 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4721
4722 F6ALU(Lock, em_xor), N, N,
4723
4724 F6ALU(NoWrite, em_cmp), N, N,
4725
4726 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4727
4728 X8(I(SrcReg | Stack, em_push)),
4729
4730 X8(I(DstReg | Stack, em_pop)),
4731
4732 I(ImplicitOps | Stack | No64, em_pusha),
4733 I(ImplicitOps | Stack | No64, em_popa),
4734 N, MD(ModRM, &mode_dual_63),
4735 N, N, N, N,
4736
4737 I(SrcImm | Mov | Stack, em_push),
4738 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4739 I(SrcImmByte | Mov | Stack, em_push),
4740 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4741 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4742 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
4743
4744 X16(D(SrcImmByte | NearBranch)),
4745
4746 G(ByteOp | DstMem | SrcImm, group1),
4747 G(DstMem | SrcImm, group1),
4748 G(ByteOp | DstMem | SrcImm | No64, group1),
4749 G(DstMem | SrcImmByte, group1),
4750 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4751 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4752
4753 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4754 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4755 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4756 D(ModRM | SrcMem | NoAccess | DstReg),
4757 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4758 G(0, group1A),
4759
4760 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4761
4762 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4763 I(SrcImmFAddr | No64, em_call_far), N,
4764 II(ImplicitOps | Stack, em_pushf, pushf),
4765 II(ImplicitOps | Stack, em_popf, popf),
4766 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4767
4768 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4769 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4770 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4771 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4772
4773 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4774 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4775 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4776 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4777
4778 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4779
4780 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4781
4782 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4783 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4784 I(ImplicitOps | NearBranch, em_ret),
4785 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4786 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4787 G(ByteOp, group11), G(0, group11),
4788
4789 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4790 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4791 I(ImplicitOps, em_ret_far),
4792 D(ImplicitOps), DI(SrcImmByte, intn),
4793 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4794
4795 G(Src2One | ByteOp, group2), G(Src2One, group2),
4796 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4797 I(DstAcc | SrcImmUByte | No64, em_aam),
4798 I(DstAcc | SrcImmUByte | No64, em_aad),
4799 F(DstAcc | ByteOp | No64, em_salc),
4800 I(DstAcc | SrcXLat | ByteOp, em_mov),
4801
4802 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4803
4804 X3(I(SrcImmByte | NearBranch, em_loop)),
4805 I(SrcImmByte | NearBranch, em_jcxz),
4806 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4807 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4808
4809 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4810 I(SrcImmFAddr | No64, em_jmp_far),
4811 D(SrcImmByte | ImplicitOps | NearBranch),
4812 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4813 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4814
4815 N, DI(ImplicitOps, icebp), N, N,
4816 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4817 G(ByteOp, group3), G(0, group3),
4818
4819 D(ImplicitOps), D(ImplicitOps),
4820 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4821 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4822};
4823
4824static const struct opcode twobyte_table[256] = {
4825
4826 G(0, group6), GD(0, &group7), N, N,
4827 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4828 II(ImplicitOps | Priv, em_clts, clts), N,
4829 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4830 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4831
4832 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4833 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4834 N, N, N, N, N, N,
4835 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4836 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4837 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4838 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4839 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4840 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4841
4842 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4843 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4844 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4845 check_cr_write),
4846 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4847 check_dr_write),
4848 N, N, N, N,
4849 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4850 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4851 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4852 N, N, N, N,
4853
4854 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4855 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4856 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4857 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4858 I(ImplicitOps | EmulateOnUD, em_sysenter),
4859 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4860 N, N,
4861 N, N, N, N, N, N, N, N,
4862
4863 X16(D(DstReg | SrcMem | ModRM)),
4864
4865 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4866
4867 N, N, N, N,
4868 N, N, N, N,
4869 N, N, N, N,
4870 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4871
4872 N, N, N, N,
4873 N, N, N, N,
4874 N, N, N, N,
4875 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4876
4877 X16(D(SrcImm | NearBranch)),
4878
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4880
4881 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4882 II(ImplicitOps, em_cpuid, cpuid),
4883 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4884 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4885 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4886
4887 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4888 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4889 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4890 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4891 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4892 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4893
4894 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4895 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4896 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4897 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4898 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4899 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4900
4901 N, N,
4902 G(BitOp, group8),
4903 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4904 I(DstReg | SrcMem | ModRM, em_bsf_c),
4905 I(DstReg | SrcMem | ModRM, em_bsr_c),
4906 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4907
4908 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4909 N, ID(0, &instr_dual_0f_c3),
4910 N, N, N, GD(0, &group9),
4911
4912 X8(I(DstReg, em_bswap)),
4913
4914 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4915
4916 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4917 N, N, N, N, N, N, N, N,
4918
4919 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4920};
4921
4922static const struct instr_dual instr_dual_0f_38_f0 = {
4923 I(DstReg | SrcMem | Mov, em_movbe), N
4924};
4925
4926static const struct instr_dual instr_dual_0f_38_f1 = {
4927 I(DstMem | SrcReg | Mov, em_movbe), N
4928};
4929
4930static const struct gprefix three_byte_0f_38_f0 = {
4931 ID(0, &instr_dual_0f_38_f0), N, N, N
4932};
4933
4934static const struct gprefix three_byte_0f_38_f1 = {
4935 ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Three-byte opcodes: 0x0f 0x38 <opcode>.  Only the MOVBE encodings at
 * 0xf0/0xf1 are implemented here; every other slot is undefined.
 */
static const struct opcode opcode_map_0f_38[256] = {
4943
4944 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4945
4946 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4947
4948 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4949 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4950
4951 N, N, X4(N), X8(N)
4952};
4953
4954#undef D
4955#undef N
4956#undef G
4957#undef GD
4958#undef I
4959#undef GP
4960#undef EXT
4961#undef MD
4962#undef ID
4963
4964#undef D2bv
4965#undef D2bvIP
4966#undef I2bv
4967#undef I2bvIP
#undef F6ALU
4969
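/*
 * Immediates are at most 4 bytes: a 64-bit operand size still fetches a
 * 4-byte immediate, which decode_imm() then sign- or zero-extends.
 */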
4970static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4971{
4972 unsigned size;
4973
4974 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4975 if (size == 8)
4976 size = 4;
4977 return size;
4978}
4979
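/*
 * Fetch an immediate operand of 'size' bytes from the instruction stream at
 * _eip, sign-extending it unless the caller asked for zero extension.
 */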
4980static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4981 unsigned size, bool sign_extension)
4982{
4983 int rc = X86EMUL_CONTINUE;
4984
4985 op->type = OP_IMM;
4986 op->bytes = size;
4987 op->addr.mem.ea = ctxt->_eip;
4988
4989 switch (op->bytes) {
4990 case 1:
4991 op->val = insn_fetch(s8, ctxt);
4992 break;
4993 case 2:
4994 op->val = insn_fetch(s16, ctxt);
4995 break;
4996 case 4:
4997 op->val = insn_fetch(s32, ctxt);
4998 break;
4999 case 8:
5000 op->val = insn_fetch(s64, ctxt);
5001 break;
5002 }
5003 if (!sign_extension) {
5004 switch (op->bytes) {
5005 case 1:
5006 op->val &= 0xff;
5007 break;
5008 case 2:
5009 op->val &= 0xffff;
5010 break;
5011 case 4:
5012 op->val &= 0xffffffff;
5013 break;
5014 }
5015 }
5016done:
5017 return rc;
5018}
5019
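/*
 * Decode a single operand: 'd' is one of the Op* selectors extracted from
 * ctxt->d, and the decoded operand is written into *op.
 */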
5020static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
5021 unsigned d)
5022{
5023 int rc = X86EMUL_CONTINUE;
5024
5025 switch (d) {
5026 case OpReg:
5027 decode_register_operand(ctxt, op);
5028 break;
5029 case OpImmUByte:
5030 rc = decode_imm(ctxt, op, 1, false);
5031 break;
5032 case OpMem:
5033 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5034 mem_common:
5035 *op = ctxt->memop;
5036 ctxt->memopp = op;
5037 if (ctxt->d & BitOp)
5038 fetch_bit_operand(ctxt);
5039 op->orig_val = op->val;
5040 break;
5041 case OpMem64:
5042 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
5043 goto mem_common;
5044 case OpAcc:
5045 op->type = OP_REG;
5046 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5047 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5048 fetch_register_operand(op);
5049 op->orig_val = op->val;
5050 break;
5051 case OpAccLo:
5052 op->type = OP_REG;
5053 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
5054 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5055 fetch_register_operand(op);
5056 op->orig_val = op->val;
5057 break;
5058 case OpAccHi:
5059 if (ctxt->d & ByteOp) {
5060 op->type = OP_NONE;
5061 break;
5062 }
5063 op->type = OP_REG;
5064 op->bytes = ctxt->op_bytes;
5065 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5066 fetch_register_operand(op);
5067 op->orig_val = op->val;
5068 break;
5069 case OpDI:
5070 op->type = OP_MEM;
5071 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5072 op->addr.mem.ea =
5073 register_address(ctxt, VCPU_REGS_RDI);
5074 op->addr.mem.seg = VCPU_SREG_ES;
5075 op->val = 0;
5076 op->count = 1;
5077 break;
5078 case OpDX:
5079 op->type = OP_REG;
5080 op->bytes = 2;
5081 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5082 fetch_register_operand(op);
5083 break;
5084 case OpCL:
5085 op->type = OP_IMM;
5086 op->bytes = 1;
5087 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5088 break;
5089 case OpImmByte:
5090 rc = decode_imm(ctxt, op, 1, true);
5091 break;
5092 case OpOne:
5093 op->type = OP_IMM;
5094 op->bytes = 1;
5095 op->val = 1;
5096 break;
5097 case OpImm:
5098 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5099 break;
5100 case OpImm64:
5101 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5102 break;
5103 case OpMem8:
5104 ctxt->memop.bytes = 1;
5105 if (ctxt->memop.type == OP_REG) {
5106 ctxt->memop.addr.reg = decode_register(ctxt,
5107 ctxt->modrm_rm, true);
5108 fetch_register_operand(&ctxt->memop);
5109 }
5110 goto mem_common;
5111 case OpMem16:
5112 ctxt->memop.bytes = 2;
5113 goto mem_common;
5114 case OpMem32:
5115 ctxt->memop.bytes = 4;
5116 goto mem_common;
5117 case OpImmU16:
5118 rc = decode_imm(ctxt, op, 2, false);
5119 break;
5120 case OpImmU:
5121 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5122 break;
5123 case OpSI:
5124 op->type = OP_MEM;
5125 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5126 op->addr.mem.ea =
5127 register_address(ctxt, VCPU_REGS_RSI);
5128 op->addr.mem.seg = ctxt->seg_override;
5129 op->val = 0;
5130 op->count = 1;
5131 break;
5132 case OpXLat:
5133 op->type = OP_MEM;
5134 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5135 op->addr.mem.ea =
5136 address_mask(ctxt,
5137 reg_read(ctxt, VCPU_REGS_RBX) +
5138 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5139 op->addr.mem.seg = ctxt->seg_override;
5140 op->val = 0;
5141 break;
5142 case OpImmFAddr:
5143 op->type = OP_IMM;
5144 op->addr.mem.ea = ctxt->_eip;
5145 op->bytes = ctxt->op_bytes + 2;
5146 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5147 break;
5148 case OpMemFAddr:
5149 ctxt->memop.bytes = ctxt->op_bytes + 2;
5150 goto mem_common;
5151 case OpES:
5152 op->type = OP_IMM;
5153 op->val = VCPU_SREG_ES;
5154 break;
5155 case OpCS:
5156 op->type = OP_IMM;
5157 op->val = VCPU_SREG_CS;
5158 break;
5159 case OpSS:
5160 op->type = OP_IMM;
5161 op->val = VCPU_SREG_SS;
5162 break;
5163 case OpDS:
5164 op->type = OP_IMM;
5165 op->val = VCPU_SREG_DS;
5166 break;
5167 case OpFS:
5168 op->type = OP_IMM;
5169 op->val = VCPU_SREG_FS;
5170 break;
5171 case OpGS:
5172 op->type = OP_IMM;
5173 op->val = VCPU_SREG_GS;
5174 break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
5179 break;
5180 }
5181
5182done:
5183 return rc;
5184}
5185
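/*
 * Decode the instruction at ctxt->eip: consume legacy and REX prefixes, look
 * the opcode up in the tables above, resolve group/prefix/escape redirections
 * and decode up to three operands.  Returns EMULATION_OK on success.
 */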
5186int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5187{
5188 int rc = X86EMUL_CONTINUE;
5189 int mode = ctxt->mode;
5190 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5191 bool op_prefix = false;
5192 bool has_seg_override = false;
5193 struct opcode opcode;
5194 u16 dummy;
5195 struct desc_struct desc;
5196
5197 ctxt->memop.type = OP_NONE;
5198 ctxt->memopp = NULL;
5199 ctxt->_eip = ctxt->eip;
5200 ctxt->fetch.ptr = ctxt->fetch.data;
5201 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5202 ctxt->opcode_len = 1;
5203 ctxt->intercept = x86_intercept_none;
5204 if (insn_len > 0)
5205 memcpy(ctxt->fetch.data, insn, insn_len);
5206 else {
5207 rc = __do_insn_fetch_bytes(ctxt, 1);
5208 if (rc != X86EMUL_CONTINUE)
5209 goto done;
5210 }
5211
5212 switch (mode) {
5213 case X86EMUL_MODE_REAL:
5214 case X86EMUL_MODE_VM86:
5215 def_op_bytes = def_ad_bytes = 2;
5216 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5217 if (desc.d)
5218 def_op_bytes = def_ad_bytes = 4;
5219 break;
5220 case X86EMUL_MODE_PROT16:
5221 def_op_bytes = def_ad_bytes = 2;
5222 break;
5223 case X86EMUL_MODE_PROT32:
5224 def_op_bytes = def_ad_bytes = 4;
5225 break;
5226#ifdef CONFIG_X86_64
5227 case X86EMUL_MODE_PROT64:
5228 def_op_bytes = 4;
5229 def_ad_bytes = 8;
5230 break;
5231#endif
5232 default:
5233 return EMULATION_FAILED;
5234 }
5235
5236 ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
5241 switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
5254 break;
5255 case 0x26:
5256 has_seg_override = true;
5257 ctxt->seg_override = VCPU_SREG_ES;
5258 break;
5259 case 0x2e:
5260 has_seg_override = true;
5261 ctxt->seg_override = VCPU_SREG_CS;
5262 break;
5263 case 0x36:
5264 has_seg_override = true;
5265 ctxt->seg_override = VCPU_SREG_SS;
5266 break;
5267 case 0x3e:
5268 has_seg_override = true;
5269 ctxt->seg_override = VCPU_SREG_DS;
5270 break;
5271 case 0x64:
5272 has_seg_override = true;
5273 ctxt->seg_override = VCPU_SREG_FS;
5274 break;
5275 case 0x65:
5276 has_seg_override = true;
5277 ctxt->seg_override = VCPU_SREG_GS;
5278 break;
		case 0x40 ... 0x4f: /* REX */
5280 if (mode != X86EMUL_MODE_PROT64)
5281 goto done_prefixes;
5282 ctxt->rex_prefix = ctxt->b;
5283 continue;
		case 0xf0:	/* LOCK */
5285 ctxt->lock_prefix = 1;
5286 break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
5289 ctxt->rep_prefix = ctxt->b;
5290 break;
5291 default:
5292 goto done_prefixes;
		}

		/*
		 * Any legacy prefix after a REX prefix nullifies its effect.
		 */
		ctxt->rex_prefix = 0;
5298 }
5299
done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
5308
5309 if (ctxt->b == 0x0f) {
5310 ctxt->opcode_len = 2;
5311 ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
5316 ctxt->opcode_len = 3;
5317 ctxt->b = insn_fetch(u8, ctxt);
5318 opcode = opcode_map_0f_38[ctxt->b];
5319 }
5320 }
5321 ctxt->d = opcode.flags;
5322
5323 if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5328 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5329 ctxt->d = NotImpl;
5330 }
5331
5332 while (ctxt->d & GroupMask) {
5333 switch (ctxt->d & GroupMask) {
5334 case Group:
5335 goffset = (ctxt->modrm >> 3) & 7;
5336 opcode = opcode.u.group[goffset];
5337 break;
5338 case GroupDual:
5339 goffset = (ctxt->modrm >> 3) & 7;
5340 if ((ctxt->modrm >> 6) == 3)
5341 opcode = opcode.u.gdual->mod3[goffset];
5342 else
5343 opcode = opcode.u.gdual->mod012[goffset];
5344 break;
5345 case RMExt:
5346 goffset = ctxt->modrm & 7;
5347 opcode = opcode.u.group[goffset];
5348 break;
5349 case Prefix:
5350 if (ctxt->rep_prefix && op_prefix)
5351 return EMULATION_FAILED;
5352 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5353 switch (simd_prefix) {
5354 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5355 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5356 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5357 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5358 }
5359 break;
5360 case Escape:
5361 if (ctxt->modrm > 0xbf) {
5362 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5363 u32 index = array_index_nospec(
5364 ctxt->modrm - 0xc0, size);
5365
5366 opcode = opcode.u.esc->high[index];
5367 } else {
5368 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5369 }
5370 break;
5371 case InstrDual:
5372 if ((ctxt->modrm >> 6) == 3)
5373 opcode = opcode.u.idual->mod3;
5374 else
5375 opcode = opcode.u.idual->mod012;
5376 break;
5377 case ModeDual:
5378 if (ctxt->mode == X86EMUL_MODE_PROT64)
5379 opcode = opcode.u.mdual->mode64;
5380 else
5381 opcode = opcode.u.mdual->mode32;
5382 break;
5383 default:
5384 return EMULATION_FAILED;
5385 }
5386
5387 ctxt->d &= ~(u64)GroupMask;
5388 ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
5393 return EMULATION_FAILED;
5394
5395 ctxt->execute = opcode.u.execute;
5396
5397 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5398 return EMULATION_FAILED;
5399
5400 if (unlikely(ctxt->d &
5401 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
		      No16))) {
		/*
		 * These flags are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn().
		 */
		ctxt->check_perm = opcode.check_perm;
5408 ctxt->intercept = opcode.intercept;
5409
5410 if (ctxt->d & NotImpl)
5411 return EMULATION_FAILED;
5412
5413 if (mode == X86EMUL_MODE_PROT64) {
5414 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5415 ctxt->op_bytes = 8;
5416 else if (ctxt->d & NearBranch)
5417 ctxt->op_bytes = 8;
5418 }
5419
5420 if (ctxt->d & Op3264) {
5421 if (mode == X86EMUL_MODE_PROT64)
5422 ctxt->op_bytes = 8;
5423 else
5424 ctxt->op_bytes = 4;
5425 }
5426
5427 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5428 ctxt->op_bytes = 4;
5429
5430 if (ctxt->d & Sse)
5431 ctxt->op_bytes = 16;
5432 else if (ctxt->d & Mmx)
5433 ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
5438 rc = decode_modrm(ctxt, &ctxt->memop);
5439 if (!has_seg_override) {
5440 has_seg_override = true;
5441 ctxt->seg_override = ctxt->modrm_seg;
5442 }
5443 } else if (ctxt->d & MemAbs)
5444 rc = decode_abs(ctxt, &ctxt->memop);
5445 if (rc != X86EMUL_CONTINUE)
5446 goto done;
5447
5448 if (!has_seg_override)
5449 ctxt->seg_override = VCPU_SREG_DS;
5450
	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5458 if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5466 if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5471
5472 if (ctxt->rip_relative && likely(ctxt->memopp))
5473 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5474 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5475
5476done:
5477 if (rc == X86EMUL_PROPAGATE_FAULT)
5478 ctxt->have_exception = true;
5479 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5480}
5481
5482bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5483{
5484 return ctxt->d & PageTable;
5485}
5486
5487static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies for REPE
	 * and REPNE.  Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5497 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5498 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5499 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5500 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5501 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5502 return true;
5503
5504 return false;
5505}
5506
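/*
 * Force any pending x87 exception to be delivered as #MF to the guest before
 * MMX state is touched; fwait faults if an unmasked exception is pending.
 */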
5507static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5508{
5509 int rc;
5510
5511 emulator_get_fpu();
5512 rc = asm_safe("fwait");
5513 emulator_put_fpu();
5514
5515 if (unlikely(rc != X86EMUL_CONTINUE))
5516 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5517
5518 return X86EMUL_CONTINUE;
5519}
5520
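/* MMX operands live in the FPU register file; read the current MM register
 * value into the operand before emulation. */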
5521static void fetch_possible_mmx_operand(struct operand *op)
5522{
5523 if (op->type == OP_MM)
5524 read_mmx_reg(&op->mm_val, op->addr.mm);
5525}
5526
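/*
 * Dispatch to a fastop stub: the stubs for one instruction are laid out
 * FASTOP_SIZE bytes apart, one per operand size, so the destination width
 * selects which stub to call.  Guest arithmetic flags are installed around
 * the call and copied back into ctxt->eflags afterwards.
 */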
5527static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5528{
5529 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5530
5531 if (!(ctxt->d & ByteOp))
5532 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5533
5534 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5535 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5536 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5537 : "c"(ctxt->src2.val));
5538
5539 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5540 if (!fop)
5541 return emulate_de(ctxt);
5542 return X86EMUL_CONTINUE;
5543}
5544
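/*
 * Reset the per-instruction decode state: everything between the rip_relative
 * and modrm fields of the context is cleared, along with the I/O and memory
 * read-ahead caches.
 */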
5545void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5546{
5547 memset(&ctxt->rip_relative, 0,
5548 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5549
5550 ctxt->io_read.pos = 0;
5551 ctxt->io_read.end = 0;
5552 ctxt->mem_read.end = 0;
5553}
5554
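/*
 * Execute a previously decoded instruction: run the permission, privilege and
 * intercept checks implied by the decode flags, fetch memory operands, invoke
 * the ->execute/fastop handler (or the opcode switches below), then write back
 * the result and advance RIP.
 */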
5555int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5556{
5557 const struct x86_emulate_ops *ops = ctxt->ops;
5558 int rc = X86EMUL_CONTINUE;
5559 int saved_dst_type = ctxt->dst.type;
5560 unsigned emul_flags;
5561
	ctxt->mem_read.pos = 0;

	/* The LOCK prefix is allowed only with some instructions. */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5566 rc = emulate_ud(ctxt);
5567 goto done;
5568 }
5569
5570 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5571 rc = emulate_ud(ctxt);
5572 goto done;
5573 }
5574
5575 emul_flags = ctxt->ops->get_hflags(ctxt);
5576 if (unlikely(ctxt->d &
5577 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5578 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5579 (ctxt->d & Undefined)) {
5580 rc = emulate_ud(ctxt);
5581 goto done;
5582 }
5583
5584 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5585 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5586 rc = emulate_ud(ctxt);
5587 goto done;
5588 }
5589
5590 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5591 rc = emulate_nm(ctxt);
5592 goto done;
5593 }
5594
5595 if (ctxt->d & Mmx) {
5596 rc = flush_pending_x87_faults(ctxt);
5597 if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(&ctxt->src);
5604 fetch_possible_mmx_operand(&ctxt->src2);
5605 if (!(ctxt->d & Mov))
5606 fetch_possible_mmx_operand(&ctxt->dst);
5607 }
5608
5609 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5610 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5611 X86_ICPT_PRE_EXCEPT);
5612 if (rc != X86EMUL_CONTINUE)
5613 goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5618 rc = emulate_ud(ctxt);
5619 goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5624 if (ctxt->d & PrivUD)
5625 rc = emulate_ud(ctxt);
5626 else
5627 rc = emulate_gp(ctxt, 0);
5628 goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
5633 rc = ctxt->check_perm(ctxt);
5634 if (rc != X86EMUL_CONTINUE)
5635 goto done;
5636 }
5637
5638 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5639 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5640 X86_ICPT_POST_EXCEPT);
5641 if (rc != X86EMUL_CONTINUE)
5642 goto done;
5643 }
5644
		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5648 string_registers_quirk(ctxt);
5649 ctxt->eip = ctxt->_eip;
5650 ctxt->eflags &= ~X86_EFLAGS_RF;
5651 goto done;
5652 }
5653 }
5654 }
5655
5656 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5657 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5658 ctxt->src.valptr, ctxt->src.bytes);
5659 if (rc != X86EMUL_CONTINUE)
5660 goto done;
5661 ctxt->src.orig_val64 = ctxt->src.val64;
5662 }
5663
5664 if (ctxt->src2.type == OP_MEM) {
5665 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5666 &ctxt->src2.val, ctxt->src2.bytes);
5667 if (rc != X86EMUL_CONTINUE)
5668 goto done;
5669 }
5670
5671 if ((ctxt->d & DstMask) == ImplicitOps)
5672 goto special_insn;
5673
5674
	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5678 &ctxt->dst.val, ctxt->dst.bytes);
5679 if (rc != X86EMUL_CONTINUE) {
5680 if (!(ctxt->d & NoWrite) &&
5681 rc == X86EMUL_PROPAGATE_FAULT &&
5682 ctxt->exception.vector == PF_VECTOR)
5683 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5684 goto done;
5685 }
5686 }
5687
5688 ctxt->dst.orig_val64 = ctxt->dst.val64;
5689
5690special_insn:
5691
5692 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5693 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5694 X86_ICPT_POST_MEMACCESS);
5695 if (rc != X86EMUL_CONTINUE)
5696 goto done;
5697 }
5698
5699 if (ctxt->rep_prefix && (ctxt->d & String))
5700 ctxt->eflags |= X86_EFLAGS_RF;
5701 else
5702 ctxt->eflags &= ~X86_EFLAGS_RF;
5703
5704 if (ctxt->execute) {
5705 if (ctxt->d & Fastop)
5706 rc = fastop(ctxt, ctxt->fop);
5707 else
5708 rc = ctxt->execute(ctxt);
5709 if (rc != X86EMUL_CONTINUE)
5710 goto done;
5711 goto writeback;
5712 }
5713
5714 if (ctxt->opcode_len == 2)
5715 goto twobyte_insn;
5716 else if (ctxt->opcode_len == 3)
5717 goto threebyte_insn;
5718
5719 switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
5721 if (test_cc(ctxt->b, ctxt->eflags))
5722 rc = jmp_rel(ctxt, ctxt->src.val);
5723 break;
	case 0x8d: /* lea r16/r32, m */
5725 ctxt->dst.val = ctxt->src.addr.mem.ea;
5726 break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5728 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5729 ctxt->dst.type = OP_NONE;
5730 else
5731 rc = em_xchg(ctxt);
5732 break;
	case 0x98: /* cbw/cwde/cdqe */
5734 switch (ctxt->op_bytes) {
5735 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5736 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5737 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5738 }
5739 break;
	case 0xcc:		/* int3 */
5741 rc = emulate_int(ctxt, 3);
5742 break;
	case 0xcd:		/* int n */
5744 rc = emulate_int(ctxt, ctxt->src.val);
5745 break;
	case 0xce:		/* into */
5747 if (ctxt->eflags & X86_EFLAGS_OF)
5748 rc = emulate_int(ctxt, 4);
5749 break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
5752 rc = jmp_rel(ctxt, ctxt->src.val);
5753 ctxt->dst.type = OP_NONE;
5754 break;
	case 0xf4:		/* hlt */
5756 ctxt->ops->halt(ctxt);
5757 break;
	case 0xf5:	/* cmc */
		/* complement carry flag (cf) */
		ctxt->eflags ^= X86_EFLAGS_CF;
5761 break;
	case 0xf8: /* clc */
5763 ctxt->eflags &= ~X86_EFLAGS_CF;
5764 break;
	case 0xf9: /* stc */
5766 ctxt->eflags |= X86_EFLAGS_CF;
5767 break;
	case 0xfc: /* cld */
5769 ctxt->eflags &= ~X86_EFLAGS_DF;
5770 break;
	case 0xfd: /* std */
5772 ctxt->eflags |= X86_EFLAGS_DF;
5773 break;
5774 default:
5775 goto cannot_emulate;
5776 }
5777
5778 if (rc != X86EMUL_CONTINUE)
5779 goto done;
5780
5781writeback:
5782 if (ctxt->d & SrcWrite) {
5783 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5784 rc = writeback(ctxt, &ctxt->src);
5785 if (rc != X86EMUL_CONTINUE)
5786 goto done;
5787 }
5788 if (!(ctxt->d & NoWrite)) {
5789 rc = writeback(ctxt, &ctxt->dst);
5790 if (rc != X86EMUL_CONTINUE)
5791 goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;
5799
5800 if ((ctxt->d & SrcMask) == SrcSI)
5801 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5802
5803 if ((ctxt->d & DstMask) == DstDI)
5804 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5805
5806 if (ctxt->rep_prefix && (ctxt->d & String)) {
5807 unsigned int count;
5808 struct read_cache *r = &ctxt->io_read;
5809 if ((ctxt->d & SrcMask) == SrcSI)
5810 count = ctxt->src.count;
5811 else
5812 count = ctxt->dst.count;
5813 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5814
		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
5828 writeback_registers(ctxt);
5829 return EMULATION_RESTART;
5830 }
5831 goto done;
5832 }
5833 ctxt->eflags &= ~X86_EFLAGS_RF;
5834 }
5835
5836 ctxt->eip = ctxt->_eip;
5837 if (ctxt->mode != X86EMUL_MODE_PROT64)
5838 ctxt->eip = (u32)ctxt->_eip;
5839
5840done:
5841 if (rc == X86EMUL_PROPAGATE_FAULT) {
5842 WARN_ON(ctxt->exception.vector > 0x1f);
5843 ctxt->have_exception = true;
5844 }
5845 if (rc == X86EMUL_INTERCEPTED)
5846 return EMULATION_INTERCEPTED;
5847
5848 if (rc == X86EMUL_CONTINUE)
5849 writeback_registers(ctxt);
5850
5851 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5852
5853twobyte_insn:
5854 switch (ctxt->b) {
	case 0x09:		/* wbinvd */
5856 (ctxt->ops->wbinvd)(ctxt);
5857 break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
5862 break;
	case 0x20: /* mov cr, reg */
5864 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5865 break;
	case 0x21: /* mov from dr to reg */
5867 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5868 break;
	case 0x40 ... 0x4f:	/* cmov */
5870 if (test_cc(ctxt->b, ctxt->eflags))
5871 ctxt->dst.val = ctxt->src.val;
5872 else if (ctxt->op_bytes != 4)
5873 ctxt->dst.type = OP_NONE;
5874 break;
	case 0x80 ... 0x8f: /* jcc rel */
5876 if (test_cc(ctxt->b, ctxt->eflags))
5877 rc = jmp_rel(ctxt, ctxt->src.val);
5878 break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
5880 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5881 break;
	case 0xb6 ... 0xb7:	/* movzx */
5883 ctxt->dst.bytes = ctxt->op_bytes;
5884 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5885 : (u16) ctxt->src.val;
5886 break;
	case 0xbe ... 0xbf:	/* movsx */
5888 ctxt->dst.bytes = ctxt->op_bytes;
5889 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5890 (s16) ctxt->src.val;
5891 break;
5892 default:
5893 goto cannot_emulate;
5894 }
5895
5896threebyte_insn:
5897
5898 if (rc != X86EMUL_CONTINUE)
5899 goto done;
5900
5901 goto writeback;
5902
5903cannot_emulate:
5904 return EMULATION_FAILED;
5905}
5906
5907void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5908{
5909 invalidate_registers(ctxt);
5910}
5911
5912void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5913{
5914 writeback_registers(ctxt);
5915}
5916
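/*
 * A cached guest physical address can only be reused when the instruction
 * touches a single memory location: REP string instructions and instructions
 * with two memory operands may access more than one page.
 */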
5917bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5918{
5919 if (ctxt->rep_prefix && (ctxt->d & String))
5920 return false;
5921
5922 if (ctxt->d & TwoMemOp)
5923 return false;
5924
5925 return true;
5926}
5927