/*
 * x86_emulate.h
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator:
 * interface definitions shared between the emulator and the rest of KVM.
 *
 * NOTE(review): the original file-header comment (license/copyright block)
 * was lost in extraction — restore it from the upstream kernel tree.
 */
11#ifndef _ASM_X86_KVM_X86_EMULATE_H
12#define _ASM_X86_KVM_X86_EMULATE_H
13
14#include <asm/desc_defs.h>
15
16struct x86_emulate_ctxt;
17
18struct x86_exception {
19 u8 vector;
20 bool error_code_valid;
21 u16 error_code;
22 bool nested_page_fault;
23 u64 address;
24};
25
/*
 * Status codes returned by the x86_emulate_ops callbacks and by the
 * emulator's memory-access helpers.
 */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE        0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE    1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR     3 /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED  4 /* cmpxchg did not see expected value */
#define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */

66struct x86_emulate_ops {
67
68
69
70
71
72
73
74 int (*read_std)(unsigned long addr, void *val,
75 unsigned int bytes, struct kvm_vcpu *vcpu,
76 struct x86_exception *fault);
77
78
79
80
81
82
83
84
85 int (*write_std)(unsigned long addr, void *val,
86 unsigned int bytes, struct kvm_vcpu *vcpu,
87 struct x86_exception *fault);
88
89
90
91
92
93
94
95 int (*fetch)(unsigned long addr, void *val,
96 unsigned int bytes, struct kvm_vcpu *vcpu,
97 struct x86_exception *fault);
98
99
100
101
102
103
104
105 int (*read_emulated)(unsigned long addr,
106 void *val,
107 unsigned int bytes,
108 struct x86_exception *fault,
109 struct kvm_vcpu *vcpu);
110
111
112
113
114
115
116
117
118 int (*write_emulated)(unsigned long addr,
119 const void *val,
120 unsigned int bytes,
121 struct x86_exception *fault,
122 struct kvm_vcpu *vcpu);
123
124
125
126
127
128
129
130
131
132 int (*cmpxchg_emulated)(unsigned long addr,
133 const void *old,
134 const void *new,
135 unsigned int bytes,
136 struct x86_exception *fault,
137 struct kvm_vcpu *vcpu);
138
139 int (*pio_in_emulated)(int size, unsigned short port, void *val,
140 unsigned int count, struct kvm_vcpu *vcpu);
141
142 int (*pio_out_emulated)(int size, unsigned short port, const void *val,
143 unsigned int count, struct kvm_vcpu *vcpu);
144
145 bool (*get_cached_descriptor)(struct desc_struct *desc,
146 int seg, struct kvm_vcpu *vcpu);
147 void (*set_cached_descriptor)(struct desc_struct *desc,
148 int seg, struct kvm_vcpu *vcpu);
149 u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
150 void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
151 unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
152 void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
153 void (*get_idt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
154 ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
155 int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
156 int (*cpl)(struct kvm_vcpu *vcpu);
157 int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
158 int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
159 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
160 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
161};
162
163
164struct operand {
165 enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
166 unsigned int bytes;
167 union {
168 unsigned long orig_val;
169 u64 orig_val64;
170 };
171 union {
172 unsigned long *reg;
173 struct segmented_address {
174 ulong ea;
175 unsigned seg;
176 } mem;
177 } addr;
178 union {
179 unsigned long val;
180 u64 val64;
181 char valptr[sizeof(unsigned long) + 2];
182 };
183};
184
185struct fetch_cache {
186 u8 data[15];
187 unsigned long start;
188 unsigned long end;
189};
190
191struct read_cache {
192 u8 data[1024];
193 unsigned long pos;
194 unsigned long end;
195};
196
197struct decode_cache {
198 u8 twobyte;
199 u8 b;
200 u8 lock_prefix;
201 u8 rep_prefix;
202 u8 op_bytes;
203 u8 ad_bytes;
204 u8 rex_prefix;
205 struct operand src;
206 struct operand src2;
207 struct operand dst;
208 bool has_seg_override;
209 u8 seg_override;
210 unsigned int d;
211 int (*execute)(struct x86_emulate_ctxt *ctxt);
212 unsigned long regs[NR_VCPU_REGS];
213 unsigned long eip;
214
215 u8 modrm;
216 u8 modrm_mod;
217 u8 modrm_reg;
218 u8 modrm_rm;
219 u8 modrm_seg;
220 bool rip_relative;
221 struct fetch_cache fetch;
222 struct read_cache io_read;
223 struct read_cache mem_read;
224};
225
226struct x86_emulate_ctxt {
227 struct x86_emulate_ops *ops;
228
229
230 struct kvm_vcpu *vcpu;
231
232 unsigned long eflags;
233 unsigned long eip;
234
235 int mode;
236 u32 cs_base;
237
238
239 int interruptibility;
240
241 bool perm_ok;
242
243 bool have_exception;
244 struct x86_exception exception;
245
246
247 struct decode_cache decode;
248};
249
250
/* Repeat-string prefix values (see decode_cache.rep_prefix). */
#define REPE_PREFIX	1
#define REPNE_PREFIX	2

/* Execution mode, passed to the emulator via x86_emulate_ctxt.mode. */
#define X86EMUL_MODE_REAL	0	/* Real mode.             */
#define X86EMUL_MODE_VM86	1	/* Virtual 8086 mode.     */
#define X86EMUL_MODE_PROT16	2	/* 16-bit protected mode. */
#define X86EMUL_MODE_PROT32	4	/* 32-bit protected mode. */
#define X86EMUL_MODE_PROT64	8	/* 64-bit (long) mode.    */

/* Host execution mode. */
#if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif

268int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
269#define EMULATION_FAILED -1
270#define EMULATION_OK 0
271#define EMULATION_RESTART 1
272int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
273int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
274 u16 tss_selector, int reason,
275 bool has_error_code, u32 error_code);
276int emulate_int_real(struct x86_emulate_ctxt *ctxt,
277 struct x86_emulate_ops *ops, int irq);
278#endif
279