1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
/* x86 / x86-64 virtual CPU definitions (QEMU target header). */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"
#include "qemu-common.h"

/* Width of a guest register / "long": 64 bits on x86-64 builds, else 32. */
#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* The guest can execute self-modifying code. */
#define TARGET_HAS_SMC

/* Self-modifying code is detected at instruction granularity. */
#define TARGET_HAS_PRECISE_SMC

/* Target supports debug breakpoints/watchpoints (ICE-style debugging). */
#define TARGET_HAS_ICE 1

/* ELF machine type, used for GDB stub / core dump identification. */
#ifdef TARGET_X86_64
#define ELF_MACHINE EM_X86_64
#else
#define ELF_MACHINE EM_386
#endif

/* Generic QEMU code refers to the per-target state as CPUState. */
#define CPUState struct CPUX86State

#include "cpu-defs.h"

#include "softfloat.h"
50
/* General-purpose register indices into env->regs[]; the order matches the
   x86 instruction encoding of the registers. */
#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

/* 8-bit register encodings: low bytes are 0-3, high bytes (AH..BH) 4-7. */
#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

/* Segment register indices into env->segs[]. */
#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5
75
76
/* Segment descriptor flag bits, as laid out in the high dword of an x86
   descriptor (Intel SDM vol. 3); stored in SegmentCache.flags. */
#define DESC_G_MASK (1 << 23) /* granularity: limit counted in 4K units */
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT) /* default operand/stack size is 32-bit */
#define DESC_L_SHIFT 21
#define DESC_L_MASK (1 << DESC_L_SHIFT) /* 64-bit code segment (long mode) */
#define DESC_AVL_MASK (1 << 20) /* available for software use */
#define DESC_P_MASK (1 << 15) /* present */
#define DESC_DPL_SHIFT 13
#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) /* descriptor privilege level */
#define DESC_S_MASK (1 << 12) /* 1 = code/data, 0 = system segment */
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK (1 << 8) /* accessed */

/* type bits when DESC_CS_MASK is set (code segment) */
#define DESC_CS_MASK (1 << 11) /* 1 = code segment, 0 = data segment */
#define DESC_C_MASK (1 << 10) /* code: conforming */
#define DESC_R_MASK (1 << 9)  /* code: readable */

/* type bits for data segments */
#define DESC_E_MASK (1 << 10) /* data: expand-down */
#define DESC_W_MASK (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9) /* system: TSS busy bit */
99
100
/* EFLAGS bits. The CC_* arithmetic flags are evaluated lazily from
   cc_src/cc_dst/cc_op; the remaining bits live in env->eflags. */
#define CC_C 0x0001 /* carry */
#define CC_P 0x0004 /* parity */
#define CC_A 0x0010 /* adjust (auxiliary carry) */
#define CC_Z 0x0040 /* zero */
#define CC_S 0x0080 /* sign */
#define CC_O 0x0800 /* overflow */

#define TF_SHIFT 8
#define IOPL_SHIFT 12
#define VM_SHIFT 17

#define TF_MASK 0x00000100 /* trap flag (single-step) */
#define IF_MASK 0x00000200 /* interrupt enable */
#define DF_MASK 0x00000400 /* direction flag */
#define IOPL_MASK 0x00003000 /* I/O privilege level */
#define NT_MASK 0x00004000 /* nested task */
#define RF_MASK 0x00010000 /* resume flag */
#define VM_MASK 0x00020000 /* virtual-8086 mode */
#define AC_MASK 0x00040000 /* alignment check */
#define VIF_MASK 0x00080000 /* virtual interrupt flag */
#define VIP_MASK 0x00100000 /* virtual interrupt pending */
#define ID_MASK 0x00200000 /* CPUID-availability detection flag */
123
124
125
126
127
128
/* hflags: non-architectural summary bits kept in env->hflags; they key the
   translated-code (TB) lookup, so any state that changes code generation
   must be reflected here. */
#define HF_CPL_SHIFT 0 /* current privilege level (2 bits) */
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT 2
/* true if hardware interrupts must be disabled for the next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 bit code/stack segments */
#define HF_CS32_SHIFT 4
#define HF_SS32_SHIFT 5
/* set when a non-zero DS/ES/SS base forces address computation through it */
#define HF_ADDSEG_SHIFT 6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT 7
#define HF_TF_SHIFT 8 /* same bit position as TF in eflags */
#define HF_MP_SHIFT 9 /* copies of CR0.MP/EM/TS */
#define HF_EM_SHIFT 10
#define HF_TS_SHIFT 11
#define HF_IOPL_SHIFT 12 /* same bit position as IOPL in eflags */
#define HF_LMA_SHIFT 14 /* x86-64 only: long mode active (EFER.LMA) */
#define HF_CS64_SHIFT 15 /* x86-64 only: 64-bit code segment */
#define HF_RF_SHIFT 16 /* same bit position as RF in eflags */
#define HF_VM_SHIFT 17 /* same bit position as VM in eflags */
#define HF_SMM_SHIFT 19 /* CPU is in system management mode */
#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* copy of CR4.OSFXSR */

#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK (1 << HF_PE_SHIFT)
#define HF_TF_MASK (1 << HF_TF_SHIFT)
#define HF_MP_MASK (1 << HF_MP_SHIFT)
#define HF_EM_MASK (1 << HF_EM_SHIFT)
#define HF_TS_MASK (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_RF_MASK (1 << HF_RF_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)

/* hflags2: further non-architectural bits (mostly SVM interrupt state);
   names follow AMD SVM terminology (GIF = global interrupt flag). */
#define HF2_GIF_SHIFT 0 /* interrupts are accepted when set */
#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM guest */
#define HF2_NMI_SHIFT 2 /* NMI is blocked (inside an NMI handler) */
#define HF2_VINTR_SHIFT 3 /* virtual interrupt window is open */

#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
187
/* Control register bits (Intel SDM vol. 3). */
#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK (1 << 0) /* protection enable */
#define CR0_MP_MASK (1 << 1) /* monitor coprocessor */
#define CR0_EM_MASK (1 << 2) /* x87 emulation */
#define CR0_TS_MASK (1 << 3) /* task switched */
#define CR0_ET_MASK (1 << 4) /* extension type (hardwired to 1) */
#define CR0_NE_MASK (1 << 5) /* native x87 error reporting */
#define CR0_WP_MASK (1 << 16) /* supervisor write protect */
#define CR0_AM_MASK (1 << 18) /* alignment mask */
#define CR0_PG_MASK (1 << 31) /* paging enable */

#define CR4_VME_MASK  (1 << 0) /* virtual-8086 mode extensions */
#define CR4_PVI_MASK  (1 << 1) /* protected-mode virtual interrupts */
#define CR4_TSD_MASK  (1 << 2) /* RDTSC restricted to CPL 0 */
#define CR4_DE_MASK   (1 << 3) /* debugging extensions (I/O breakpoints) */
#define CR4_PSE_MASK  (1 << 4) /* 4MB page support */
#define CR4_PAE_MASK  (1 << 5) /* physical address extension */
#define CR4_MCE_MASK  (1 << 6) /* machine check enable */
#define CR4_PGE_MASK  (1 << 7) /* global pages enable */
#define CR4_PCE_MASK  (1 << 8) /* RDPMC at any CPL */
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT) /* OS supports FXSAVE/FXRSTOR */
#define CR4_OSXMMEXCPT_MASK  (1 << 10) /* OS supports unmasked SIMD exceptions */

/* Debug status register DR6 bits. */
#define DR6_BD          (1 << 13) /* debug register access detected */
#define DR6_BS          (1 << 14) /* single step */
#define DR6_BT          (1 << 15) /* task switch */
#define DR6_FIXED_1     0xffff0ff0 /* bits that always read as 1 */

/* Debug control register DR7 bits. */
#define DR7_GD          (1 << 13) /* general detect enable */
#define DR7_TYPE_SHIFT  16 /* R/W fields, 2 bits per breakpoint */
#define DR7_LEN_SHIFT   18 /* LEN fields, 2 bits per breakpoint */
#define DR7_FIXED_1     0x00000400 /* bit that always reads as 1 */
223
/* Page table entry bits (Intel SDM vol. 3). PG_NX is bit 63 and exists
   only in PAE / long-mode (64-bit) entries, hence the 1LL mask. */
#define PG_PRESENT_BIT	0
#define PG_RW_BIT	1
#define PG_USER_BIT	2
#define PG_PWT_BIT	3
#define PG_PCD_BIT	4
#define PG_ACCESSED_BIT	5
#define PG_DIRTY_BIT	6
#define PG_PSE_BIT	7
#define PG_GLOBAL_BIT	8
#define PG_NX_BIT	63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK	 (1 << PG_RW_BIT)
#define PG_USER_MASK	 (1 << PG_USER_BIT)
#define PG_PWT_MASK	 (1 << PG_PWT_BIT)
#define PG_PCD_MASK	 (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK	 (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK	 (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK	 (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK	 (1LL << PG_NX_BIT)

/* Page fault error code bits (pushed on #PF). */
#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01 /* fault caused by protection, not non-present */
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT) /* write access */
#define PG_ERROR_U_MASK    0x04 /* user-mode access */
#define PG_ERROR_RSVD_MASK 0x08 /* reserved bit set in paging structure */
#define PG_ERROR_I_D_MASK  0x10 /* instruction fetch */
253
/* Machine Check Architecture (MCA) definitions. */
#define MCG_CTL_P	(1ULL<<8)   /* MCG_CAP: MCG_CTL register present */
#define MCG_SER_P	(1ULL<<24)  /* MCG_CAP: software error recovery supported */

#define MCE_CAP_DEF	(MCG_CTL_P|MCG_SER_P) /* default emulated capabilities */
#define MCE_BANKS_DEF	10 /* default number of emulated MC banks */

#define MCG_STATUS_RIPV	(1ULL<<0)   /* restart IP is valid */
#define MCG_STATUS_EIPV	(1ULL<<1)   /* error IP is valid */
#define MCG_STATUS_MCIP	(1ULL<<2)   /* machine check in progress */

/* MCi_STATUS bank register bits. */
#define MCI_STATUS_VAL	(1ULL<<63)  /* valid */
#define MCI_STATUS_OVER	(1ULL<<62)  /* error overflow */
#define MCI_STATUS_UC	(1ULL<<61)  /* uncorrected error */
#define MCI_STATUS_EN	(1ULL<<60)  /* error reporting enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* MCi_MISC register valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* MCi_ADDR register valid */
#define MCI_STATUS_PCC	(1ULL<<57)  /* processor context corrupted */
#define MCI_STATUS_S	(1ULL<<56)  /* signaled via a machine check */
#define MCI_STATUS_AR	(1ULL<<55)  /* action required */

/* MCi_MISC address mode field values. */
#define MCM_ADDR_SEGOFF	0	/* segment:offset */
#define MCM_ADDR_LINEAR	1	/* linear address */
#define MCM_ADDR_PHYS	2	/* physical address */
#define MCM_ADDR_MEM	3	/* memory address */
#define MCM_ADDR_GENERIC 7	/* generic */
280
/* Model specific registers: MSR numbers and bit definitions. The
   0xc0000xxx range holds the AMD64 / syscall MSRs. */
#define MSR_IA32_TSC 0x10
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)      /* bootstrap processor */
#define MSR_IA32_APICBASE_ENABLE (1<<11)  /* APIC global enable */
#define MSR_IA32_APICBASE_BASE (0xfffff<<12) /* APIC base physical address */

#define MSR_MTRRcap 0xfe
#define MSR_MTRRcap_VCNT 8 /* number of variable-range MTRRs reported */
#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)

#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b

#define MSR_IA32_PERF_STATUS 0x198

/* Variable-range MTRRs come in base/mask pairs starting at 0x200. */
#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)

/* Fixed-range MTRRs; the suffix is the physical range they control. */
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000 0x268
#define MSR_MTRRfix4K_C8000 0x269
#define MSR_MTRRfix4K_D0000 0x26a
#define MSR_MTRRfix4K_D8000 0x26b
#define MSR_MTRRfix4K_E0000 0x26c
#define MSR_MTRRfix4K_E8000 0x26d
#define MSR_MTRRfix4K_F0000 0x26e
#define MSR_MTRRfix4K_F8000 0x26f

#define MSR_PAT 0x277

#define MSR_MTRRdefType 0x2ff

/* First machine-check bank; each bank is 4 consecutive MSRs. */
#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
#define MSR_MC0_MISC 0x403

#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE (1 << 0)   /* syscall enable */
#define MSR_EFER_LME (1 << 8)   /* long mode enable */
#define MSR_EFER_LMA (1 << 10)  /* long mode active */
#define MSR_EFER_NXE (1 << 11)  /* no-execute enable */
#define MSR_EFER_SVME (1 << 12) /* SVM enable */
#define MSR_EFER_FFXSR (1 << 14) /* fast FXSAVE/FXRSTOR */

#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
#define MSR_FMASK 0xc0000084
#define MSR_FSBASE 0xc0000100
#define MSR_GSBASE 0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
#define MSR_TSC_AUX 0xc0000103

#define MSR_VM_HSAVE_PA 0xc0010117 /* SVM host state-save area */
345
346
/* CPUID feature bits. CPUID_* = leaf 1 EDX, CPUID_EXT_* = leaf 1 ECX,
   CPUID_EXT2_* / CPUID_EXT3_* = leaf 0x80000001 EDX / ECX (AMD),
   CPUID_SVM_* = SVM feature leaf — bit positions follow the vendor
   manuals; verify against Intel SDM vol. 2 / AMD APM vol. 3. */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_PSE36   (1 << 17)
#define CPUID_PN   (1 << 18) /* processor serial number */
#define CPUID_CLFLUSH (1 << 19)
#define CPUID_DTS (1 << 21)
#define CPUID_ACPI (1 << 22)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
#define CPUID_SS (1 << 27)
#define CPUID_HT (1 << 28)
#define CPUID_TM (1 << 29)
#define CPUID_IA64 (1 << 30)
#define CPUID_PBE (1 << 31)

#define CPUID_EXT_SSE3     (1 << 0)
#define CPUID_EXT_DTES64   (1 << 2)
#define CPUID_EXT_MONITOR  (1 << 3)
#define CPUID_EXT_DSCPL    (1 << 4)
#define CPUID_EXT_VMX      (1 << 5)
#define CPUID_EXT_SMX      (1 << 6)
#define CPUID_EXT_EST      (1 << 7)
#define CPUID_EXT_TM2      (1 << 8)
#define CPUID_EXT_SSSE3    (1 << 9)
#define CPUID_EXT_CID      (1 << 10)
#define CPUID_EXT_CX16     (1 << 13)
#define CPUID_EXT_XTPR     (1 << 14)
#define CPUID_EXT_PDCM     (1 << 15)
#define CPUID_EXT_DCA      (1 << 18)
#define CPUID_EXT_SSE41    (1 << 19)
#define CPUID_EXT_SSE42    (1 << 20)
#define CPUID_EXT_X2APIC   (1 << 21)
#define CPUID_EXT_MOVBE    (1 << 22)
#define CPUID_EXT_POPCNT   (1 << 23)
#define CPUID_EXT_XSAVE    (1 << 26)
#define CPUID_EXT_OSXSAVE  (1 << 27)
#define CPUID_EXT_HYPERVISOR  (1 << 31) /* set when running under a hypervisor */

#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_MP      (1 << 19)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_MMXEXT  (1 << 22)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP  (1 << 27)
#define CPUID_EXT2_LM      (1 << 29)
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW   (1 << 31)

#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM     (1 << 2)
#define CPUID_EXT3_EXTAPIC (1 << 3)
#define CPUID_EXT3_CR8LEG  (1 << 4)
#define CPUID_EXT3_ABM     (1 << 5)
#define CPUID_EXT3_SSE4A   (1 << 6)
#define CPUID_EXT3_MISALIGNSSE (1 << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW    (1 << 9)
#define CPUID_EXT3_IBS     (1 << 10)
#define CPUID_EXT3_SKINIT  (1 << 12)

#define CPUID_SVM_NPT          (1 << 0)
#define CPUID_SVM_LBRV         (1 << 1)
#define CPUID_SVM_SVMLOCK      (1 << 2)
#define CPUID_SVM_NRIPSAVE     (1 << 3)
#define CPUID_SVM_TSCSCALE     (1 << 4)
#define CPUID_SVM_VMCBCLEAN    (1 << 5)
#define CPUID_SVM_FLUSHASID    (1 << 6)
#define CPUID_SVM_DECODEASSIST (1 << 7)
#define CPUID_SVM_PAUSEFILTER  (1 << 10)
#define CPUID_SVM_PFTHRESHOLD  (1 << 12)

/* Vendor ID strings as returned in EBX/EDX/ECX by CPUID leaf 0;
   each constant is 4 ASCII chars in little-endian order. */
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */

#define CPUID_VENDOR_VIA_1   0x746e6543 /* "Cent" */
#define CPUID_VENDOR_VIA_2   0x48727561 /* "aurH" */
#define CPUID_VENDOR_VIA_3   0x736c7561 /* "auls" */

/* MONITOR/MWAIT capability bits (CPUID leaf 5 ECX). */
#define CPUID_MWAIT_IBE     (1 << 1) /* interrupts can break MWAIT even if masked */
#define CPUID_MWAIT_EMX     (1 << 0) /* MWAIT extensions supported */
450
/* x86 exception vector numbers; the name suffix describes the fault. */
#define EXCP00_DIVZ	0   /* divide error */
#define EXCP01_DB	1   /* debug */
#define EXCP02_NMI	2   /* non-maskable interrupt */
#define EXCP03_INT3	3   /* breakpoint */
#define EXCP04_INTO	4   /* overflow (INTO) */
#define EXCP05_BOUND	5   /* BOUND range exceeded */
#define EXCP06_ILLOP	6   /* invalid opcode */
#define EXCP07_PREX	7   /* device not available */
#define EXCP08_DBLE	8   /* double fault */
#define EXCP09_XERR	9   /* coprocessor segment overrun (legacy) */
#define EXCP0A_TSS	10  /* invalid TSS */
#define EXCP0B_NOSEG	11  /* segment not present */
#define EXCP0C_STACK	12  /* stack fault */
#define EXCP0D_GPF	13  /* general protection fault */
#define EXCP0E_PAGE	14  /* page fault */
#define EXCP10_COPR	16  /* x87 floating-point error */
#define EXCP11_ALGN	17  /* alignment check */
#define EXCP12_MCHK	18  /* machine check */

/* QEMU-internal pseudo-exception, not an architectural vector;
   presumably used for user-mode syscall emulation — verify in cpu-exec. */
#define EXCP_SYSCALL    0x100

/* Map the generic CPU_INTERRUPT_TGT_* slots onto x86 interrupt sources. */
#define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ      CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_INIT      CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_SIPI      CPU_INTERRUPT_TGT_INT_2
480
481
/* Lazy condition-code evaluation: cc_op records which operation last wrote
   the flags so they can be recomputed from cc_src/cc_dst on demand.
   NOTE: the B/W/L/Q operand-size variants of each operation must stay
   contiguous and in this order — presumably the translator indexes helper
   tables by (base op + size); verify in translate.c before reordering. */
enum {
    CC_OP_DYNAMIC, /* must use dynamic code to compute the flags */
    CC_OP_EFLAGS,  /* all flags are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* add with carry variants */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB,
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* subtract with borrow variants */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* AND/OR/XOR: C = O = 0 */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* C flag is preserved from before the operation */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB,
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* shift left variants */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* arithmetic shift right variants */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB, /* number of cc_op values, not a real operation */
};
538
/* Cached ("hidden") part of a segment register: the selector plus the
   base/limit/flags decoded from its descriptor. */
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;    /* descriptor attribute bits, see DESC_* masks */
} SegmentCache;
545
/* 128-bit SSE register, viewable as bytes/words/dwords/qwords or as
   packed single/double floats. */
typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;

/* 64-bit MMX register (also usable for 3DNow! packed floats). */
typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    float32 _s[2];
    uint64_t q;
} MMXReg;

/* Element accessors: on big-endian hosts the union arrays hold the guest
   (little-endian) data reversed, so index from the top to keep e.g.
   XMM_L(0) the guest's low dword on every host. */
#ifdef HOST_WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#define MMX_S(n) _s[n]
#endif
#define MMX_Q(n) q

/* x87 register: an 80-bit float, aliased by the MMX state per the
   architecture. Aligned to 16 bytes for FXSAVE-style layout. */
typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;

/* One variable-range MTRR: the MTRRphysBase/MTRRphysMask MSR pair. */
typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;
599
/* Number of guest-visible general-purpose registers. */
#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

/* Two MMU translation modes: kernel (CPL < 3) and user (CPL == 3),
   see cpu_mmu_index() below. */
#define NB_MMU_MODES 2
610
/* Complete per-CPU emulated state. NOTE(review): field layout appears to be
   depended on by the vmstate/KVM code (see the *_vmstate fields and
   CPU_SAVE_VERSION) — do not reorder without checking machine.c. */
typedef struct CPUX86State {
    /* standard architectural registers */
    target_ulong regs[CPU_NB_REGS]; /* indexed by R_EAX..; see macros below */
    target_ulong eip;
    target_ulong eflags; /* arithmetic flags and DF are tracked separately
                            in cc_* / df below */

    /* emulator-internal eflags handling: condition codes are recomputed
       lazily from cc_src/cc_dst according to cc_op (CC_OP_* enum) */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df;      /* direction multiplier: presumably 1 if DF=0, -1 if
                        DF=1 — verify in translate.c */
    uint32_t hflags; /* TB-key flags, see HF_* constants */

    uint32_t hflags2; /* auxiliary flags, see HF2_* constants */

    /* segmentation state */
    SegmentCache segs[6]; /* indexed by R_ES..R_GS */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt;     /* only base/limit are meaningful */
    SegmentCache idt;     /* only base/limit are meaningful */

    target_ulong cr[5];   /* control registers; CR1 does not exist */
    int32_t a20_mask;     /* mask applied for A20-line emulation */

    /* FPU state */
    unsigned int fpstt;   /* top of the x87 register stack */
    uint16_t fpus;        /* status word */
    uint16_t fpuc;        /* control word */
    uint8_t fptags[8];    /* per-register tag */
    FPReg fpregs[8];
    /* last FPU instruction/operand pointers */
    uint16_t fpop;
    uint64_t fpip;
    uint64_t fpdp;

    /* emulator-internal softfloat state */
    float_status fp_status;
    floatx80 ft0;         /* scratch x87 value */

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;        /* scratch SSE register */
    MMXReg mmx_t0;        /* scratch MMX register */
    target_ulong cc_tmp;  /* scratch used by flag computation helpers */

    /* sysenter/syscall MSR state */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t efer;
    uint64_t star;

    /* SVM (AMD virtualization) state */
    uint64_t vm_hsave;
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;        /* virtual task priority register */

#ifdef TARGET_X86_64
    /* long-mode-only MSRs */
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif
    /* KVM paravirtual MSRs */
    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t async_pf_en_msr;

    uint64_t tsc;

    uint64_t mcg_status;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers; DR4/DR5 alias DR6/DR7 and are unused */
    union {
        CPUBreakpoint *cpu_breakpoint[4];
        CPUWatchpoint *cpu_watchpoint[4];
    };
    uint32_t smbase;    /* SMM state-save base */
    int old_exception;  /* previous exception, for double-fault detection —
                           presumably; verify in op_helper */

    uint8_t nmi_injected;
    uint8_t nmi_pending;

    CPU_COMMON

    uint64_t pat;       /* page attribute table MSR */

    /* processor features, reported through the CPUID instruction */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    uint32_t cpuid_ext3_features;
    uint32_t cpuid_apic_id;
    int cpuid_vendor_override;
    /* Store the results of Centaur's CPUID instructions */
    uint32_t cpuid_xlevel2;
    uint32_t cpuid_ext4_features;

    /* MTRR state */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[8];

    /* state shared with the KVM in-kernel CPU model */
    uint32_t mp_state;
    int32_t exception_injected;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t has_error_code;
    uint32_t sipi_vector;
    uint32_t cpuid_kvm_features;
    uint32_t cpuid_svm_features;
    bool tsc_valid;

    /* in-kernel irqchip context */
    struct DeviceState *apic_state;

    /* machine check state */
    uint64_t mcg_cap;
    uint64_t mcg_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF*4]; /* 4 MSRs (CTL/STATUS/ADDR/MISC) per bank */

    uint64_t tsc_aux;

    /* vmstate/snapshot helper copies of the FPU environment */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    /* XSAVE / AVX state */
    uint64_t xstate_bv;
    XMMReg ymmh_regs[CPU_NB_REGS]; /* high 128 bits of the YMM registers */

    uint64_t xcr0;
} CPUX86State;
767
/* CPU lifecycle and execution entry points (implemented elsewhere in the
   target-i386 directory). */
CPUX86State *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
void x86_cpu_list (FILE *f, fprintf_function cpu_fprintf, const char *optarg);
void x86_cpudef_setup(void);
int cpu_x86_support_mca_broadcast(CPUState *env);

/* Query the interrupt controller for the next pending vector. */
int cpu_get_pic_interrupt(CPUX86State *s);

/* Signal an x87 error on the legacy FERR# path. */
void cpu_set_ferr(CPUX86State *s);
778
779
780
/* Load a segment register cache with already-decoded descriptor values
   (no descriptor-table access is performed here), then recompute the
   hflags bits that depend on segmentation: HF_CS32/HF_SS32/HF_CS64 and
   HF_ADDSEG. */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode: CS.L forces 64-bit code with flat addressing */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility mode: derive HF_CS32 from the
                   descriptor's B (default operand size) bit */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        /* HF_SS32 comes from the stack segment's B bit */
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* 64-bit code segment: HF_ADDSEG stays clear */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* real mode, vm86 mode or 16-bit protected mode: segment
               bases must always be added when forming addresses */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            /* 32-bit protected mode: segment bases are only needed when
               DS, ES or SS is not flat (non-zero base) */
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
837
838static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
839 int sipi_vector)
840{
841 env->eip = 0;
842 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
843 sipi_vector << 12,
844 env->segs[R_CS].limit,
845 env->segs[R_CS].flags);
846 env->halted = 0;
847}
848
849int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
850 target_ulong *base, unsigned int *limit,
851 unsigned int *flags);
852
853
/* Set the current privilege level (0-3) in hflags. The CPL must occupy
   the two least-significant bits of hflags for the plain OR below to be
   valid; the preprocessor check enforces that at compile time. */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}
862
863
864
865void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
866floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);
867
868
869
870
871void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
872void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
873void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
874
875
876
877
878int cpu_x86_signal_handler(int host_signum, void *pinfo,
879 void *puc);
880
881
882void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
883 uint32_t *eax, uint32_t *ebx,
884 uint32_t *ecx, uint32_t *edx);
885int cpu_x86_register (CPUX86State *env, const char *cpu_model);
886void cpu_clear_apic_feature(CPUX86State *env);
887void host_cpuid(uint32_t function, uint32_t count,
888 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
889
890
891int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
892 int is_write, int mmu_idx, int is_softmmu);
893#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
894void cpu_x86_set_a20(CPUX86State *env, int a20_state);
895
/* Return the enable bits for hardware breakpoint 'index' from DR7:
   bit 0 = local enable, bit 1 = global enable, 0 = disabled. */
static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
{
    unsigned long enable_field = dr7 >> (index * 2);

    return (int)(enable_field & 3);
}
900
901static inline int hw_breakpoint_type(unsigned long dr7, int index)
902{
903 return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
904}
905
906static inline int hw_breakpoint_len(unsigned long dr7, int index)
907{
908 int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
909 return (len == 2) ? 8 : len + 1;
910}
911
912void hw_breakpoint_insert(CPUX86State *env, int index);
913void hw_breakpoint_remove(CPUX86State *env, int index);
914int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);
915
916
917void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
918void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
919void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
920
921
922void cpu_smm_update(CPUX86State *env);
923uint64_t cpu_get_tsc(CPUX86State *env);
924
925
/* State-dump flags — presumably consumed by cpu_dump_state(); verify. */
#define X86_DUMP_FPU  0x0001 /* also dump the FPU state */
#define X86_DUMP_CCOP 0x0002 /* also dump the lazy flag cache (cc_op etc.) */

#define TARGET_PAGE_BITS 12 /* 4K pages */

#ifdef TARGET_X86_64
#define TARGET_PHYS_ADDR_SPACE_BITS 52
/* 47 bits covers the canonical lower half of the 48-bit virtual space;
   NOTE(review): confirm why the sign-extended upper half is excluded. */
#define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 36 /* PAE physical addressing */
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

/* Map the generic cpu_* entry points used by common code onto the
   x86-specific implementations. */
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list_id x86_cpu_list
#define cpudef_setup	x86_cpudef_setup

/* Version number of the saved-VM (vmstate) format for this CPU. */
#define CPU_SAVE_VERSION 12
950
951
/* MMU translation modes: index 0 = kernel (CPL < 3), index 1 = user. */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
955static inline int cpu_mmu_index (CPUState *env)
956{
957 return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
958}
959
/* Shorthand accessors for the current 'env' — used throughout the
   translator and the op helpers. The #undef guards against host headers
   that define the same names. */
#undef EAX
#define EAX (env->regs[R_EAX])
#undef ECX
#define ECX (env->regs[R_ECX])
#undef EDX
#define EDX (env->regs[R_EDX])
#undef EBX
#define EBX (env->regs[R_EBX])
#undef ESP
#define ESP (env->regs[R_ESP])
#undef EBP
#define EBP (env->regs[R_EBP])
#undef ESI
#define ESI (env->regs[R_ESI])
#undef EDI
#define EDI (env->regs[R_EDI])
#undef EIP
#define EIP (env->eip)
#define DF  (env->df)

/* lazy flag inputs, interpreted according to CC_OP */
#define CC_SRC (env->cc_src)
#define CC_DST (env->cc_dst)
#define CC_OP  (env->cc_op)

/* x87 register accessors; ST(n) is relative to the stack top (fpstt) */
#define FT0 (env->ft0)
#define ST0 (env->fpregs[env->fpstt].d)
#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
#define ST1 ST(1)

/* initialize the lazy-flag machinery (translate.c) */
void optimize_flags_init(void);

/* Lazy flag computation helpers — presumably one CCTable entry per
   CC_OP_* value; verify where the table is defined. */
typedef struct CCTable {
    int (*compute_all)(void); /* return all the flags */
    int (*compute_c)(void);   /* return the C flag only */
} CCTable;
997
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: set up the child's registers after clone() —
   optionally switch to the new stack, and make the syscall return 0. */
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
    if (newsp)
        env->regs[R_ESP] = newsp;
    env->regs[R_EAX] = 0;
}
#endif
1006
1007#include "cpu-all.h"
1008#include "svm.h"
1009
1010#if !defined(CONFIG_USER_ONLY)
1011#include "hw/apic.h"
1012#endif
1013
1014static inline bool cpu_has_work(CPUState *env)
1015{
1016 return ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
1017 (env->eflags & IF_MASK)) ||
1018 (env->interrupt_request & (CPU_INTERRUPT_NMI |
1019 CPU_INTERRUPT_INIT |
1020 CPU_INTERRUPT_SIPI |
1021 CPU_INTERRUPT_MCE));
1022}
1023
1024#include "exec-all.h"
1025
/* Recover the guest EIP from a translation block: tb->pc holds the linear
   address (CS base + EIP), so subtract the CS base back out. */
static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
{
    env->eip = tb->pc - tb->cs_base;
}
1030
1031static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
1032 target_ulong *cs_base, int *flags)
1033{
1034 *cs_base = env->segs[R_CS].base;
1035 *pc = *cs_base + env->eip;
1036 *flags = env->hflags |
1037 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
1038}
1039
/* INIT / SIPI handling entry points. */
void do_cpu_init(CPUState *env);
void do_cpu_sipi(CPUState *env);

/* flags for cpu_x86_inject_mce() */
#define MCE_INJECT_BROADCAST    1 /* deliver to all CPUs, not just cenv */
#define MCE_INJECT_UNCOND_AO    2 /* inject action-optional errors unconditionally */

/* Inject a machine check (monitor command / KVM error reporting). */
void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags);

/* op_helper entry points */
void do_interrupt(CPUState *env);
void do_interrupt_x86_hardirq(CPUState *env, int intno, int is_hw);

void do_smm_enter(CPUState *env1);

void svm_check_intercept(CPUState *env1, uint32_t type);

/* compute all eflags from the lazy state for the given cc_op */
uint32_t cpu_cc_compute_all(CPUState *env1, int op);
1059
1060#endif
1061