1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifndef CPU_I386_H
20#define CPU_I386_H
21
22#include "qemu-common.h"
23#include "standard-headers/asm-x86/hyperv.h"
24
25#ifdef TARGET_X86_64
26#define TARGET_LONG_BITS 64
27#else
28#define TARGET_LONG_BITS 32
29#endif
30
31
32#define TARGET_MAX_INSN_SIZE 16
33
34
35
36#define TARGET_HAS_PRECISE_SMC
37
38#ifdef TARGET_X86_64
39#define I386_ELF_MACHINE EM_X86_64
40#define ELF_MACHINE_UNAME "x86_64"
41#else
42#define I386_ELF_MACHINE EM_386
43#define ELF_MACHINE_UNAME "i686"
44#endif
45
46#define CPUArchState struct CPUX86State
47
48#include "exec/cpu-defs.h"
49
50#include "fpu/softfloat.h"
51
52#define R_EAX 0
53#define R_ECX 1
54#define R_EDX 2
55#define R_EBX 3
56#define R_ESP 4
57#define R_EBP 5
58#define R_ESI 6
59#define R_EDI 7
60
61#define R_AL 0
62#define R_CL 1
63#define R_DL 2
64#define R_BL 3
65#define R_AH 4
66#define R_CH 5
67#define R_DH 6
68#define R_BH 7
69
70#define R_ES 0
71#define R_CS 1
72#define R_SS 2
73#define R_DS 3
74#define R_FS 4
75#define R_GS 5
76
77
78#define DESC_G_MASK (1 << 23)
79#define DESC_B_SHIFT 22
80#define DESC_B_MASK (1 << DESC_B_SHIFT)
81#define DESC_L_SHIFT 21
82#define DESC_L_MASK (1 << DESC_L_SHIFT)
83#define DESC_AVL_MASK (1 << 20)
84#define DESC_P_MASK (1 << 15)
85#define DESC_DPL_SHIFT 13
86#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
87#define DESC_S_MASK (1 << 12)
88#define DESC_TYPE_SHIFT 8
89#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
90#define DESC_A_MASK (1 << 8)
91
92#define DESC_CS_MASK (1 << 11)
93#define DESC_C_MASK (1 << 10)
94#define DESC_R_MASK (1 << 9)
95
96#define DESC_E_MASK (1 << 10)
97#define DESC_W_MASK (1 << 9)
98
99#define DESC_TSS_BUSY_MASK (1 << 9)
100
101
102#define CC_C 0x0001
103#define CC_P 0x0004
104#define CC_A 0x0010
105#define CC_Z 0x0040
106#define CC_S 0x0080
107#define CC_O 0x0800
108
109#define TF_SHIFT 8
110#define IOPL_SHIFT 12
111#define VM_SHIFT 17
112
113#define TF_MASK 0x00000100
114#define IF_MASK 0x00000200
115#define DF_MASK 0x00000400
116#define IOPL_MASK 0x00003000
117#define NT_MASK 0x00004000
118#define RF_MASK 0x00010000
119#define VM_MASK 0x00020000
120#define AC_MASK 0x00040000
121#define VIF_MASK 0x00080000
122#define VIP_MASK 0x00100000
123#define ID_MASK 0x00200000
124
125
126
127
128
129
130#define HF_CPL_SHIFT 0
131
132#define HF_SOFTMMU_SHIFT 2
133
134#define HF_INHIBIT_IRQ_SHIFT 3
135
136#define HF_CS32_SHIFT 4
137#define HF_SS32_SHIFT 5
138
139#define HF_ADDSEG_SHIFT 6
140
141#define HF_PE_SHIFT 7
142#define HF_TF_SHIFT 8
143#define HF_MP_SHIFT 9
144#define HF_EM_SHIFT 10
145#define HF_TS_SHIFT 11
146#define HF_IOPL_SHIFT 12
147#define HF_LMA_SHIFT 14
148#define HF_CS64_SHIFT 15
149#define HF_RF_SHIFT 16
150#define HF_VM_SHIFT 17
151#define HF_AC_SHIFT 18
152#define HF_SMM_SHIFT 19
153#define HF_SVME_SHIFT 20
154#define HF_SVMI_SHIFT 21
155#define HF_OSFXSR_SHIFT 22
156#define HF_SMAP_SHIFT 23
157#define HF_IOBPT_SHIFT 24
158#define HF_MPX_EN_SHIFT 25
159#define HF_MPX_IU_SHIFT 26
160
161#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
162#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
163#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
164#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
165#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
166#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
167#define HF_PE_MASK (1 << HF_PE_SHIFT)
168#define HF_TF_MASK (1 << HF_TF_SHIFT)
169#define HF_MP_MASK (1 << HF_MP_SHIFT)
170#define HF_EM_MASK (1 << HF_EM_SHIFT)
171#define HF_TS_MASK (1 << HF_TS_SHIFT)
172#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
173#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
174#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
175#define HF_RF_MASK (1 << HF_RF_SHIFT)
176#define HF_VM_MASK (1 << HF_VM_SHIFT)
177#define HF_AC_MASK (1 << HF_AC_SHIFT)
178#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
179#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
180#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
181#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
182#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
183#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
184#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
185#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
186
187
188
189#define HF2_GIF_SHIFT 0
190#define HF2_HIF_SHIFT 1
191#define HF2_NMI_SHIFT 2
192#define HF2_VINTR_SHIFT 3
193#define HF2_SMM_INSIDE_NMI_SHIFT 4
194#define HF2_MPX_PR_SHIFT 5
195
196#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
197#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
198#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
199#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
200#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
201#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
202
203#define CR0_PE_SHIFT 0
204#define CR0_MP_SHIFT 1
205
206#define CR0_PE_MASK (1U << 0)
207#define CR0_MP_MASK (1U << 1)
208#define CR0_EM_MASK (1U << 2)
209#define CR0_TS_MASK (1U << 3)
210#define CR0_ET_MASK (1U << 4)
211#define CR0_NE_MASK (1U << 5)
212#define CR0_WP_MASK (1U << 16)
213#define CR0_AM_MASK (1U << 18)
214#define CR0_PG_MASK (1U << 31)
215
216#define CR4_VME_MASK (1U << 0)
217#define CR4_PVI_MASK (1U << 1)
218#define CR4_TSD_MASK (1U << 2)
219#define CR4_DE_MASK (1U << 3)
220#define CR4_PSE_MASK (1U << 4)
221#define CR4_PAE_MASK (1U << 5)
222#define CR4_MCE_MASK (1U << 6)
223#define CR4_PGE_MASK (1U << 7)
224#define CR4_PCE_MASK (1U << 8)
225#define CR4_OSFXSR_SHIFT 9
226#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
227#define CR4_OSXMMEXCPT_MASK (1U << 10)
228#define CR4_VMXE_MASK (1U << 13)
229#define CR4_SMXE_MASK (1U << 14)
230#define CR4_FSGSBASE_MASK (1U << 16)
231#define CR4_PCIDE_MASK (1U << 17)
232#define CR4_OSXSAVE_MASK (1U << 18)
233#define CR4_SMEP_MASK (1U << 20)
234#define CR4_SMAP_MASK (1U << 21)
235#define CR4_PKE_MASK (1U << 22)
236
237#define DR6_BD (1 << 13)
238#define DR6_BS (1 << 14)
239#define DR6_BT (1 << 15)
240#define DR6_FIXED_1 0xffff0ff0
241
242#define DR7_GD (1 << 13)
243#define DR7_TYPE_SHIFT 16
244#define DR7_LEN_SHIFT 18
245#define DR7_FIXED_1 0x00000400
246#define DR7_GLOBAL_BP_MASK 0xaa
247#define DR7_LOCAL_BP_MASK 0x55
248#define DR7_MAX_BP 4
249#define DR7_TYPE_BP_INST 0x0
250#define DR7_TYPE_DATA_WR 0x1
251#define DR7_TYPE_IO_RW 0x2
252#define DR7_TYPE_DATA_RW 0x3
253
254#define PG_PRESENT_BIT 0
255#define PG_RW_BIT 1
256#define PG_USER_BIT 2
257#define PG_PWT_BIT 3
258#define PG_PCD_BIT 4
259#define PG_ACCESSED_BIT 5
260#define PG_DIRTY_BIT 6
261#define PG_PSE_BIT 7
262#define PG_GLOBAL_BIT 8
263#define PG_PSE_PAT_BIT 12
264#define PG_PKRU_BIT 59
265#define PG_NX_BIT 63
266
267#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
268#define PG_RW_MASK (1 << PG_RW_BIT)
269#define PG_USER_MASK (1 << PG_USER_BIT)
270#define PG_PWT_MASK (1 << PG_PWT_BIT)
271#define PG_PCD_MASK (1 << PG_PCD_BIT)
272#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
273#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
274#define PG_PSE_MASK (1 << PG_PSE_BIT)
275#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
276#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
277#define PG_ADDRESS_MASK 0x000ffffffffff000LL
278#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
279#define PG_HI_USER_MASK 0x7ff0000000000000LL
280#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
281#define PG_NX_MASK (1ULL << PG_NX_BIT)
282
283#define PG_ERROR_W_BIT 1
284
285#define PG_ERROR_P_MASK 0x01
286#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
287#define PG_ERROR_U_MASK 0x04
288#define PG_ERROR_RSVD_MASK 0x08
289#define PG_ERROR_I_D_MASK 0x10
290#define PG_ERROR_PK_MASK 0x20
291
292#define MCG_CTL_P (1ULL<<8)
293#define MCG_SER_P (1ULL<<24)
294
295#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
296#define MCE_BANKS_DEF 10
297
298#define MCG_CAP_BANKS_MASK 0xff
299
300#define MCG_STATUS_RIPV (1ULL<<0)
301#define MCG_STATUS_EIPV (1ULL<<1)
302#define MCG_STATUS_MCIP (1ULL<<2)
303
304#define MCI_STATUS_VAL (1ULL<<63)
305#define MCI_STATUS_OVER (1ULL<<62)
306#define MCI_STATUS_UC (1ULL<<61)
307#define MCI_STATUS_EN (1ULL<<60)
308#define MCI_STATUS_MISCV (1ULL<<59)
309#define MCI_STATUS_ADDRV (1ULL<<58)
310#define MCI_STATUS_PCC (1ULL<<57)
311#define MCI_STATUS_S (1ULL<<56)
312#define MCI_STATUS_AR (1ULL<<55)
313
314
315#define MCM_ADDR_SEGOFF 0
316#define MCM_ADDR_LINEAR 1
317#define MCM_ADDR_PHYS 2
318#define MCM_ADDR_MEM 3
319#define MCM_ADDR_GENERIC 7
320
321#define MSR_IA32_TSC 0x10
322#define MSR_IA32_APICBASE 0x1b
323#define MSR_IA32_APICBASE_BSP (1<<8)
324#define MSR_IA32_APICBASE_ENABLE (1<<11)
325#define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
326#define MSR_IA32_FEATURE_CONTROL 0x0000003a
327#define MSR_TSC_ADJUST 0x0000003b
328#define MSR_IA32_TSCDEADLINE 0x6e0
329
330#define MSR_P6_PERFCTR0 0xc1
331
332#define MSR_IA32_SMBASE 0x9e
333#define MSR_MTRRcap 0xfe
334#define MSR_MTRRcap_VCNT 8
335#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
336#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
337
338#define MSR_IA32_SYSENTER_CS 0x174
339#define MSR_IA32_SYSENTER_ESP 0x175
340#define MSR_IA32_SYSENTER_EIP 0x176
341
342#define MSR_MCG_CAP 0x179
343#define MSR_MCG_STATUS 0x17a
344#define MSR_MCG_CTL 0x17b
345
346#define MSR_P6_EVNTSEL0 0x186
347
348#define MSR_IA32_PERF_STATUS 0x198
349
350#define MSR_IA32_MISC_ENABLE 0x1a0
351
352#define MSR_IA32_MISC_ENABLE_DEFAULT 1
353
354#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
355#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
356
357#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
358
359#define MSR_MTRRfix64K_00000 0x250
360#define MSR_MTRRfix16K_80000 0x258
361#define MSR_MTRRfix16K_A0000 0x259
362#define MSR_MTRRfix4K_C0000 0x268
363#define MSR_MTRRfix4K_C8000 0x269
364#define MSR_MTRRfix4K_D0000 0x26a
365#define MSR_MTRRfix4K_D8000 0x26b
366#define MSR_MTRRfix4K_E0000 0x26c
367#define MSR_MTRRfix4K_E8000 0x26d
368#define MSR_MTRRfix4K_F0000 0x26e
369#define MSR_MTRRfix4K_F8000 0x26f
370
371#define MSR_PAT 0x277
372
373#define MSR_MTRRdefType 0x2ff
374
375#define MSR_CORE_PERF_FIXED_CTR0 0x309
376#define MSR_CORE_PERF_FIXED_CTR1 0x30a
377#define MSR_CORE_PERF_FIXED_CTR2 0x30b
378#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
379#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
380#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
381#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
382
383#define MSR_MC0_CTL 0x400
384#define MSR_MC0_STATUS 0x401
385#define MSR_MC0_ADDR 0x402
386#define MSR_MC0_MISC 0x403
387
388#define MSR_EFER 0xc0000080
389
390#define MSR_EFER_SCE (1 << 0)
391#define MSR_EFER_LME (1 << 8)
392#define MSR_EFER_LMA (1 << 10)
393#define MSR_EFER_NXE (1 << 11)
394#define MSR_EFER_SVME (1 << 12)
395#define MSR_EFER_FFXSR (1 << 14)
396
397#define MSR_STAR 0xc0000081
398#define MSR_LSTAR 0xc0000082
399#define MSR_CSTAR 0xc0000083
400#define MSR_FMASK 0xc0000084
401#define MSR_FSBASE 0xc0000100
402#define MSR_GSBASE 0xc0000101
403#define MSR_KERNELGSBASE 0xc0000102
404#define MSR_TSC_AUX 0xc0000103
405
406#define MSR_VM_HSAVE_PA 0xc0010117
407
408#define MSR_IA32_BNDCFGS 0x00000d90
409#define MSR_IA32_XSS 0x00000da0
410
411#define XSTATE_FP_BIT 0
412#define XSTATE_SSE_BIT 1
413#define XSTATE_YMM_BIT 2
414#define XSTATE_BNDREGS_BIT 3
415#define XSTATE_BNDCSR_BIT 4
416#define XSTATE_OPMASK_BIT 5
417#define XSTATE_ZMM_Hi256_BIT 6
418#define XSTATE_Hi16_ZMM_BIT 7
419#define XSTATE_PKRU_BIT 9
420
421#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
422#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
423#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
424#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
425#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
426#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
427#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
428#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
429#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
430
431
/* Index of each CPUID register that carries feature bits; used to
 * address a FeatureWordArray.  The enumerator order is significant:
 * it is the array layout of CPUX86State.features. */
typedef enum FeatureWord {
    FEAT_1_EDX,         /* CPUID[1].EDX */
    FEAT_1_ECX,         /* CPUID[1].ECX */
    FEAT_7_0_EBX,       /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_7_0_ECX,       /* CPUID[EAX=7,ECX=0].ECX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX (Centaur/VIA) */
    FEAT_KVM,           /* KVM paravirtual feature bits */
    FEAT_SVM,           /* SVM feature bits (see CPUID_SVM_* below) */
    FEAT_XSAVE,         /* XSAVE feature bits (see CPUID_XSAVE_* below) */
    FEAT_6_EAX,         /* CPUID[6].EAX */
    FEATURE_WORDS,      /* number of feature words; keep last */
} FeatureWord;
447
448typedef uint32_t FeatureWordArray[FEATURE_WORDS];
449
450
451#define CPUID_FP87 (1U << 0)
452#define CPUID_VME (1U << 1)
453#define CPUID_DE (1U << 2)
454#define CPUID_PSE (1U << 3)
455#define CPUID_TSC (1U << 4)
456#define CPUID_MSR (1U << 5)
457#define CPUID_PAE (1U << 6)
458#define CPUID_MCE (1U << 7)
459#define CPUID_CX8 (1U << 8)
460#define CPUID_APIC (1U << 9)
461#define CPUID_SEP (1U << 11)
462#define CPUID_MTRR (1U << 12)
463#define CPUID_PGE (1U << 13)
464#define CPUID_MCA (1U << 14)
465#define CPUID_CMOV (1U << 15)
466#define CPUID_PAT (1U << 16)
467#define CPUID_PSE36 (1U << 17)
468#define CPUID_PN (1U << 18)
469#define CPUID_CLFLUSH (1U << 19)
470#define CPUID_DTS (1U << 21)
471#define CPUID_ACPI (1U << 22)
472#define CPUID_MMX (1U << 23)
473#define CPUID_FXSR (1U << 24)
474#define CPUID_SSE (1U << 25)
475#define CPUID_SSE2 (1U << 26)
476#define CPUID_SS (1U << 27)
477#define CPUID_HT (1U << 28)
478#define CPUID_TM (1U << 29)
479#define CPUID_IA64 (1U << 30)
480#define CPUID_PBE (1U << 31)
481
482#define CPUID_EXT_SSE3 (1U << 0)
483#define CPUID_EXT_PCLMULQDQ (1U << 1)
484#define CPUID_EXT_DTES64 (1U << 2)
485#define CPUID_EXT_MONITOR (1U << 3)
486#define CPUID_EXT_DSCPL (1U << 4)
487#define CPUID_EXT_VMX (1U << 5)
488#define CPUID_EXT_SMX (1U << 6)
489#define CPUID_EXT_EST (1U << 7)
490#define CPUID_EXT_TM2 (1U << 8)
491#define CPUID_EXT_SSSE3 (1U << 9)
492#define CPUID_EXT_CID (1U << 10)
493#define CPUID_EXT_FMA (1U << 12)
494#define CPUID_EXT_CX16 (1U << 13)
495#define CPUID_EXT_XTPR (1U << 14)
496#define CPUID_EXT_PDCM (1U << 15)
497#define CPUID_EXT_PCID (1U << 17)
498#define CPUID_EXT_DCA (1U << 18)
499#define CPUID_EXT_SSE41 (1U << 19)
500#define CPUID_EXT_SSE42 (1U << 20)
501#define CPUID_EXT_X2APIC (1U << 21)
502#define CPUID_EXT_MOVBE (1U << 22)
503#define CPUID_EXT_POPCNT (1U << 23)
504#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
505#define CPUID_EXT_AES (1U << 25)
506#define CPUID_EXT_XSAVE (1U << 26)
507#define CPUID_EXT_OSXSAVE (1U << 27)
508#define CPUID_EXT_AVX (1U << 28)
509#define CPUID_EXT_F16C (1U << 29)
510#define CPUID_EXT_RDRAND (1U << 30)
511#define CPUID_EXT_HYPERVISOR (1U << 31)
512
513#define CPUID_EXT2_FPU (1U << 0)
514#define CPUID_EXT2_VME (1U << 1)
515#define CPUID_EXT2_DE (1U << 2)
516#define CPUID_EXT2_PSE (1U << 3)
517#define CPUID_EXT2_TSC (1U << 4)
518#define CPUID_EXT2_MSR (1U << 5)
519#define CPUID_EXT2_PAE (1U << 6)
520#define CPUID_EXT2_MCE (1U << 7)
521#define CPUID_EXT2_CX8 (1U << 8)
522#define CPUID_EXT2_APIC (1U << 9)
523#define CPUID_EXT2_SYSCALL (1U << 11)
524#define CPUID_EXT2_MTRR (1U << 12)
525#define CPUID_EXT2_PGE (1U << 13)
526#define CPUID_EXT2_MCA (1U << 14)
527#define CPUID_EXT2_CMOV (1U << 15)
528#define CPUID_EXT2_PAT (1U << 16)
529#define CPUID_EXT2_PSE36 (1U << 17)
530#define CPUID_EXT2_MP (1U << 19)
531#define CPUID_EXT2_NX (1U << 20)
532#define CPUID_EXT2_MMXEXT (1U << 22)
533#define CPUID_EXT2_MMX (1U << 23)
534#define CPUID_EXT2_FXSR (1U << 24)
535#define CPUID_EXT2_FFXSR (1U << 25)
536#define CPUID_EXT2_PDPE1GB (1U << 26)
537#define CPUID_EXT2_RDTSCP (1U << 27)
538#define CPUID_EXT2_LM (1U << 29)
539#define CPUID_EXT2_3DNOWEXT (1U << 30)
540#define CPUID_EXT2_3DNOW (1U << 31)
541
542
543#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
544 CPUID_EXT2_DE | CPUID_EXT2_PSE | \
545 CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
546 CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
547 CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
548 CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
549 CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
550 CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
551 CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
552
553#define CPUID_EXT3_LAHF_LM (1U << 0)
554#define CPUID_EXT3_CMP_LEG (1U << 1)
555#define CPUID_EXT3_SVM (1U << 2)
556#define CPUID_EXT3_EXTAPIC (1U << 3)
557#define CPUID_EXT3_CR8LEG (1U << 4)
558#define CPUID_EXT3_ABM (1U << 5)
559#define CPUID_EXT3_SSE4A (1U << 6)
560#define CPUID_EXT3_MISALIGNSSE (1U << 7)
561#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
562#define CPUID_EXT3_OSVW (1U << 9)
563#define CPUID_EXT3_IBS (1U << 10)
564#define CPUID_EXT3_XOP (1U << 11)
565#define CPUID_EXT3_SKINIT (1U << 12)
566#define CPUID_EXT3_WDT (1U << 13)
567#define CPUID_EXT3_LWP (1U << 15)
568#define CPUID_EXT3_FMA4 (1U << 16)
569#define CPUID_EXT3_TCE (1U << 17)
570#define CPUID_EXT3_NODEID (1U << 19)
571#define CPUID_EXT3_TBM (1U << 21)
572#define CPUID_EXT3_TOPOEXT (1U << 22)
573#define CPUID_EXT3_PERFCORE (1U << 23)
574#define CPUID_EXT3_PERFNB (1U << 24)
575
576#define CPUID_SVM_NPT (1U << 0)
577#define CPUID_SVM_LBRV (1U << 1)
578#define CPUID_SVM_SVMLOCK (1U << 2)
579#define CPUID_SVM_NRIPSAVE (1U << 3)
580#define CPUID_SVM_TSCSCALE (1U << 4)
581#define CPUID_SVM_VMCBCLEAN (1U << 5)
582#define CPUID_SVM_FLUSHASID (1U << 6)
583#define CPUID_SVM_DECODEASSIST (1U << 7)
584#define CPUID_SVM_PAUSEFILTER (1U << 10)
585#define CPUID_SVM_PFTHRESHOLD (1U << 12)
586
587#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
588#define CPUID_7_0_EBX_BMI1 (1U << 3)
589#define CPUID_7_0_EBX_HLE (1U << 4)
590#define CPUID_7_0_EBX_AVX2 (1U << 5)
591#define CPUID_7_0_EBX_SMEP (1U << 7)
592#define CPUID_7_0_EBX_BMI2 (1U << 8)
593#define CPUID_7_0_EBX_ERMS (1U << 9)
594#define CPUID_7_0_EBX_INVPCID (1U << 10)
595#define CPUID_7_0_EBX_RTM (1U << 11)
596#define CPUID_7_0_EBX_MPX (1U << 14)
597#define CPUID_7_0_EBX_AVX512F (1U << 16)
598#define CPUID_7_0_EBX_RDSEED (1U << 18)
599#define CPUID_7_0_EBX_ADX (1U << 19)
600#define CPUID_7_0_EBX_SMAP (1U << 20)
601#define CPUID_7_0_EBX_PCOMMIT (1U << 22)
602#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23)
603#define CPUID_7_0_EBX_CLWB (1U << 24)
604#define CPUID_7_0_EBX_AVX512PF (1U << 26)
605#define CPUID_7_0_EBX_AVX512ER (1U << 27)
606#define CPUID_7_0_EBX_AVX512CD (1U << 28)
607
608#define CPUID_7_0_ECX_PKU (1U << 3)
609#define CPUID_7_0_ECX_OSPKE (1U << 4)
610
611#define CPUID_XSAVE_XSAVEOPT (1U << 0)
612#define CPUID_XSAVE_XSAVEC (1U << 1)
613#define CPUID_XSAVE_XGETBV1 (1U << 2)
614#define CPUID_XSAVE_XSAVES (1U << 3)
615
616#define CPUID_6_EAX_ARAT (1U << 2)
617
618
619#define CPUID_APM_INVTSC (1U << 8)
620
621#define CPUID_VENDOR_SZ 12
622
623#define CPUID_VENDOR_INTEL_1 0x756e6547
624#define CPUID_VENDOR_INTEL_2 0x49656e69
625#define CPUID_VENDOR_INTEL_3 0x6c65746e
626#define CPUID_VENDOR_INTEL "GenuineIntel"
627
628#define CPUID_VENDOR_AMD_1 0x68747541
629#define CPUID_VENDOR_AMD_2 0x69746e65
630#define CPUID_VENDOR_AMD_3 0x444d4163
631#define CPUID_VENDOR_AMD "AuthenticAMD"
632
633#define CPUID_VENDOR_VIA "CentaurHauls"
634
635#define CPUID_MWAIT_IBE (1U << 1)
636#define CPUID_MWAIT_EMX (1U << 0)
637
638#ifndef HYPERV_SPINLOCK_NEVER_RETRY
639#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
640#endif
641
642#define EXCP00_DIVZ 0
643#define EXCP01_DB 1
644#define EXCP02_NMI 2
645#define EXCP03_INT3 3
646#define EXCP04_INTO 4
647#define EXCP05_BOUND 5
648#define EXCP06_ILLOP 6
649#define EXCP07_PREX 7
650#define EXCP08_DBLE 8
651#define EXCP09_XERR 9
652#define EXCP0A_TSS 10
653#define EXCP0B_NOSEG 11
654#define EXCP0C_STACK 12
655#define EXCP0D_GPF 13
656#define EXCP0E_PAGE 14
657#define EXCP10_COPR 16
658#define EXCP11_ALGN 17
659#define EXCP12_MCHK 18
660
661#define EXCP_SYSCALL 0x100
662
663
664
665#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
666#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
667#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
668#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
669#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
670#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
671#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2
672
673
674#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
675
/* Lazy condition-code evaluation: instead of materialising EFLAGS after
 * every instruction, the translator records in cc_op which operation
 * last set the flags, and keeps its operands/result in cc_dst/cc_src/
 * cc_src2 so the flags can be recomputed on demand.  The B/W/L/Q
 * suffixes select the operand width of the recorded operation. */
typedef enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_BMILGB, /* BMI BLSI/BLSR/BLSMSK-style result in CC_DST */
    CC_OP_BMILGW,
    CC_OP_BMILGL,
    CC_OP_BMILGQ,

    CC_OP_ADCX, /* ADX: CC_DST = C, CC_SRC = rest of the flags */
    CC_OP_ADOX, /* ADX: CC_DST = O, CC_SRC = rest of the flags */
    CC_OP_ADCOX, /* ADX: CC_DST = C, CC_SRC2 = O, CC_SRC = rest */

    CC_OP_CLR, /* Z set, all other flags clear */

    CC_OP_NB,  /* number of cc_op values; keep last */
} CCOp;
743
/* Cached ("hidden") part of a segment register: the visible selector
 * plus the descriptor fields loaded from the descriptor table.  Also
 * used below for the GDT/IDT/LDT/TR table registers. */
typedef struct SegmentCache {
    uint32_t selector;  /* visible segment selector */
    target_ulong base;  /* linear base address */
    uint32_t limit;     /* segment limit */
    uint32_t flags;     /* descriptor attributes (DESC_*_MASK bits) */
} SegmentCache;
750
/* Define a union type for a 'bits'-wide media register that can be
 * viewed as an array of bytes (_b_), words (_w_), dwords (_l_),
 * qwords (_q_), or single/double floats (_s_/_d_).  Accessors such as
 * ZMM_B()/MMX_W() below pick the host-endian-correct element. */
#define MMREG_UNION(n, bits) \
    union n { \
        uint8_t _b_##n[(bits)/8]; \
        uint16_t _w_##n[(bits)/16]; \
        uint32_t _l_##n[(bits)/32]; \
        uint64_t _q_##n[(bits)/64]; \
        float32 _s_##n[(bits)/32]; \
        float64 _d_##n[(bits)/64]; \
    }

/* 512-bit SSE/AVX/AVX-512 register view. */
typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
/* 64-bit MMX register view. */
typedef MMREG_UNION(MMXReg, 64) MMXReg;
763
/* One MPX bound register (BND0-BND3). */
typedef struct BNDReg {
    uint64_t lb; /* lower bound */
    uint64_t ub; /* upper bound */
} BNDReg;
768
/* MPX bound configuration and status pair (user-mode BNDCFGU plus
 * BNDSTATUS); see the BNDCFG_* bits below. */
typedef struct BNDCSReg {
    uint64_t cfgu; /* BNDCFGU */
    uint64_t sts;  /* BNDSTATUS */
} BNDCSReg;
773
774#define BNDCFG_ENABLE 1ULL
775#define BNDCFG_BNDPRESERVE 2ULL
776#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
777
778#ifdef HOST_WORDS_BIGENDIAN
779#define ZMM_B(n) _b_ZMMReg[63 - (n)]
780#define ZMM_W(n) _w_ZMMReg[31 - (n)]
781#define ZMM_L(n) _l_ZMMReg[15 - (n)]
782#define ZMM_S(n) _s_ZMMReg[15 - (n)]
783#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
784#define ZMM_D(n) _d_ZMMReg[7 - (n)]
785
786#define MMX_B(n) _b_MMXReg[7 - (n)]
787#define MMX_W(n) _w_MMXReg[3 - (n)]
788#define MMX_L(n) _l_MMXReg[1 - (n)]
789#define MMX_S(n) _s_MMXReg[1 - (n)]
790#else
791#define ZMM_B(n) _b_ZMMReg[n]
792#define ZMM_W(n) _w_ZMMReg[n]
793#define ZMM_L(n) _l_ZMMReg[n]
794#define ZMM_S(n) _s_ZMMReg[n]
795#define ZMM_Q(n) _q_ZMMReg[n]
796#define ZMM_D(n) _d_ZMMReg[n]
797
798#define MMX_B(n) _b_MMXReg[n]
799#define MMX_W(n) _w_MMXReg[n]
800#define MMX_L(n) _l_MMXReg[n]
801#define MMX_S(n) _s_MMXReg[n]
802#endif
803#define MMX_Q(n) _q_MMXReg[n]
804
/* One x87 stack register, overlaid with the MMX register that aliases
 * it architecturally.  Aligned to 16 bytes for the save/restore code. */
typedef union {
    floatx80 d __attribute__((aligned(16))); /* 80-bit x87 value */
    MMXReg mmx;                              /* aliased MMX view */
} FPReg;
809
/* One variable-range MTRR: the MSR_MTRRphysBase/MSR_MTRRphysMask
 * register pair. */
typedef struct {
    uint64_t base; /* MTRRphysBase value */
    uint64_t mask; /* MTRRphysMask value */
} MTRRVar;
814
815#define CPU_NB_REGS64 16
816#define CPU_NB_REGS32 8
817
818#ifdef TARGET_X86_64
819#define CPU_NB_REGS CPU_NB_REGS64
820#else
821#define CPU_NB_REGS CPU_NB_REGS32
822#endif
823
824#define MAX_FIXED_COUNTERS 3
825#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
826
827#define NB_MMU_MODES 3
828#define TARGET_INSN_START_EXTRA_WORDS 1
829
830#define NB_OPMASK_REGS 8
831
/* Direction of the last recorded guest access to the task priority
 * register (stored in CPUX86State.tpr_access_type). */
typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;
836
/* Complete architectural and emulation state of one x86 CPU. */
typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags;

    /* Emulator-internal eflags handling: the arithmetic flags are
     * computed lazily from cc_op (a CCOp value) and the operands and
     * result of the last flag-setting operation kept below; DF is
     * tracked separately in 'df'. */
    target_ulong cc_dst;
    target_ulong cc_src;
    target_ulong cc_src2;
    uint32_t cc_op;
    int32_t df;        /* direction flag state */
    uint32_t hflags;   /* TB flags, see HF_* constants above */

    uint32_t hflags2;  /* auxiliary flags, see HF2_* constants above */

    /* segments */
    SegmentCache segs[6]; /* indexed by R_ES..R_GS */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt;  /* only base and limit are used */
    SegmentCache idt;  /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    int32_t a20_mask;

    /* MPX state */
    BNDReg bnd_regs[4];
    BNDCSReg bndcs_regs;
    uint64_t msr_bndcfgs;
    uint64_t efer;

    /* Empty marker: start of the region handled by the init-save
     * machinery (see matching end_init_save below). */
    struct {} start_init_save;

    /* FPU state */
    unsigned int fpstt; /* top of x87 register stack */
    uint16_t fpus;      /* FPU status word */
    uint16_t fpuc;      /* FPU control word */
    uint8_t fptags[8];  /* per-register tag */
    FPReg fpregs[8];

    uint16_t fpop;      /* last FPU opcode */
    uint64_t fpip;      /* last FPU instruction pointer */
    uint64_t fpdp;      /* last FPU data pointer */

    /* emulator internals for float emulation */
    float_status fp_status;
    floatx80 ft0;

    float_status mmx_status; /* softfloat status for MMX/3DNow! ops */
    float_status sse_status; /* softfloat status for SSE ops */
    uint32_t mxcsr;
    ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
    ZMMReg xmm_t0;  /* scratch register for helpers */
    MMXReg mmx_t0;  /* scratch register for helpers */

    uint64_t opmask_regs[NB_OPMASK_REGS]; /* AVX-512 opmask registers */

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t star;

    uint64_t vm_hsave; /* SVM host save area address (MSR_VM_HSAVE_PA) */

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t tsc;
    uint64_t tsc_adjust;
    uint64_t tsc_deadline;

    uint64_t mcg_status;
    uint64_t msr_ia32_misc_enable;
    uint64_t msr_ia32_feature_control;

    /* architectural performance monitoring MSRs */
    uint64_t msr_fixed_ctr_ctrl;
    uint64_t msr_global_ctrl;
    uint64_t msr_global_status;
    uint64_t msr_global_ovf_ctrl;
    uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
    uint64_t msr_gp_counters[MAX_GP_COUNTERS];
    uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];

    uint64_t pat;
    uint32_t smbase;

    /* Empty marker: end of the init-save region. */
    struct {} end_init_save;

    /* KVM paravirtual MSRs */
    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t steal_time_msr;
    uint64_t async_pf_en_msr;
    uint64_t pv_eoi_en_msr;

    /* Hyper-V synthetic MSRs (see standard-headers/asm-x86/hyperv.h) */
    uint64_t msr_hv_hypercall;
    uint64_t msr_hv_guest_os_id;
    uint64_t msr_hv_vapic;
    uint64_t msr_hv_tsc;
    uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
    uint64_t msr_hv_runtime;
    uint64_t msr_hv_synic_control;
    uint64_t msr_hv_synic_version;
    uint64_t msr_hv_synic_evt_page;
    uint64_t msr_hv_synic_msg_page;
    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
    uint64_t msr_hv_stimer_config[HV_SYNIC_STIMER_COUNT];
    uint64_t msr_hv_stimer_count[HV_SYNIC_STIMER_COUNT];

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    union {
        struct CPUBreakpoint *cpu_breakpoint[4];
        struct CPUWatchpoint *cpu_watchpoint[4];
    }; /* breakpoint/watchpoint objects backing dr[0..3] */
    int old_exception; /* exception in flight */

    /* SVM (nested virtualization) state */
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

    /* NMI delivery state (used with KVM) */
    uint8_t nmi_injected;
    uint8_t nmi_pending;

    CPU_COMMON

    /* Fields after CPU_COMMON are preserved across CPU reset. */

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_xlevel;   /* max extended (0x8000xxxx) CPUID leaf */
    uint32_t cpuid_xlevel2;  /* max Centaur (0xC000xxxx) CPUID leaf */
    uint32_t cpuid_vendor1;  /* vendor string word 1 (see CPUID_VENDOR_*) */
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    FeatureWordArray features; /* feature bits, indexed by FeatureWord */
    uint32_t cpuid_model[12];  /* brand string registers */

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[MSR_MTRRcap_VCNT];

    /* For KVM */
    uint32_t mp_state;
    int32_t exception_injected;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t has_error_code;
    uint32_t sipi_vector;
    bool tsc_valid;
    int64_t tsc_khz;
    int64_t user_tsc_khz; /* user-requested TSC frequency */
    void *kvm_xsave_buf;

    /* machine check emulation */
    uint64_t mcg_cap;
    uint64_t mcg_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF*4]; /* 4 registers per bank */

    uint64_t tsc_aux;

    /* vmstate-only copies used during migration */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;
    uint64_t xstate_bv;

    /* XSAVE control */
    uint64_t xcr0;
    uint64_t xss;

    uint32_t pkru; /* protection-key rights register */

    TPRAccess tpr_access_type; /* last recorded TPR access direction */
} CPUX86State;
1030
1031#include "cpu-qom.h"
1032
1033X86CPU *cpu_x86_init(const char *cpu_model);
1034X86CPU *cpu_x86_create(const char *cpu_model, Error **errp);
1035int cpu_x86_exec(CPUState *cpu);
1036void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
1037void x86_cpudef_setup(void);
1038int cpu_x86_support_mca_broadcast(CPUX86State *env);
1039
1040int cpu_get_pic_interrupt(CPUX86State *s);
1041
1042void cpu_set_ferr(CPUX86State *s);
1043
1044
1045
/* Load a complete segment descriptor into env->segs[seg_reg] and keep
 * the translator-visible hflags in sync: CS loads update the CS32/CS64
 * code-size flags, SS loads update the CPL, and every load recomputes
 * the SS32 and ADDSEG (non-zero segment base) flags. */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case: CS32 mirrors the
                 * descriptor's B (default operand size) bit */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        if (seg_reg == R_SS) {
            /* CPL is taken from the DPL of the new stack segment */
            int cpl = (flags >> DESC_DPL_SHIFT) & 3;
#if HF_CPL_MASK != 3
#error HF_CPL_MASK is hardcoded
#endif
            env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 mode and 16-bit protected mode always
             * use segmented addressing: only base and selector are
             * reloaded on those paths, so ADDSEG must be forced on
             * rather than derived from the cached bases. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            /* 32-bit protected mode: segmentation can be skipped only
             * if all data-segment bases are zero */
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
1109
1110static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
1111 uint8_t sipi_vector)
1112{
1113 CPUState *cs = CPU(cpu);
1114 CPUX86State *env = &cpu->env;
1115
1116 env->eip = 0;
1117 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
1118 sipi_vector << 12,
1119 env->segs[R_CS].limit,
1120 env->segs[R_CS].flags);
1121 cs->halted = 0;
1122}
1123
1124int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1125 target_ulong *base, unsigned int *limit,
1126 unsigned int *flags);
1127
1128
1129
1130void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
1131floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);
1132
1133
1134
1135
1136void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
1137void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
1138void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
1139
1140
1141
1142
1143int cpu_x86_signal_handler(int host_signum, void *pinfo,
1144 void *puc);
1145
1146
/* Describes one XSAVE state component: the feature word/bits that
 * enable it, plus its offset and size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;  /* enabling FeatureWord index and bit mask */
    uint32_t offset, size;   /* location within the XSAVE area */
} ExtSaveArea;
1151
1152extern const ExtSaveArea x86_ext_save_areas[];
1153
1154void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1155 uint32_t *eax, uint32_t *ebx,
1156 uint32_t *ecx, uint32_t *edx);
1157void cpu_clear_apic_feature(CPUX86State *env);
1158void host_cpuid(uint32_t function, uint32_t count,
1159 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
1160
1161
1162int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
1163 int is_write, int mmu_idx);
1164void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
1165
1166#ifndef CONFIG_USER_ONLY
1167uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
1168uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
1169uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
1170uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
1171void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
1172void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
1173void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
1174void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
1175void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
1176#endif
1177
1178void breakpoint_handler(CPUState *cs);
1179
1180
1181void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
1182void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
1183void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
1184void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
1185
1186
1187uint64_t cpu_get_tsc(CPUX86State *env);
1188
/* 4 KiB pages. */
#define TARGET_PAGE_BITS 12

#ifdef TARGET_X86_64
/* 52-bit physical addresses: the architectural maximum for x86-64. */
#define TARGET_PHYS_ADDR_SPACE_BITS 52



/* Canonical 64-bit virtual addresses span 48 bits split across the
 * top and bottom of the address space; 47 bits per half. */
#define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
/* 36 bits with PAE paging. */
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif



/* Mask applied to PTE physical addresses: 40 bits on 64-bit targets,
 * 36 bits (PAE) otherwise.  NOTE(review): 40 is narrower than
 * TARGET_PHYS_ADDR_SPACE_BITS (52) — looks intentional/legacy, confirm. */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xffffffffffLL
# else
# define PHYS_ADDR_MASK 0xfffffffffLL
# endif

#define cpu_init(cpu_model) CPU(cpu_x86_init(cpu_model))

/* Map the generic CPU entry points onto their x86 implementations. */
#define cpu_exec cpu_x86_exec
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
#define cpudef_setup x86_cpudef_setup

/* Three MMU modes: kernel with SMAP enforced, user, and kernel with
 * SMAP not enforced.  The *_IDX values index the softmmu TLBs. */
#define MMU_MODE0_SUFFIX _ksmap
#define MMU_MODE1_SUFFIX _user
#define MMU_MODE2_SUFFIX _knosmap
#define MMU_KSMAP_IDX 0
#define MMU_USER_IDX 1
#define MMU_KNOSMAP_IDX 2
1224static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
1225{
1226 return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
1227 (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
1228 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
1229}
1230
1231static inline int cpu_mmu_index_kernel(CPUX86State *env)
1232{
1233 return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
1234 ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
1235 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
1236}
1237
/* Shorthand for the lazy condition-code state held in env; the real
 * flag values are reconstructed from these plus CC_OP on demand
 * (see cpu_cc_compute_all). */
#define CC_DST (env->cc_dst)
#define CC_SRC (env->cc_src)
#define CC_SRC2 (env->cc_src2)
#define CC_OP (env->cc_op)
1243
1244static inline target_long lshift(target_long x, int n)
1245{
1246 if (n >= 0) {
1247 return x << n;
1248 } else {
1249 return x >> (-n);
1250 }
1251}
1252
1253
/* Scratch floating-point temporary. */
#define FT0 (env->ft0)
/* x87 stack registers, addressed relative to the current top of
 * stack (env->fpstt), modulo 8. */
#define ST0 (env->fpregs[env->fpstt].d)
#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
#define ST1 ST(1)

/* One-time initialization of the TCG translator for x86. */
void tcg_x86_init(void);
1261
1262#include "exec/cpu-all.h"
1263#include "svm.h"
1264
1265#if !defined(CONFIG_USER_ONLY)
1266#include "hw/i386/apic.h"
1267#endif
1268
1269#include "exec/exec-all.h"
1270
1271static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
1272 target_ulong *cs_base, int *flags)
1273{
1274 *cs_base = env->segs[R_CS].base;
1275 *pc = *cs_base + env->eip;
1276 *flags = env->hflags |
1277 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
1278}
1279
void do_cpu_init(X86CPU *cpu);
void do_cpu_sipi(X86CPU *cpu);

/* Flag bits for cpu_x86_inject_mce(). */
#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2

/* Inject a machine-check event into the given MCE bank; 'flags' is a
 * mask of the MCE_INJECT_* values above. */
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags);

/* Raise a CPU exception; none of these return.  The _err variants
 * carry an error code, and the _ra variants take a host return
 * address (presumably for precise unwinding — see implementations). */
void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
                                      uintptr_t retaddr);
void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
                                       int error_code);
void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
                                          int error_code, uintptr_t retaddr);
void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
                                   int error_code, int next_eip_addend);

/* Precomputed PF flag value for each possible result byte. */
extern const uint8_t parity_table[256];
/* Reconstruct the full arithmetic flag set from the lazy CC state. */
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
void update_fp_status(CPUX86State *env);
1305
1306static inline uint32_t cpu_compute_eflags(CPUX86State *env)
1307{
1308 return env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
1309}
1310
1311
1312
1313
1314static inline void cpu_load_eflags(CPUX86State *env, int eflags,
1315 int update_mask)
1316{
1317 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1318 CC_OP = CC_OP_EFLAGS;
1319 env->df = 1 - (2 * ((eflags >> 10) & 1));
1320 env->eflags = (env->eflags & ~update_mask) |
1321 (eflags & update_mask) | 0x2;
1322}
1323
1324
1325
1326static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
1327{
1328 env->efer = val;
1329 env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
1330 if (env->efer & MSR_EFER_LMA) {
1331 env->hflags |= HF_LMA_MASK;
1332 }
1333 if (env->efer & MSR_EFER_SVME) {
1334 env->hflags |= HF_SVME_MASK;
1335 }
1336}
1337
1338static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
1339{
1340 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
1341}
1342
1343
/* Set the SSE MXCSR / x87 control word. */
void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
void cpu_set_fpuc(CPUX86State *env, uint16_t val);


/* Presumably one-time setup for LOCK-prefix emulation helpers —
 * confirm against the helper implementation. */
void helper_lock_init(void);


/* SVM (AMD virtualization) intercept checks and guest exit. */
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param);
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);


/* Deliver interrupt 'intno'; is_hw distinguishes hardware interrupts. */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);


/* System Management Mode entry and state refresh. */
void do_smm_enter(X86CPU *cpu);
void cpu_smm_update(X86CPU *cpu);

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);








/* Override a default CPU property value applied under KVM. */
void x86_cpu_change_kvm_default(const char *prop, const char *value);


/* Recompute the MPX bound-register hflags bits from current state. */
void cpu_sync_bndcs_hflags(CPUX86State *env);


/* Name of 32-bit register number 'reg' (R_EAX..R_EDI ordering). */
const char *get_register_name_32(unsigned int reg);

void enable_compat_apic_id_mode(void);

/* Default local APIC MMIO window. */
#define APIC_DEFAULT_ADDRESS 0xfee00000
#define APIC_SPACE_SIZE 0x100000

void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags);

#endif /* CPU_I386_H */
1389