1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "cpu.h"
21#include "sysemu/kvm.h"
22#ifndef CONFIG_USER_ONLY
23#include "sysemu/sysemu.h"
24#include "monitor/monitor.h"
25#endif
26
27
28
29static void cpu_x86_version(CPUX86State *env, int *family, int *model)
30{
31 int cpuver = env->cpuid_version;
32
33 if (family == NULL || model == NULL) {
34 return;
35 }
36
37 *family = (cpuver >> 8) & 0x0f;
38 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
39}
40
41
42int cpu_x86_support_mca_broadcast(CPUX86State *env)
43{
44 int family = 0;
45 int model = 0;
46
47 cpu_x86_version(env, &family, &model);
48 if ((family == 6 && model >= 14) || family > 6) {
49 return 1;
50 }
51
52 return 0;
53}
54
55
56
57
/* Printable names for the lazy condition-code states, indexed by
 * env->cc_op.  The order must match the CC_OP_* enum (CC_OP_NB entries);
 * used only when dumping CPU state with CPU_DUMP_CCOP. */
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
123
/* Print one cached segment register (selector, base, limit, flags) to @f,
 * followed by a decoded description of the descriptor type when the
 * descriptor is valid. */
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment active: print the full 64-bit base */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* In real mode, or for a non-present descriptor, there is no valid
     * type information to decode. */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* code or data segment */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system segment or gate: decode the raw descriptor type field */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* long mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
180
181#define DUMP_CODE_BYTES_TOTAL 50
182#define DUMP_CODE_BYTES_BACKWARD 20
183
/* Print the complete architectural state of @cs to @f.
 * @flags is a mask of optional sections: CPU_DUMP_CCOP (lazy
 * condition-code state), CPU_DUMP_FPU (x87/SSE registers) and
 * CPU_DUMP_CODE (code bytes around the current EIP/RIP). */
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* materialize EFLAGS from the lazily-evaluated condition codes */
    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: dump the full 64-bit register file */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    /* segment registers, then LDT and task register */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: 64-bit descriptor-table bases, CR2/CR3 and DRx */
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        /* show the raw lazy condition-code state (cc_src/cc_dst/cc_op) */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* rebuild the packed x87 tag word from the per-register flags */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* only 8 XMM registers are architecturally visible outside
           64-bit code segments */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* dump DUMP_CODE_BYTES_TOTAL bytes around EIP, starting up to
         * DUMP_CODE_BYTES_BACKWARD bytes before it; the byte at EIP is
         * marked with <..> */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                /* unreadable byte (e.g. unmapped page) */
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
377
378
379
380
381
382void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
383{
384 CPUX86State *env = &cpu->env;
385
386 a20_state = (a20_state != 0);
387 if (a20_state != ((env->a20_mask >> 20) & 1)) {
388 CPUState *cs = CPU(cpu);
389
390#if defined(DEBUG_MMU)
391 printf("A20 update: a20=%d\n", a20_state);
392#endif
393
394
395 cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
396
397
398
399 tlb_flush(cs, 1);
400 env->a20_mask = ~(1 << 20) | (a20_state << 20);
401 }
402}
403
/* Install a new CR0 value, handling long-mode entry/exit and keeping the
 * derived hflags bits (PE, ADDSEG, MP, EM, TS) in sync. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* Changing PG, WP or PE invalidates all cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* Enabling paging with EFER.LME set activates long mode, but only
         * when PAE is already enabled; otherwise CR0 is left unchanged. */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* Disabling paging leaves long mode. */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* CR0.ET is forced to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* mirror CR0.PE into hflags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* in real mode (PE=0) segment bases are always added to addresses */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* CR0 bits MP/EM/TS sit one position below the corresponding HF_*
     * bits, so one shift relocates all three at once */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
445
446
447
448void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
449{
450 X86CPU *cpu = x86_env_get_cpu(env);
451
452 env->cr[3] = new_cr3;
453 if (env->cr[0] & CR0_PG_MASK) {
454#if defined(DEBUG_MMU)
455 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
456#endif
457 tlb_flush(CPU(cpu), 0);
458 }
459}
460
461void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
462{
463 X86CPU *cpu = x86_env_get_cpu(env);
464
465#if defined(DEBUG_MMU)
466 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
467#endif
468 if ((new_cr4 ^ env->cr[4]) &
469 (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
470 CR4_SMEP_MASK | CR4_SMAP_MASK)) {
471 tlb_flush(CPU(cpu), 1);
472 }
473
474 if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
475 new_cr4 &= ~CR4_OSFXSR_MASK;
476 }
477 env->hflags &= ~HF_OSFXSR_MASK;
478 if (new_cr4 & CR4_OSFXSR_MASK) {
479 env->hflags |= HF_OSFXSR_MASK;
480 }
481
482 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
483 new_cr4 &= ~CR4_SMAP_MASK;
484 }
485 env->hflags &= ~HF_SMAP_MASK;
486 if (new_cr4 & CR4_SMAP_MASK) {
487 env->hflags |= HF_SMAP_MASK;
488 }
489
490 env->cr[4] = new_cr4;
491}
492
493#if defined(CONFIG_USER_ONLY)
494
495int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
496 int is_write, int mmu_idx)
497{
498 X86CPU *cpu = X86_CPU(cs);
499 CPUX86State *env = &cpu->env;
500
501
502 is_write &= 1;
503 env->cr[2] = addr;
504 env->error_code = (is_write << PG_ERROR_W_BIT);
505 env->error_code |= PG_ERROR_U_MASK;
506 cs->exception_index = EXCP0E_PAGE;
507 return 1;
508}
509
510#else
511
512
513
514# if defined(TARGET_X86_64)
515# define PHYS_ADDR_MASK 0xfffffff000LL
516# else
517# define PHYS_ADDR_MASK 0xffffff000LL
518# endif
519
520
521
522
523
524
/* Handle a TLB miss at @addr by walking the guest page tables.
 * @is_write1: 0 = read, 1 = write, 2 = instruction fetch.
 * Returns 0 when a translation was installed with tlb_set_page(), or 1
 * when a fault was raised (cs->exception_index / env->error_code set,
 * and CR2 or the SVM exit_info_2 field updated with the faulting
 * address). */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping with full access */
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* outside long mode addresses wrap at 4 GB */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* a non-canonical address (bits 63..47 not all equal)
               raises #GP, not #PF */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
            }
            /* ptep accumulates the permissions of every level; NX is
               inverted so it can be AND-combined like USER/RW */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* 32-bit PAE: a 4-entry PDPT with no permission bits, so
               start with all permissions granted */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                /* instruction fetch from a no-execute page */
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                /* SMAP: supervisor data access to a user page faults */
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                /* SMEP: supervisor fetch from a user page faults */
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* no extra checks for other mmu_idx values */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            /* keep the low attribute bits, align the frame to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* no extra checks for other mmu_idx values */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* legacy two-level page table walk */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* a PSE directory entry maps a 4 MB page directly */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* no extra checks for other mmu_idx values */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            /* align to page_size, keeping the low attribute bits */
            pte = pde & ~( (page_size - 1) & ~0xfff);
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            /* second-level page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* no extra checks for other mmu_idx values */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only grant write access once the page is dirty: the first
           write to a clean page must trap here to set the dirty bit */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* even for large pages, only one TARGET_PAGE_SIZE chunk is inserted;
       page_offset selects the chunk inside the large page */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    /* report an instruction-fetch fault only when NX or SMEP can make
       fetches fault */
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* SVM intercept: the address goes to exit_info_2, CR2 untouched */
        stq_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
905
/* Translate @addr to a physical address for debugger use: walks the
 * page tables without raising faults and without setting accessed/dirty
 * bits.  Returns -1 if the address is not mapped. */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping (modulo the A20 mask) */
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* non-canonical addresses have no translation */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;

            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe & ~( (page_size - 1) & ~0xfff);
                pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
                goto out;
            }

        } else
#endif
        {
            /* 32-bit PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff);
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(cs->as, pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* legacy two-level page table walk */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page: clear the 4MB-internal address bits */
            pte = pde & ~0x003ff000;
            page_size = 4096 * 1024;
        } else {
            /* second-level page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1011
/* Mirror debug register DR[index] into a QEMU breakpoint or watchpoint,
 * according to the type/len/enable fields of DR7. */
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* I/O breakpoints are not emulated */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    /* data breakpoints become watchpoints */
    if (type != 0) {
        err = cpu_watchpoint_insert(cs, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        /* NOTE(review): only cpu_breakpoint[index] is cleared on failure,
         * even for the watchpoint case — presumably cpu_breakpoint and
         * cpu_watchpoint share storage; confirm against cpu.h. */
        env->cpu_breakpoint[index] = NULL;
    }
}
1045
/* Undo hw_breakpoint_insert() for DR[index]; no-op when nothing was
 * installed for that slot. */
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    CPUState *cs;

    if (!env->cpu_breakpoint[index]) {
        return;
    }
    cs = CPU(x86_env_get_cpu(env));
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* I/O breakpoints are not emulated, nothing to remove */
        break;
    }
}
1069
/* Recompute DR6 after a potential debug event.  Returns true when an
 * enabled breakpoint/watchpoint matched (i.e. #DB should be raised).
 * DR6 is written back when a hit occurred or @force_dr6_update is set. */
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    /* start from DR6 with the per-breakpoint B0-B3 bits cleared */
    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            /* I/O breakpoints are not emulated */
            break;
        }
        if (bp_match || wp_match) {
            /* Bn is set for any match; only enabled ones raise #DB */
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
1111
/* Invoked when execution stops on a breakpoint or watchpoint.  Events
 * owned by the guest's debug registers (BP_CPU) raise #DB; anything
 * else is left for the gdbstub to handle. */
void breakpoint_handler(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                /* no enabled guest watchpoint matched: re-run the insn */
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
        }
    }
}
1138
/* Arguments for do_inject_x86_mce(), executed on the target vCPU's
 * thread via run_on_cpu(). */
typedef struct MCEInjectionParams {
    Monitor *mon;         /* monitor for diagnostic messages */
    X86CPU *cpu;          /* target CPU */
    int bank;             /* MCE bank to inject into */
    uint64_t status;      /* MCi_STATUS value */
    uint64_t mcg_status;  /* MCG_STATUS value */
    uint64_t addr;        /* MCi_ADDR value */
    uint64_t misc;        /* MCi_MISC value */
    int flags;            /* MCE_INJECT_* flags */
} MCEInjectionParams;
1149
/* Inject a machine-check event into the target CPU's MCE bank registers.
 * Runs on the target vCPU's thread (see cpu_x86_inject_mce). */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /* While an MCE is already in progress (MCG_STATUS.MCIP set), drop
     * non-AR events unless unconditional injection was requested. */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /* uncorrected error path */

        /* if MCG_CTL is present but not all-ones, uncorrected error
         * reporting is globally disabled */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /* if MCi_CTL (banks[0]) is not all-ones, uncorrected error
         * reporting is disabled for this bank */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        /* a second MCE while MCIP is set, or MCE with CR4.MCE clear,
         * escalates to a triple fault (system reset) */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* overwriting a valid entry sets the overflow bit */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: record it unless an uncorrected error is
         * already latched in the bank */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* bank holds an uncorrected error: only note the overflow */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1223
/* Monitor backend for MCE injection.  Validates the request, injects the
 * event on @cpu, and with MCE_INJECT_BROADCAST also delivers a generic
 * fatal MCE to every other vCPU (hardware broadcasts machine checks). */
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    /* the low byte of MCG_CAP is the number of banks */
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        /* other CPUs get a fixed "unknown" uncorrected error in bank 1;
         * params is reused, only the per-CPU fields change below */
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
1278
1279void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
1280{
1281 X86CPU *cpu = x86_env_get_cpu(env);
1282 CPUState *cs = CPU(cpu);
1283
1284 if (kvm_enabled()) {
1285 env->tpr_access_type = access;
1286
1287 cpu_interrupt(cs, CPU_INTERRUPT_TPR);
1288 } else {
1289 cpu_restore_state(cs, cs->mem_io_pc);
1290
1291 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
1292 }
1293}
1294#endif
1295
1296int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1297 target_ulong *base, unsigned int *limit,
1298 unsigned int *flags)
1299{
1300 X86CPU *cpu = x86_env_get_cpu(env);
1301 CPUState *cs = CPU(cpu);
1302 SegmentCache *dt;
1303 target_ulong ptr;
1304 uint32_t e1, e2;
1305 int index;
1306
1307 if (selector & 0x4)
1308 dt = &env->ldt;
1309 else
1310 dt = &env->gdt;
1311 index = selector & ~7;
1312 ptr = dt->base + index;
1313 if ((index + 7) > dt->limit
1314 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1315 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1316 return 0;
1317
1318 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1319 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1320 if (e2 & DESC_G_MASK)
1321 *limit = (*limit << 12) | 0xfff;
1322 *flags = e2;
1323
1324 return 1;
1325}
1326
1327#if !defined(CONFIG_USER_ONLY)
1328void do_cpu_init(X86CPU *cpu)
1329{
1330 CPUState *cs = CPU(cpu);
1331 CPUX86State *env = &cpu->env;
1332 int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
1333 uint64_t pat = env->pat;
1334
1335 cpu_reset(cs);
1336 cs->interrupt_request = sipi;
1337 env->pat = pat;
1338 apic_init_reset(cpu->apic_state);
1339}
1340
/* Handle a startup IPI by forwarding it to the local APIC. */
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
1345#else
/* User-mode emulation: INIT/SIPI are never delivered, so these are no-ops. */
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
1352#endif
1353