1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "cpu.h"
21#include "sysemu/kvm.h"
22#include "kvm_i386.h"
23#ifndef CONFIG_USER_ONLY
24#include "sysemu/sysemu.h"
25#include "monitor/monitor.h"
26#endif
27
28
29
30static void cpu_x86_version(CPUX86State *env, int *family, int *model)
31{
32 int cpuver = env->cpuid_version;
33
34 if (family == NULL || model == NULL) {
35 return;
36 }
37
38 *family = (cpuver >> 8) & 0x0f;
39 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
40}
41
42
43int cpu_x86_support_mca_broadcast(CPUX86State *env)
44{
45 int family = 0;
46 int model = 0;
47
48 cpu_x86_version(env, &family, &model);
49 if ((family == 6 && model >= 14) || family > 6) {
50 return 1;
51 }
52
53 return 0;
54}
55
56
57
58
/* Printable names for the lazy condition-code states, indexed by CC_OP
 * value.  NOTE(review): the array is sized CC_OP_NB and the entry order
 * must match the CC_OP_* enum exactly -- keep the two in sync.
 */
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
124
/* Print one cached segment descriptor (selector, base, limit, flags and a
 * decoded type summary) as a single line to the monitor/log stream.
 */
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: print the full 64-bit base */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* Only decode the descriptor type in protected mode and when the
     * segment is present. */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* Code or data segment */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* System segment/gate: name depends on long mode */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
181
182#define DUMP_CODE_BYTES_TOTAL 50
183#define DUMP_CODE_BYTES_BACKWARD 20
184
/* Dump the full architectural CPU state (GPRs, flags, segments, control
 * and debug registers, and optionally condition codes, FPU/SSE state and
 * a disassembly-ready code byte window) to @f via @cpu_fprintf.
 * @flags is a mask of CPU_DUMP_* bits selecting the optional sections.
 */
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* Fold the lazy condition codes into a full EFLAGS image first. */
    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: print all 16 GPRs at full width */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    /* Segment registers, then the two system segments */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* Long mode: descriptor-table bases, CR2/CR3 and DRs are 64 bit */
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        /* Lazy condition-code machine state (TCG internal) */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* FTW is stored inverted per-register in fptags[] */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* XMM8-15 only exist in 64-bit mode */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* Dump a window of code bytes around EIP, marking the byte at
         * EIP itself with <..>.  Reads go through the debug path so
         * unmapped bytes print as "??". */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
378
379
380
381
382
383void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
384{
385 CPUX86State *env = &cpu->env;
386
387 a20_state = (a20_state != 0);
388 if (a20_state != ((env->a20_mask >> 20) & 1)) {
389 CPUState *cs = CPU(cpu);
390
391#if defined(DEBUG_MMU)
392 printf("A20 update: a20=%d\n", a20_state);
393#endif
394
395
396 cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
397
398
399
400 tlb_flush(cs, 1);
401 env->a20_mask = ~(1 << 20) | (a20_state << 20);
402 }
403}
404
/* Write CR0 and update all the state derived from it: TLB contents,
 * long-mode entry/exit (EFER.LMA) and the PE/MP/EM/TS/ADDSEG hflags.
 */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* A change of PG, WP or PE invalidates all cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* Enabling paging with EFER.LME set enters long mode, but only
         * if PAE is already on; otherwise the CR0 write is dropped here
         * without updating env->cr[0]. */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* Disabling paging while LMA is set leaves long mode. */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* CR0.ET reads as 1 on this CPU family. */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* Mirror CR0.PE into hflags. */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ADDSEG is forced on whenever PE is clear (real mode). */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* Copy CR0.MP/EM/TS into the matching hflags bits (CR0 bits 1-3
     * shift straight into HF_MP..HF_TS). */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
446
447
448
449void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
450{
451 X86CPU *cpu = x86_env_get_cpu(env);
452
453 env->cr[3] = new_cr3;
454 if (env->cr[0] & CR0_PG_MASK) {
455#if defined(DEBUG_MMU)
456 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
457#endif
458 tlb_flush(CPU(cpu), 0);
459 }
460}
461
462void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
463{
464 X86CPU *cpu = x86_env_get_cpu(env);
465
466#if defined(DEBUG_MMU)
467 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
468#endif
469 if ((new_cr4 ^ env->cr[4]) &
470 (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
471 CR4_SMEP_MASK | CR4_SMAP_MASK)) {
472 tlb_flush(CPU(cpu), 1);
473 }
474
475 if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
476 new_cr4 &= ~CR4_OSFXSR_MASK;
477 }
478 env->hflags &= ~HF_OSFXSR_MASK;
479 if (new_cr4 & CR4_OSFXSR_MASK) {
480 env->hflags |= HF_OSFXSR_MASK;
481 }
482
483 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
484 new_cr4 &= ~CR4_SMAP_MASK;
485 }
486 env->hflags &= ~HF_SMAP_MASK;
487 if (new_cr4 & CR4_SMAP_MASK) {
488 env->hflags |= HF_SMAP_MASK;
489 }
490
491 env->cr[4] = new_cr4;
492}
493
494#if defined(CONFIG_USER_ONLY)
495
496int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
497 int is_write, int mmu_idx)
498{
499 X86CPU *cpu = X86_CPU(cs);
500 CPUX86State *env = &cpu->env;
501
502
503 is_write &= 1;
504 env->cr[2] = addr;
505 env->error_code = (is_write << PG_ERROR_W_BIT);
506 env->error_code |= PG_ERROR_U_MASK;
507 cs->exception_index = EXCP0E_PAGE;
508 return 1;
509}
510
511#else
512
513
514
515
516
517
/* Software TLB fill: walk the guest page tables for @addr and install a
 * TLB entry, or prepare a #PF/#GP to be raised.
 *
 * @is_write1: 0 = read, 1 = write, 2 = instruction fetch
 * @mmu_idx:   MMU_USER_IDX / MMU_KSMAP_IDX / MMU_KNOSMAP_IDX
 *
 * Returns 0 when the mapping was installed, 1 when an exception has been
 * set up in cs->exception_index / env->error_code (and CR2 or the SVM
 * exit_info_2 field holds the faulting address).
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: identity mapping, full access. */
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Outside long mode only 32 address bits exist. */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        /* Without NXE, the NX bit is a reserved bit in PAE entries. */
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* Non-canonical addresses raise #GP, not #PF. */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            /* 4-level walk: PML4E first.  ptep accumulates the combined
             * access rights of every level (NX is tracked inverted via
             * the ^ PG_NX_MASK trick so that & combines it like R/W/U). */
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* 32-bit PAE: 4-entry PDPT, no rights bits in the PDPTE. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            /* PDPTE carries no R/W/U/NX restrictions: start permissive. */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page: descend to the PTE. */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_phys_notdirty(cs->as, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = ldq_phys(cs->as, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* Legacy 2-level 32-bit page tables. */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* PSE-36: PDE bits 13-20 supply physical address bits 32-39;
             * bit 21 is reserved. */
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_phys_notdirty(cs->as, pde_addr, pde);
        }

        /* page directory entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = ldl_phys(cs->as, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    /* For large pages the low address bits inside PG_ADDRESS_MASK are
     * reserved (except the PAT bit). */
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    /* Undo the inverted-NX encoding: ptep now has NX set if any level
     * forbade execution. */
    ptep ^= PG_NX_MASK;
    if ((ptep & PG_NX_MASK) && is_write1 == 2) {
        goto do_fault_protect;
    }
    /* Check the access rights for this mmu mode. */
    switch (mmu_idx) {
    case MMU_USER_IDX:
        if (!(ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if (is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
        }
        break;

    case MMU_KSMAP_IDX:
        /* SMAP: supervisor data accesses to user pages fault. */
        if (is_write1 != 2 && (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        /* fall through */
    case MMU_KNOSMAP_IDX:
        if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
            (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if ((env->cr[0] & CR0_WP_MASK) &&
            is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
        }
        break;

    default: /* cannot happen */
        break;
    }
    /* Set the accessed (and on writes, dirty) bits in the leaf entry. */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        stl_phys_notdirty(cs->as, pte_addr, pte);
    }

    /* Compute the final TLB protection bits. */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if (pte & PG_DIRTY_MASK) {
        /* Only map the page writable once it is dirty, so that the first
         * write still traps here and sets the dirty bit. */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Install the 4K TLB entry covering addr inside the (possibly large)
     * page. */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* SVM: the #PF is intercepted; CR2 is not modified, the address
         * goes to the VMCB instead. */
        stq_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
808
/* Translate a virtual address to a physical address for debug accesses
 * (gdbstub, monitor).  Performs a read-only page walk: no A/D bits are
 * set and no faults are raised.  Returns -1 if the address is unmapped.
 */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: identity mapping (through the A20 mask). */
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* Reject non-canonical addresses. */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            /* 32-bit PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(cs->as, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* Legacy 2-level 32-bit page tables */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page, with PSE-36 high address bits folded in */
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    /* NOTE(review): the offset is computed from the 4K-page-aligned
     * address within the (possibly larger) page -- confirm this matches
     * callers' expectations for large pages. */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
913
/* Register debug register DR<index> with QEMU's generic breakpoint or
 * watchpoint machinery, according to its type field in DR7.
 */
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(cs, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        /* NOTE(review): this also serves the watchpoint failure path --
         * presumably cpu_breakpoint[] and cpu_watchpoint[] overlay each
         * other in CPUX86State so clearing one clears both; confirm
         * against the CPUX86State definition. */
        env->cpu_breakpoint[index] = NULL;
    }
}
947
/* Undo hw_breakpoint_insert() for DR<index>.  A NULL slot means nothing
 * was registered (or registration failed), so there is nothing to remove.
 */
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    CPUState *cs;

    if (!env->cpu_breakpoint[index]) {
        return;
    }
    cs = CPU(x86_env_get_cpu(env));
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}
971
972bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
973{
974 target_ulong dr6;
975 int reg;
976 bool hit_enabled = false;
977
978 dr6 = env->dr[6] & ~0xf;
979 for (reg = 0; reg < DR7_MAX_BP; reg++) {
980 bool bp_match = false;
981 bool wp_match = false;
982
983 switch (hw_breakpoint_type(env->dr[7], reg)) {
984 case DR7_TYPE_BP_INST:
985 if (env->dr[reg] == env->eip) {
986 bp_match = true;
987 }
988 break;
989 case DR7_TYPE_DATA_WR:
990 case DR7_TYPE_DATA_RW:
991 if (env->cpu_watchpoint[reg] &&
992 env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
993 wp_match = true;
994 }
995 break;
996 case DR7_TYPE_IO_RW:
997 break;
998 }
999 if (bp_match || wp_match) {
1000 dr6 |= 1 << reg;
1001 if (hw_breakpoint_enabled(env->dr[7], reg)) {
1002 hit_enabled = true;
1003 }
1004 }
1005 }
1006
1007 if (hit_enabled || force_dr6_update) {
1008 env->dr[6] = dr6;
1009 }
1010
1011 return hit_enabled;
1012}
1013
/* Dispatcher invoked when a QEMU breakpoint or watchpoint fires.  For
 * hits owned by the guest's debug registers (BP_CPU) raise #DB; hits
 * without BP_CPU are left for the host debugger (gdbstub) to handle.
 */
void breakpoint_handler(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    CPUBreakpoint *bp;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                /* No enabled guest watchpoint actually matched: restart
                 * the interrupted instruction without reporting. */
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* Guest instruction breakpoint: update DR6 and raise
                     * the debug exception. */
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
        }
    }
}
1041
/* Arguments for do_inject_x86_mce(), marshalled through run_on_cpu() so
 * the injection executes on the target vCPU's thread. */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor to report refusals/errors to */
    X86CPU *cpu;         /* target CPU */
    int bank;            /* MCE bank to inject into */
    uint64_t status;     /* MCi_STATUS value */
    uint64_t mcg_status; /* MCG_STATUS value */
    uint64_t addr;       /* MCi_ADDR value */
    uint64_t misc;       /* MCi_MISC value */
    int flags;           /* MCE_INJECT_* flags */
} MCEInjectionParams;
1052
/* Perform an MCE injection on the vCPU owning @data (a MCEInjectionParams).
 * Runs via run_on_cpu() on the target vCPU's thread.  Uncorrected (UC)
 * errors raise CPU_INTERRUPT_MCE or trigger a reset; corrected errors are
 * only latched into the bank registers.
 */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * If MSR_MCG_CTL is implemented but not all-1s, uncorrected
         * error reporting is disabled.
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * If MCi_CTL (banks[0]) is not all-1s, uncorrected error
         * reporting is disabled for this bank.
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        /* A second UC MCE while one is in progress (or with CR4.MCE off)
         * is a triple fault: reset the machine. */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: latch it, setting OVER if the bank was full. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* Bank already holds an uncorrected error: just flag overflow. */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1126
/* Monitor entry point for MCE injection.  Validates the request against
 * the guest's MCG capabilities, injects on the target CPU, and -- when
 * MCE_INJECT_BROADCAST is set -- injects a fixed UC error into bank 1 of
 * every other CPU as well.
 */
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    /* Low byte of MCG_CAP is the number of implemented banks. */
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        /* Broadcast a canned uncorrected error to all other CPUs. */
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
1181
1182void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
1183{
1184 X86CPU *cpu = x86_env_get_cpu(env);
1185 CPUState *cs = CPU(cpu);
1186
1187 if (kvm_enabled()) {
1188 env->tpr_access_type = access;
1189
1190 cpu_interrupt(cs, CPU_INTERRUPT_TPR);
1191 } else {
1192 cpu_restore_state(cs, cs->mem_io_pc);
1193
1194 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
1195 }
1196}
1197#endif
1198
1199int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1200 target_ulong *base, unsigned int *limit,
1201 unsigned int *flags)
1202{
1203 X86CPU *cpu = x86_env_get_cpu(env);
1204 CPUState *cs = CPU(cpu);
1205 SegmentCache *dt;
1206 target_ulong ptr;
1207 uint32_t e1, e2;
1208 int index;
1209
1210 if (selector & 0x4)
1211 dt = &env->ldt;
1212 else
1213 dt = &env->gdt;
1214 index = selector & ~7;
1215 ptr = dt->base + index;
1216 if ((index + 7) > dt->limit
1217 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1218 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1219 return 0;
1220
1221 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1222 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1223 if (e2 & DESC_G_MASK)
1224 *limit = (*limit << 12) | 0xfff;
1225 *flags = e2;
1226
1227 return 1;
1228}
1229
1230#if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: reset the CPU while preserving the architectural
 * state in the start_init_save..end_init_save window of CPUX86State and
 * any pending SIPI, which an INIT does not clear.
 */
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    /* Snapshot the whole env so the preserved window survives reset. */
    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}
1252
/* Handle a SIPI by forwarding it to the local APIC. */
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
1257#else
/* User-mode emulation stub: INIT IPIs do not exist here. */
void do_cpu_init(X86CPU *cpu)
{
}
/* User-mode emulation stub: SIPIs do not exist here. */
void do_cpu_sipi(X86CPU *cpu)
{
}
1264#endif
1265
1266
1267
/* Called on entry to the TCG execution loop: split the architectural
 * EFLAGS image into the lazy condition-code fields (CC_SRC with CC_OP set
 * to EFLAGS) and the direction flag (env->df as +1/-1), clearing those
 * bits out of env->eflags.
 */
void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    /* DF is EFLAGS bit 10; df holds +1 or -1 for string-op stepping. */
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}
1278
/* Called on exit from the TCG execution loop: fold the lazy condition
 * codes and df back into a complete architectural EFLAGS image.
 */
void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}
1286