1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "sysemu/kvm.h"
23#include "kvm_i386.h"
24#ifndef CONFIG_USER_ONLY
25#include "sysemu/sysemu.h"
26#include "monitor/monitor.h"
27#include "hw/i386/apic_internal.h"
28#endif
29
30static void cpu_x86_version(CPUX86State *env, int *family, int *model)
31{
32 int cpuver = env->cpuid_version;
33
34 if (family == NULL || model == NULL) {
35 return;
36 }
37
38 *family = (cpuver >> 8) & 0x0f;
39 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
40}
41
42
43int cpu_x86_support_mca_broadcast(CPUX86State *env)
44{
45 int family = 0;
46 int model = 0;
47
48 cpu_x86_version(env, &family, &model);
49 if ((family == 6 && model >= 14) || family > 6) {
50 return 1;
51 }
52
53 return 0;
54}
55
56
57
58
/* Printable names for the lazy condition-code operations, indexed by the
 * env->cc_op value.  The order of entries must match the CC_OP_* enum
 * declared in cpu.h exactly — do not reorder. */
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
124
/* Print one cached segment descriptor (selector, base, limit, flags and a
 * decoded description of the access rights) on a single line of f. */
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment active: print the full 64-bit base */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* No attribute decoding in real mode or for a non-present descriptor */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* code/data descriptor */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system descriptor: decode the type field, which has different
         * meanings in legacy vs. long mode */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
181
182#ifndef CONFIG_USER_ONLY
183
184
185
186
/* Map a 3-bit APIC delivery-mode field value to its printable name.
 * Reserved encodings are rendered as "...". */
static inline const char *dm2str(uint32_t dm)
{
    static const char *const names[8] = {
        [0] = "Fixed",
        [1] = "...",
        [2] = "SMI",
        [3] = "...",
        [4] = "NMI",
        [5] = "INIT",
        [6] = "...",
        [7] = "ExtINT",
    };
    return names[dm];
}
201
/* Print one Local Vector Table entry: raw value plus decoded polarity,
 * trigger mode, mask/pending state, timer mode (timer entry only),
 * delivery mode and vector.  NMI delivery has no meaningful vector, so
 * it is omitted in that case. */
static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n"); /* NMI ignores the vector field */
    }
}
224
225
226
227
/* Map the 2-bit ICR destination-shorthand field to its printable name.
 * The table is 'static const' so it is built once at load time instead
 * of being re-initialised on the stack on every call (the previous
 * version declared it as a plain local array). */
static inline const char *shorthand2str(uint32_t shorthand)
{
    static const char *const str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}
235
/* Decode the APIC timer Divide Configuration Register into the actual
 * divisor.  The 3-bit divisor code lives in bits 0, 1 and 3 of the DCR;
 * the all-ones code (7) means divide-by-1, otherwise 2^(code+1). */
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t code = (divide_conf & 0x3) | ((divide_conf >> 1) & 0x4);

    if (code == 7) {
        return 1;
    }
    return 2 << code;
}
242
/* Render the low 'size' bits of 'val' into 'str' as a NUL-terminated
 * binary string, most significant bit first.  'str' must have room for
 * size + 1 bytes. */
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    int bit;

    for (bit = size - 1; bit >= 0; bit--) {
        *str++ = ((val >> bit) & 1) ? '1' : '0';
    }
    *str = 0;
}
250
251#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
252
/* Print the Interrupt Command Register pair (ICR/ICR2) with a decoded
 * destination: shorthand, physical APIC/X2APIC ID, or logical
 * cluster+mask depending on the destination mode and whether the guest
 * CPU advertises x2APIC. */
static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        /* a shorthand overrides the destination field entirely */
        cpu_fprintf(f, "\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    /* in xAPIC mode only the top byte of ICR2 holds the destination */
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) {
        /* flat logical mode: 8-bit destination bitmask */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) {
        /* cluster logical mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}
304
305static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
306 const char *name, uint32_t *ireg_tab,
307 uint32_t *tmr_tab)
308{
309 int i, empty = true;
310
311 cpu_fprintf(f, "%s\t ", name);
312 for (i = 0; i < 256; i++) {
313 if (apic_get_bit(ireg_tab, i)) {
314 cpu_fprintf(f, "%u%s ", i,
315 apic_get_bit(tmr_tab, i) ? "(level)" : "");
316 empty = false;
317 }
318 }
319 cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
320}
321
/* Dump the full local APIC state of one CPU (LVT entries, timer
 * configuration, SPIV, ICR, ESR, in-service and pending interrupts, and
 * the priority/destination registers) for the monitor. */
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        /* cluster logical destination mode: decode cluster and id */
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
365#else
/* User-mode emulation build: there is no local APIC to dump. */
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
370#endif
371
372#define DUMP_CODE_BYTES_TOTAL 50
373#define DUMP_CODE_BYTES_BACKWARD 20
374
/* Print the complete architectural CPU state to f: general-purpose
 * registers, flags, segments, descriptor tables, control and debug
 * registers, and — depending on bits in 'flags' — the lazy condition
 * codes (CPU_DUMP_CCOP), the FPU/SSE state (CPU_DUMP_FPU) and a window
 * of code bytes around EIP (CPU_DUMP_CODE). */
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: print the full-width registers including R8-R15 */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    /* segment registers, LDT and TR */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: GDT/IDT bases and CR2/CR3 are 64 bit wide */
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        /* lazy condition code state: name the pending CC operation */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* FTW is stored inverted (fptags[i] == 1 means empty) */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* only 8 XMM registers are visible outside 64-bit mode */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* dump a window of code bytes around CS:EIP, marking the byte at
         * EIP with <..>; unreadable bytes print as "??" */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
568
569
570
571
572
573void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
574{
575 CPUX86State *env = &cpu->env;
576
577 a20_state = (a20_state != 0);
578 if (a20_state != ((env->a20_mask >> 20) & 1)) {
579 CPUState *cs = CPU(cpu);
580
581 qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
582
583
584 cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
585
586
587
588 tlb_flush(cs, 1);
589 env->a20_mask = ~(1 << 20) | (a20_state << 20);
590 }
591}
592
/* Install a new CR0 value, handling long-mode entry/exit, TLB
 * invalidation when paging-related bits change, and the derived hflags
 * (PE, ADDSEG and the FPU MP/EM/TS bits). */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode: paging enabled with LME set */
        /* NOTE(review): setting PG with LME but without PAE should raise
         * #GP — here it is silently ignored instead */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: MP/EM/TS are copied from CR0 bits 1-3 */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
632
633
634
635void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
636{
637 X86CPU *cpu = x86_env_get_cpu(env);
638
639 env->cr[3] = new_cr3;
640 if (env->cr[0] & CR0_PG_MASK) {
641 qemu_log_mask(CPU_LOG_MMU,
642 "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
643 tlb_flush(CPU(cpu), 0);
644 }
645}
646
/* Install a new CR4 value: flush the TLB when translation-relevant bits
 * change, mask off bits whose feature is not exposed to the guest, and
 * recompute the OSFXSR/SMAP hflags and the MPX bndcs state. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }

    /* Clear bits we're going to recompute. */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling: OSFXSR is only writable when SSE is available */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    /* SMAP is only writable when the feature is exposed */
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    /* PKE is only writable when PKU is exposed */
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}
688
689#if defined(CONFIG_USER_ONLY)
690
/* User-mode emulation: every fault is a user-level page fault.  Record
 * the faulting address in CR2, build the page-fault error code and
 * raise #PF.  Always returns 1 (generate the fault). */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
705
706#else
707
708
709
710
711
712
/* Walk the page tables for 'addr' and either install a TLB entry or
 * raise the appropriate fault.
 *
 * is_write1 encodes the access type: 0 = read, 1 = write, 2 = execute.
 *
 * return value:
 *   0 = nothing more to do (TLB entry installed)
 *   1 = generate a fault (#PF, or #GP for a non-canonical address)
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping */
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        /* without NXE, the NX bit is a reserved bit */
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            /* ptep accumulates the access rights of every level; NX is
             * inverted so that '&' takes the most restrictive value */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* 32-bit PAE: 4-entry PDPT, no NX/access bits at this level */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* legacy two-level page table walk */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is
             * reserved.  Leave bits 20-13 in place for setting
             * accessed/dirty bits below. */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page directory entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    /* undo the NX inversion done while accumulating ptep */
    ptep ^= PG_NX_MASK;

    /* can the page can be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        /* protection keys: restrict access per the PKRU register */
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
1005
/* Debugger page-table walk: translate a guest virtual address to a
 * physical address without touching accessed/dirty bits or raising
 * faults.  Returns -1 when the address is not mapped. */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping (modulo A20) */
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            /* 32-bit PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* legacy two-level page table walk */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page (PSE-36: bits 20-13 supply address bits 39-32) */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
1110
/* Parameters for a machine-check injection, marshalled so the actual
 * injection can run on the target CPU's thread via run_on_cpu(). */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor to report errors to */
    X86CPU *cpu;         /* CPU receiving the machine check */
    int bank;            /* MCE bank index */
    uint64_t status;     /* MCi_STATUS value to inject */
    uint64_t mcg_status; /* MCG_STATUS value to inject */
    uint64_t addr;       /* MCi_ADDR value */
    uint64_t misc;       /* MCi_MISC value */
    int flags;           /* MCE_INJECT_* flags */
} MCEInjectionParams;
1121
/* Perform the actual MCE injection on the target CPU (runs on that
 * CPU's thread).  Updates the selected MCE bank registers and raises
 * CPU_INTERRUPT_MCE, or escalates to a triple fault when a previous
 * machine check is still in progress. */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    /* each bank occupies 4 consecutive MSR slots: CTL, STATUS, ADDR, MISC */
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* nested machine check, or CR4.MCE clear: triple fault */
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: record it unless an uncorrected one is
         * already pending in this bank */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
1195
/* Monitor command entry point for MCE injection.  Validates the request
 * against the guest's MCA capabilities, injects on the target CPU, and
 * optionally broadcasts a machine check to all other CPUs. */
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    /* low byte of MCG_CAP is the number of implemented banks */
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        /* other CPUs receive a generic uncorrected error in bank 1 */
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
1250
1251void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
1252{
1253 X86CPU *cpu = x86_env_get_cpu(env);
1254 CPUState *cs = CPU(cpu);
1255
1256 if (kvm_enabled()) {
1257 env->tpr_access_type = access;
1258
1259 cpu_interrupt(cs, CPU_INTERRUPT_TPR);
1260 } else {
1261 cpu_restore_state(cs, cs->mem_io_pc);
1262
1263 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
1264 }
1265}
1266#endif
1267
1268int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1269 target_ulong *base, unsigned int *limit,
1270 unsigned int *flags)
1271{
1272 X86CPU *cpu = x86_env_get_cpu(env);
1273 CPUState *cs = CPU(cpu);
1274 SegmentCache *dt;
1275 target_ulong ptr;
1276 uint32_t e1, e2;
1277 int index;
1278
1279 if (selector & 0x4)
1280 dt = &env->ldt;
1281 else
1282 dt = &env->gdt;
1283 index = selector & ~7;
1284 ptr = dt->base + index;
1285 if ((index + 7) > dt->limit
1286 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1287 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1288 return 0;
1289
1290 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1291 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1292 if (e2 & DESC_G_MASK)
1293 *limit = (*limit << 12) | 0xfff;
1294 *flags = e2;
1295
1296 return 1;
1297}
1298
1299#if !defined(CONFIG_USER_ONLY)
/*
 * Handle an INIT IPI: reset the vCPU while preserving the slice of
 * CPUX86State between start_init_save and end_init_save (the fields an
 * INIT must not touch), and a pending SIPI if one was queued.
 */
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    /* Remember whether a SIPI was already pending before the reset. */
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    /* Snapshot the whole state before cpu_reset() wipes it. */
    *save = *env;

    cpu_reset(cs);
    /* Only the pending SIPI survives the reset of interrupt_request. */
    cs->interrupt_request = sipi;
    /* Copy back the INIT-preserved window from the snapshot. */
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    /* The local APIC is also reset by INIT. */
    apic_init_reset(cpu->apic_state);
}
1321
/* Handle a startup IPI by forwarding it to the vCPU's local APIC. */
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
1326#else
/* User-mode emulation: INIT IPIs are never delivered, so this is a no-op. */
void do_cpu_init(X86CPU *cpu)
{
}
/* User-mode emulation: SIPIs are never delivered, so this is a no-op. */
void do_cpu_sipi(X86CPU *cpu)
{
}
1333#endif
1334
1335
1336
1337void x86_cpu_exec_enter(CPUState *cs)
1338{
1339 X86CPU *cpu = X86_CPU(cs);
1340 CPUX86State *env = &cpu->env;
1341
1342 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1343 env->df = 1 - (2 * ((env->eflags >> 10) & 1));
1344 CC_OP = CC_OP_EFLAGS;
1345 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1346}
1347
1348void x86_cpu_exec_exit(CPUState *cs)
1349{
1350 X86CPU *cpu = X86_CPU(cs);
1351 CPUX86State *env = &cpu->env;
1352
1353 env->eflags = cpu_compute_eflags(env);
1354}
1355
1356#ifndef CONFIG_USER_ONLY
1357uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
1358{
1359 X86CPU *cpu = X86_CPU(cs);
1360 CPUX86State *env = &cpu->env;
1361
1362 return address_space_ldub(cs->as, addr,
1363 cpu_get_mem_attrs(env),
1364 NULL);
1365}
1366
1367uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
1368{
1369 X86CPU *cpu = X86_CPU(cs);
1370 CPUX86State *env = &cpu->env;
1371
1372 return address_space_lduw(cs->as, addr,
1373 cpu_get_mem_attrs(env),
1374 NULL);
1375}
1376
1377uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
1378{
1379 X86CPU *cpu = X86_CPU(cs);
1380 CPUX86State *env = &cpu->env;
1381
1382 return address_space_ldl(cs->as, addr,
1383 cpu_get_mem_attrs(env),
1384 NULL);
1385}
1386
1387uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
1388{
1389 X86CPU *cpu = X86_CPU(cs);
1390 CPUX86State *env = &cpu->env;
1391
1392 return address_space_ldq(cs->as, addr,
1393 cpu_get_mem_attrs(env),
1394 NULL);
1395}
1396
1397void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
1398{
1399 X86CPU *cpu = X86_CPU(cs);
1400 CPUX86State *env = &cpu->env;
1401
1402 address_space_stb(cs->as, addr, val,
1403 cpu_get_mem_attrs(env),
1404 NULL);
1405}
1406
1407void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
1408{
1409 X86CPU *cpu = X86_CPU(cs);
1410 CPUX86State *env = &cpu->env;
1411
1412 address_space_stl_notdirty(cs->as, addr, val,
1413 cpu_get_mem_attrs(env),
1414 NULL);
1415}
1416
1417void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
1418{
1419 X86CPU *cpu = X86_CPU(cs);
1420 CPUX86State *env = &cpu->env;
1421
1422 address_space_stw(cs->as, addr, val,
1423 cpu_get_mem_attrs(env),
1424 NULL);
1425}
1426
1427void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
1428{
1429 X86CPU *cpu = X86_CPU(cs);
1430 CPUX86State *env = &cpu->env;
1431
1432 address_space_stl(cs->as, addr, val,
1433 cpu_get_mem_attrs(env),
1434 NULL);
1435}
1436
1437void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
1438{
1439 X86CPU *cpu = X86_CPU(cs);
1440 CPUX86State *env = &cpu->env;
1441
1442 address_space_stq(cs->as, addr, val,
1443 cpu_get_mem_attrs(env),
1444 NULL);
1445}
1446#endif
1447