1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "cpu.h"
21#include "sysemu/kvm.h"
22#include "kvm_i386.h"
23#ifndef CONFIG_USER_ONLY
24#include "sysemu/sysemu.h"
25#include "monitor/monitor.h"
26#include "hw/i386/apic_internal.h"
27#endif
28
29static void cpu_x86_version(CPUX86State *env, int *family, int *model)
30{
31 int cpuver = env->cpuid_version;
32
33 if (family == NULL || model == NULL) {
34 return;
35 }
36
37 *family = (cpuver >> 8) & 0x0f;
38 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
39}
40
41
42int cpu_x86_support_mca_broadcast(CPUX86State *env)
43{
44 int family = 0;
45 int model = 0;
46
47 cpu_x86_version(env, &family, &model);
48 if ((family == 6 && model >= 14) || family > 6) {
49 return 1;
50 }
51
52 return 0;
53}
54
55
56
57
/* Human-readable name for each lazy condition-code state (CC_OP_*),
 * indexed by the CCOp value; used when dumping CPU state with
 * CPU_DUMP_CCOP.  The order must match the CC_OP_* enum. */
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
123
/* Print one cached segment descriptor (selector, base, limit, flags)
 * followed by a decoded view of the access rights.  Nothing beyond the
 * raw fields is printed in real mode or for non-present segments. */
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* No access rights to decode in real mode or if the segment is not
     * present. */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* Code or data segment: print type and attribute letters. */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* System segment/gate: name looked up by descriptor type, with a
         * separate table for long mode. */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
180
181#ifndef CONFIG_USER_ONLY
182
183
184
185
/* Name of an APIC delivery mode (3-bit field, values 0..7); reserved
 * encodings map to "...". */
static inline const char *dm2str(uint32_t dm)
{
    switch (dm) {
    case 0:
        return "Fixed";
    case 2:
        return "SMI";
    case 4:
        return "NMI";
    case 5:
        return "INIT";
    case 7:
        return "ExtINT";
    default:
        return "...";
    }
}
200
/* Print one local-vector-table register: raw value, polarity, trigger
 * mode, mask/pending state, timer mode (when is_timer) and delivery
 * mode.  The vector number is appended except for NMI delivery, where
 * the vector field is not meaningful. */
static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}
223
224
225
226
/* Name of an ICR destination-shorthand field value (2-bit field, 0..3). */
static inline const char *shorthand2str(uint32_t shorthand)
{
    /* static const: the table is built once at load time instead of being
     * re-initialized on every call, and cannot be modified. */
    static const char *const str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}
234
/* Decode the APIC timer divide-configuration register (bits 0,1,3 form
 * the 3-bit encoding) into the actual divider: 2,4,...,128, with the
 * all-ones encoding (7) meaning divide-by-1. */
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t enc = (uint8_t)(((divide_conf >> 1) & 0x4) | (divide_conf & 0x3));

    if (enc == 7) {
        return 1;
    }
    return 2 << enc;
}
241
/* Render the low 'size' bits of 'val' into 'str' as '0'/'1' characters,
 * most-significant bit first, and NUL-terminate.  'str' must have room
 * for size + 1 bytes. */
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    int bit;

    for (bit = size - 1; bit >= 0; bit--) {
        *str++ = ((val >> bit) & 1) ? '1' : '0';
    }
    *str = '\0';
}
249
250#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
251
/* Print the interrupt command register pair (ICR/ICR2) and decode the
 * destination: shorthand, physical APIC/x2APIC ID, or logical flat/
 * cluster mask depending on the destination mode and x2APIC support. */
static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    /* With a shorthand the destination field is ignored. */
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }
    /* In x2APIC mode the whole of ICR2 is the destination ID. */
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    /* Logical destination: flat mode (DFR == 0xf) is an 8-bit mask of
     * APIC IDs; cluster mode (DFR == 0) is cluster number plus a member
     * mask.  Other DFR values are not printed. */
    if (s->dest_mode == 0xf) {
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) {
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}
303
/* Print the set vectors of a 256-bit interrupt register (ISR or IRR),
 * tagging each with "(level)" when the matching TMR bit is set; prints
 * "(none)" when no bit is set. */
static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}
320
/* Monitor helper: dump the whole local APIC state of 'cs' — LVT entries,
 * timer configuration, SPIV, ICR, ESR, ISR/IRR and the arbitration/task
 * priority and logical destination registers.
 * NOTE(review): assumes cpu->apic_state is non-NULL — confirm all
 * callers only reach this with an APIC present. */
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    /* Cluster logical destination mode: decode cluster and member id. */
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
364#else
/* User-mode emulation has no local APIC: dumping it is a no-op. */
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
369#endif
370
371#define DUMP_CODE_BYTES_TOTAL 50
372#define DUMP_CODE_BYTES_BACKWARD 20
373
/* Dump the full architectural CPU state to 'f': general registers and
 * flags (64- or 32-bit layout depending on the current code segment),
 * segment caches, descriptor tables, control/debug registers, and —
 * gated by 'flags' — the lazy condition codes (CPU_DUMP_CCOP), the
 * FPU/SSE state (CPU_DUMP_FPU) and a hex dump of code bytes around the
 * current instruction pointer (CPU_DUMP_CODE). */
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* Materialize EFLAGS from the lazily-evaluated condition codes. */
    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* FTW is stored inverted (fptags[i] == 1 means empty). */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 64-bit code has XMM8-XMM15 as well. */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        /* Start up to DUMP_CODE_BYTES_BACKWARD bytes before EIP, clamped
         * so we never read before the segment. */
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            /* The byte at EIP is bracketed as <xx>. */
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
567
568
569
570
571
/* Set the A20 address line (any non-zero a20_state means enabled).
 * A change invalidates every cached translation, so both the TB link
 * state and the whole TLB are flushed. */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* If the cpu is currently executing code, we must unlink it and
         * all the potentially executing TBs. */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* When a20 is changed, all the MMU mappings are invalid, so we
         * must flush everything. */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
591
/* Install a new CR0 value, handling the side effects: TLB flush when
 * paging-related bits change, long-mode activation/deactivation, and
 * recomputation of the PE/ADDSEG/MP/EM/TS hidden flags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* Enter long mode: paging enabled with LME set.
         * XXX(review): entering long mode without PAE should raise #GP;
         * here it is silently ignored. */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* Exit long mode. */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* Update the PE flag in hidden flags. */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ADDSEG is set only in real mode. */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* Mirror the FPU control bits (MP/EM/TS) into hflags. */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
631
632
633
634void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
635{
636 X86CPU *cpu = x86_env_get_cpu(env);
637
638 env->cr[3] = new_cr3;
639 if (env->cr[0] & CR0_PG_MASK) {
640 qemu_log_mask(CPU_LOG_MMU,
641 "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
642 tlb_flush(CPU(cpu), 0);
643 }
644}
645
646void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
647{
648 X86CPU *cpu = x86_env_get_cpu(env);
649
650#if defined(DEBUG_MMU)
651 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
652#endif
653 if ((new_cr4 ^ env->cr[4]) &
654 (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
655 CR4_SMEP_MASK | CR4_SMAP_MASK)) {
656 tlb_flush(CPU(cpu), 1);
657 }
658
659 if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
660 new_cr4 &= ~CR4_OSFXSR_MASK;
661 }
662 env->hflags &= ~HF_OSFXSR_MASK;
663 if (new_cr4 & CR4_OSFXSR_MASK) {
664 env->hflags |= HF_OSFXSR_MASK;
665 }
666
667 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
668 new_cr4 &= ~CR4_SMAP_MASK;
669 }
670 env->hflags &= ~HF_SMAP_MASK;
671 if (new_cr4 & CR4_SMAP_MASK) {
672 env->hflags |= HF_SMAP_MASK;
673 }
674
675 env->cr[4] = new_cr4;
676}
677
678#if defined(CONFIG_USER_ONLY)
679
680int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
681 int is_write, int mmu_idx)
682{
683 X86CPU *cpu = X86_CPU(cs);
684 CPUX86State *env = &cpu->env;
685
686
687 is_write &= 1;
688 env->cr[2] = addr;
689 env->error_code = (is_write << PG_ERROR_W_BIT);
690 env->error_code |= PG_ERROR_U_MASK;
691 cs->exception_index = EXCP0E_PAGE;
692 return 1;
693}
694
695#else
696
697
698
699
700
701
/* Translate 'addr' and install the mapping in the TLB.
 * is_write1: 0 = read, 1 = write, 2 = instruction fetch.
 * Return value: 0 = translation installed, nothing more to do;
 *               1 = raise the page fault recorded in exception_index /
 *                   error_code (CR2 or the SVM exit info set to addr). */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: identity mapping with full permissions. */
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Outside long mode we can only address 32 bits. */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    /* Without NXE, the NX bit is reserved in the page tables. */
    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* Test virtual address canonicality (sign extension of
             * bit 47); a non-canonical address raises #GP, not #PF. */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            /* ptep accumulates the AND of the user/rw/NX protections of
             * every level; NX is kept inverted so AND works for it too. */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* 32-bit PAE: CR3 points at the 4-entry PDPT. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* Legacy 32-bit (non-PAE) two-level walk: page directory entry. */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* PSE-36: bits 13-20 of the PDE supply physical address
             * bits 32-39; bit 21 is reserved.  Keep the low bits in
             * place so the accessed/dirty updates below still work. */
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    /* Un-invert NX so the bit now means "no execute" again. */
    ptep ^= PG_NX_MASK;
    if ((ptep & PG_NX_MASK) && is_write1 == 2) {
        goto do_fault_protect;
    }
    switch (mmu_idx) {
    case MMU_USER_IDX:
        if (!(ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if (is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
        }
        break;

    case MMU_KSMAP_IDX:
        /* SMAP: supervisor data access to a user page faults. */
        if (is_write1 != 2 && (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        /* fall through */
    case MMU_KNOSMAP_IDX:
        if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
            (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if ((env->cr[0] & CR0_WP_MASK) &&
            is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
        }
        break;

    default: /* cannot happen */
        break;
    }
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    /* The page can now be put in the TLB. */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if (pte & PG_DIRTY_MASK) {
        /* Only set write access if the page is already dirty; otherwise
         * wait for the first write so the dirty bit gets set then. */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Align to page_size. */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even for large pages, only one 4 KB page is entered in the TLB
     * at a time to avoid filling it too fast. */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* CR2 is not modified when the fault is intercepted by SVM. */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
993
/* Debugger-side address translation: walk the page tables without any
 * side effects (no accessed/dirty updates, no faults) and return the
 * physical address for 'addr', or -1 if it is not mapped. */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: linear == physical (modulo A20). */
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* Test virtual address canonicality (sign extension). */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            /* 32-bit PAE: CR3 points at the 4-entry PDPT. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* Legacy 32-bit walk: page directory entry. */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page, with PSE-36 high address bits folded in. */
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
1098
/* Arguments for do_inject_x86_mce(), marshalled through run_on_cpu()
 * so the injection runs on the target VCPU's thread. */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor to report errors to */
    X86CPU *cpu;         /* target VCPU */
    int bank;            /* MCE bank number */
    uint64_t status;     /* MCi_STATUS value to inject */
    uint64_t mcg_status; /* MCG_STATUS value to set */
    uint64_t addr;       /* MCi_ADDR value */
    uint64_t misc;       /* MCi_MISC value */
    int flags;           /* MCE_INJECT_* flags */
} MCEInjectionParams;
1109
/* Inject a machine-check error into the target VCPU's MCE banks (runs
 * on the VCPU thread via run_on_cpu).  Uncorrected errors raise
 * CPU_INTERRUPT_MCE or, when a previous MCE is still in progress,
 * trigger a system reset (triple fault). */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this AO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * If MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled.
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * If MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank.
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        /* An unhandled previous MCE (or CR4.MCE clear) escalates to a
         * triple fault, i.e. a full system reset. */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: record it unless the bank already holds an
         * unhandled uncorrected error. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
1183
/* Monitor-driven MCE injection entry point: validate the request, inject
 * the error on the target VCPU, and with MCE_INJECT_BROADCAST replay a
 * canned uncorrected error to every other VCPU. */
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        /* Other CPUs receive a fixed uncorrected error in bank 1, as a
         * real broadcast machine check would deliver. */
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
1238
/* Report a guest access to the task priority register (CR8/APIC TPR).
 * Under KVM the access type is latched and the VCPU kicked with
 * CPU_INTERRUPT_TPR; under TCG the CPU state is first restored from the
 * current translation before reporting to the APIC emulation. */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        /* Recover a precise env->eip from the executing TB. */
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
1254#endif
1255
1256int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1257 target_ulong *base, unsigned int *limit,
1258 unsigned int *flags)
1259{
1260 X86CPU *cpu = x86_env_get_cpu(env);
1261 CPUState *cs = CPU(cpu);
1262 SegmentCache *dt;
1263 target_ulong ptr;
1264 uint32_t e1, e2;
1265 int index;
1266
1267 if (selector & 0x4)
1268 dt = &env->ldt;
1269 else
1270 dt = &env->gdt;
1271 index = selector & ~7;
1272 ptr = dt->base + index;
1273 if ((index + 7) > dt->limit
1274 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1275 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1276 return 0;
1277
1278 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1279 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1280 if (e2 & DESC_G_MASK)
1281 *limit = (*limit << 12) | 0xfff;
1282 *flags = e2;
1283
1284 return 1;
1285}
1286
1287#if !defined(CONFIG_USER_ONLY)
/*
 * Handle an INIT signal on @cpu: reset the CPU while preserving the
 * architectural state that INIT leaves intact.
 *
 * The full CPUX86State is snapshotted, the CPU is reset, and then the
 * [start_init_save, end_init_save) member range is copied back from the
 * snapshot.  A pending SIPI is kept in interrupt_request so it still
 * fires after the reset.
 */
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    /* Heap-allocated: CPUX86State is too large to copy onto the stack. */
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    /* Restore the fields INIT does not touch (marker members in cpu.h). */
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}
1309
/* Handle a SIPI (startup IPI): delegate to the local APIC. */
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
1314#else
/* User-mode emulation has no INIT/SIPI: these are no-op stubs. */
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
1321#endif
1322
1323
1324
1325void x86_cpu_exec_enter(CPUState *cs)
1326{
1327 X86CPU *cpu = X86_CPU(cs);
1328 CPUX86State *env = &cpu->env;
1329
1330 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1331 env->df = 1 - (2 * ((env->eflags >> 10) & 1));
1332 CC_OP = CC_OP_EFLAGS;
1333 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1334}
1335
/*
 * Leaving TCG execution: fold the lazy condition codes and df back into
 * a complete architectural env->eflags value.
 */
void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}
1343
1344#ifndef CONFIG_USER_ONLY
1345uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
1346{
1347 X86CPU *cpu = X86_CPU(cs);
1348 CPUX86State *env = &cpu->env;
1349
1350 return address_space_ldub(cs->as, addr,
1351 cpu_get_mem_attrs(env),
1352 NULL);
1353}
1354
1355uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
1356{
1357 X86CPU *cpu = X86_CPU(cs);
1358 CPUX86State *env = &cpu->env;
1359
1360 return address_space_lduw(cs->as, addr,
1361 cpu_get_mem_attrs(env),
1362 NULL);
1363}
1364
1365uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
1366{
1367 X86CPU *cpu = X86_CPU(cs);
1368 CPUX86State *env = &cpu->env;
1369
1370 return address_space_ldl(cs->as, addr,
1371 cpu_get_mem_attrs(env),
1372 NULL);
1373}
1374
1375uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
1376{
1377 X86CPU *cpu = X86_CPU(cs);
1378 CPUX86State *env = &cpu->env;
1379
1380 return address_space_ldq(cs->as, addr,
1381 cpu_get_mem_attrs(env),
1382 NULL);
1383}
1384
1385void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
1386{
1387 X86CPU *cpu = X86_CPU(cs);
1388 CPUX86State *env = &cpu->env;
1389
1390 address_space_stb(cs->as, addr, val,
1391 cpu_get_mem_attrs(env),
1392 NULL);
1393}
1394
1395void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
1396{
1397 X86CPU *cpu = X86_CPU(cs);
1398 CPUX86State *env = &cpu->env;
1399
1400 address_space_stl_notdirty(cs->as, addr, val,
1401 cpu_get_mem_attrs(env),
1402 NULL);
1403}
1404
1405void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
1406{
1407 X86CPU *cpu = X86_CPU(cs);
1408 CPUX86State *env = &cpu->env;
1409
1410 address_space_stw(cs->as, addr, val,
1411 cpu_get_mem_attrs(env),
1412 NULL);
1413}
1414
1415void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
1416{
1417 X86CPU *cpu = X86_CPU(cs);
1418 CPUX86State *env = &cpu->env;
1419
1420 address_space_stl(cs->as, addr, val,
1421 cpu_get_mem_attrs(env),
1422 NULL);
1423}
1424
1425void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
1426{
1427 X86CPU *cpu = X86_CPU(cs);
1428 CPUX86State *env = &cpu->env;
1429
1430 address_space_stq(cs->as, addr, val,
1431 cpu_get_mem_attrs(env),
1432 NULL);
1433}
1434#endif
1435