#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qapi-commands-misc.h"
#include "hw/i386/pc.h"

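/*
 * Sign-extend a linear address to its canonical form: with LA57 paging
 * bit 56 is the sign bit, otherwise bit 47.  On 32-bit targets the
 * address is returned unchanged.
 */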
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= (hwaddr)-(1LL << 57);
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= (hwaddr)-(1LL << 48);
        }
    }
#endif
    return addr;
}

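/*
 * Print one page table entry: canonical virtual address, physical address
 * (pte & mask) and the decoded flag letters (XGPDACTUW).
 */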
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
    addr = addr_canonical(env, addr);

    monitor_printf(mon, HWADDR_FMT_plx ": " HWADDR_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

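/* Dump the present mappings of a legacy 32-bit (non-PAE) page table. */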
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

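/* Dump the present mappings of a 32-bit PAE page table (2 MB and 4 KB pages). */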
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2 MB page with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
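/*
 * Dump the present mappings of one 4-level (48-bit) page table hierarchy.
 * l0 is the PML5 index (0 when called directly for pure 4-level paging) and
 * pml4_addr is the physical address of the PML4 table.
 */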
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
                          uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                          pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2 MB page, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                              (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                  (l2 << 30) + (l3 << 21) + (l4 << 12),
                                  pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

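/* Dump the present mappings of a 5-level (57-bit) page table hierarchy. */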
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif

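/* "info tlb": dump the guest page tables for the current paging mode. */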
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

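/*
 * Accumulate virtual address ranges with identical protection bits and
 * print a range whenever the protection changes.  *pstart == -1 means no
 * range is open; the callers flush the final range with prot == 0.
 */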
static void mem_print(Monitor *mon, CPUArchState *env,
                      hwaddr *pstart, int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, HWADDR_FMT_plx "-" HWADDR_FMT_plx " "
                           HWADDR_FMT_plx " %c%c%c\n",
                           addr_canonical(env, *pstart),
                           addr_canonical(env, end),
                           addr_canonical(env, end - *pstart),
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0) {
            *pstart = end;
        } else {
            *pstart = -1;
        }
        *plast_prot = prot;
    }
}

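/* "info mem" for legacy 32-bit (non-PAE) paging. */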
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page */
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

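/* "info mem" for 32-bit PAE paging. */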
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2 MB page */
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, env, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

#ifdef TARGET_X86_64
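/* "info mem" for 4-level (48-bit) long-mode paging. */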
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1 GB page */
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2 MB page */
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, env, &start,
                                              &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK |
                                                          PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, env, &start,
                                                  &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, env, &start,
                                          &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}

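/* "info mem" for 5-level (57-bit) long-mode paging. */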
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    /* 1 GB page */
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                   PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        /* 2 MB page */
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                          PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif

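/* "info mem": dump virtual address ranges and their protection bits. */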
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

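/* "mce": inject a machine check exception into the given vCPU bank. */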
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

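/* Resolve the "pc" pseudo-register as CS.base + EIP. */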
static target_long monitor_get_pc(Monitor *mon, const struct MonitorDef *md,
                                  int val)
{
    CPUArchState *env = mon_get_cpu_env(mon);
    return env->eip + env->segs[R_CS].base;
}

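/* Registers exposed to HMP expressions (e.g. "x/10i $pc", "print $eax"). */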
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

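/* "info lapic": dump the local APIC state of the selected CPU. */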
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu(mon);
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU);
}