1
2
3
4
5
6
7
8
9
10
11
12#include "qemu/osdep.h"
13#include <sys/ioctl.h>
14#include <sys/mman.h>
15#include <sys/ptrace.h>
16
17#include <linux/elf.h>
18#include <linux/kvm.h>
19
20#include "qemu-common.h"
21#include "cpu.h"
22#include "qemu/timer.h"
23#include "qemu/error-report.h"
24#include "qemu/host-utils.h"
25#include "exec/gdbstub.h"
26#include "sysemu/sysemu.h"
27#include "sysemu/kvm.h"
28#include "kvm_arm.h"
29#include "internals.h"
30#include "hw/arm/arm.h"
31
/* Cached result of the KVM_CAP_SET_GUEST_DEBUG probe performed in
 * kvm_arm_init_debug(); gates the SW breakpoint handlers below. */
static bool have_guest_debug;
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
/* One hardware breakpoint slot: the values to program into the
 * paired debug registers (control and value/address) for the slot. */
typedef struct {
    uint64_t bcr;   /* breakpoint control register value */
    uint64_t bvr;   /* breakpoint value register (the PC to match) */
} HWBreakpoint;
53
54
55
56
57
58
/* One hardware watchpoint slot: the watchpoint control/value register
 * values plus the CPUWatchpoint record handed back to the gdbstub
 * (via cs->watchpoint_hit) when the watchpoint fires. */
typedef struct {
    uint64_t wcr;           /* watchpoint control register value */
    uint64_t wvr;           /* watchpoint value register (base address) */
    CPUWatchpoint details;  /* vaddr/len/flags as seen by the gdbstub */
} HWWatchpoint;
64
65
/* Maximum number of HW break-/watchpoint slots the host kernel reports
 * (queried in kvm_arm_init_debug). */
int max_hw_bps, max_hw_wps;
/* Currently-installed slots, one HWBreakpoint/HWWatchpoint per entry. */
GArray *hw_breakpoints, *hw_watchpoints;

/* Number of slots currently in use. */
#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
/* Accessors for the i'th installed break-/watchpoint. */
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
73
74
75
76
77
78
79
80
81
82static void kvm_arm_init_debug(CPUState *cs)
83{
84 have_guest_debug = kvm_check_extension(cs->kvm_state,
85 KVM_CAP_SET_GUEST_DEBUG);
86
87 max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
88 hw_watchpoints = g_array_sized_new(true, true,
89 sizeof(HWWatchpoint), max_hw_wps);
90
91 max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
92 hw_breakpoints = g_array_sized_new(true, true,
93 sizeof(HWBreakpoint), max_hw_bps);
94 return;
95}
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120static int insert_hw_breakpoint(target_ulong addr)
121{
122 HWBreakpoint brk = {
123 .bcr = 0x1,
124 .bvr = addr
125 };
126
127 if (cur_hw_bps >= max_hw_bps) {
128 return -ENOBUFS;
129 }
130
131 brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);
132 brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);
133
134 g_array_append_val(hw_breakpoints, brk);
135
136 return 0;
137}
138
139
140
141
142
143
144
145
146static int delete_hw_breakpoint(target_ulong pc)
147{
148 int i;
149 for (i = 0; i < hw_breakpoints->len; i++) {
150 HWBreakpoint *brk = get_hw_bp(i);
151 if (brk->bvr == pc) {
152 g_array_remove_index(hw_breakpoints, i);
153 return 0;
154 }
155 }
156 return -ENOENT;
157}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191static int insert_hw_watchpoint(target_ulong addr,
192 target_ulong len, int type)
193{
194 HWWatchpoint wp = {
195 .wcr = 1,
196 .wvr = addr & (~0x7ULL),
197 .details = { .vaddr = addr, .len = len }
198 };
199
200 if (cur_hw_wps >= max_hw_wps) {
201 return -ENOBUFS;
202 }
203
204
205
206
207
208 wp.wcr = deposit32(wp.wcr, 1, 2, 3);
209
210 switch (type) {
211 case GDB_WATCHPOINT_READ:
212 wp.wcr = deposit32(wp.wcr, 3, 2, 1);
213 wp.details.flags = BP_MEM_READ;
214 break;
215 case GDB_WATCHPOINT_WRITE:
216 wp.wcr = deposit32(wp.wcr, 3, 2, 2);
217 wp.details.flags = BP_MEM_WRITE;
218 break;
219 case GDB_WATCHPOINT_ACCESS:
220 wp.wcr = deposit32(wp.wcr, 3, 2, 3);
221 wp.details.flags = BP_MEM_ACCESS;
222 break;
223 default:
224 g_assert_not_reached();
225 break;
226 }
227 if (len <= 8) {
228
229 int off = addr & 0x7;
230 int bas = (1 << len) - 1;
231
232 wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
233 } else {
234
235 if (is_power_of_2(len)) {
236 int bits = ctz64(len);
237
238 wp.wvr &= ~((1 << bits) - 1);
239 wp.wcr = deposit32(wp.wcr, 24, 4, bits);
240 wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
241 } else {
242 return -ENOBUFS;
243 }
244 }
245
246 g_array_append_val(hw_watchpoints, wp);
247 return 0;
248}
249
250
251static bool check_watchpoint_in_range(int i, target_ulong addr)
252{
253 HWWatchpoint *wp = get_hw_wp(i);
254 uint64_t addr_top, addr_bottom = wp->wvr;
255 int bas = extract32(wp->wcr, 5, 8);
256 int mask = extract32(wp->wcr, 24, 4);
257
258 if (mask) {
259 addr_top = addr_bottom + (1 << mask);
260 } else {
261
262
263 addr_bottom = addr_bottom + ctz32(bas);
264 addr_top = addr_bottom + clo32(bas);
265 }
266
267 if (addr >= addr_bottom && addr <= addr_top) {
268 return true;
269 }
270
271 return false;
272}
273
274
275
276
277
278
279
280
281static int delete_hw_watchpoint(target_ulong addr,
282 target_ulong len, int type)
283{
284 int i;
285 for (i = 0; i < cur_hw_wps; i++) {
286 if (check_watchpoint_in_range(i, addr)) {
287 g_array_remove_index(hw_watchpoints, i);
288 return 0;
289 }
290 }
291 return -ENOENT;
292}
293
294
295int kvm_arch_insert_hw_breakpoint(target_ulong addr,
296 target_ulong len, int type)
297{
298 switch (type) {
299 case GDB_BREAKPOINT_HW:
300 return insert_hw_breakpoint(addr);
301 break;
302 case GDB_WATCHPOINT_READ:
303 case GDB_WATCHPOINT_WRITE:
304 case GDB_WATCHPOINT_ACCESS:
305 return insert_hw_watchpoint(addr, len, type);
306 default:
307 return -ENOSYS;
308 }
309}
310
311int kvm_arch_remove_hw_breakpoint(target_ulong addr,
312 target_ulong len, int type)
313{
314 switch (type) {
315 case GDB_BREAKPOINT_HW:
316 return delete_hw_breakpoint(addr);
317 break;
318 case GDB_WATCHPOINT_READ:
319 case GDB_WATCHPOINT_WRITE:
320 case GDB_WATCHPOINT_ACCESS:
321 return delete_hw_watchpoint(addr, len, type);
322 default:
323 return -ENOSYS;
324 }
325}
326
327
328void kvm_arch_remove_all_hw_breakpoints(void)
329{
330 if (cur_hw_wps > 0) {
331 g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
332 }
333 if (cur_hw_bps > 0) {
334 g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
335 }
336}
337
338void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
339{
340 int i;
341 memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
342
343 for (i = 0; i < max_hw_wps; i++) {
344 HWWatchpoint *wp = get_hw_wp(i);
345 ptr->dbg_wcr[i] = wp->wcr;
346 ptr->dbg_wvr[i] = wp->wvr;
347 }
348 for (i = 0; i < max_hw_bps; i++) {
349 HWBreakpoint *bp = get_hw_bp(i);
350 ptr->dbg_bcr[i] = bp->bcr;
351 ptr->dbg_bvr[i] = bp->bvr;
352 }
353}
354
355bool kvm_arm_hw_debug_active(CPUState *cs)
356{
357 return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
358}
359
360static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
361{
362 int i;
363
364 for (i = 0; i < cur_hw_bps; i++) {
365 HWBreakpoint *bp = get_hw_bp(i);
366 if (bp->bvr == pc) {
367 return true;
368 }
369 }
370 return false;
371}
372
373static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
374{
375 int i;
376
377 for (i = 0; i < cur_hw_wps; i++) {
378 if (check_watchpoint_in_range(i, addr)) {
379 return &get_hw_wp(i)->details;
380 }
381 }
382 return NULL;
383}
384
385
/* Set bit @feature in the @features bitmap. */
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= UINT64_C(1) << feature;
}
390
391bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
392{
393
394
395
396
397
398
399
400 int fdarray[3];
401 uint64_t features = 0;
402
403
404
405
406
407 static const uint32_t cpus_to_try[] = {
408 KVM_ARM_TARGET_AEM_V8,
409 KVM_ARM_TARGET_FOUNDATION_V8,
410 KVM_ARM_TARGET_CORTEX_A57,
411 QEMU_KVM_ARM_TARGET_NONE
412 };
413 struct kvm_vcpu_init init;
414
415 if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
416 return false;
417 }
418
419 ahcc->target = init.target;
420 ahcc->dtb_compatible = "arm,arm-v8";
421
422 kvm_arm_destroy_scratch_host_vcpu(fdarray);
423
424
425
426
427
428 set_feature(&features, ARM_FEATURE_V8);
429 set_feature(&features, ARM_FEATURE_VFP4);
430 set_feature(&features, ARM_FEATURE_NEON);
431 set_feature(&features, ARM_FEATURE_AARCH64);
432
433 ahcc->features = features;
434
435 return true;
436}
437
/* MPIDR_EL1 system register encoding: op0=3, op1=0, crn=0, crm=0, op2=5 */
#define ARM_CPU_ID_MPIDR        3, 0, 0, 0, 5
439
440int kvm_arch_init_vcpu(CPUState *cs)
441{
442 int ret;
443 uint64_t mpidr;
444 ARMCPU *cpu = ARM_CPU(cs);
445
446 if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
447 !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
448 fprintf(stderr, "KVM is not supported for this guest CPU type\n");
449 return -EINVAL;
450 }
451
452
453 memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
454 if (cpu->start_powered_off) {
455 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
456 }
457 if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
458 cpu->psci_version = 2;
459 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
460 }
461 if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
462 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
463 }
464
465
466 ret = kvm_arm_vcpu_init(cs);
467 if (ret) {
468 return ret;
469 }
470
471
472
473
474
475
476 ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
477 if (ret) {
478 return ret;
479 }
480 cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
481
482 kvm_arm_init_debug(cs);
483
484 return kvm_arm_init_cpreg_list(cpu);
485}
486
487bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
488{
489
490
491
492
493 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
494 case KVM_REG_ARM_CORE:
495 return false;
496 default:
497 return true;
498 }
499}
500
/* Maps a KVM register index to the state-transfer level at which it
 * must be written back to the kernel (see kvm_arm_cpreg_level). */
typedef struct CPRegStateLevel {
    uint64_t regidx;  /* KVM register ID */
    int level;        /* KVM_PUT_*_STATE level required */
} CPRegStateLevel;

/* Registers NOT listed here default to KVM_PUT_RUNTIME_STATE. Entries
 * in this table are written back only at the level they specify —
 * e.g. the virtual timer count is only synced on a full state write. */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
514
515int kvm_arm_cpreg_level(uint64_t regidx)
516{
517 int i;
518
519 for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
520 const CPRegStateLevel *l = &non_runtime_cpregs[i];
521 if (l->regidx == regidx) {
522 return l->level;
523 }
524 }
525
526 return KVM_PUT_RUNTIME_STATE;
527}
528
/* Build KVM register IDs for members of the AArch64 "core" register
 * struct, at the three access sizes used below: 64-bit for GP/PC/SP,
 * 128-bit for the SIMD vector registers, 32-bit for FPSR/FPCR. */
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
537
538int kvm_arch_put_registers(CPUState *cs, int level)
539{
540 struct kvm_one_reg reg;
541 uint32_t fpr;
542 uint64_t val;
543 int i;
544 int ret;
545 unsigned int el;
546
547 ARMCPU *cpu = ARM_CPU(cs);
548 CPUARMState *env = &cpu->env;
549
550
551
552
553 if (!is_a64(env)) {
554 aarch64_sync_32_to_64(env);
555 }
556
557 for (i = 0; i < 31; i++) {
558 reg.id = AARCH64_CORE_REG(regs.regs[i]);
559 reg.addr = (uintptr_t) &env->xregs[i];
560 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
561 if (ret) {
562 return ret;
563 }
564 }
565
566
567
568
569 aarch64_save_sp(env, 1);
570
571 reg.id = AARCH64_CORE_REG(regs.sp);
572 reg.addr = (uintptr_t) &env->sp_el[0];
573 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
574 if (ret) {
575 return ret;
576 }
577
578 reg.id = AARCH64_CORE_REG(sp_el1);
579 reg.addr = (uintptr_t) &env->sp_el[1];
580 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
581 if (ret) {
582 return ret;
583 }
584
585
586 if (is_a64(env)) {
587 val = pstate_read(env);
588 } else {
589 val = cpsr_read(env);
590 }
591 reg.id = AARCH64_CORE_REG(regs.pstate);
592 reg.addr = (uintptr_t) &val;
593 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
594 if (ret) {
595 return ret;
596 }
597
598 reg.id = AARCH64_CORE_REG(regs.pc);
599 reg.addr = (uintptr_t) &env->pc;
600 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
601 if (ret) {
602 return ret;
603 }
604
605 reg.id = AARCH64_CORE_REG(elr_el1);
606 reg.addr = (uintptr_t) &env->elr_el[1];
607 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
608 if (ret) {
609 return ret;
610 }
611
612
613
614
615
616
617
618 el = arm_current_el(env);
619 if (el > 0 && !is_a64(env)) {
620 i = bank_number(env->uncached_cpsr & CPSR_M);
621 env->banked_spsr[i] = env->spsr;
622 }
623
624
625 for (i = 0; i < KVM_NR_SPSR; i++) {
626 reg.id = AARCH64_CORE_REG(spsr[i]);
627 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
628 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
629 if (ret) {
630 return ret;
631 }
632 }
633
634
635
636
637 for (i = 0; i < 32; i++) {
638 int rd = i << 1;
639 uint64_t fp_val[2];
640#ifdef HOST_WORDS_BIGENDIAN
641 fp_val[0] = env->vfp.regs[rd + 1];
642 fp_val[1] = env->vfp.regs[rd];
643#else
644 fp_val[1] = env->vfp.regs[rd + 1];
645 fp_val[0] = env->vfp.regs[rd];
646#endif
647 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
648 reg.addr = (uintptr_t)(&fp_val);
649 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
650 if (ret) {
651 return ret;
652 }
653 }
654
655 reg.addr = (uintptr_t)(&fpr);
656 fpr = vfp_get_fpsr(env);
657 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
658 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
659 if (ret) {
660 return ret;
661 }
662
663 fpr = vfp_get_fpcr(env);
664 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
665 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
666 if (ret) {
667 return ret;
668 }
669
670 if (!write_list_to_kvmstate(cpu, level)) {
671 return EINVAL;
672 }
673
674 kvm_arm_sync_mpstate_to_kvm(cpu);
675
676 return ret;
677}
678
679int kvm_arch_get_registers(CPUState *cs)
680{
681 struct kvm_one_reg reg;
682 uint64_t val;
683 uint32_t fpr;
684 unsigned int el;
685 int i;
686 int ret;
687
688 ARMCPU *cpu = ARM_CPU(cs);
689 CPUARMState *env = &cpu->env;
690
691 for (i = 0; i < 31; i++) {
692 reg.id = AARCH64_CORE_REG(regs.regs[i]);
693 reg.addr = (uintptr_t) &env->xregs[i];
694 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
695 if (ret) {
696 return ret;
697 }
698 }
699
700 reg.id = AARCH64_CORE_REG(regs.sp);
701 reg.addr = (uintptr_t) &env->sp_el[0];
702 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
703 if (ret) {
704 return ret;
705 }
706
707 reg.id = AARCH64_CORE_REG(sp_el1);
708 reg.addr = (uintptr_t) &env->sp_el[1];
709 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
710 if (ret) {
711 return ret;
712 }
713
714 reg.id = AARCH64_CORE_REG(regs.pstate);
715 reg.addr = (uintptr_t) &val;
716 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
717 if (ret) {
718 return ret;
719 }
720
721 env->aarch64 = ((val & PSTATE_nRW) == 0);
722 if (is_a64(env)) {
723 pstate_write(env, val);
724 } else {
725 cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
726 }
727
728
729
730
731 aarch64_restore_sp(env, 1);
732
733 reg.id = AARCH64_CORE_REG(regs.pc);
734 reg.addr = (uintptr_t) &env->pc;
735 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
736 if (ret) {
737 return ret;
738 }
739
740
741
742
743
744
745 if (!is_a64(env)) {
746 aarch64_sync_64_to_32(env);
747 }
748
749 reg.id = AARCH64_CORE_REG(elr_el1);
750 reg.addr = (uintptr_t) &env->elr_el[1];
751 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
752 if (ret) {
753 return ret;
754 }
755
756
757
758
759
760 for (i = 0; i < KVM_NR_SPSR; i++) {
761 reg.id = AARCH64_CORE_REG(spsr[i]);
762 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
763 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
764 if (ret) {
765 return ret;
766 }
767 }
768
769 el = arm_current_el(env);
770 if (el > 0 && !is_a64(env)) {
771 i = bank_number(env->uncached_cpsr & CPSR_M);
772 env->spsr = env->banked_spsr[i];
773 }
774
775
776
777
778 for (i = 0; i < 32; i++) {
779 uint64_t fp_val[2];
780 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
781 reg.addr = (uintptr_t)(&fp_val);
782 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
783 if (ret) {
784 return ret;
785 } else {
786 int rd = i << 1;
787#ifdef HOST_WORDS_BIGENDIAN
788 env->vfp.regs[rd + 1] = fp_val[0];
789 env->vfp.regs[rd] = fp_val[1];
790#else
791 env->vfp.regs[rd + 1] = fp_val[1];
792 env->vfp.regs[rd] = fp_val[0];
793#endif
794 }
795 }
796
797 reg.addr = (uintptr_t)(&fpr);
798 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
799 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
800 if (ret) {
801 return ret;
802 }
803 vfp_set_fpsr(env, fpr);
804
805 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
806 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
807 if (ret) {
808 return ret;
809 }
810 vfp_set_fpcr(env, fpr);
811
812 if (!write_kvmstate_to_list(cpu)) {
813 return EINVAL;
814 }
815
816
817
818 write_list_to_cpustate(cpu);
819
820 kvm_arm_sync_mpstate_to_qemu(cpu);
821
822
823 return ret;
824}
825
826
/* AArch64 BRK instruction with immediate 0, patched into guest memory
 * as the software breakpoint. */
static const uint32_t brk_insn = 0xd4200000;
828
829int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
830{
831 if (have_guest_debug) {
832 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
833 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
834 return -EINVAL;
835 }
836 return 0;
837 } else {
838 error_report("guest debug not supported on this kernel");
839 return -EINVAL;
840 }
841}
842
843int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
844{
845 static uint32_t brk;
846
847 if (have_guest_debug) {
848 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
849 brk != brk_insn ||
850 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
851 return -EINVAL;
852 }
853 return 0;
854 } else {
855 error_report("guest debug not supported on this kernel");
856 return -EINVAL;
857 }
858}
859
860
861
862
863
864
865
866
867bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
868{
869 int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
870 ARMCPU *cpu = ARM_CPU(cs);
871 CPUClass *cc = CPU_GET_CLASS(cs);
872 CPUARMState *env = &cpu->env;
873
874
875 kvm_cpu_synchronize_state(cs);
876
877 switch (hsr_ec) {
878 case EC_SOFTWARESTEP:
879 if (cs->singlestep_enabled) {
880 return true;
881 } else {
882
883
884
885
886 error_report("%s: guest single-step while debugging unsupported"
887 " (%"PRIx64", %"PRIx32")\n",
888 __func__, env->pc, debug_exit->hsr);
889 return false;
890 }
891 break;
892 case EC_AA64_BKPT:
893 if (kvm_find_sw_breakpoint(cs, env->pc)) {
894 return true;
895 }
896 break;
897 case EC_BREAKPOINT:
898 if (find_hw_breakpoint(cs, env->pc)) {
899 return true;
900 }
901 break;
902 case EC_WATCHPOINT:
903 {
904 CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
905 if (wp) {
906 cs->watchpoint_hit = wp;
907 return true;
908 }
909 break;
910 }
911 default:
912 error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")\n",
913 __func__, debug_exit->hsr, env->pc);
914 }
915
916
917
918
919
920 cs->exception_index = EXCP_BKPT;
921 env->exception.syndrome = debug_exit->hsr;
922 env->exception.vaddress = debug_exit->far;
923 cc->do_interrupt(cs);
924
925 return false;
926}
927