/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "exec/gdbstub.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/arm/arm.h"

static bool have_guest_debug;

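/* Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 */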
typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

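/* The watchpoint registers can cover more area than the requested
 * watchpoint, so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */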
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

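/* Maximum and current break/watch point counts */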
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

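/*
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * (or 0 if we have none), so we use it to size the global break- and
 * watchpoint arrays.
 */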
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

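/*
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details, but here we only create simple
 * un-linked breakpoints (i.e. we don't chain breakpoints together to
 * match address and context or vmid). The hardware is capable of
 * fancier matching, but that would require exposing that fanciness
 * through GDB's interface.
 *
 * In DBGBCR_EL1 we set E (bit 0) to enable the breakpoint, PMC
 * (bits 2:1) to 0b11 so it matches at EL1 and EL0, and BAS (bits 8:5)
 * to 0xf as required for AArch64 breakpoints.
 */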
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* E=1, enable */
        .bvr = addr
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);    /* PMC = 0b11, EL1 and EL0 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);    /* BAS = 0xf, match any byte */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}

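/*
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down.
 */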
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

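/*
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of area
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * matching, but we only build simple watchpoints here. In DBGWCR_EL1
 * we set E (bit 0) to enable the watchpoint, PAC (bits 2:1) to 0b11 to
 * match at EL1 and EL0, and LSC (bits 4:3) to the load/store type. The
 * watched range is described either by BAS (bits 12:5), a byte mask
 * relative to the 8-byte-aligned DBGWVR_EL1 address, or by MASK
 * (bits 27:24 as deposited here), a power-of-two address mask for
 * larger ranges.
 */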
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1,                   /* E=1, enable */
        .wvr = addr & (~0x7ULL),    /* DBGWVR is 8-byte aligned */
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /* PAC (bits 2:1) = 0b11: match accesses from EL1 and EL0 */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    /* LSC (bits 4:3): load, store or both */
    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
        break;
    }
    if (len <= 8) {
        /* we can only watch up to 8 bytes via the BAS byte mask */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* bigger ranges must be power-of-2 sized and use MASK */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1ULL << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}

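/*
 * Return true if @addr falls within the range watched by hardware
 * watchpoint @i, reversing the BAS/MASK encoding set up in
 * insert_hw_watchpoint().
 */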
static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1ULL << mask) - 1;
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR, so the watched range covers ctpop32(bas)
         * bytes starting at the lowest selected byte lane.
         */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + ctpop32(bas) - 1;
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}

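/*
 * delete_hw_watchpoint()
 * @addr: address of watch point
 *
 * Delete a watchpoint and shuffle any above down. The len/type
 * arguments are unused: we match purely on the watched address range.
 */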
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

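/* Copy the global break-/watchpoint state into the per-vCPU debug
 * register structure handed to KVM_SET_GUEST_DEBUG; unused slots stay
 * zeroed by the initial memset.
 */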
void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < cur_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

static inline void unset_feature(uint64_t *features, int feature)
{
    *features &= ~(1ULL << feature);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    uint64_t features = 0;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);
    set_feature(&features, ARM_FEATURE_PMU);

    ahcf->features = features;

    return true;
}

#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        unset_feature(&env->features, ARM_FEATURE_PMU);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written
 * less often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

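/* Construct KVM_{GET,SET}_ONE_REG ids for the AArch64 core register
 * file: 64-bit core registers, 128-bit SIMD registers and the 32-bit
 * FP control/status registers.
 */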
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banked array.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Advanced SIMD and FP registers. */
    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with
     * the incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired
     * from the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /* Advanced SIMD and FP registers */
    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }

    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return ret;
}

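/* 0xd4200000 encodes BRK #0, the A64 breakpoint instruction */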
static const uint32_t brk_insn = 0xd4200000;

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

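/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU. Returns true if the debug
 * exit was handled on the host side (i.e. it belongs to GDB); false
 * means the exception is re-injected into the guest.
 */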
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    cc->do_interrupt(cs);

    return false;
}