1
2
3
4
5
6
7
8
9
10
11
12#include "qemu/osdep.h"
13#include <sys/ioctl.h>
14#include <sys/ptrace.h>
15
16#include <linux/elf.h>
17#include <linux/kvm.h>
18
19#include "qemu-common.h"
20#include "cpu.h"
21#include "qemu/timer.h"
22#include "qemu/error-report.h"
23#include "qemu/host-utils.h"
24#include "exec/gdbstub.h"
25#include "sysemu/sysemu.h"
26#include "sysemu/kvm.h"
27#include "kvm_arm.h"
28#include "internals.h"
29#include "hw/arm/arm.h"
30
/* Whether the host kernel supports KVM_CAP_SET_GUEST_DEBUG (probed in
 * kvm_arm_init_debug()).
 */
static bool have_guest_debug;
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/* Cached state for one hardware breakpoint: the values to be loaded
 * into the DBGBCR (control) and DBGBVR (value/address) registers.
 */
typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;
52
53
54
55
56
57
/* Cached state for one hardware watchpoint: the DBGWCR/DBGWVR values
 * plus the CPUWatchpoint details handed back to the gdbstub on a hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;
63
64
/* Maximum numbers of hardware break-/watchpoint slots reported by the
 * host kernel (set in kvm_arm_init_debug()) and the arrays tracking
 * the currently installed ones.
 * NOTE(review): no external users are visible from this file; these
 * could probably be made static — confirm against the rest of the tree.
 */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

/* Convenience accessors for the arrays above. */
#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
72
73
74
75
76
77
78
79
80
81static void kvm_arm_init_debug(CPUState *cs)
82{
83 have_guest_debug = kvm_check_extension(cs->kvm_state,
84 KVM_CAP_SET_GUEST_DEBUG);
85
86 max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
87 hw_watchpoints = g_array_sized_new(true, true,
88 sizeof(HWWatchpoint), max_hw_wps);
89
90 max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
91 hw_breakpoints = g_array_sized_new(true, true,
92 sizeof(HWBreakpoint), max_hw_bps);
93 return;
94}
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
/*
 * Install a simple (unlinked) hardware breakpoint at @addr.
 *
 * Returns 0 on success, -ENOBUFS when all host breakpoint slots are
 * already in use.
 */
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                     /* E=1: enable the breakpoint */
        .bvr = sextract64(addr, 0, 53)  /* DBGBVR sign-extended from bit 52 */
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    /* PMC = 0b11 (bits [2:1]) — match at both EL1 and EL0; per the
     * DBGBCR layout in the ARM ARM — confirm field meaning there.
     */
    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);
    /* BAS = 0b1111 (bits [8:5]) — match the full instruction word */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}
150
151
152
153
154
155
156
157
158static int delete_hw_breakpoint(target_ulong pc)
159{
160 int i;
161 for (i = 0; i < hw_breakpoints->len; i++) {
162 HWBreakpoint *brk = get_hw_bp(i);
163 if (brk->bvr == pc) {
164 g_array_remove_index(hw_breakpoints, i);
165 return 0;
166 }
167 }
168 return -ENOENT;
169}
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
/*
 * Install a hardware watchpoint covering @len bytes at @addr.
 * @type is one of the GDB_WATCHPOINT_* access kinds.
 *
 * Returns 0 on success, -ENOBUFS when no slot is free or @len cannot
 * be encoded (lengths above 8 must be a power of 2).
 */
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1,                /* E=1: enable the watchpoint */
        .wvr = addr & (~0x7ULL), /* DBGWVR holds the doubleword-aligned base */
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /* PAC = 0b11 (bits [2:1]): match accesses from EL1 and EL0 —
     * confirm field meaning against the DBGWCR layout in the ARM ARM.
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    /* LSC (bits [4:3]): which access kinds trigger the watchpoint */
    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
        break;
    }
    if (len <= 8) {
        /* Small ranges: select the watched bytes via the BAS bitmap,
         * offset from the aligned base address.
         * NOTE(review): if off + len > 8 the bitmap is silently
         * truncated by the deposit width — confirm callers never span
         * a doubleword boundary.
         */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* Larger ranges: encode a naturally aligned 2^bits region via
         * the MASK field, with all BAS bits set.
         * NOTE(review): "1 << bits" is an int shift; for bits >= 31
         * this is UB — presumably gdb never requests such lengths, but
         * 1ULL would be safer. Confirm.
         */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits); /* MASK field */
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);  /* BAS: all bytes */
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}
261
262
263static bool check_watchpoint_in_range(int i, target_ulong addr)
264{
265 HWWatchpoint *wp = get_hw_wp(i);
266 uint64_t addr_top, addr_bottom = wp->wvr;
267 int bas = extract32(wp->wcr, 5, 8);
268 int mask = extract32(wp->wcr, 24, 4);
269
270 if (mask) {
271 addr_top = addr_bottom + (1 << mask);
272 } else {
273
274
275 addr_bottom = addr_bottom + ctz32(bas);
276 addr_top = addr_bottom + clo32(bas);
277 }
278
279 if (addr >= addr_bottom && addr <= addr_top) {
280 return true;
281 }
282
283 return false;
284}
285
286
287
288
289
290
291
292
293static int delete_hw_watchpoint(target_ulong addr,
294 target_ulong len, int type)
295{
296 int i;
297 for (i = 0; i < cur_hw_wps; i++) {
298 if (check_watchpoint_in_range(i, addr)) {
299 g_array_remove_index(hw_watchpoints, i);
300 return 0;
301 }
302 }
303 return -ENOENT;
304}
305
306
307int kvm_arch_insert_hw_breakpoint(target_ulong addr,
308 target_ulong len, int type)
309{
310 switch (type) {
311 case GDB_BREAKPOINT_HW:
312 return insert_hw_breakpoint(addr);
313 break;
314 case GDB_WATCHPOINT_READ:
315 case GDB_WATCHPOINT_WRITE:
316 case GDB_WATCHPOINT_ACCESS:
317 return insert_hw_watchpoint(addr, len, type);
318 default:
319 return -ENOSYS;
320 }
321}
322
323int kvm_arch_remove_hw_breakpoint(target_ulong addr,
324 target_ulong len, int type)
325{
326 switch (type) {
327 case GDB_BREAKPOINT_HW:
328 return delete_hw_breakpoint(addr);
329 break;
330 case GDB_WATCHPOINT_READ:
331 case GDB_WATCHPOINT_WRITE:
332 case GDB_WATCHPOINT_ACCESS:
333 return delete_hw_watchpoint(addr, len, type);
334 default:
335 return -ENOSYS;
336 }
337}
338
339
340void kvm_arch_remove_all_hw_breakpoints(void)
341{
342 if (cur_hw_wps > 0) {
343 g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
344 }
345 if (cur_hw_bps > 0) {
346 g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
347 }
348}
349
/*
 * Fill @ptr with the debug register values KVM should program for
 * this vcpu.  All max_hw_* slots are written; entries beyond the
 * number of installed break-/watchpoints come out as zero.
 *
 * NOTE(review): the loops index the GArrays up to max_hw_* which can
 * exceed ->len; this relies on g_array_sized_new() having pre-cleared
 * the reserved capacity (clear=true) — confirm.
 */
void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}
366
367bool kvm_arm_hw_debug_active(CPUState *cs)
368{
369 return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
370}
371
372static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
373{
374 int i;
375
376 for (i = 0; i < cur_hw_bps; i++) {
377 HWBreakpoint *bp = get_hw_bp(i);
378 if (bp->bvr == pc) {
379 return true;
380 }
381 }
382 return false;
383}
384
385static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
386{
387 int i;
388
389 for (i = 0; i < cur_hw_wps; i++) {
390 if (check_watchpoint_in_range(i, addr)) {
391 return &get_hw_wp(i)->details;
392 }
393 }
394 return NULL;
395}
396
/*
 * Apply a PMU device attribute to the vcpu, first checking that the
 * kernel supports it (KVM_HAS_DEVICE_ATTR before KVM_SET_DEVICE_ATTR).
 * On failure an error is reported and false returned.
 */
static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        /* kvm_vcpu_ioctl returns a negative errno, hence -err */
        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    return true;
}
415
/*
 * Initialise the in-kernel PMUv3 for this vcpu.  A no-op when the CPU
 * model has no PMU; aborts if the kernel refuses the attribute.
 */
void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to init PMU");
        abort();
    }
}
431
/*
 * Tell the in-kernel PMUv3 which interrupt line to raise.  A no-op
 * when the CPU model has no PMU; aborts on failure.
 *
 * @irq is passed by address; the kernel reads it during the ioctl
 * issued by kvm_arm_pmu_set_attr(), while the local is still live.
 */
void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to set irq for PMU");
        abort();
    }
}
448
/* Set bit @feature in the feature word pointed to by @features. */
static inline void set_feature(uint64_t *features, int feature)
{
    const uint64_t bit = 1ULL << feature;

    *features |= bit;
}
453
/* Clear bit @feature in the feature word pointed to by @features. */
static inline void unset_feature(uint64_t *features, int feature)
{
    const uint64_t bit = 1ULL << feature;

    *features &= ~bit;
}
458
/*
 * Read a 64-bit-sized system register via KVM_GET_ONE_REG and narrow
 * the result to 32 bits.  Returns 0 on success, -1 if the ioctl fails
 * (in which case *pret is left untouched).
 */
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t tmp;
    struct kvm_one_reg req = { .id = id, .addr = (uintptr_t)&tmp };

    /* Only 64-bit sized register ids may be used with this helper. */
    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);

    if (ioctl(fd, KVM_GET_ONE_REG, &req) < 0) {
        return -1;
    }

    *pret = tmp;
    return 0;
}
473
/*
 * Read a 64-bit system register into *pret via KVM_GET_ONE_REG.
 * Returns the raw ioctl result: 0 on success, negative on failure.
 */
static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg req = { .id = id, .addr = (uintptr_t)pret };

    /* Only 64-bit sized register ids may be used with this helper. */
    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);

    return ioctl(fd, KVM_GET_ONE_REG, &req);
}
481
/*
 * Probe the feature set of the host CPU and fill in @ahcf.
 *
 * We create a scratch VM with a single vcpu, read its ID registers,
 * then destroy it again.  Returns false only if the scratch vcpu
 * could not be created.
 */
bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    int fdarray[3];
    uint64_t features = 0;
    int err;

    /* Target types tried in order when creating the scratch vcpu;
     * the list is terminated by QEMU_KVM_ARM_TARGET_NONE.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    /* fdarray[2] is the scratch vcpu fd (see the create call above) —
     * read ID_AA64PFR0_EL1 first as a probe for ID-register access.
     */
    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /* The kernel refused the ID register read (presumably an older
         * kernel that does not expose these registers — confirm): fall
         * back to a fixed default for ID_AA64PFR0 and skip probing the
         * remaining ID registers, leaving them zero.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011;
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));

        /* The AArch32 ID registers (ID_ISAR0..6, MVFR0..2), read via
         * their AArch64 system-register encodings.  Note ID_ISAR6 is
         * at op2 == 7, not 6 as the name might suggest.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Baseline feature bits assumed for any KVM-capable host CPU:
     * v8 AArch64 with VFPv4, Neon and a PMU.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);
    set_feature(&features, ARM_FEATURE_PMU);

    ahcf->features = features;

    return true;
}
592
593#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
594
/*
 * Per-vcpu KVM initialisation: choose the vcpu init features, issue
 * KVM_ARM_VCPU_INIT (via kvm_arm_vcpu_init()), read back the MPIDR,
 * then set up debug, SError injection and the cpreg list.
 *
 * Returns 0 on success or a negative errno.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Only AArch64-class CPU models with a known KVM target work here */
    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        /* 32-bit EL1 guest on a 64-bit host */
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        unset_feature(&env->features, ARM_FEATURE_PMU);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* KVM decides the MPIDR for each vcpu itself, so override our
     * default mp_affinity with the value the kernel reports.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Probe whether user space can set the SError syndrome */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}
653
654bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
655{
656
657
658
659
660 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
661 case KVM_REG_ARM_CORE:
662 return false;
663 default:
664 return true;
665 }
666}
667
/* Map a cpreg index to the minimum state-transfer level at which it
 * must be written back to KVM (a KVM_PUT_*_STATE value).
 */
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;
672
673
674
675
676
677
/* Registers that must only be written back on a full state save
 * (KVM_PUT_FULL_STATE) rather than at runtime; any register not
 * listed here defaults to KVM_PUT_RUNTIME_STATE (see
 * kvm_arm_cpreg_level() below).  KVM_REG_ARM_TIMER_CNT is the timer
 * counter — presumably syncing it at runtime would perturb the
 * guest's time; confirm.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
681
682int kvm_arm_cpreg_level(uint64_t regidx)
683{
684 int i;
685
686 for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
687 const CPRegStateLevel *l = &non_runtime_cpregs[i];
688 if (l->regidx == regidx) {
689 return l->level;
690 }
691 }
692
693 return KVM_PUT_RUNTIME_STATE;
694}
695
/* Build KVM register ids for AArch64 core registers (64-bit), SIMD
 * vector registers (128-bit) and SIMD control registers (32-bit).
 * @x names a field within the kernel's core register layout.
 */
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
704
705int kvm_arch_put_registers(CPUState *cs, int level)
706{
707 struct kvm_one_reg reg;
708 uint32_t fpr;
709 uint64_t val;
710 int i;
711 int ret;
712 unsigned int el;
713
714 ARMCPU *cpu = ARM_CPU(cs);
715 CPUARMState *env = &cpu->env;
716
717
718
719
720 if (!is_a64(env)) {
721 aarch64_sync_32_to_64(env);
722 }
723
724 for (i = 0; i < 31; i++) {
725 reg.id = AARCH64_CORE_REG(regs.regs[i]);
726 reg.addr = (uintptr_t) &env->xregs[i];
727 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
728 if (ret) {
729 return ret;
730 }
731 }
732
733
734
735
736 aarch64_save_sp(env, 1);
737
738 reg.id = AARCH64_CORE_REG(regs.sp);
739 reg.addr = (uintptr_t) &env->sp_el[0];
740 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
741 if (ret) {
742 return ret;
743 }
744
745 reg.id = AARCH64_CORE_REG(sp_el1);
746 reg.addr = (uintptr_t) &env->sp_el[1];
747 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
748 if (ret) {
749 return ret;
750 }
751
752
753 if (is_a64(env)) {
754 val = pstate_read(env);
755 } else {
756 val = cpsr_read(env);
757 }
758 reg.id = AARCH64_CORE_REG(regs.pstate);
759 reg.addr = (uintptr_t) &val;
760 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
761 if (ret) {
762 return ret;
763 }
764
765 reg.id = AARCH64_CORE_REG(regs.pc);
766 reg.addr = (uintptr_t) &env->pc;
767 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
768 if (ret) {
769 return ret;
770 }
771
772 reg.id = AARCH64_CORE_REG(elr_el1);
773 reg.addr = (uintptr_t) &env->elr_el[1];
774 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
775 if (ret) {
776 return ret;
777 }
778
779
780
781
782
783
784
785 el = arm_current_el(env);
786 if (el > 0 && !is_a64(env)) {
787 i = bank_number(env->uncached_cpsr & CPSR_M);
788 env->banked_spsr[i] = env->spsr;
789 }
790
791
792 for (i = 0; i < KVM_NR_SPSR; i++) {
793 reg.id = AARCH64_CORE_REG(spsr[i]);
794 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
795 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
796 if (ret) {
797 return ret;
798 }
799 }
800
801
802 for (i = 0; i < 32; i++) {
803 uint64_t *q = aa64_vfp_qreg(env, i);
804#ifdef HOST_WORDS_BIGENDIAN
805 uint64_t fp_val[2] = { q[1], q[0] };
806 reg.addr = (uintptr_t)fp_val;
807#else
808 reg.addr = (uintptr_t)q;
809#endif
810 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
811 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
812 if (ret) {
813 return ret;
814 }
815 }
816
817 reg.addr = (uintptr_t)(&fpr);
818 fpr = vfp_get_fpsr(env);
819 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
820 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
821 if (ret) {
822 return ret;
823 }
824
825 fpr = vfp_get_fpcr(env);
826 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
827 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
828 if (ret) {
829 return ret;
830 }
831
832 ret = kvm_put_vcpu_events(cpu);
833 if (ret) {
834 return ret;
835 }
836
837 if (!write_list_to_kvmstate(cpu, level)) {
838 return EINVAL;
839 }
840
841 kvm_arm_sync_mpstate_to_kvm(cpu);
842
843 return ret;
844}
845
846int kvm_arch_get_registers(CPUState *cs)
847{
848 struct kvm_one_reg reg;
849 uint64_t val;
850 uint32_t fpr;
851 unsigned int el;
852 int i;
853 int ret;
854
855 ARMCPU *cpu = ARM_CPU(cs);
856 CPUARMState *env = &cpu->env;
857
858 for (i = 0; i < 31; i++) {
859 reg.id = AARCH64_CORE_REG(regs.regs[i]);
860 reg.addr = (uintptr_t) &env->xregs[i];
861 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
862 if (ret) {
863 return ret;
864 }
865 }
866
867 reg.id = AARCH64_CORE_REG(regs.sp);
868 reg.addr = (uintptr_t) &env->sp_el[0];
869 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
870 if (ret) {
871 return ret;
872 }
873
874 reg.id = AARCH64_CORE_REG(sp_el1);
875 reg.addr = (uintptr_t) &env->sp_el[1];
876 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
877 if (ret) {
878 return ret;
879 }
880
881 reg.id = AARCH64_CORE_REG(regs.pstate);
882 reg.addr = (uintptr_t) &val;
883 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
884 if (ret) {
885 return ret;
886 }
887
888 env->aarch64 = ((val & PSTATE_nRW) == 0);
889 if (is_a64(env)) {
890 pstate_write(env, val);
891 } else {
892 cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
893 }
894
895
896
897
898 aarch64_restore_sp(env, 1);
899
900 reg.id = AARCH64_CORE_REG(regs.pc);
901 reg.addr = (uintptr_t) &env->pc;
902 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
903 if (ret) {
904 return ret;
905 }
906
907
908
909
910
911
912 if (!is_a64(env)) {
913 aarch64_sync_64_to_32(env);
914 }
915
916 reg.id = AARCH64_CORE_REG(elr_el1);
917 reg.addr = (uintptr_t) &env->elr_el[1];
918 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
919 if (ret) {
920 return ret;
921 }
922
923
924
925
926
927 for (i = 0; i < KVM_NR_SPSR; i++) {
928 reg.id = AARCH64_CORE_REG(spsr[i]);
929 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
930 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
931 if (ret) {
932 return ret;
933 }
934 }
935
936 el = arm_current_el(env);
937 if (el > 0 && !is_a64(env)) {
938 i = bank_number(env->uncached_cpsr & CPSR_M);
939 env->spsr = env->banked_spsr[i];
940 }
941
942
943 for (i = 0; i < 32; i++) {
944 uint64_t *q = aa64_vfp_qreg(env, i);
945 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
946 reg.addr = (uintptr_t)q;
947 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
948 if (ret) {
949 return ret;
950 } else {
951#ifdef HOST_WORDS_BIGENDIAN
952 uint64_t t;
953 t = q[0], q[0] = q[1], q[1] = t;
954#endif
955 }
956 }
957
958 reg.addr = (uintptr_t)(&fpr);
959 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
960 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
961 if (ret) {
962 return ret;
963 }
964 vfp_set_fpsr(env, fpr);
965
966 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
967 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
968 if (ret) {
969 return ret;
970 }
971 vfp_set_fpcr(env, fpr);
972
973 ret = kvm_get_vcpu_events(cpu);
974 if (ret) {
975 return ret;
976 }
977
978 if (!write_kvmstate_to_list(cpu)) {
979 return EINVAL;
980 }
981
982
983
984 write_list_to_cpustate(cpu);
985
986 kvm_arm_sync_mpstate_to_qemu(cpu);
987
988
989 return ret;
990}
991
992
/* Encoding of the AArch64 BRK #0 instruction, planted in guest memory
 * to implement software breakpoints (see kvm_arch_insert_sw_breakpoint).
 */
static const uint32_t brk_insn = 0xd4200000;
994
/*
 * Plant a software breakpoint: save the original 4-byte instruction
 * at bp->pc into bp->saved_insn and overwrite it with BRK #0.
 *
 * Returns 0 on success, -EINVAL if guest memory cannot be accessed or
 * the kernel lacks guest-debug support.
 */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        /* NOTE(review): the (uint8_t *)&brk_insn cast drops const for
         * the write-direction call; cpu_memory_rw_debug only reads
         * from the buffer here, but a const-correct API would be nicer.
         */
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}
1008
/*
 * Remove a software breakpoint: verify the BRK instruction is still
 * present at bp->pc, then restore the saved original instruction.
 *
 * Returns 0 on success, -EINVAL if the site no longer contains our
 * BRK, guest memory cannot be accessed, or guest debug is unsupported.
 */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}
1025
1026
1027
1028
1029
1030
1031
1032
/*
 * Handle a guest debug exception exit from KVM.
 *
 * Returns true when the exception was triggered by host-side
 * debugging (our sw/hw break- or watchpoints, or single-step) and
 * should be handled by the gdbstub.  Otherwise the exception is
 * re-injected into the guest and false is returned.
 */
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC (and the rest of the state) is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /* Single-step exits are only expected while the gdbstub
             * has single-stepping enabled; anything else is an error.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        /* BRK hit: is it one of our software breakpoints? */
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        /* Hardware breakpoint we installed? */
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        /* Hardware watchpoint covering the faulting address? */
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest: re-inject it at EL1 via the interrupt machinery,
     * with the syndrome and fault address from the exit.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    cc->do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}
1096