1
2
3
4
5
6
7
8
9
10
11
12#include "qemu/osdep.h"
13#include <sys/ioctl.h>
14#include <sys/ptrace.h>
15
16#include <linux/elf.h>
17#include <linux/kvm.h>
18
19#include "qemu-common.h"
20#include "cpu.h"
21#include "qemu/timer.h"
22#include "qemu/error-report.h"
23#include "qemu/host-utils.h"
24#include "exec/gdbstub.h"
25#include "sysemu/sysemu.h"
26#include "sysemu/kvm.h"
27#include "kvm_arm.h"
28#include "internals.h"
29#include "hw/arm/arm.h"
30
/* Cached result of KVM_CAP_SET_GUEST_DEBUG, probed in kvm_arm_init_debug() */
static bool have_guest_debug;
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * One hardware breakpoint as handed to KVM: the debug breakpoint
 * control (bcr) and value/address (bvr) register contents — these are
 * copied into kvm_guest_debug_arch.dbg_bcr/dbg_bvr by
 * kvm_arm_copy_hw_debug_data().
 */
typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;
52
53
54
55
56
57
/*
 * One hardware watchpoint as handed to KVM: the debug watchpoint
 * control (wcr) and value/address (wvr) register contents, plus the
 * CPUWatchpoint details reported back to the gdbstub when it hits.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;
63
64
/* Host limits reported by KVM_CAP_GUEST_DEBUG_HW_BPS/_WPS */
int max_hw_bps, max_hw_wps;
/* Currently configured break-/watchpoints, in KVM slot order */
GArray *hw_breakpoints, *hw_watchpoints;

/* Number of populated slots and accessors for individual entries */
#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
72
73
74
75
76
77
78
79
80
81static void kvm_arm_init_debug(CPUState *cs)
82{
83 have_guest_debug = kvm_check_extension(cs->kvm_state,
84 KVM_CAP_SET_GUEST_DEBUG);
85
86 max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
87 hw_watchpoints = g_array_sized_new(true, true,
88 sizeof(HWWatchpoint), max_hw_wps);
89
90 max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
91 hw_breakpoints = g_array_sized_new(true, true,
92 sizeof(HWBreakpoint), max_hw_bps);
93 return;
94}
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132static int insert_hw_breakpoint(target_ulong addr)
133{
134 HWBreakpoint brk = {
135 .bcr = 0x1,
136 .bvr = sextract64(addr, 0, 53)
137 };
138
139 if (cur_hw_bps >= max_hw_bps) {
140 return -ENOBUFS;
141 }
142
143 brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);
144 brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);
145
146 g_array_append_val(hw_breakpoints, brk);
147
148 return 0;
149}
150
151
152
153
154
155
156
157
158static int delete_hw_breakpoint(target_ulong pc)
159{
160 int i;
161 for (i = 0; i < hw_breakpoints->len; i++) {
162 HWBreakpoint *brk = get_hw_bp(i);
163 if (brk->bvr == pc) {
164 g_array_remove_index(hw_breakpoints, i);
165 return 0;
166 }
167 }
168 return -ENOENT;
169}
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203static int insert_hw_watchpoint(target_ulong addr,
204 target_ulong len, int type)
205{
206 HWWatchpoint wp = {
207 .wcr = 1,
208 .wvr = addr & (~0x7ULL),
209 .details = { .vaddr = addr, .len = len }
210 };
211
212 if (cur_hw_wps >= max_hw_wps) {
213 return -ENOBUFS;
214 }
215
216
217
218
219
220 wp.wcr = deposit32(wp.wcr, 1, 2, 3);
221
222 switch (type) {
223 case GDB_WATCHPOINT_READ:
224 wp.wcr = deposit32(wp.wcr, 3, 2, 1);
225 wp.details.flags = BP_MEM_READ;
226 break;
227 case GDB_WATCHPOINT_WRITE:
228 wp.wcr = deposit32(wp.wcr, 3, 2, 2);
229 wp.details.flags = BP_MEM_WRITE;
230 break;
231 case GDB_WATCHPOINT_ACCESS:
232 wp.wcr = deposit32(wp.wcr, 3, 2, 3);
233 wp.details.flags = BP_MEM_ACCESS;
234 break;
235 default:
236 g_assert_not_reached();
237 break;
238 }
239 if (len <= 8) {
240
241 int off = addr & 0x7;
242 int bas = (1 << len) - 1;
243
244 wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
245 } else {
246
247 if (is_power_of_2(len)) {
248 int bits = ctz64(len);
249
250 wp.wvr &= ~((1 << bits) - 1);
251 wp.wcr = deposit32(wp.wcr, 24, 4, bits);
252 wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
253 } else {
254 return -ENOBUFS;
255 }
256 }
257
258 g_array_append_val(hw_watchpoints, wp);
259 return 0;
260}
261
262
/*
 * Does @addr fall inside the region covered by hardware watchpoint @i?
 * Used both to find the watchpoint a debug exit reports and to pick a
 * watchpoint to delete.
 */
static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);   /* byte-select bits [12:5] */
    int mask = extract32(wp->wcr, 24, 4); /* power-of-2 range, bits [27:24] */

    if (mask) {
        /* A non-zero mask watches 2^mask bytes from the aligned base
         * (see insert_hw_watchpoint). */
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* The byte-select field offsets the watched bytes against the
         * 8-byte aligned base address. */
        addr_bottom = addr_bottom + ctz32(bas);
        /*
         * NOTE(review): clo32() counts *leading* ones, which is 0 for
         * any 8-bit bas value, making addr_top == addr_bottom here so
         * only the first watched byte matches.  A count of the
         * contiguous set bits looks intended — verify before changing.
         */
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}
285
286
287
288
289
290
291
292
293static int delete_hw_watchpoint(target_ulong addr,
294 target_ulong len, int type)
295{
296 int i;
297 for (i = 0; i < cur_hw_wps; i++) {
298 if (check_watchpoint_in_range(i, addr)) {
299 g_array_remove_index(hw_watchpoints, i);
300 return 0;
301 }
302 }
303 return -ENOENT;
304}
305
306
307int kvm_arch_insert_hw_breakpoint(target_ulong addr,
308 target_ulong len, int type)
309{
310 switch (type) {
311 case GDB_BREAKPOINT_HW:
312 return insert_hw_breakpoint(addr);
313 break;
314 case GDB_WATCHPOINT_READ:
315 case GDB_WATCHPOINT_WRITE:
316 case GDB_WATCHPOINT_ACCESS:
317 return insert_hw_watchpoint(addr, len, type);
318 default:
319 return -ENOSYS;
320 }
321}
322
323int kvm_arch_remove_hw_breakpoint(target_ulong addr,
324 target_ulong len, int type)
325{
326 switch (type) {
327 case GDB_BREAKPOINT_HW:
328 return delete_hw_breakpoint(addr);
329 break;
330 case GDB_WATCHPOINT_READ:
331 case GDB_WATCHPOINT_WRITE:
332 case GDB_WATCHPOINT_ACCESS:
333 return delete_hw_watchpoint(addr, len, type);
334 default:
335 return -ENOSYS;
336 }
337}
338
339
340void kvm_arch_remove_all_hw_breakpoints(void)
341{
342 if (cur_hw_wps > 0) {
343 g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
344 }
345 if (cur_hw_bps > 0) {
346 g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
347 }
348}
349
350void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
351{
352 int i;
353 memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
354
355 for (i = 0; i < max_hw_wps; i++) {
356 HWWatchpoint *wp = get_hw_wp(i);
357 ptr->dbg_wcr[i] = wp->wcr;
358 ptr->dbg_wvr[i] = wp->wvr;
359 }
360 for (i = 0; i < max_hw_bps; i++) {
361 HWBreakpoint *bp = get_hw_bp(i);
362 ptr->dbg_bcr[i] = bp->bcr;
363 ptr->dbg_bvr[i] = bp->bvr;
364 }
365}
366
367bool kvm_arm_hw_debug_active(CPUState *cs)
368{
369 return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
370}
371
372static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
373{
374 int i;
375
376 for (i = 0; i < cur_hw_bps; i++) {
377 HWBreakpoint *bp = get_hw_bp(i);
378 if (bp->bvr == pc) {
379 return true;
380 }
381 }
382 return false;
383}
384
385static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
386{
387 int i;
388
389 for (i = 0; i < cur_hw_wps; i++) {
390 if (check_watchpoint_in_range(i, addr)) {
391 return &get_hw_wp(i)->details;
392 }
393 }
394 return NULL;
395}
396
397static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
398{
399 int err;
400
401 err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
402 if (err != 0) {
403 error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
404 return false;
405 }
406
407 err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
408 if (err != 0) {
409 error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
410 return false;
411 }
412
413 return true;
414}
415
416void kvm_arm_pmu_init(CPUState *cs)
417{
418 struct kvm_device_attr attr = {
419 .group = KVM_ARM_VCPU_PMU_V3_CTRL,
420 .attr = KVM_ARM_VCPU_PMU_V3_INIT,
421 };
422
423 if (!ARM_CPU(cs)->has_pmu) {
424 return;
425 }
426 if (!kvm_arm_pmu_set_attr(cs, &attr)) {
427 error_report("failed to init PMU");
428 abort();
429 }
430}
431
432void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
433{
434 struct kvm_device_attr attr = {
435 .group = KVM_ARM_VCPU_PMU_V3_CTRL,
436 .addr = (intptr_t)&irq,
437 .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
438 };
439
440 if (!ARM_CPU(cs)->has_pmu) {
441 return;
442 }
443 if (!kvm_arm_pmu_set_attr(cs, &attr)) {
444 error_report("failed to set irq for PMU");
445 abort();
446 }
447}
448
/* Set bit @feature in the 64-bit feature word at @features. */
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= UINT64_C(1) << feature;
}
453
/* Clear bit @feature in the 64-bit feature word at @features. */
static inline void unset_feature(uint64_t *features, int feature)
{
    *features &= ~(UINT64_C(1) << feature);
}
458
459static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
460{
461 uint64_t ret;
462 struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
463 int err;
464
465 assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
466 err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
467 if (err < 0) {
468 return -1;
469 }
470 *pret = ret;
471 return 0;
472}
473
474static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
475{
476 struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };
477
478 assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
479 return ioctl(fd, KVM_GET_ONE_REG, &idreg);
480}
481
bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    uint64_t features = 0;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /* If reading ID_AA64PFR0 fails, assume an older kernel that
         * does not expose the interesting AArch64 ID registers and
         * fall back to a hand-constructed value; none of the other ID
         * registers are read in that case.
         *
         * NOTE(review): 0x00000011 presumably encodes "EL0 and EL1
         * implemented" in the low ID_AA64PFR0_EL1 fields — confirm
         * against the ARM ARM field layout before relying on it.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011;
        err = 0;
    } else {
        /* The first read succeeded, so the kernel exposes the full ID
         * register block: pull in the remaining AArch64 ID registers.
         */
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));

        /* The AArch32 ID registers are read through their AArch64
         * encodings (op0=3, op1=0, crn=0, crm=2/3).
         *
         * NOTE(review): if the host lacks AArch32 support these reads
         * may return UNKNOWN/zero values — verify how the kernel
         * exposes them in that case before trusting the results.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* We can assume any KVM-supporting CPU is at least a v8 with
     * VFPv4+Neon and a PMU; these flags are asserted unconditionally
     * rather than derived from the ID registers read above.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);
    set_feature(&features, ARM_FEATURE_PMU);

    ahcf->features = features;

    return true;
}
596
/* op0, op1, crn, crm, op2 encoding of MPIDR_EL1 for ARM64_SYS_REG() */
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
598
/*
 * Per-vcpu KVM initialisation: choose the init features, issue
 * KVM_ARM_VCPU_INIT, read back the MPIDR and set up debug, SError
 * injection and the cpreg list.  Returns 0 or a negative errno.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* This backend only supports 64-bit CPU models. */
    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        /* 32-bit guest on a 64-bit host: ask for an AArch32 EL1 */
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        /* Host kernel cannot give the guest a PMU */
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        unset_feature(&env->features, ARM_FEATURE_PMU);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * KVM has its own idea about MPIDR assignment, so we override our
     * defaults with what we get from the kernel.  Only the affinity
     * bits are kept.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}
657
658bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
659{
660
661
662
663
664 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
665 case KVM_REG_ARM_CORE:
666 return false;
667 default:
668 return true;
669 }
670}
671
/* Pairs a cpreg index with the kvm_put level it must be written at. */
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed
 * to be of the level KVM_PUT_RUNTIME_STATE (see kvm_arm_cpreg_level
 * below).  Registers that should be written less often must be added
 * here with KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
685
686int kvm_arm_cpreg_level(uint64_t regidx)
687{
688 int i;
689
690 for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
691 const CPRegStateLevel *l = &non_runtime_cpregs[i];
692 if (l->regidx == regidx) {
693 return l->level;
694 }
695 }
696
697 return KVM_PUT_RUNTIME_STATE;
698}
699
/* Build KVM_REG_ARM64 ids from an offset into struct kvm_regs: 64-bit
 * core registers, 128-bit FP/SIMD vector registers, and 32-bit FP
 * control registers respectively.
 */
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
708
709int kvm_arch_put_registers(CPUState *cs, int level)
710{
711 struct kvm_one_reg reg;
712 uint32_t fpr;
713 uint64_t val;
714 int i;
715 int ret;
716 unsigned int el;
717
718 ARMCPU *cpu = ARM_CPU(cs);
719 CPUARMState *env = &cpu->env;
720
721
722
723
724 if (!is_a64(env)) {
725 aarch64_sync_32_to_64(env);
726 }
727
728 for (i = 0; i < 31; i++) {
729 reg.id = AARCH64_CORE_REG(regs.regs[i]);
730 reg.addr = (uintptr_t) &env->xregs[i];
731 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
732 if (ret) {
733 return ret;
734 }
735 }
736
737
738
739
740 aarch64_save_sp(env, 1);
741
742 reg.id = AARCH64_CORE_REG(regs.sp);
743 reg.addr = (uintptr_t) &env->sp_el[0];
744 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
745 if (ret) {
746 return ret;
747 }
748
749 reg.id = AARCH64_CORE_REG(sp_el1);
750 reg.addr = (uintptr_t) &env->sp_el[1];
751 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
752 if (ret) {
753 return ret;
754 }
755
756
757 if (is_a64(env)) {
758 val = pstate_read(env);
759 } else {
760 val = cpsr_read(env);
761 }
762 reg.id = AARCH64_CORE_REG(regs.pstate);
763 reg.addr = (uintptr_t) &val;
764 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
765 if (ret) {
766 return ret;
767 }
768
769 reg.id = AARCH64_CORE_REG(regs.pc);
770 reg.addr = (uintptr_t) &env->pc;
771 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
772 if (ret) {
773 return ret;
774 }
775
776 reg.id = AARCH64_CORE_REG(elr_el1);
777 reg.addr = (uintptr_t) &env->elr_el[1];
778 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
779 if (ret) {
780 return ret;
781 }
782
783
784
785
786
787
788
789 el = arm_current_el(env);
790 if (el > 0 && !is_a64(env)) {
791 i = bank_number(env->uncached_cpsr & CPSR_M);
792 env->banked_spsr[i] = env->spsr;
793 }
794
795
796 for (i = 0; i < KVM_NR_SPSR; i++) {
797 reg.id = AARCH64_CORE_REG(spsr[i]);
798 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
799 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
800 if (ret) {
801 return ret;
802 }
803 }
804
805
806 for (i = 0; i < 32; i++) {
807 uint64_t *q = aa64_vfp_qreg(env, i);
808#ifdef HOST_WORDS_BIGENDIAN
809 uint64_t fp_val[2] = { q[1], q[0] };
810 reg.addr = (uintptr_t)fp_val;
811#else
812 reg.addr = (uintptr_t)q;
813#endif
814 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
815 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
816 if (ret) {
817 return ret;
818 }
819 }
820
821 reg.addr = (uintptr_t)(&fpr);
822 fpr = vfp_get_fpsr(env);
823 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
824 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
825 if (ret) {
826 return ret;
827 }
828
829 fpr = vfp_get_fpcr(env);
830 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
831 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
832 if (ret) {
833 return ret;
834 }
835
836 ret = kvm_put_vcpu_events(cpu);
837 if (ret) {
838 return ret;
839 }
840
841 if (!write_list_to_kvmstate(cpu, level)) {
842 return EINVAL;
843 }
844
845 kvm_arm_sync_mpstate_to_kvm(cpu);
846
847 return ret;
848}
849
850int kvm_arch_get_registers(CPUState *cs)
851{
852 struct kvm_one_reg reg;
853 uint64_t val;
854 uint32_t fpr;
855 unsigned int el;
856 int i;
857 int ret;
858
859 ARMCPU *cpu = ARM_CPU(cs);
860 CPUARMState *env = &cpu->env;
861
862 for (i = 0; i < 31; i++) {
863 reg.id = AARCH64_CORE_REG(regs.regs[i]);
864 reg.addr = (uintptr_t) &env->xregs[i];
865 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
866 if (ret) {
867 return ret;
868 }
869 }
870
871 reg.id = AARCH64_CORE_REG(regs.sp);
872 reg.addr = (uintptr_t) &env->sp_el[0];
873 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
874 if (ret) {
875 return ret;
876 }
877
878 reg.id = AARCH64_CORE_REG(sp_el1);
879 reg.addr = (uintptr_t) &env->sp_el[1];
880 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
881 if (ret) {
882 return ret;
883 }
884
885 reg.id = AARCH64_CORE_REG(regs.pstate);
886 reg.addr = (uintptr_t) &val;
887 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
888 if (ret) {
889 return ret;
890 }
891
892 env->aarch64 = ((val & PSTATE_nRW) == 0);
893 if (is_a64(env)) {
894 pstate_write(env, val);
895 } else {
896 cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
897 }
898
899
900
901
902 aarch64_restore_sp(env, 1);
903
904 reg.id = AARCH64_CORE_REG(regs.pc);
905 reg.addr = (uintptr_t) &env->pc;
906 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
907 if (ret) {
908 return ret;
909 }
910
911
912
913
914
915
916 if (!is_a64(env)) {
917 aarch64_sync_64_to_32(env);
918 }
919
920 reg.id = AARCH64_CORE_REG(elr_el1);
921 reg.addr = (uintptr_t) &env->elr_el[1];
922 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
923 if (ret) {
924 return ret;
925 }
926
927
928
929
930
931 for (i = 0; i < KVM_NR_SPSR; i++) {
932 reg.id = AARCH64_CORE_REG(spsr[i]);
933 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
934 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
935 if (ret) {
936 return ret;
937 }
938 }
939
940 el = arm_current_el(env);
941 if (el > 0 && !is_a64(env)) {
942 i = bank_number(env->uncached_cpsr & CPSR_M);
943 env->spsr = env->banked_spsr[i];
944 }
945
946
947 for (i = 0; i < 32; i++) {
948 uint64_t *q = aa64_vfp_qreg(env, i);
949 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
950 reg.addr = (uintptr_t)q;
951 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
952 if (ret) {
953 return ret;
954 } else {
955#ifdef HOST_WORDS_BIGENDIAN
956 uint64_t t;
957 t = q[0], q[0] = q[1], q[1] = t;
958#endif
959 }
960 }
961
962 reg.addr = (uintptr_t)(&fpr);
963 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
964 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
965 if (ret) {
966 return ret;
967 }
968 vfp_set_fpsr(env, fpr);
969
970 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
971 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
972 if (ret) {
973 return ret;
974 }
975 vfp_set_fpcr(env, fpr);
976
977 ret = kvm_get_vcpu_events(cpu);
978 if (ret) {
979 return ret;
980 }
981
982 if (!write_kvmstate_to_list(cpu)) {
983 return EINVAL;
984 }
985
986
987
988 write_list_to_cpustate(cpu);
989
990 kvm_arm_sync_mpstate_to_qemu(cpu);
991
992
993 return ret;
994}
995
996
/* Instruction written over the guest's own at a software breakpoint;
 * 0xd4200000 presumably encodes AArch64 BRK #0 — see the ARM ARM. */
static const uint32_t brk_insn = 0xd4200000;
998
999int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1000{
1001 if (have_guest_debug) {
1002 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
1003 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
1004 return -EINVAL;
1005 }
1006 return 0;
1007 } else {
1008 error_report("guest debug not supported on this kernel");
1009 return -EINVAL;
1010 }
1011}
1012
1013int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1014{
1015 static uint32_t brk;
1016
1017 if (have_guest_debug) {
1018 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
1019 brk != brk_insn ||
1020 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
1021 return -EINVAL;
1022 }
1023 return 0;
1024 } else {
1025 error_report("guest debug not supported on this kernel");
1026 return -EINVAL;
1027 }
1028}
1029
1030
1031
1032
1033
1034
1035
1036
/*
 * Handle a KVM debug exit.  The kernel passes the full exception
 * syndrome register value in debug_exit->hsr for QEMU to decode.
 *
 * Returns true if the exit was consumed by the debugger (gdbstub
 * should take over); false means the exception belongs to the guest
 * and has been re-injected via the TCG interrupt path.
 */
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised before we inspect env->pc below */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /* The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        /* Software breakpoint: ours only if we planted one at this PC */
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        /* Hardware breakpoint: ours only if we configured one at this PC */
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            /* Record the hit so the gdbstub can report it */
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest.  Re-use the existing TCG interrupt code to set
     * everything up properly and deliver it at EL1.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    cc->do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}
1100