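/*
 * AArch64-specific KVM support for QEMU: hardware-assisted debug
 * (breakpoints and watchpoints), PMU and SVE configuration, and
 * synchronisation of CPU state between QEMU and the KVM vCPU.
 */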
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "hw/boards.h"
#include "internals.h"

static bool have_guest_debug;
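/*
 * GDB treats hardware break-/watchpoints as a single global pool of
 * registers rather than per-vCPU state, so QEMU keeps one copy of the
 * debug register values (the arrays below) and copies it into every
 * vCPU via kvm_arm_copy_hw_debug_data().
 *
 * HWBreakpoint mirrors one DBGBCR/DBGBVR register pair.
 */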
typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;
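/*
 * HWWatchpoint mirrors one DBGWCR/DBGWVR register pair, together with
 * the CPUWatchpoint details handed back to the gdbstub when the
 * watchpoint fires.
 */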
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

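/*
 * Probe the host kernel's guest-debug capabilities and size the
 * breakpoint/watchpoint arrays from the advertised register counts.
 */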
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
    return;
}
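/*
 * insert_hw_breakpoint() - build a DBGBCR/DBGBVR pair for a PC breakpoint.
 *
 * The value register holds the sign-extended breakpoint address; the
 * control register enables the breakpoint (E, bit 0), matches EL1 and
 * EL0 (PMC, bits 1-2 = 0b11) and selects the whole instruction word
 * (BAS, bits 5-8 = 0xf). Returns -ENOBUFS once all host breakpoint
 * slots are in use.
 */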
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}
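/*
 * delete_hw_breakpoint() - drop the breakpoint whose value register
 * matches the given PC, or return -ENOENT if no slot matches.
 */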
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}
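/*
 * insert_hw_watchpoint() - build a DBGWCR/DBGWVR pair for a data
 * watchpoint covering [addr, addr + len).
 *
 * The control register is assembled as: enable (E, bit 0), EL1/EL0
 * match (PAC, bits 1-2), load/store selection from the GDB type (LSC,
 * bits 3-4), and either a BAS byte mask (bits 5-12) for regions of up
 * to 8 bytes or a power-of-two MASK for larger naturally aligned
 * regions. Other lengths are rejected with -ENOBUFS.
 */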
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1,
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
        break;
    }
    if (len <= 8) {
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}
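/*
 * check_watchpoint_in_range() - return true when addr falls inside the
 * region covered by watchpoint slot i, whether that region is described
 * by the power-of-two MASK field or by the BAS byte mask.
 */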
static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}
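/*
 * delete_hw_watchpoint() - remove the first watchpoint slot whose range
 * covers addr; len and type are not used for matching.
 */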
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}
306
307
308int kvm_arch_insert_hw_breakpoint(target_ulong addr,
309 target_ulong len, int type)
310{
311 switch (type) {
312 case GDB_BREAKPOINT_HW:
313 return insert_hw_breakpoint(addr);
314 break;
315 case GDB_WATCHPOINT_READ:
316 case GDB_WATCHPOINT_WRITE:
317 case GDB_WATCHPOINT_ACCESS:
318 return insert_hw_watchpoint(addr, len, type);
319 default:
320 return -ENOSYS;
321 }
322}
323
324int kvm_arch_remove_hw_breakpoint(target_ulong addr,
325 target_ulong len, int type)
326{
327 switch (type) {
328 case GDB_BREAKPOINT_HW:
329 return delete_hw_breakpoint(addr);
330 break;
331 case GDB_WATCHPOINT_READ:
332 case GDB_WATCHPOINT_WRITE:
333 case GDB_WATCHPOINT_ACCESS:
334 return delete_hw_watchpoint(addr, len, type);
335 default:
336 return -ENOSYS;
337 }
338}
339
340
341void kvm_arch_remove_all_hw_breakpoints(void)
342{
343 if (cur_hw_wps > 0) {
344 g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
345 }
346 if (cur_hw_bps > 0) {
347 g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
348 }
349}
350
351void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
352{
353 int i;
354 memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
355
356 for (i = 0; i < max_hw_wps; i++) {
357 HWWatchpoint *wp = get_hw_wp(i);
358 ptr->dbg_wcr[i] = wp->wcr;
359 ptr->dbg_wvr[i] = wp->wvr;
360 }
361 for (i = 0; i < max_hw_bps; i++) {
362 HWBreakpoint *bp = get_hw_bp(i);
363 ptr->dbg_bcr[i] = bp->bcr;
364 ptr->dbg_bvr[i] = bp->bvr;
365 }
366}
367
368bool kvm_arm_hw_debug_active(CPUState *cs)
369{
370 return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
371}
372
373static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
374{
375 int i;
376
377 for (i = 0; i < cur_hw_bps; i++) {
378 HWBreakpoint *bp = get_hw_bp(i);
379 if (bp->bvr == pc) {
380 return true;
381 }
382 }
383 return false;
384}
385
386static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
387{
388 int i;
389
390 for (i = 0; i < cur_hw_wps; i++) {
391 if (check_watchpoint_in_range(i, addr)) {
392 return &get_hw_wp(i)->details;
393 }
394 }
395 return NULL;
396}
397
398static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
399{
400 int err;
401
402 err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
403 if (err != 0) {
404 error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
405 return false;
406 }
407
408 err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
409 if (err != 0) {
410 error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
411 return false;
412 }
413
414 return true;
415}
416
417void kvm_arm_pmu_init(CPUState *cs)
418{
419 struct kvm_device_attr attr = {
420 .group = KVM_ARM_VCPU_PMU_V3_CTRL,
421 .attr = KVM_ARM_VCPU_PMU_V3_INIT,
422 };
423
424 if (!ARM_CPU(cs)->has_pmu) {
425 return;
426 }
427 if (!kvm_arm_pmu_set_attr(cs, &attr)) {
428 error_report("failed to init PMU");
429 abort();
430 }
431}
432
433void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
434{
435 struct kvm_device_attr attr = {
436 .group = KVM_ARM_VCPU_PMU_V3_CTRL,
437 .addr = (intptr_t)&irq,
438 .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
439 };
440
441 if (!ARM_CPU(cs)->has_pmu) {
442 return;
443 }
444 if (!kvm_arm_pmu_set_attr(cs, &attr)) {
445 error_report("failed to set irq for PMU");
446 abort();
447 }
448}
449
450static inline void set_feature(uint64_t *features, int feature)
451{
452 *features |= 1ULL << feature;
453}
454
455static inline void unset_feature(uint64_t *features, int feature)
456{
457 *features &= ~(1ULL << feature);
458}
459
460static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
461{
462 uint64_t ret;
463 struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
464 int err;
465
466 assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
467 err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
468 if (err < 0) {
469 return -1;
470 }
471 *pret = ret;
472 return 0;
473}
474
475static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
476{
477 struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };
478
479 assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
480 return ioctl(fd, KVM_GET_ONE_REG, &idreg);
481}
482
483bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
484{
490 int fdarray[3];
491 bool sve_supported;
492 uint64_t features = 0;
493 uint64_t t;
494 int err;
501 static const uint32_t cpus_to_try[] = {
502 KVM_ARM_TARGET_AEM_V8,
503 KVM_ARM_TARGET_FOUNDATION_V8,
504 KVM_ARM_TARGET_CORTEX_A57,
505 QEMU_KVM_ARM_TARGET_NONE
506 };
511 struct kvm_vcpu_init init = { .target = -1, };
512
513 if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
514 return false;
515 }
516
517 ahcf->target = init.target;
518 ahcf->dtb_compatible = "arm,arm-v8";
519
520 err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
521 ARM64_SYS_REG(3, 0, 0, 4, 0));
522 if (unlikely(err < 0)) {
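        /*
         * A failure here most likely means an older kernel that does not
         * expose the ID registers to userspace; fall back to a minimal
         * baseline value (EL0/EL1 present, AArch64 only) rather than
         * failing the whole probe.
         */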
539 ahcf->isar.id_aa64pfr0 = 0x00000011;
540 err = 0;
541 } else {
542 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
543 ARM64_SYS_REG(3, 0, 0, 4, 1));
544 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
545 ARM64_SYS_REG(3, 0, 0, 6, 0));
546 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
547 ARM64_SYS_REG(3, 0, 0, 6, 1));
548 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
549 ARM64_SYS_REG(3, 0, 0, 7, 0));
550 err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
551 ARM64_SYS_REG(3, 0, 0, 7, 1));
560 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
561 ARM64_SYS_REG(3, 0, 0, 2, 0));
562 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
563 ARM64_SYS_REG(3, 0, 0, 2, 1));
564 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
565 ARM64_SYS_REG(3, 0, 0, 2, 2));
566 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
567 ARM64_SYS_REG(3, 0, 0, 2, 3));
568 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
569 ARM64_SYS_REG(3, 0, 0, 2, 4));
570 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
571 ARM64_SYS_REG(3, 0, 0, 2, 5));
572 err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
573 ARM64_SYS_REG(3, 0, 0, 2, 7));
574
575 err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
576 ARM64_SYS_REG(3, 0, 0, 3, 0));
577 err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
578 ARM64_SYS_REG(3, 0, 0, 3, 1));
579 err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
580 ARM64_SYS_REG(3, 0, 0, 3, 2));
581 }
582
583 sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
584
585 kvm_arm_destroy_scratch_host_vcpu(fdarray);
586
587 if (err < 0) {
588 return false;
589 }
590
591
592 if (sve_supported) {
593 t = ahcf->isar.id_aa64pfr0;
594 t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
595 ahcf->isar.id_aa64pfr0 = t;
596 }
603 set_feature(&features, ARM_FEATURE_V8);
604 set_feature(&features, ARM_FEATURE_VFP4);
605 set_feature(&features, ARM_FEATURE_NEON);
606 set_feature(&features, ARM_FEATURE_AARCH64);
607 set_feature(&features, ARM_FEATURE_PMU);
608
609 ahcf->features = features;
610
611 return true;
612}
613
614bool kvm_arm_aarch32_supported(CPUState *cpu)
615{
616 KVMState *s = KVM_STATE(current_machine->accelerator);
617
618 return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
619}
620
621bool kvm_arm_sve_supported(CPUState *cpu)
622{
623 KVMState *s = KVM_STATE(current_machine->accelerator);
624
625 return kvm_check_extension(s, KVM_CAP_ARM_SVE);
626}
627
628QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
629
630void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
631{
632
633 static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
634 static bool probed;
635 uint32_t vq = 0;
636 int i, j;
637
638 bitmap_clear(map, 0, ARM_MAX_VQ);
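    /*
     * The set of supported vector lengths only needs to be probed once:
     * create a scratch vCPU with SVE enabled and read back
     * KVM_REG_ARM64_SVE_VLS.
     */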
645 if (!probed) {
646 struct kvm_vcpu_init init = {
647 .target = -1,
648 .features[0] = (1 << KVM_ARM_VCPU_SVE),
649 };
650 struct kvm_one_reg reg = {
651 .id = KVM_REG_ARM64_SVE_VLS,
652 .addr = (uint64_t)&vls[0],
653 };
654 int fdarray[3], ret;
655
656 probed = true;
657
658 if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
659 error_report("failed to create scratch VCPU with SVE enabled");
660 abort();
661 }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
663 kvm_arm_destroy_scratch_host_vcpu(fdarray);
664 if (ret) {
665 error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
666 strerror(errno));
667 abort();
668 }
669
670 for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
671 if (vls[i]) {
672 vq = 64 - clz64(vls[i]) + i * 64;
673 break;
674 }
675 }
676 if (vq > ARM_MAX_VQ) {
677 warn_report("KVM supports vector lengths larger than "
678 "QEMU can enable");
679 }
680 }
681
682 for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
683 if (!vls[i]) {
684 continue;
685 }
686 for (j = 1; j <= 64; ++j) {
687 vq = j + i * 64;
688 if (vq > ARM_MAX_VQ) {
689 return;
690 }
691 if (vls[i] & (1UL << (j - 1))) {
692 set_bit(vq - 1, map);
693 }
694 }
695 }
696}
697
698static int kvm_arm_sve_set_vls(CPUState *cs)
699{
700 uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
701 struct kvm_one_reg reg = {
702 .id = KVM_REG_ARM64_SVE_VLS,
703 .addr = (uint64_t)&vls[0],
704 };
705 ARMCPU *cpu = ARM_CPU(cs);
706 uint32_t vq;
707 int i, j;
708
709 assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
710
711 for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
712 if (test_bit(vq - 1, cpu->sve_vq_map)) {
713 i = (vq - 1) / 64;
714 j = (vq - 1) % 64;
715 vls[i] |= 1UL << j;
716 }
717 }
718
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
720}
721
722#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
723
724int kvm_arch_init_vcpu(CPUState *cs)
725{
726 int ret;
727 uint64_t mpidr;
728 ARMCPU *cpu = ARM_CPU(cs);
729 CPUARMState *env = &cpu->env;
730
731 if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
732 !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
733 error_report("KVM is not supported for this guest CPU type");
734 return -EINVAL;
735 }
736
737
738 memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
739 if (cpu->start_powered_off) {
740 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
741 }
742 if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
743 cpu->psci_version = 2;
744 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
745 }
746 if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
747 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
748 }
749 if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
750 cpu->has_pmu = false;
751 }
752 if (cpu->has_pmu) {
753 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
754 } else {
755 unset_feature(&env->features, ARM_FEATURE_PMU);
756 }
757 if (cpu_isar_feature(aa64_sve, cpu)) {
758 assert(kvm_arm_sve_supported(cs));
759 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
760 }
761
762
763 ret = kvm_arm_vcpu_init(cs);
764 if (ret) {
765 return ret;
766 }
767
768 if (cpu_isar_feature(aa64_sve, cpu)) {
769 ret = kvm_arm_sve_set_vls(cs);
770 if (ret) {
771 return ret;
772 }
773 ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
774 if (ret) {
775 return ret;
776 }
777 }
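    /*
     * KVM chooses the vCPU's MPIDR, so read MPIDR_EL1 back from the
     * kernel and derive QEMU's mp_affinity from it rather than inventing
     * a value of our own.
     */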
784 ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
785 if (ret) {
786 return ret;
787 }
788 cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
789
790 kvm_arm_init_debug(cs);
791
792
793 kvm_arm_init_serror_injection(cs);
794
795 return kvm_arm_init_cpreg_list(cpu);
796}
797
798int kvm_arch_destroy_vcpu(CPUState *cs)
799{
800 return 0;
801}
802
803bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
804{
809 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
810 case KVM_REG_ARM_CORE:
811 case KVM_REG_ARM64_SVE:
812 return false;
813 default:
814 return true;
815 }
816}
817
818typedef struct CPRegStateLevel {
819 uint64_t regidx;
820 int level;
821} CPRegStateLevel;
828static const CPRegStateLevel non_runtime_cpregs[] = {
829 { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
830};
831
832int kvm_arm_cpreg_level(uint64_t regidx)
833{
834 int i;
835
836 for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
837 const CPRegStateLevel *l = &non_runtime_cpregs[i];
838 if (l->regidx == regidx) {
839 return l->level;
840 }
841 }
842
843 return KVM_PUT_RUNTIME_STATE;
844}
845
846#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
847 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
848
849#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
850 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
851
852#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
853 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
854
855static int kvm_arch_put_fpsimd(CPUState *cs)
856{
857 CPUARMState *env = &ARM_CPU(cs)->env;
858 struct kvm_one_reg reg;
859 int i, ret;
860
861 for (i = 0; i < 32; i++) {
862 uint64_t *q = aa64_vfp_qreg(env, i);
863#ifdef HOST_WORDS_BIGENDIAN
864 uint64_t fp_val[2] = { q[1], q[0] };
865 reg.addr = (uintptr_t)fp_val;
866#else
867 reg.addr = (uintptr_t)q;
868#endif
869 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
871 if (ret) {
872 return ret;
873 }
874 }
875
876 return 0;
877}
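/*
 * sve_bswap64() - byte-swap a block of 64-bit SVE register data on
 * big-endian hosts so it matches the layout KVM expects; on little-endian
 * hosts the data is already in that layout and the source pointer is
 * returned unchanged.
 */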
888static uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
889{
890#ifdef HOST_WORDS_BIGENDIAN
891 int i;
892
893 for (i = 0; i < nr; ++i) {
894 dst[i] = bswap64(src[i]);
895 }
896
897 return dst;
898#else
899 return src;
900#endif
901}
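/*
 * kvm_arch_put_sve() - copy the SVE Z, P and FFR registers from
 * CPUARMState into KVM. Only slice 0 of each register is transferred,
 * which is sufficient for vector lengths up to 2048 bits.
 */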
909static int kvm_arch_put_sve(CPUState *cs)
910{
911 ARMCPU *cpu = ARM_CPU(cs);
912 CPUARMState *env = &cpu->env;
913 uint64_t tmp[ARM_MAX_VQ * 2];
914 uint64_t *r;
915 struct kvm_one_reg reg;
916 int n, ret;
917
918 for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
919 r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
920 reg.addr = (uintptr_t)r;
921 reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
923 if (ret) {
924 return ret;
925 }
926 }
927
928 for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
929 r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
930 DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
931 reg.addr = (uintptr_t)r;
932 reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
934 if (ret) {
935 return ret;
936 }
937 }
938
939 r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
940 DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
941 reg.addr = (uintptr_t)r;
942 reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
944 if (ret) {
945 return ret;
946 }
947
948 return 0;
949}
950
951int kvm_arch_put_registers(CPUState *cs, int level)
952{
953 struct kvm_one_reg reg;
954 uint64_t val;
955 uint32_t fpr;
956 int i, ret;
957 unsigned int el;
958
959 ARMCPU *cpu = ARM_CPU(cs);
960 CPUARMState *env = &cpu->env;
965 if (!is_a64(env)) {
966 aarch64_sync_32_to_64(env);
967 }
968
969 for (i = 0; i < 31; i++) {
970 reg.id = AARCH64_CORE_REG(regs.regs[i]);
971 reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
973 if (ret) {
974 return ret;
975 }
976 }
981 aarch64_save_sp(env, 1);
982
983 reg.id = AARCH64_CORE_REG(regs.sp);
984 reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
986 if (ret) {
987 return ret;
988 }
989
990 reg.id = AARCH64_CORE_REG(sp_el1);
991 reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
993 if (ret) {
994 return ret;
995 }
996
997
998 if (is_a64(env)) {
999 val = pstate_read(env);
1000 } else {
1001 val = cpsr_read(env);
1002 }
1003 reg.id = AARCH64_CORE_REG(regs.pstate);
1004 reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1006 if (ret) {
1007 return ret;
1008 }
1009
1010 reg.id = AARCH64_CORE_REG(regs.pc);
1011 reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1013 if (ret) {
1014 return ret;
1015 }
1016
1017 reg.id = AARCH64_CORE_REG(elr_el1);
1018 reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1020 if (ret) {
1021 return ret;
1022 }
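    /*
     * If the guest is in AArch32 mode the active SPSR lives in env->spsr;
     * copy it into the banked_spsr[] slot for the current mode before the
     * SPSR bank is written to KVM.
     */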
1030 el = arm_current_el(env);
1031 if (el > 0 && !is_a64(env)) {
1032 i = bank_number(env->uncached_cpsr & CPSR_M);
1033 env->banked_spsr[i] = env->spsr;
1034 }
1035
1036
1037 for (i = 0; i < KVM_NR_SPSR; i++) {
1038 reg.id = AARCH64_CORE_REG(spsr[i]);
1039 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1041 if (ret) {
1042 return ret;
1043 }
1044 }
1045
1046 if (cpu_isar_feature(aa64_sve, cpu)) {
1047 ret = kvm_arch_put_sve(cs);
1048 } else {
1049 ret = kvm_arch_put_fpsimd(cs);
1050 }
1051 if (ret) {
1052 return ret;
1053 }
1054
1055 reg.addr = (uintptr_t)(&fpr);
1056 fpr = vfp_get_fpsr(env);
1057 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1059 if (ret) {
1060 return ret;
1061 }
1062
1063 reg.addr = (uintptr_t)(&fpr);
1064 fpr = vfp_get_fpcr(env);
1065 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1067 if (ret) {
1068 return ret;
1069 }
1070
1071 ret = kvm_put_vcpu_events(cpu);
1072 if (ret) {
1073 return ret;
1074 }
1075
1076 write_cpustate_to_list(cpu, true);
1077
1078 if (!write_list_to_kvmstate(cpu, level)) {
1079 return -EINVAL;
1080 }
1081
1082 kvm_arm_sync_mpstate_to_kvm(cpu);
1083
1084 return ret;
1085}
1086
1087static int kvm_arch_get_fpsimd(CPUState *cs)
1088{
1089 CPUARMState *env = &ARM_CPU(cs)->env;
1090 struct kvm_one_reg reg;
1091 int i, ret;
1092
1093 for (i = 0; i < 32; i++) {
1094 uint64_t *q = aa64_vfp_qreg(env, i);
1095 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
1096 reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1098 if (ret) {
1099 return ret;
1100 } else {
1101#ifdef HOST_WORDS_BIGENDIAN
1102 uint64_t t;
1103 t = q[0], q[0] = q[1], q[1] = t;
1104#endif
1105 }
1106 }
1107
1108 return 0;
1109}
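/*
 * kvm_arch_get_sve() - fetch the SVE Z, P and FFR registers from KVM and
 * byte-swap them in place into the host representation where needed.
 */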
1117static int kvm_arch_get_sve(CPUState *cs)
1118{
1119 ARMCPU *cpu = ARM_CPU(cs);
1120 CPUARMState *env = &cpu->env;
1121 struct kvm_one_reg reg;
1122 uint64_t *r;
1123 int n, ret;
1124
1125 for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
1126 r = &env->vfp.zregs[n].d[0];
1127 reg.addr = (uintptr_t)r;
1128 reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1130 if (ret) {
1131 return ret;
1132 }
1133 sve_bswap64(r, r, cpu->sve_max_vq * 2);
1134 }
1135
1136 for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
1137 r = &env->vfp.pregs[n].p[0];
1138 reg.addr = (uintptr_t)r;
1139 reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1141 if (ret) {
1142 return ret;
1143 }
1144 sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
1145 }
1146
1147 r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
1148 reg.addr = (uintptr_t)r;
1149 reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1151 if (ret) {
1152 return ret;
1153 }
1154 sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
1155
1156 return 0;
1157}
1158
1159int kvm_arch_get_registers(CPUState *cs)
1160{
1161 struct kvm_one_reg reg;
1162 uint64_t val;
1163 unsigned int el;
1164 uint32_t fpr;
1165 int i, ret;
1166
1167 ARMCPU *cpu = ARM_CPU(cs);
1168 CPUARMState *env = &cpu->env;
1169
1170 for (i = 0; i < 31; i++) {
1171 reg.id = AARCH64_CORE_REG(regs.regs[i]);
1172 reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1174 if (ret) {
1175 return ret;
1176 }
1177 }
1178
1179 reg.id = AARCH64_CORE_REG(regs.sp);
1180 reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1182 if (ret) {
1183 return ret;
1184 }
1185
1186 reg.id = AARCH64_CORE_REG(sp_el1);
1187 reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1189 if (ret) {
1190 return ret;
1191 }
1192
1193 reg.id = AARCH64_CORE_REG(regs.pstate);
1194 reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1196 if (ret) {
1197 return ret;
1198 }
1199
1200 env->aarch64 = ((val & PSTATE_nRW) == 0);
1201 if (is_a64(env)) {
1202 pstate_write(env, val);
1203 } else {
1204 cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
1205 }
1210 aarch64_restore_sp(env, 1);
1211
1212 reg.id = AARCH64_CORE_REG(regs.pc);
1213 reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1215 if (ret) {
1216 return ret;
1217 }
1224 if (!is_a64(env)) {
1225 aarch64_sync_64_to_32(env);
1226 }
1227
1228 reg.id = AARCH64_CORE_REG(elr_el1);
1229 reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1231 if (ret) {
1232 return ret;
1233 }
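    /* KVM SPSRs 0-4 map to QEMU's banked_spsr[1..5] */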
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }
1256
1257 el = arm_current_el(env);
1258 if (el > 0 && !is_a64(env)) {
1259 i = bank_number(env->uncached_cpsr & CPSR_M);
1260 env->spsr = env->banked_spsr[i];
1261 }
1262
1263 if (cpu_isar_feature(aa64_sve, cpu)) {
1264 ret = kvm_arch_get_sve(cs);
1265 } else {
1266 ret = kvm_arch_get_fpsimd(cs);
1267 }
1268 if (ret) {
1269 return ret;
1270 }
1271
1272 reg.addr = (uintptr_t)(&fpr);
1273 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1275 if (ret) {
1276 return ret;
1277 }
1278 vfp_set_fpsr(env, fpr);
1279
1280 reg.addr = (uintptr_t)(&fpr);
1281 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1283 if (ret) {
1284 return ret;
1285 }
1286 vfp_set_fpcr(env, fpr);
1287
1288 ret = kvm_get_vcpu_events(cpu);
1289 if (ret) {
1290 return ret;
1291 }
1292
1293 if (!write_kvmstate_to_list(cpu)) {
1294 return -EINVAL;
1295 }
1299 write_list_to_cpustate(cpu);
1300
1301 kvm_arm_sync_mpstate_to_qemu(cpu);
1302
1303
1304 return ret;
1305}
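/* A64 "BRK #0", planted in guest memory as the software breakpoint */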
1308static const uint32_t brk_insn = 0xd4200000;
1309
1310int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1311{
1312 if (have_guest_debug) {
1313 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
1314 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
1315 return -EINVAL;
1316 }
1317 return 0;
1318 } else {
1319 error_report("guest debug not supported on this kernel");
1320 return -EINVAL;
1321 }
1322}
1323
1324int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1325{
1326 static uint32_t brk;
1327
1328 if (have_guest_debug) {
1329 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
1330 brk != brk_insn ||
1331 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
1332 return -EINVAL;
1333 }
1334 return 0;
1335 } else {
1336 error_report("guest debug not supported on this kernel");
1337 return -EINVAL;
1338 }
1339}
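/*
 * kvm_arm_handle_debug() - decide whether a KVM_EXIT_DEBUG exit belongs
 * to the host-side debugger (return true so it is reported to the
 * gdbstub) or to the guest's own use of the debug architecture (the
 * exception is re-injected into the guest and false is returned).
 */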
1348bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
1349{
1350 int hsr_ec = syn_get_ec(debug_exit->hsr);
1351 ARMCPU *cpu = ARM_CPU(cs);
1352 CPUClass *cc = CPU_GET_CLASS(cs);
1353 CPUARMState *env = &cpu->env;
1354
1355
1356 kvm_cpu_synchronize_state(cs);
1357
1358 switch (hsr_ec) {
1359 case EC_SOFTWARESTEP:
1360 if (cs->singlestep_enabled) {
1361 return true;
1362 } else {
1367 error_report("%s: guest single-step while debugging unsupported"
1368 " (%"PRIx64", %"PRIx32")",
1369 __func__, env->pc, debug_exit->hsr);
1370 return false;
1371 }
1372 break;
1373 case EC_AA64_BKPT:
1374 if (kvm_find_sw_breakpoint(cs, env->pc)) {
1375 return true;
1376 }
1377 break;
1378 case EC_BREAKPOINT:
1379 if (find_hw_breakpoint(cs, env->pc)) {
1380 return true;
1381 }
1382 break;
1383 case EC_WATCHPOINT:
1384 {
1385 CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
1386 if (wp) {
1387 cs->watchpoint_hit = wp;
1388 return true;
1389 }
1390 break;
1391 }
1392 default:
1393 error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
1394 __func__, debug_exit->hsr, env->pc);
1395 }
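    /*
     * The exit was not triggered by anything we planted, so it must come
     * from the guest itself; forward it as a debug exception taken to EL1.
     */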
1401 cs->exception_index = EXCP_BKPT;
1402 env->exception.syndrome = debug_exit->hsr;
1403 env->exception.vaddress = debug_exit->far;
1404 env->exception.target_el = 1;
1405 qemu_mutex_lock_iothread();
1406 cc->do_interrupt(cs);
1407 qemu_mutex_unlock_iothread();
1408
1409 return false;
1410}
1411