1
2
3
4
5
6
7
8
9
10
11
12#include "qemu/osdep.h"
13#include <sys/ioctl.h>
14#include <sys/ptrace.h>
15
16#include <linux/elf.h>
17#include <linux/kvm.h>
18
19#include "qemu-common.h"
20#include "qapi/error.h"
21#include "cpu.h"
22#include "qemu/timer.h"
23#include "qemu/error-report.h"
24#include "qemu/host-utils.h"
25#include "qemu/main-loop.h"
26#include "exec/gdbstub.h"
27#include "sysemu/runstate.h"
28#include "sysemu/kvm.h"
29#include "sysemu/kvm_int.h"
30#include "kvm_arm.h"
31#include "internals.h"
32#include "hw/acpi/acpi.h"
33#include "hw/acpi/ghes.h"
34#include "hw/arm/virt.h"
35
/* True if the host kernel advertises KVM_CAP_SET_GUEST_DEBUG. */
static bool have_guest_debug;
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/*
 * One hardware breakpoint: mirrors a DBGBCR/DBGBVR register pair as it
 * will be handed to KVM via struct kvm_guest_debug_arch.
 */
typedef struct {
    uint64_t bcr;   /* DBGBCRn_EL1: breakpoint control */
    uint64_t bvr;   /* DBGBVRn_EL1: breakpoint value (address) */
} HWBreakpoint;
57
58
59
60
61
62
/*
 * One hardware watchpoint: the DBGWCR/DBGWVR pair for KVM, plus the
 * CPUWatchpoint details we report back to the gdbstub when it hits.
 */
typedef struct {
    uint64_t wcr;           /* DBGWCRn_EL1: watchpoint control */
    uint64_t wvr;           /* DBGWVRn_EL1: watchpoint value (address) */
    CPUWatchpoint details;  /* original vaddr/len/flags for the gdbstub */
} HWWatchpoint;
68
69
/* Maximum number of HW breakpoints/watchpoints the host kernel supports. */
int max_hw_bps, max_hw_wps;
/* Currently installed breakpoints/watchpoints (elements: HWBreakpoint /
 * HWWatchpoint); allocated in kvm_arm_init_debug(). */
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
77
78
79
80
81
82
83
84
85
86static void kvm_arm_init_debug(CPUState *cs)
87{
88 have_guest_debug = kvm_check_extension(cs->kvm_state,
89 KVM_CAP_SET_GUEST_DEBUG);
90
91 max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
92 hw_watchpoints = g_array_sized_new(true, true,
93 sizeof(HWWatchpoint), max_hw_wps);
94
95 max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
96 hw_breakpoints = g_array_sized_new(true, true,
97 sizeof(HWBreakpoint), max_hw_bps);
98 return;
99}
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
/*
 * Install a hardware breakpoint at @addr.
 * Returns 0 on success, -ENOBUFS if all host breakpoint slots are in use.
 * The breakpoint only takes effect once kvm_arm_copy_hw_debug_data()
 * pushes the array out to KVM.
 */
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)          /* sign-extended VA[52:0] */
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);    /* PMC = 11: EL1&0 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);    /* BAS = RES1, match A64/A32 */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}
155
156
157
158
159
160
161
162
163static int delete_hw_breakpoint(target_ulong pc)
164{
165 int i;
166 for (i = 0; i < hw_breakpoints->len; i++) {
167 HWBreakpoint *brk = get_hw_bp(i);
168 if (brk->bvr == pc) {
169 g_array_remove_index(hw_breakpoints, i);
170 return 0;
171 }
172 }
173 return -ENOENT;
174}
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208static int insert_hw_watchpoint(target_ulong addr,
209 target_ulong len, int type)
210{
211 HWWatchpoint wp = {
212 .wcr = 1,
213 .wvr = addr & (~0x7ULL),
214 .details = { .vaddr = addr, .len = len }
215 };
216
217 if (cur_hw_wps >= max_hw_wps) {
218 return -ENOBUFS;
219 }
220
221
222
223
224
225 wp.wcr = deposit32(wp.wcr, 1, 2, 3);
226
227 switch (type) {
228 case GDB_WATCHPOINT_READ:
229 wp.wcr = deposit32(wp.wcr, 3, 2, 1);
230 wp.details.flags = BP_MEM_READ;
231 break;
232 case GDB_WATCHPOINT_WRITE:
233 wp.wcr = deposit32(wp.wcr, 3, 2, 2);
234 wp.details.flags = BP_MEM_WRITE;
235 break;
236 case GDB_WATCHPOINT_ACCESS:
237 wp.wcr = deposit32(wp.wcr, 3, 2, 3);
238 wp.details.flags = BP_MEM_ACCESS;
239 break;
240 default:
241 g_assert_not_reached();
242 break;
243 }
244 if (len <= 8) {
245
246 int off = addr & 0x7;
247 int bas = (1 << len) - 1;
248
249 wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
250 } else {
251
252 if (is_power_of_2(len)) {
253 int bits = ctz64(len);
254
255 wp.wvr &= ~((1 << bits) - 1);
256 wp.wcr = deposit32(wp.wcr, 24, 4, bits);
257 wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
258 } else {
259 return -ENOBUFS;
260 }
261 }
262
263 g_array_append_val(hw_watchpoints, wp);
264 return 0;
265}
266
267
/*
 * Return true if @addr falls inside the range covered by installed
 * watchpoint slot @i, decoding either the MASK (power-of-two range) or
 * BAS (byte-select) encoding written by insert_hw_watchpoint().
 */
static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        /* Power-of-two range: base is aligned, MASK holds log2(len). */
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in byte steps. */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}
290
291
292
293
294
295
296
297
298static int delete_hw_watchpoint(target_ulong addr,
299 target_ulong len, int type)
300{
301 int i;
302 for (i = 0; i < cur_hw_wps; i++) {
303 if (check_watchpoint_in_range(i, addr)) {
304 g_array_remove_index(hw_watchpoints, i);
305 return 0;
306 }
307 }
308 return -ENOENT;
309}
310
311
312int kvm_arch_insert_hw_breakpoint(target_ulong addr,
313 target_ulong len, int type)
314{
315 switch (type) {
316 case GDB_BREAKPOINT_HW:
317 return insert_hw_breakpoint(addr);
318 break;
319 case GDB_WATCHPOINT_READ:
320 case GDB_WATCHPOINT_WRITE:
321 case GDB_WATCHPOINT_ACCESS:
322 return insert_hw_watchpoint(addr, len, type);
323 default:
324 return -ENOSYS;
325 }
326}
327
328int kvm_arch_remove_hw_breakpoint(target_ulong addr,
329 target_ulong len, int type)
330{
331 switch (type) {
332 case GDB_BREAKPOINT_HW:
333 return delete_hw_breakpoint(addr);
334 case GDB_WATCHPOINT_READ:
335 case GDB_WATCHPOINT_WRITE:
336 case GDB_WATCHPOINT_ACCESS:
337 return delete_hw_watchpoint(addr, len, type);
338 default:
339 return -ENOSYS;
340 }
341}
342
343
344void kvm_arch_remove_all_hw_breakpoints(void)
345{
346 if (cur_hw_wps > 0) {
347 g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
348 }
349 if (cur_hw_bps > 0) {
350 g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
351 }
352}
353
/*
 * Copy the current HW break/watchpoint state into the kvm_guest_debug_arch
 * structure handed to KVM_SET_GUEST_DEBUG.
 *
 * NOTE(review): the loops run to max_hw_wps/max_hw_bps rather than the
 * current array length; slots beyond ->len read the GArray's reserved
 * space, which is zero-cleared at allocation (and ptr is memset anyway),
 * so unused registers are programmed as disabled.
 */
void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}
370
371bool kvm_arm_hw_debug_active(CPUState *cs)
372{
373 return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
374}
375
376static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
377{
378 int i;
379
380 for (i = 0; i < cur_hw_bps; i++) {
381 HWBreakpoint *bp = get_hw_bp(i);
382 if (bp->bvr == pc) {
383 return true;
384 }
385 }
386 return false;
387}
388
389static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
390{
391 int i;
392
393 for (i = 0; i < cur_hw_wps; i++) {
394 if (check_watchpoint_in_range(i, addr)) {
395 return &get_hw_wp(i)->details;
396 }
397 }
398 return NULL;
399}
400
401static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
402 const char *name)
403{
404 int err;
405
406 err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
407 if (err != 0) {
408 error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
409 return false;
410 }
411
412 err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
413 if (err != 0) {
414 error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
415 return false;
416 }
417
418 return true;
419}
420
421void kvm_arm_pmu_init(CPUState *cs)
422{
423 struct kvm_device_attr attr = {
424 .group = KVM_ARM_VCPU_PMU_V3_CTRL,
425 .attr = KVM_ARM_VCPU_PMU_V3_INIT,
426 };
427
428 if (!ARM_CPU(cs)->has_pmu) {
429 return;
430 }
431 if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
432 error_report("failed to init PMU");
433 abort();
434 }
435}
436
437void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
438{
439 struct kvm_device_attr attr = {
440 .group = KVM_ARM_VCPU_PMU_V3_CTRL,
441 .addr = (intptr_t)&irq,
442 .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
443 };
444
445 if (!ARM_CPU(cs)->has_pmu) {
446 return;
447 }
448 if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
449 error_report("failed to set irq for PMU");
450 abort();
451 }
452}
453
454void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
455{
456 struct kvm_device_attr attr = {
457 .group = KVM_ARM_VCPU_PVTIME_CTRL,
458 .attr = KVM_ARM_VCPU_PVTIME_IPA,
459 .addr = (uint64_t)&ipa,
460 };
461
462 if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
463 return;
464 }
465 if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
466 error_report("failed to init PVTIME IPA");
467 abort();
468 }
469}
470
/*
 * Read the 64-bit system register @id from vcpu @fd and store its low
 * 32 bits in *pret (the ID regs read here are defined to fit in 32 bits).
 * Returns 0 on success, -1 on ioctl failure; *pret is untouched on error.
 */
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}
485
/*
 * Read the 64-bit system register @id from vcpu @fd into *pret.
 * Returns the raw ioctl result (0 on success, negative on failure).
 */
static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}
493
bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    uint64_t features = 0;
    uint64_t t;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU, so provide
         * a minimal v8.0 default.
         *
         * We use the following values here: EL1 supported in AArch64 only
         * (0x1 in the EL1 field) plus AArch32 support at EL0 (a second 0x1),
         * which is the most conservative v8.0 description that still lets
         * the rest of the feature-probing code proceed.
         *
         * NOTE(review): the exact rationale for 0x11 predates this file's
         * comment stripping — confirm against upstream history if changing.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        /* Read the remaining AArch64 ID registers. */
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values.  This is not really a problem,
         * because we don't advertise an AArch32 host with incorrect
         * AArch32 ID regs.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }
    }

    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;

    /* Add feature bits that can't appear until after VCPU init. */
    if (sve_supported) {
        t = ahcf->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
        ahcf->isar.id_aa64pfr0 = t;

        /*
         * There is a range of kernels between kernel commit 73433762fcae
         * and f81cb2c3ad41 which have a bug where the kernel doesn't
         * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has
         * enabled SVE support, which resulted in an error rather than RAZ.
         * So only read the register if we set KVM_ARM_VCPU_SVE above.
         */
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                              ARM64_SYS_REG(3, 0, 0, 4, 4));
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}
687
688void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
689{
690 bool has_steal_time = kvm_arm_steal_time_supported();
691
692 if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
693 if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
694 cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
695 } else {
696 cpu->kvm_steal_time = ON_OFF_AUTO_ON;
697 }
698 } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
699 if (!has_steal_time) {
700 error_setg(errp, "'kvm-steal-time' cannot be enabled "
701 "on this host");
702 return;
703 } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
704
705
706
707
708
709
710
711 error_setg(errp, "'kvm-steal-time' cannot be enabled "
712 "for AArch32 guests");
713 return;
714 }
715 }
716}
717
/* True if the host kernel can run AArch32 guests at EL1. */
bool kvm_arm_aarch32_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}
722
/* True if the host kernel supports SVE for guests. */
bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}
727
/* True if the host kernel supports PV stolen-time accounting. */
bool kvm_arm_steal_time_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}
732
733QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
734
/*
 * Fill @map (a bitmap of ARM_MAX_VQ bits) with the SVE vector lengths
 * the host KVM supports: bit (vq - 1) set means vq quadwords is valid.
 *
 * The host's KVM_REG_ARM64_SVE_VLS register is probed once, via a
 * scratch SVE-enabled VCPU, and cached in the function-static vls[]
 * (this is only ever called from the vCPU-init path, so the static
 * state is not a concurrency concern here — NOTE(review): confirm if
 * callers change).
 */
void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
{
    /* Cached host VLS words; valid once 'probed' is set. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i, j;

    bitmap_zero(map, ARM_MAX_VQ);

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPU once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        /* Find the highest supported vq (highest set bit overall). */
        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
        }
    }

    /* Translate the cached VLS words into the caller's bitmap,
     * clamping at ARM_MAX_VQ. */
    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
        if (!vls[i]) {
            continue;
        }
        for (j = 1; j <= 64; ++j) {
            vq = j + i * 64;
            if (vq > ARM_MAX_VQ) {
                return;
            }
            if (vls[i] & (1UL << (j - 1))) {
                set_bit(vq - 1, map);
            }
        }
    }
}
802
803static int kvm_arm_sve_set_vls(CPUState *cs)
804{
805 uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
806 struct kvm_one_reg reg = {
807 .id = KVM_REG_ARM64_SVE_VLS,
808 .addr = (uint64_t)&vls[0],
809 };
810 ARMCPU *cpu = ARM_CPU(cs);
811 uint32_t vq;
812 int i, j;
813
814 assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
815
816 for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
817 if (test_bit(vq - 1, cpu->sve_vq_map)) {
818 i = (vq - 1) / 64;
819 j = (vq - 1) % 64;
820 vls[i] |= 1UL << j;
821 }
822 }
823
824 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
825}
826
/* (op0, op1, crn, crm, op2) encoding of MPIDR_EL1 for ARM64_SYS_REG(). */
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
828
/*
 * Architecture-specific vCPU init: select init features, issue
 * KVM_ARM_VCPU_INIT, finalize SVE, adopt KVM's MPIDR, and set up
 * debug/SError state and the cpreg list.  Returns 0 or a negative errno.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported());
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* SVE vector lengths must be set before finalizing the SVE feature. */
    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}
904
/* No per-vCPU teardown needed on ARM64; always succeeds. */
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}
909
910bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
911{
912
913
914
915
916 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
917 case KVM_REG_ARM_CORE:
918 case KVM_REG_ARM64_SVE:
919 return false;
920 default:
921 return true;
922 }
923}
924
/* Maps a cpreg index to the minimum sync level at which it is written. */
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/*
 * All system registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written
 * less frequently, its entry here names the required level.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
938
939int kvm_arm_cpreg_level(uint64_t regidx)
940{
941 int i;
942
943 for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
944 const CPRegStateLevel *l = &non_runtime_cpregs[i];
945 if (l->regidx == regidx) {
946 return l->level;
947 }
948 }
949
950 return KVM_PUT_RUNTIME_STATE;
951}
952
953
/* Inject a synchronous external abort (SEA) into the guest at EL1. */
static void kvm_inject_arm_sea(CPUState *c)
{
    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    uint32_t esr;
    bool same_el;

    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;

    /*
     * Set the DFSC to synchronous external abort and set FnV to not valid,
     * this will tell guest the FAR_ELx is UNKNOWN for this abort.
     */
    same_el = arm_current_el(env) == env->exception.target_el;
    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);

    env->exception.syndrome = esr;

    arm_cpu_do_interrupt(c);
}
975
/* Build KVM ONE_REG ids for fields of struct kvm_regs by size class. */
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* 128-bit id: one Advanced SIMD V register. */
#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* 32-bit id: FPSR/FPCR control registers. */
#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
984
/*
 * Write the 32 AdvSIMD V registers to KVM.  KVM expects the 128-bit
 * value in little-endian doubleword order, so swap on big-endian hosts.
 * Returns 0 or the failing ioctl's result.
 */
static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
1008
1009
1010
1011
1012
1013
1014
1015static int kvm_arch_put_sve(CPUState *cs)
1016{
1017 ARMCPU *cpu = ARM_CPU(cs);
1018 CPUARMState *env = &cpu->env;
1019 uint64_t tmp[ARM_MAX_VQ * 2];
1020 uint64_t *r;
1021 struct kvm_one_reg reg;
1022 int n, ret;
1023
1024 for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
1025 r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
1026 reg.addr = (uintptr_t)r;
1027 reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
1028 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
1029 if (ret) {
1030 return ret;
1031 }
1032 }
1033
1034 for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
1035 r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
1036 DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
1037 reg.addr = (uintptr_t)r;
1038 reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
1039 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
1040 if (ret) {
1041 return ret;
1042 }
1043 }
1044
1045 r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
1046 DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
1047 reg.addr = (uintptr_t)r;
1048 reg.id = KVM_REG_ARM64_SVE_FFR(0);
1049 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
1050 if (ret) {
1051 return ret;
1052 }
1053
1054 return 0;
1055}
1056
/*
 * Push the full QEMU CPU state out to KVM: core regs, SP/PC/PSTATE,
 * banked SPSRs, FP/SVE state, the cpreg list, vcpu events and mpstate.
 * The statement order is deliberate — see the inline comments.
 * Returns 0 or a negative errno.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}
1197
/*
 * Read the 32 AdvSIMD V registers from KVM into env, swapping the two
 * doublewords on big-endian hosts (KVM supplies LE doubleword order).
 * Returns 0 or the failing ioctl's result.
 */
static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            /* In-place swap of the two 64-bit halves. */
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}
1221
1222
1223
1224
1225
1226
1227
/*
 * Read the SVE Z, P and FFR registers from KVM (slice 0 only) directly
 * into env, then byte-swap in place on big-endian hosts (sve_bswap64 is
 * a no-op on little-endian).  Returns 0 or the failing ioctl's result.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}
1269
/*
 * Pull the full CPU state from KVM into env: core regs, SP/PSTATE/PC,
 * banked SPSRs, FP/SVE state, vcpu events, the cpreg list and mpstate.
 * The statement order is deliberate — see the inline comments.
 * Returns 0 or a negative errno.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* PSTATE.nRW clear means the vCPU is in AArch64 state. */
    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Reflect the current mode's SPSR bank into env->spsr. */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}
1408
/*
 * SIGBUS handler for memory errors reported against a vCPU thread.
 * For action-required errors on guest RAM (with ACPI GHES available)
 * we record the error and inject an SEA; otherwise we can only report
 * and, for BUS_MCEERR_AR, exit.
 */
void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    ram_addr_t ram_addr;
    hwaddr paddr;

    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if (acpi_ghes_present() && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            /*
             * If this is a BUS_MCEERR_AR, we know we have been called
             * synchronously from the vCPU thread, so we can easily
             * synchronize the state and inject an error.
             *
             * TODO: we currently don't tell the guest at all about
             * BUS_MCEERR_AO. In that case we might either be being
             * called synchronously from the vCPU thread, or a bit
             * later from the main thread, so doing the injection of
             * the error would be more complicated.
             */
            if (code == BUS_MCEERR_AR) {
                kvm_cpu_synchronize_state(c);
                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
                    kvm_inject_arm_sea(c);
                } else {
                    error_report("failed to record the error");
                    abort();
                }
            }
            return;
        }
        if (code == BUS_MCEERR_AO) {
            error_report("Hardware memory error at addr %p for memory used by "
                "QEMU itself instead of guest system!", addr);
        }
    }

    if (code == BUS_MCEERR_AR) {
        error_report("Hardware memory error!");
        exit(1);
    }
}
1454
1455
/* C6.6.29 BRK instruction (BRK #0), used for software breakpoints. */
static const uint32_t brk_insn = 0xd4200000;
1457
/*
 * Install a software breakpoint: save the original instruction at
 * bp->pc, then patch in a BRK.  Requires KVM_CAP_SET_GUEST_DEBUG.
 */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}
1471
/*
 * Remove a software breakpoint: verify the BRK we planted is still at
 * bp->pc, then restore the saved instruction.
 */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}
1488
1489
1490
1491
1492
1493
1494
1495
/*
 * Handle a KVM_EXIT_DEBUG event.  Returns true if the exception belongs
 * to us (gdbstub breakpoint/watchpoint/single-step) and the run loop
 * should stop; otherwise re-injects the debug exception into the guest
 * via the TCG interrupt path and returns false.
 */
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    arm_cpu_do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}
1558
/* ONE_REG ids for ESR_EL1 and TCR_EL1, used by the ext-DABT check below. */
#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
/*
 * ESR_DFSC: extract the DFSC (data fault status code) field from a
 * DFSR/ESR value @v.  For AArch64 or AArch32-LPAE the code is the low
 * 6 bits; for AArch32 short-descriptor format it is split as FS[4]
 * (bit 10) plus FS[3:0] (bits 4:0), recombined here into a 5+1 bit
 * value.
 *
 * Fix: the @aarch64 macro argument was used unparenthesized in the
 * `||` expression; all arguments are now fully parenthesized.
 */
#define ESR_DFSC(aarch64, lpae, v) \
    (((aarch64) || (lpae)) ? ((v) & 0x3F) \
               : (((v) >> 6) | ((v) & 0x1F)))

/* DFSC encoding of a synchronous external abort in each format. */
#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
1579
/*
 * Verify that the guest really has an external data abort pending by
 * reading back ESR_EL1 and checking its DFSC encodes a synchronous
 * external abort.  For AArch32 guests, TCR_EL1.EAE decides whether the
 * LPAE or short-descriptor DFSC encoding applies.
 */
bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
{
    uint64_t dfsr_val;

    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
        int lpae = 0;

        if (!aarch64_mode) {
            uint64_t ttbcr;

            /* On TCR read failure we conservatively assume non-LPAE. */
            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
                lpae = arm_feature(env, ARM_FEATURE_LPAE)
                        && (ttbcr & TTBCR_EAE);
            }
        }
        /*
         * The verification here is based on the DFSC bits of the
         * ESR_EL1 reg only, which is what the kernel populates for an
         * external data abort it injected itself.
         */
        return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
                ESR_DFSC_EXTABT(aarch64_mode, lpae));
    }
    return false;
}
1607