/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "hw/arm/virt.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 */
typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to also track the details of what we
 * requested.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface.
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63  53 52       49 48       2  1 0
 * +------+-----------+----------+-----+
 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 * +------+-----------+----------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some versions
 * of GDB don't do it correctly so we ensure they are correct here so
 * future PC comparisons will work properly.
 */
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                        /* E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = RES1 */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}
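
/*
 * Worked example (editorial addition, not from the original source):
 * with the 53-bit sign extension above, a kernel-space PC such as
 * 0xffff800000001234 keeps its all-ones top bits in .bvr, so the
 * bvr == pc comparisons in delete_hw_breakpoint() and
 * find_hw_breakpoint() match the sign-extended PC that GDB reports.
 */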

/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of watch point
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to do things like partially mask addresses.
 * However GDB doesn't expose this level of control so we keep things
 * simple.
 *
 * DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31  29 28   24 23  21  20  19 16 15 14  13   12  5 4   3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none, 01/10=reserved, 11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (^2) you
 * need to ensure you mask the address as required and set BAS=0xff.
 */
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }
    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}
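
/*
 * Worked example (editorial addition): a 2-byte watchpoint at address
 * 0x1003 gives off = 3 and bas = 0b11, so the deposit32() above sets
 * BAS = 0b00011000 -- bytes 3 and 4 of the doubleword based at
 * wvr = 0x1000 are selected.
 */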

static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /*
         * BAS must be contiguous but can offset against the base
         * address in DBGWVR.
         */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    return addr >= addr_bottom && addr <= addr_top;
}

/**
 * delete_hw_watchpoint()
 * @addr: address of watch point
 *
 * Delete a watchpoint and shuffle any above down
 */
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

/* Remove all hardware breakpoints and watchpoints. */
void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

/*
 * Copy the QEMU copies of the debug registers into the kvm structure.
 * The arrays were zero-initialised to their full capacity, so slots
 * beyond the currently armed break/watchpoints read back as disabled
 * (all-zero) registers.
 */
void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return (cur_hw_wps > 0) || (cur_hw_bps > 0);
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
                                    const char *name)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    return true;
}
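
/*
 * Usage note (editorial addition): the callers below pass a
 * stack-allocated kvm_device_attr and rely on this probe-then-set
 * pattern -- KVM_HAS_DEVICE_ATTR fails cleanly on kernels without the
 * attribute before the state-changing KVM_SET_DEVICE_ATTR is tried.
 */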

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
        .addr = (uint64_t)&ipa,
    };

    if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
        error_report("failed to init PVTIME IPA");
        abort();
    }
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}
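
/*
 * Example (editorial addition): the ARM64_SYS_REG(op0, op1, crn, crm, op2)
 * encodings used below name system registers by their instruction
 * encoding, e.g. ARM64_SYS_REG(3, 0, 0, 4, 0) is ID_AA64PFR0_EL1.
 */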

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /*
     * Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    uint64_t features = 0;
    uint64_t t;
    int err;

    /*
     * Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of
         * system registers, not including any of the interesting
         * AArch32 bits, so reading ID_AA64PFR0 fails there. The one
         * thing we can safely rely on is that a host able to boot an
         * AArch64 kernel supports AArch64 at EL0 and EL1, so fall back
         * to a minimal ID_AA64PFR0 advertising exactly that and leave
         * the remaining ID registers zero.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is not really what we want,
         * but is not significantly worse than other things we have
         * to deal with.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }
    }

    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Add feature bits that can't appear until after VCPU init. */
    if (sve_supported) {
        t = ahcf->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
        ahcf->isar.id_aa64pfr0 = t;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}

void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
{
    bool has_steal_time = kvm_arm_steal_time_supported();

    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
        } else {
            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
        }
    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
        if (!has_steal_time) {
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "on this host");
            return;
        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            /*
             * DEN0057A chapter 2 says "This specification only covers
             * systems in which the Execution state of the hypervisor
             * as well as EL1 of virtual machines is AArch64.". And,
             * to ensure that, the smc/hvc calls are only specified as
             * smc64/hvc64.
             */
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "for AArch32 guests");
            return;
        }
    }
}

bool kvm_arm_aarch32_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}

bool kvm_arm_steal_time_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}

QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);

void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i, j;

    bitmap_clear(map, 0, ARM_MAX_VQ);

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
        }
    }

    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
        if (!vls[i]) {
            continue;
        }
        for (j = 1; j <= 64; ++j) {
            vq = j + i * 64;
            if (vq > ARM_MAX_VQ) {
                return;
            }
            if (vls[i] & (1UL << (j - 1))) {
                set_bit(vq - 1, map);
            }
        }
    }
}
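
/*
 * Example (editorial addition): a host supporting 128-, 256- and
 * 512-bit vectors reports vls[0] = 0b1011 (VQ 1, 2 and 4), so the loop
 * above sets bits 0, 1 and 3 of @map (bit N-1 set means a vector
 * length of N quadwords is available).
 */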

static int kvm_arm_sve_set_vls(CPUState *cs)
{
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM64_SVE_VLS,
        .addr = (uint64_t)&vls[0],
    };
    ARMCPU *cpu = ARM_CPU(cs);
    uint32_t vq;
    int i, j;

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            i = (vq - 1) / 64;
            j = (vq - 1) % 64;
            vls[i] |= 1UL << j;
        }
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported());
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}
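
/*
 * Example (editorial addition): ARM64_AFFINITY_MASK keeps only the
 * Aff3..Aff0 fields, so an MPIDR_EL1 of 0x80000003 (bit 31 is RES1)
 * yields mp_affinity 0x3, i.e. Aff0 = 3.
 */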

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /*
     * Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (i.e. is not a core or SVE reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/*
 * All system registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written
 * less often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}
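
/*
 * Example (editorial addition): KVM_REG_ARM_TIMER_CNT is listed in
 * non_runtime_cpregs[], so kvm_arm_cpreg_level() returns
 * KVM_PUT_FULL_STATE for it and the virtual counter is only written
 * back on full state syncs such as migration, not on every runtime
 * register sync.
 */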

/* Callers must hold the iothread mutex lock */
static void kvm_inject_arm_sea(CPUState *c)
{
    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    uint32_t esr;
    bool same_el;

    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;

    /*
     * Set the DFSC to synchronous external abort and set FnV to not valid,
     * this will tell guest the FAR_ELx is UNKNOWN for this abort.
     */
    same_el = arm_current_el(env) == env->exception.target_el;
    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);

    env->exception.syndrome = esr;

    arm_cpu_do_interrupt(c);
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
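
/*
 * Note (editorial addition): QEMU stores each 128-bit V register as two
 * host-endian 64-bit halves with q[0] the least significant. The kernel
 * reads the 16-byte one-reg payload as a host-endian 128-bit value, so
 * big-endian hosts must present the high half first, hence the swap in
 * kvm_arch_put_fpsimd() above.
 */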

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of
 * 2048 bits and PREGs and the FFR have a slice size of 256 bits.
 * However, the first slice is always large enough to hold the full
 * vector, so we only need to transfer slice 0 here (and in
 * kvm_arch_get_sve() below).
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /*
     * KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banked array.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of
 * 2048 bits and PREGs and the FFR have a slice size of 256 bits.
 * However, the first slice is always large enough to hold the full
 * vector, so we only need to transfer slice 0 here as well.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /*
     * KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /*
     * Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return ret;
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    ram_addr_t ram_addr;
    hwaddr paddr;
    Object *obj = qdev_get_machine();
    VirtMachineState *vms = VIRT_MACHINE(obj);
    bool acpi_enabled = virt_is_acpi_enabled(vms);

    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if (acpi_enabled && addr &&
        object_property_get_bool(obj, "ras", NULL)) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            /*
             * If this is a BUS_MCEERR_AR, we know we have been called
             * synchronously from the vCPU thread, so we can easily
             * synchronize the state and inject an error.
             *
             * TODO: we currently don't tell the guest at all about
             * BUS_MCEERR_AO. In that case we might either be being
             * called synchronously from the vCPU thread, or a bit
             * later from the main thread, so doing the injection of
             * the error would be more complicated.
             */
            if (code == BUS_MCEERR_AR) {
                kvm_cpu_synchronize_state(c);
                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
                    kvm_inject_arm_sea(c);
                } else {
                    error_report("failed to record the error");
                    abort();
                }
            }
            return;
        }
        if (code == BUS_MCEERR_AO) {
            error_report("Hardware memory error at addr %p for memory used by "
                "QEMU itself instead of guest system!", addr);
        }
    }

    if (code == BUS_MCEERR_AR) {
        error_report("Hardware memory error!");
        exit(1);
    }
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}
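
/*
 * Note (editorial addition): 0xd4200000 is the fixed encoding of the
 * AArch64 "BRK #0" instruction. Removal re-reads the word at bp->pc and
 * refuses (-EINVAL) to restore the saved instruction if the guest has
 * overwritten our breakpoint in the meantime.
 */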

/*
 * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /*
     * If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    arm_cpu_do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}

#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)

/*
 * ESR_EL1
 * ISS encoding
 * AARCH64: DFSC,   bits [5:0]
 * AARCH32:
 *      TTBCR.EAE == 0
 *          FS[4]   - DFSR[10]
 *          FS[3:0] - DFSR[3:0]
 *      TTBCR.EAE == 1
 *          FS, bits [5:0]
 */
#define ESR_DFSC(aarch64, lpae, v)        \
    ((aarch64 || (lpae)) ? ((v) & 0x3F)   \
     : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)

bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
{
    uint64_t dfsr_val;

    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
        int lpae = 0;

        if (!aarch64_mode) {
            uint64_t ttbcr;

            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
                lpae = arm_feature(env, ARM_FEATURE_LPAE)
                       && (ttbcr & TTBCR_EAE);
            }
        }
        /*
         * The verification is based on the DFSC bits of the ESR_EL1 reg only
         */
        return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
                ESR_DFSC_EXTABT(aarch64_mode, lpae));
    }
    return false;
}
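
/*
 * Worked example (editorial addition): an AArch32 guest without LPAE
 * reports a synchronous external abort as short-descriptor FSR 0x8.
 * ESR_DFSC(0, 0, v) maps DFSR bit [10] down to bit [4] and keeps the
 * low FS bits, giving 0x8, which equals ESR_DFSC_EXTABT(0, 0) in
 * kvm_arm_verify_ext_dabt_pending() above.
 */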