/*
 * ARM AArch64-specific KVM support for QEMU: hardware-assisted debug,
 * host CPU feature probing, vCPU initialisation and register
 * synchronisation between QEMU's CPUARMState and the KVM vCPU.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qapi/error.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "hw/arm/virt.h"

static bool have_guest_debug;

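/*
 * The hardware breakpoint and watchpoint state below is kept in a single
 * global pool, mirroring the way the GDB remote protocol treats these
 * registers: the same set is applied to every vCPU.  Updates arrive via
 * the GDB stub, which serialises them through its run control.
 */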
/* A hardware breakpoint as programmed into DBGBCR/DBGBVR. */
typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * A hardware watchpoint: the DBGWCR/DBGWVR values plus the details
 * structure QEMU's core watchpoint code expects when one is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

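/*
 * kvm_arm_init_debug() probes the host kernel for guest-debug support and
 * sizes the breakpoint/watchpoint arrays from the advertised
 * KVM_CAP_GUEST_DEBUG_HW_BPS/WPS limits.
 */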
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
    return;
}

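/*
 * insert_hw_breakpoint() builds a DBGBVR/DBGBCR pair for an address-match
 * breakpoint: the (sign-extended) address goes into BVR and BCR is set up
 * below as an enabled, unlinked breakpoint that fires at EL1/EL0 with all
 * byte-address-select bits set.
 */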
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                             /* E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = EL1/EL0 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = all bytes */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}

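/*
 * delete_hw_breakpoint() removes the breakpoint whose value register
 * matches @pc from the global array; returns -ENOENT if none does.
 */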
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

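/*
 * insert_hw_watchpoint() builds a DBGWVR/DBGWCR pair for the requested
 * address range and access type.  Ranges of up to 8 bytes within a
 * doubleword are expressed with the byte-address-select (BAS) field;
 * larger power-of-two ranges use the MASK field instead.  Anything else
 * cannot be described by a single watchpoint register and is rejected.
 */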
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = R_DBGWCR_E_MASK,             /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /* PAC = 0b11: match accesses from EL1 and EL0 (HMC and SSC left 0). */
    wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, PAC, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
        break;
    }
    if (len <= 8) {
        /* Watch up to 8 bytes: select the watched bytes via BAS. */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* Larger ranges must be a power of 2: use the MASK field. */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, MASK, bits);
            wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, BAS, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}

static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /*
         * BAS must be contiguous but can offset against the base
         * address in DBGWVR.
         */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}

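/*
 * delete_hw_watchpoint() drops the first watchpoint whose range covers
 * @addr; @len and @type are not used when searching.
 */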
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

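/*
 * kvm_arm_set_device_attr() checks that a vcpu device attribute is
 * supported before setting it, reporting an error (and returning false)
 * if either ioctl fails.
 */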
static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
                                    const char *name)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    return true;
}

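/*
 * The helpers below use the vcpu device attribute interface to enable the
 * in-kernel PMU, route its interrupt, and configure the pvtime (stolen
 * time) structure for a vCPU.
 */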
void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
        .addr = (uint64_t)&ipa,
    };

    if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
        error_report("failed to init PVTIME IPA");
        abort();
    }
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

static bool kvm_arm_pauth_supported(void)
{
    return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
            kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
}

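/*
 * kvm_arm_get_host_cpu_features() probes the host by creating a scratch
 * vCPU and reading its ID registers, filling in @ahcf with the features
 * the "host" CPU model should expose to the guest.
 */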
bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /*
     * Identify the feature bits corresponding to the host CPU.  To do
     * this we create a scratch VM with a single vCPU inside it and then
     * query that vCPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    bool pmu_supported = false;
    uint64_t features = 0;
    int err;

    /*
     * Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    /*
     * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
     * which is otherwise RAZ.
     */
    sve_supported = kvm_arm_sve_supported();
    if (sve_supported) {
        init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /*
     * Ask for Pointer Authentication if supported, so that we get
     * the unsanitized field values for AA64ISAR1_EL1.
     */
    if (kvm_arm_pauth_supported()) {
        init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                             1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    if (kvm_arm_pmu_supported()) {
        init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
        pmu_supported = true;
    }

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Older kernels do not expose the AArch64 ID registers via
         * KVM_GET_ONE_REG.  In that case fall back to a minimal
         * ID_AA64PFR0 value (EL1 and EL0 present, AArch64 only) and
         * treat the probe as successful.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011;
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
                              ARM64_SYS_REG(3, 0, 0, 4, 5));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));

        /*
         * If the host has no AArch32 EL1 support, the AArch32 ID
         * registers below read as UNKNOWN values; callers must not rely
         * on them in that case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));

        /*
         * The kernel provides no way to read the AArch32 DBGDIDR from a
         * 64-bit scratch vCPU, so synthesize a value from the AArch64
         * ID_AA64DFR0 fields instead, and only when the CPU actually
         * supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }

        if (pmu_supported) {
            /* PMCR_EL0 is only accessible if the vCPU has FEAT_PMUv3 */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                                  ARM64_SYS_REG(3, 3, 9, 12, 0));
        }

        if (sve_supported) {
            /*
             * Some kernels fail the read of SYS_ID_AA64ZFR0_EL1 via the
             * ONE_REG API unless the VM has SVE enabled, instead of
             * treating it as RAZ, so only read the register when we
             * requested KVM_ARM_VCPU_SVE above.
             */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                                  ARM64_SYS_REG(3, 0, 0, 4, 4));
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /*
     * We can assume any KVM-supporting CPU is at least an ARMv8 with
     * Neon; this in turn implies most of the other feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}

void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
{
    bool has_steal_time = kvm_arm_steal_time_supported();

    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
        } else {
            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
        }
    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
        if (!has_steal_time) {
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "on this host");
            return;
        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            /*
             * The stolen-time SMCCC calls are 64-bit only, so steal time
             * cannot be provided to 32-bit guests; reject an explicit
             * request rather than silently ignoring it.
             */
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "for AArch32 guests");
            return;
        }
    }
}

bool kvm_arm_aarch32_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}

bool kvm_arm_steal_time_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}

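/*
 * kvm_arm_sve_get_vls() probes (once) which SVE vector lengths the host
 * supports by creating a scratch vCPU with SVE enabled and reading
 * KVM_REG_ARM64_SVE_VLS.  The result is a bitmap of supported vector
 * quanta (bit 0 corresponds to a 128-bit vector length).
 */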
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);

uint32_t kvm_arm_sve_get_vls(CPUState *cs)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i;

    /*
     * KVM ensures all host CPUs support the same set of vector lengths,
     * so we only need to probe once and can cache the result.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
            vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ);
        }
    }

    return vls[0];
}

static int kvm_arm_sve_set_vls(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM64_SVE_VLS,
        .addr = (uint64_t)&vls[0],
    };

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t psciver;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = QEMU_PSCI_VERSION_0_2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported());
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                                      1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * KVM reports the exact PSCI version it is implementing via a
     * special sysreg.  If it is present, use its contents to determine
     * what to report to the guest in the dtb (it is the PSCI version,
     * in the same format that the PSCI_VERSION function returns).
     */
    if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
        cpu->psci_version = psciver;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by QEMU.
     * KVM has its own idea about MPIDR assignment, so we override our
     * defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify the guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /*
     * Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core or sve reg).
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/*
 * All system registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE.  If a register should be written
 * less often, it must be added to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

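/*
 * kvm_inject_arm_sea() injects a synchronous external data abort into the
 * guest at EL1, used when a host memory error hits guest memory.
 */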
static void kvm_inject_arm_sea(CPUState *c)
{
    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    uint32_t esr;
    bool same_el;

    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;

    /*
     * Set the DFSC to synchronous external abort and set FnV to not valid;
     * this tells the guest that FAR_ELx is UNKNOWN for this abort.
     */
    same_el = arm_current_el(env) == env->exception.target_el;
    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);

    env->exception.syndrome = esr;

    arm_cpu_do_interrupt(c);
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

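/*
 * kvm_arch_put_fpsimd() writes the 32 Advanced SIMD Q registers to KVM,
 * swapping the two 64-bit halves first on big-endian hosts to match the
 * layout KVM expects for 128-bit registers.
 */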
static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if HOST_BIG_ENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/*
 * KVM presents SVE registers in "slices"; with QEMU's maximum supported
 * vector length the first slice (index 0) always covers the whole
 * register, so only slice 0 is read and written here.
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * If we are in AArch32 mode then we need to copy the AArch32 regs to
     * the AArch64 registers before pushing them out to KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /*
     * KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1.  On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to ensure
     * that any modifications to env->spsr are correctly reflected in
     * the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM SPSRs 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the
     * registers to avoid overwriting potential changes made by KVM upon
     * calling the KVM_SET_VCPU_EVENTS ioctl.
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

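/*
 * kvm_arch_get_fpsimd() reads the 32 Advanced SIMD Q registers back from
 * KVM, swapping the two 64-bit halves on big-endian hosts.
 */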
static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#if HOST_BIG_ENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * As with kvm_arch_put_sve(), only slice 0 of each SVE register needs to
 * be transferred for the vector lengths QEMU supports.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /*
     * KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1.  On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * If we are in AArch32 mode then we need to sync the AArch32 regs
     * with the incoming AArch64 regs received from 64-bit KVM.  We must
     * perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /*
     * Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return ret;
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    ram_addr_t ram_addr;
    hwaddr paddr;

    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if (acpi_ghes_present() && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            /*
             * If this is a BUS_MCEERR_AR, we know we have been called
             * synchronously from the vCPU thread, so we can easily
             * synchronize the state and inject an error.
             *
             * TODO: we currently don't tell the guest at all about
             * BUS_MCEERR_AO.  In that case we might either be being
             * called synchronously from the vCPU thread, or a bit later
             * from the main thread, so doing the injection of the error
             * would be more complicated.
             */
            if (code == BUS_MCEERR_AR) {
                kvm_cpu_synchronize_state(c);
                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
                    kvm_inject_arm_sea(c);
                } else {
                    error_report("failed to record the error");
                    abort();
                }
            }
            return;
        }
        if (code == BUS_MCEERR_AO) {
            error_report("Hardware memory error at addr %p for memory used by "
                         "QEMU itself instead of guest system!", addr);
        }
    }

    if (code == BUS_MCEERR_AR) {
        error_report("Hardware memory error!");
        exit(1);
    }
}

/* BRK #0: the AArch64 software breakpoint instruction. */
static const uint32_t brk_insn = 0xd4200000;

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

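/*
 * kvm_arm_handle_debug() is called on a KVM_EXIT_DEBUG exit.  It returns
 * true if the debug event should be handled by the host debugger (GDB),
 * or false if it should be delivered to the guest instead, in which case
 * an EXCP_BKPT exception is raised with the exit's syndrome and fault
 * address.
 */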
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure the host's view of the register state is up to date. */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /*
     * If we are not handling the debug exception it must belong to the
     * guest.  Re-use the existing TCG interrupt code to set everything
     * up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    arm_cpu_do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}

#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)

/*
 * ESR_EL1 fault status extraction:
 * AArch64: DFSC is ESR bits [5:0].
 * AArch32 with the short-descriptor format (TTBCR.EAE == 0): the fault
 * status is split across DFSR bits [10] and [3:0].
 * AArch32 with LPAE (TTBCR.EAE == 1): the status is in bits [5:0], as
 * for AArch64.
 */
#define ESR_DFSC(aarch64, lpae, v) \
    ((aarch64 || (lpae)) ? ((v) & 0x3F) \
                         : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)

bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
{
    uint64_t dfsr_val;

    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
        int lpae = 0;

        if (!aarch64_mode) {
            uint64_t ttbcr;

            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
                lpae = arm_feature(env, ARM_FEATURE_LPAE)
                        && (ttbcr & TTBCR_EAE);
            }
        }
        /*
         * The verification is based on the fault status code (DFSC)
         * field only: check that it encodes a synchronous external
         * abort in the format the guest is using.
         */
        return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
                ESR_DFSC_EXTABT(aarch64_mode, lpae));
    }
    return false;
}