/*
 * ARM implementation of KVM hooks, 64 bit specific code
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "exec/gdbstub.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/arm/arm.h"

static bool have_guest_debug;

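/*
 * GDB treats the hardware debug registers as a single global pool, so
 * we keep one copy of the requested hardware break- and watchpoints
 * here and copy them into each vCPU's kvm_guest_debug_arch state via
 * kvm_arm_copy_hw_debug_data() before the vCPU runs.
 */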
typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

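/*
 * A hardware watchpoint: the DBGWCR/DBGWVR register values plus a
 * CPUWatchpoint record used to report the hit back to the GDB stub.
 */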
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

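/* Maximum and current break/watch point counts */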
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

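/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension reports how many hardware break/watch point
 * registers the host kernel exposes (or 0 if it has none); we use the
 * counts to size the global break/watch point arrays.
 */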
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
    return;
}

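/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * We only program simple unlinked address-match breakpoints: DBGBCR is
 * set up with E=1 (enable), PMC=0b11 (match at EL1 and EL0) and
 * BAS=0b1111, while DBGBVR holds the address to match.
 */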
static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,
        .bvr = addr
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}

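/**
 * delete_hw_breakpoint()
 * @pc: address of the breakpoint to remove
 *
 * Scan the breakpoint array for an entry whose DBGBVR matches @pc and
 * drop it; returns -ENOENT if no such breakpoint exists.
 */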
static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

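/**
 * insert_hw_watchpoint()
 * @addr: address of the watch point
 * @len: size of the access to watch
 * @type: GDB watchpoint type
 *
 * DBGWCR is programmed with E=1, PAC=0b11 (match at EL1 and EL0) and a
 * load/store control derived from the GDB watchpoint type. Watches of
 * up to 8 bytes use the BAS byte-select field to cover the exact bytes;
 * larger power-of-two ranges use the MASK field instead.
 */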
static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * whether or not EL3 is implemented.
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
        break;
    }
    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}

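/*
 * Returns true if @addr falls within the range covered by hardware
 * watchpoint @i, decoding either the BAS byte-select or the MASK form
 * of DBGWCR.
 */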
static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /*
         * BAS must be contiguous but can offset against the base
         * address in DBGWVR.
         */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas << (32 - 8));
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}

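/**
 * delete_hw_watchpoint()
 * @addr: address watched
 *
 * Delete the first watchpoint whose range covers @addr; the len and
 * type arguments are accepted for interface symmetry but are not used.
 */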
static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
        break;
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
        break;
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

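/* Clear all hardware break/watch point state */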
void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

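/*
 * Fill a kvm_guest_debug_arch structure with the current hardware
 * break/watch point registers so it can be handed to KVM when guest
 * debug is enabled.
 */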
void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

static inline void unset_feature(uint64_t *features, int feature)
{
    *features &= ~(1ULL << feature);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /*
     * Identify the feature bits corresponding to the host CPU and fill
     * out the ARMHostCPUClass fields accordingly. To do this we have
     * to create a scratch VM, create a single CPU inside it, and then
     * query that CPU for the relevant information.
     */
    int fdarray[3];
    uint64_t features = 0;

    /*
     * Old kernels may not know about the PREFERRED_TARGET ioctl;
     * they only support creating one kind of guest CPU, which is
     * their preferred CPU type, so offer a short list to try.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /*
     * We can assume any KVM-supporting CPU is at least a v8 with
     * VFPv4+Neon; this in turn implies most of the other feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);
    set_feature(&features, ARM_FEATURE_PMU);

    ahcc->features = features;

    return true;
}

#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        unset_feature(&env->features, ARM_FEATURE_PMU);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * KVM has its own idea about MPIDR assignment, so we override our
     * defaults with what we get from reading the register back.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /*
     * Return true if the regidx is a register we should synchronize
     * via the cpreg list (i.e. it is in the 'tuples' portion of the
     * list rather than being inline in the core reglist).
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/*
 * All system registers not listed in the following table are assumed
 * to be of the level KVM_PUT_RUNTIME_STATE. If a register should be
 * written less often, add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * If we are in AArch32 mode then we need to copy the AArch32 regs
     * to the AArch64 registers before pushing them out to KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /*
     * KVM holds SP_EL0 in regs.sp and SP_EL1 in sp_el1. On the QEMU
     * side we keep the current SP in xregs[31] as well, so write it
     * back into sp_el[] before pushing the stack pointers out.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Write PSTATE, or the CPSR if the guest is in AArch32 state */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * Saved Program State Registers
     *
     * Before we write out the banked_spsr[] array we need to ensure
     * that any modifications to env->spsr are correctly reflected in
     * the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM SPSRs 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /*
     * Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /*
     * KVM gives us SP_EL0 and SP_EL1 separately; restore the current
     * stack pointer into xregs[31], which is where QEMU keeps it.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * If we are in AArch32 mode then we need to sync the AArch64
     * register state down to the AArch32 registers.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /*
     * Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /*
     * Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        uint64_t fp_val[2];
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
            int rd = i << 1;
#ifdef HOST_WORDS_BIGENDIAN
            env->vfp.regs[rd + 1] = fp_val[0];
            env->vfp.regs[rd] = fp_val[1];
#else
            env->vfp.regs[rd + 1] = fp_val[1];
            env->vfp.regs[rd] = fp_val[0];
#endif
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }

    /*
     * The vCPU is stopped while we fetch its registers, so it is safe
     * to pour the list values straight back into the CPU state.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return ret;
}

/* The A64 BRK instruction with immediate 0, used as the software breakpoint */
static const uint32_t brk_insn = 0xd4200000;

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

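/*
 * To minimise translating between kernel and user-space the kernel ABI
 * just provides user-space with the full exception syndrome register
 * (ESR/HSR) value to be decoded in QEMU; see the v8 ARM ARM description
 * of ESR_ELx for the encoding.
 */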
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point, so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /*
     * If we are not handling the debug exception it must belong to the
     * guest, so re-use the existing TCG interrupt code to deliver it.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    cc->do_interrupt(cs);

    return false;
}