1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/bsearch.h>
24#include <linux/kvm_host.h>
25#include <linux/mm.h>
26#include <linux/uaccess.h>
27
28#include <asm/cacheflush.h>
29#include <asm/cputype.h>
30#include <asm/debug-monitors.h>
31#include <asm/esr.h>
32#include <asm/kvm_arm.h>
33#include <asm/kvm_asm.h>
34#include <asm/kvm_coproc.h>
35#include <asm/kvm_emulate.h>
36#include <asm/kvm_host.h>
37#include <asm/kvm_mmu.h>
38#include <asm/perf_event.h>
39
40#include <trace/events/kvm.h>
41
42#include "sys_regs.h"
43
44#include "trace.h"
45
46
47
48
49
50
51
52
53
54
55
56
57
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to identify a cache. */
#define CSSELR_MAX 12
62
63
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
79
80
81
82
83static bool access_dcsw(struct kvm_vcpu *vcpu,
84 struct sys_reg_params *p,
85 const struct sys_reg_desc *r)
86{
87 if (!p->is_write)
88 return read_from_write_only(vcpu, p);
89
90 kvm_set_way_flush(vcpu);
91 return true;
92}
93
94
95
96
97
98
99static bool access_vm_reg(struct kvm_vcpu *vcpu,
100 struct sys_reg_params *p,
101 const struct sys_reg_desc *r)
102{
103 bool was_enabled = vcpu_has_cache_enabled(vcpu);
104
105 BUG_ON(!p->is_write);
106
107 if (!p->is_aarch32) {
108 vcpu_sys_reg(vcpu, r->reg) = p->regval;
109 } else {
110 if (!p->is_32bit)
111 vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
112 vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
113 }
114
115 kvm_toggle_cache(vcpu, was_enabled);
116 return true;
117}
118
119
120
121
122
123
124
125static bool access_gic_sgi(struct kvm_vcpu *vcpu,
126 struct sys_reg_params *p,
127 const struct sys_reg_desc *r)
128{
129 if (!p->is_write)
130 return read_from_write_only(vcpu, p);
131
132 vgic_v3_dispatch_sgi(vcpu, p->regval);
133
134 return true;
135}
136
137static bool trap_raz_wi(struct kvm_vcpu *vcpu,
138 struct sys_reg_params *p,
139 const struct sys_reg_desc *r)
140{
141 if (p->is_write)
142 return ignore_write(vcpu, p);
143 else
144 return read_zero(vcpu, p);
145}
146
147static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
148 struct sys_reg_params *p,
149 const struct sys_reg_desc *r)
150{
151 if (p->is_write) {
152 return ignore_write(vcpu, p);
153 } else {
154 p->regval = (1 << 3);
155 return true;
156 }
157}
158
/*
 * DBGAUTHSTATUS_EL1 handler: the register is read-only, so writes are
 * ignored and reads are satisfied straight from the host's hardware
 * register.
 */
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		p->regval = val;
		return true;
	}
}
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200static bool trap_debug_regs(struct kvm_vcpu *vcpu,
201 struct sys_reg_params *p,
202 const struct sys_reg_desc *r)
203{
204 if (p->is_write) {
205 vcpu_sys_reg(vcpu, r->reg) = p->regval;
206 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
207 } else {
208 p->regval = vcpu_sys_reg(vcpu, r->reg);
209 }
210
211 trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
212
213 return true;
214}
215
216
217
218
219
220
221
222
223
224
225static void reg_to_dbg(struct kvm_vcpu *vcpu,
226 struct sys_reg_params *p,
227 u64 *dbg_reg)
228{
229 u64 val = p->regval;
230
231 if (p->is_32bit) {
232 val &= 0xffffffffUL;
233 val |= ((*dbg_reg >> 32) << 32);
234 }
235
236 *dbg_reg = val;
237 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
238}
239
240static void dbg_to_reg(struct kvm_vcpu *vcpu,
241 struct sys_reg_params *p,
242 u64 *dbg_reg)
243{
244 p->regval = *dbg_reg;
245 if (p->is_32bit)
246 p->regval &= 0xffffffffUL;
247}
248
249static bool trap_bvr(struct kvm_vcpu *vcpu,
250 struct sys_reg_params *p,
251 const struct sys_reg_desc *rd)
252{
253 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
254
255 if (p->is_write)
256 reg_to_dbg(vcpu, p, dbg_reg);
257 else
258 dbg_to_reg(vcpu, p, dbg_reg);
259
260 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
261
262 return true;
263}
264
265static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
266 const struct kvm_one_reg *reg, void __user *uaddr)
267{
268 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
269
270 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
271 return -EFAULT;
272 return 0;
273}
274
275static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
276 const struct kvm_one_reg *reg, void __user *uaddr)
277{
278 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
279
280 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
281 return -EFAULT;
282 return 0;
283}
284
285static void reset_bvr(struct kvm_vcpu *vcpu,
286 const struct sys_reg_desc *rd)
287{
288 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
289}
290
291static bool trap_bcr(struct kvm_vcpu *vcpu,
292 struct sys_reg_params *p,
293 const struct sys_reg_desc *rd)
294{
295 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
296
297 if (p->is_write)
298 reg_to_dbg(vcpu, p, dbg_reg);
299 else
300 dbg_to_reg(vcpu, p, dbg_reg);
301
302 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
303
304 return true;
305}
306
307static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
308 const struct kvm_one_reg *reg, void __user *uaddr)
309{
310 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
311
312 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
313 return -EFAULT;
314
315 return 0;
316}
317
318static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
319 const struct kvm_one_reg *reg, void __user *uaddr)
320{
321 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
322
323 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
324 return -EFAULT;
325 return 0;
326}
327
328static void reset_bcr(struct kvm_vcpu *vcpu,
329 const struct sys_reg_desc *rd)
330{
331 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
332}
333
334static bool trap_wvr(struct kvm_vcpu *vcpu,
335 struct sys_reg_params *p,
336 const struct sys_reg_desc *rd)
337{
338 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
339
340 if (p->is_write)
341 reg_to_dbg(vcpu, p, dbg_reg);
342 else
343 dbg_to_reg(vcpu, p, dbg_reg);
344
345 trace_trap_reg(__func__, rd->reg, p->is_write,
346 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
347
348 return true;
349}
350
351static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
352 const struct kvm_one_reg *reg, void __user *uaddr)
353{
354 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
355
356 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
357 return -EFAULT;
358 return 0;
359}
360
361static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
362 const struct kvm_one_reg *reg, void __user *uaddr)
363{
364 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
365
366 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
367 return -EFAULT;
368 return 0;
369}
370
371static void reset_wvr(struct kvm_vcpu *vcpu,
372 const struct sys_reg_desc *rd)
373{
374 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
375}
376
377static bool trap_wcr(struct kvm_vcpu *vcpu,
378 struct sys_reg_params *p,
379 const struct sys_reg_desc *rd)
380{
381 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
382
383 if (p->is_write)
384 reg_to_dbg(vcpu, p, dbg_reg);
385 else
386 dbg_to_reg(vcpu, p, dbg_reg);
387
388 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
389
390 return true;
391}
392
393static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
394 const struct kvm_one_reg *reg, void __user *uaddr)
395{
396 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
397
398 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
399 return -EFAULT;
400 return 0;
401}
402
403static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
404 const struct kvm_one_reg *reg, void __user *uaddr)
405{
406 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
407
408 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
409 return -EFAULT;
410 return 0;
411}
412
413static void reset_wcr(struct kvm_vcpu *vcpu,
414 const struct sys_reg_desc *rd)
415{
416 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
417}
418
/* Reset the guest's AMAIR_EL1 to the host's current hardware value. */
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}
426
/* Derive the guest's MPIDR_EL1 from its vcpu_id. */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR: Aff0 gets the low 4 bits, Aff1 the next 8, Aff2 the
	 * next 8. Limiting Aff0 to 16 CPUs presumably matches GICv3 SGI
	 * addressing constraints — confirm against the vGIC code before
	 * changing the split.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	/* Bit 31 of MPIDR_EL1 is always set (RES1 in the architecture). */
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
443
/* Reset the guest's PMCR_EL0, keeping the host's implementation fields. */
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
	/*
	 * Preserve the host's read-only fields (e.g. the counter count in
	 * PMCR.N, outside ARMV8_PMU_PMCR_MASK), fill the writable bits
	 * with an arbitrary UNKNOWN pattern (0xdecafbad), and clear
	 * PMCR.E so all counters start disabled.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
456
457static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
458{
459 u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
460
461 return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
462}
463
464static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
465{
466 u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
467
468 return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
469 || vcpu_mode_priv(vcpu));
470}
471
472static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
473{
474 u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
475
476 return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
477 || vcpu_mode_priv(vcpu));
478}
479
480static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
481{
482 u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
483
484 return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
485 || vcpu_mode_priv(vcpu));
486}
487
488static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
489 const struct sys_reg_desc *r)
490{
491 u64 val;
492
493 if (!kvm_arm_pmu_v3_ready(vcpu))
494 return trap_raz_wi(vcpu, p, r);
495
496 if (pmu_access_el0_disabled(vcpu))
497 return false;
498
499 if (p->is_write) {
500
501 val = vcpu_sys_reg(vcpu, PMCR_EL0);
502 val &= ~ARMV8_PMU_PMCR_MASK;
503 val |= p->regval & ARMV8_PMU_PMCR_MASK;
504 vcpu_sys_reg(vcpu, PMCR_EL0) = val;
505 kvm_pmu_handle_pmcr(vcpu, val);
506 } else {
507
508 val = vcpu_sys_reg(vcpu, PMCR_EL0)
509 & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
510 p->regval = val;
511 }
512
513 return true;
514}
515
516static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
517 const struct sys_reg_desc *r)
518{
519 if (!kvm_arm_pmu_v3_ready(vcpu))
520 return trap_raz_wi(vcpu, p, r);
521
522 if (pmu_access_event_counter_el0_disabled(vcpu))
523 return false;
524
525 if (p->is_write)
526 vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
527 else
528
529 p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
530 & ARMV8_PMU_COUNTER_MASK;
531
532 return true;
533}
534
/*
 * Trap handler for PMCEID0_EL0/PMCEID1_EL0: expose the host's common
 * event identification registers to the guest. Read-only by
 * construction (BUG on write).
 */
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	/* Op2 bit 0 selects PMCEID0 (clear) vs PMCEID1 (set). */
	if (!(p->Op2 & 1))
		asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
	else
		asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));

	p->regval = pmceid;

	return true;
}
557
558static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
559{
560 u64 pmcr, val;
561
562 pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
563 val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
564 if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
565 return false;
566
567 return true;
568}
569
/*
 * Trap handler for the PMU counter value registers: PMXEVCNTR_EL0
 * (indirect, via PMSELR), PMCCNTR_EL0 (cycle counter), and the
 * directly addressed PMEVCNTRn_EL0 bank. Decodes which counter the
 * encoding refers to, validates it, then reads/writes through the PMU
 * emulation.
 */
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0: counter selected by PMSELR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0: the cycle counter */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			/* No other Op2 is routed here by the tables. */
			BUG();
		}
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0: n encoded in CRm[1:0]:Op2[2:0] */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
620
/*
 * Trap handler for the PMU event-type registers: PMXEVTYPER_EL0
 * (indirect, via PMSELR), the PMEVTYPERn_EL0 bank, and PMCCFILTR_EL0.
 * Maps the encoding to a counter index and the backing shadow
 * register, then reads/writes it (masked to the valid EVTYPE bits).
 */
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0: counter selected by PMSELR_EL0 */
		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		/* No other encoding is routed here by the tables. */
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
659
660static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
661 const struct sys_reg_desc *r)
662{
663 u64 val, mask;
664
665 if (!kvm_arm_pmu_v3_ready(vcpu))
666 return trap_raz_wi(vcpu, p, r);
667
668 if (pmu_access_el0_disabled(vcpu))
669 return false;
670
671 mask = kvm_pmu_valid_counter_mask(vcpu);
672 if (p->is_write) {
673 val = p->regval & mask;
674 if (r->Op2 & 0x1) {
675
676 vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
677 kvm_pmu_enable_counter(vcpu, val);
678 } else {
679
680 vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
681 kvm_pmu_disable_counter(vcpu, val);
682 }
683 } else {
684 p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
685 }
686
687 return true;
688}
689
690static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
691 const struct sys_reg_desc *r)
692{
693 u64 mask = kvm_pmu_valid_counter_mask(vcpu);
694
695 if (!kvm_arm_pmu_v3_ready(vcpu))
696 return trap_raz_wi(vcpu, p, r);
697
698 if (!vcpu_mode_priv(vcpu))
699 return false;
700
701 if (p->is_write) {
702 u64 val = p->regval & mask;
703
704 if (r->Op2 & 0x1)
705
706 vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
707 else
708
709 vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
710 } else {
711 p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
712 }
713
714 return true;
715}
716
717static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
718 const struct sys_reg_desc *r)
719{
720 u64 mask = kvm_pmu_valid_counter_mask(vcpu);
721
722 if (!kvm_arm_pmu_v3_ready(vcpu))
723 return trap_raz_wi(vcpu, p, r);
724
725 if (pmu_access_el0_disabled(vcpu))
726 return false;
727
728 if (p->is_write) {
729 if (r->CRm & 0x2)
730
731 kvm_pmu_overflow_set(vcpu, p->regval & mask);
732 else
733
734 vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
735 } else {
736 p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
737 }
738
739 return true;
740}
741
742static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
743 const struct sys_reg_desc *r)
744{
745 u64 mask;
746
747 if (!kvm_arm_pmu_v3_ready(vcpu))
748 return trap_raz_wi(vcpu, p, r);
749
750 if (pmu_write_swinc_el0_disabled(vcpu))
751 return false;
752
753 if (p->is_write) {
754 mask = kvm_pmu_valid_counter_mask(vcpu);
755 kvm_pmu_software_increment(vcpu, p->regval & mask);
756 return true;
757 }
758
759 return false;
760}
761
762static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
763 const struct sys_reg_desc *r)
764{
765 if (!kvm_arm_pmu_v3_ready(vcpu))
766 return trap_raz_wi(vcpu, p, r);
767
768 if (p->is_write) {
769 if (!vcpu_mode_priv(vcpu))
770 return false;
771
772 vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
773 & ARMV8_PMU_USERENR_MASK;
774 } else {
775 p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
776 & ARMV8_PMU_USERENR_MASK;
777 }
778
779 return true;
780}
781
782
/* Expands to the four per-index AArch64 debug register descriptors. */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
796
797
/* Macro to expand the PMEVCNTRn_EL0 register (n in CRm[1:0]:Op2[2:0]). */
#define PMU_PMEVCNTR_EL0(n)						\
	/* PMEVCNTRn_EL0 */						\
	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register (same index encoding). */
#define PMU_PMEVTYPER_EL0(n)						\
	/* PMEVTYPERn_EL0 */						\
	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
/*
 * Architected system registers trapped from the guest.
 * NOTE(review): entries appear sorted by encoding (Op0, Op1, CRn, CRm,
 * Op2); the lookup code likely depends on this ordering — confirm
 * before inserting new entries out of sequence.
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* MDCCSR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  access_pminten, reset_unknown, PMINTENSET_EL1 },
	/* PMINTENCLR_EL1 — shares the PMINTENSET_EL1 shadow state */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  access_pminten, NULL, PMINTENSET_EL1 },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_SGI1R_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
	  access_gic_sgi },
	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  access_pmcr, reset_pmcr, },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	/* PMCNTENCLR_EL0 — shares the PMCNTENSET_EL0 shadow state */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  access_pmcnten, NULL, PMCNTENSET_EL0 },
	/* PMOVSCLR_EL0 — shares the PMOVSSET_EL0 shadow state */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  access_pmovs, NULL, PMOVSSET_EL0 },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  access_pmswinc, reset_unknown, PMSWINC_EL0 },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  access_pmselr, reset_unknown, PMSELR_EL0 },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  access_pmceid },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  access_pmceid },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  access_pmu_evtyper },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  access_pmu_evcntr },

	/*
	 * PMUSERENR_EL0 resets to 0, which also disables EL0 access to
	 * the PMU registers until EL1 re-enables it.
	 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  access_pmovs, reset_unknown, PMOVSSET_EL0 },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),

	/* PMCCFILTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
	  access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};
1109
/*
 * AArch32 DBGIDR handler: synthesize the 32-bit debug ID register from
 * the host's 64-bit ID registers. Writes are ignored.
 */
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		/*
		 * Pack WRPs[31:28], BRPs[27:24], CTX_CMPs[23:20] from
		 * ID_AA64DFR0; a fixed debug architecture version (6) in
		 * [19:16]; bits 14 and 12 reflect EL3 presence (per the
		 * DBGIDR layout in the ARM ARM — verify field names there).
		 */
		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
1128
1129static bool trap_debug32(struct kvm_vcpu *vcpu,
1130 struct sys_reg_params *p,
1131 const struct sys_reg_desc *r)
1132{
1133 if (p->is_write) {
1134 vcpu_cp14(vcpu, r->reg) = p->regval;
1135 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
1136 } else {
1137 p->regval = vcpu_cp14(vcpu, r->reg);
1138 }
1139
1140 return true;
1141}
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
/*
 * AArch32 DBGBXVRn handler: DBGBXVRn maps to the top 32 bits of the
 * 64-bit DBGBVRn_EL1 shadow register (the AArch32 DBGBVRn covers the
 * bottom 32 bits). Writes merge into the high word and mark the debug
 * state dirty; reads return the high word.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		/* Keep the low word, replace the high word. */
		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
1176
/* Expands to the four per-index AArch32 (cp14) debug register descriptors. */
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

/* DBGBXVRn: high word of DBGBVRn_EL1, see trap_xvr(). */
#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1189
1190
1191
1192
1193
1194
/*
 * Trapped cp14 (AArch32 debug) registers. Register-name comments are
 * per the cp14 encodings in the ARM ARM.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
1278
1279
/* Trapped cp14 64-bit (MCRR/MRRC) debug registers. */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
1287
1288
/* Macro to expand the AArch32 PMEVCNTRn register (n in CRm[1:0]:Op2[2:0]). */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the AArch32 PMEVTYPERn register (same index encoding). */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
1301
1302
1303
1304
1305
1306
/*
 * Trapped cp15 32-bit register accesses. Entries must be sorted by
 * Op1/CRn/CRm/Op2 (see check_sysreg_table()). The ->reg field (third
 * trailing argument) names the c*_XXX shadow slot shared with the
 * AArch64 view of the register.
 */
static const struct sys_reg_desc cp15_regs[] = {
	/* GIC SGI generation */
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations (data cache maintenance by set/way):
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* RAZ/WI (likely ICC_SRE - verify against the GICv3 spec) */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* PMU event counters/types, CRn = 14 */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),

	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),

	/* CRn=14 CRm=15 Op2=7: cycle counter filter, routed to evtyper */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};
1423
/*
 * Trapped cp15 64-bit (MCRR/MRRC) accesses. TTBR0/TTBR1 therefore get
 * a double encoding: here as 64-bit, and in cp15_regs[] as 32-bit.
 */
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
1430
1431
1432static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1433
1434void kvm_register_target_sys_reg_table(unsigned int target,
1435 struct kvm_sys_reg_target_table *table)
1436{
1437 target_tables[target] = table;
1438}
1439
1440
1441static const struct sys_reg_desc *get_target_table(unsigned target,
1442 bool mode_is_64,
1443 size_t *num)
1444{
1445 struct kvm_sys_reg_target_table *table;
1446
1447 table = target_tables[target];
1448 if (mode_is_64) {
1449 *num = table->table64.num;
1450 return table->table64.table;
1451 } else {
1452 *num = table->table32.num;
1453 return table->table32.table;
1454 }
1455}
1456
/*
 * Pack the Op0/Op1/CRn/CRm/Op2 fields of either a sys_reg_params or a
 * sys_reg_desc into one ordinal, for ordering and bsearch(). A macro
 * (not a function) so it works on both structure types.
 */
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	})
1467
/*
 * bsearch() comparison callback: @key is the packed encoding of the
 * access being looked up (smuggled through the pointer), @elt the
 * candidate table entry. The subtraction cannot misbehave when
 * truncated to int: packed values fit in 17 bits (Op0 << 14).
 */
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}
1475
1476static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
1477 const struct sys_reg_desc table[],
1478 unsigned int num)
1479{
1480 unsigned long pval = reg_to_match_value(params);
1481
1482 return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
1483}
1484
/*
 * Guest LDC/STC accesses to CP14 are not supported: inject an
 * undefined-instruction exception. Returns 1 (handled, resume guest).
 */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501static int emulate_cp(struct kvm_vcpu *vcpu,
1502 struct sys_reg_params *params,
1503 const struct sys_reg_desc *table,
1504 size_t num)
1505{
1506 const struct sys_reg_desc *r;
1507
1508 if (!table)
1509 return -1;
1510
1511 r = find_reg(params, table, num);
1512
1513 if (r) {
1514
1515
1516
1517
1518
1519
1520 BUG_ON(!r->access);
1521
1522 if (likely(r->access(vcpu, params, r))) {
1523
1524 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1525
1526 return 0;
1527 }
1528 }
1529
1530
1531 return -1;
1532}
1533
/*
 * Report a coprocessor access we have no emulation for, then inject an
 * undefined-instruction exception into the guest.
 */
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp;

	switch(hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		/*
		 * Deliberate assignment inside WARN_ON(): flag the
		 * unexpected exception class while still initialising
		 * cp for the message below.
		 */
		WARN_ON((cp = -1));
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
1558
1559
1560
1561
1562
1563
/*
 * Handle a trapped 64-bit (MCRR/MRRC-style) coprocessor access: decode
 * the HSR, try the target-specific table first, then the global one,
 * and UNDEF if neither matches. Always returns 1 (resume guest).
 *
 * HSR ISS fields as extracted below: bit 0 = direction (0 == write),
 * CRm = [4:1], Rt = [9:5], Rt2 = [14:10], Op1 = [19:16].
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (hsr >> 5) & 0xf;
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;	/* two-GPR (64-bit) transfer */
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;		/* 64-bit accesses encode only Op1/CRm */

	/*
	 * For a write, assemble the 64-bit value from Rt (low half) and
	 * Rt2 (high half) before dispatching to the shared backends.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/* Target-specific entries take precedence over global ones. */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* On the read side, split the result back into Rt/Rt2. */
	if (!params.is_write) {
		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
	}

	return 1;
}
1610
1611
1612
1613
1614
1615
/*
 * Handle a trapped 32-bit (MCR/MRC-style) coprocessor access: decode
 * the HSR, try the target-specific table then the global one, and
 * UNDEF if neither matches. Always returns 1 (resume guest).
 *
 * HSR ISS fields as extracted below: bit 0 = direction (0 == write),
 * CRm = [4:1], Rt = [9:5], CRn = [13:10], Op1 = [16:14], Op2 = [19:17].
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt  = (hsr >> 5) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	/* Target-specific entries take precedence over global ones. */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Propagate the read result back to Rt. */
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
1646
1647int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1648{
1649 const struct sys_reg_desc *target_specific;
1650 size_t num;
1651
1652 target_specific = get_target_table(vcpu->arch.target, false, &num);
1653 return kvm_handle_cp_64(vcpu,
1654 cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
1655 target_specific, num);
1656}
1657
1658int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1659{
1660 const struct sys_reg_desc *target_specific;
1661 size_t num;
1662
1663 target_specific = get_target_table(vcpu->arch.target, false, &num);
1664 return kvm_handle_cp_32(vcpu,
1665 cp15_regs, ARRAY_SIZE(cp15_regs),
1666 target_specific, num);
1667}
1668
/* Exit handler for 64-bit CP14 traps (no target-specific table). */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}
1675
/* Exit handler for 32-bit CP14 traps (no target-specific table). */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}
1682
/*
 * Emulate a trapped AArch64 system register access: look the register
 * up in the target-specific table first, then the generic
 * sys_reg_descs table; run its handler and skip the instruction on
 * success, otherwise inject UNDEF. Always returns 1 (resume guest).
 */
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Target-specific entries take precedence over generic ones. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/* A table entry without an access handler is a bug. */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Emulated: skip the trapped instruction. */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* Handler declined: fall through to UNDEF injection. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}
1719
1720static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
1721 const struct sys_reg_desc *table, size_t num)
1722{
1723 unsigned long i;
1724
1725 for (i = 0; i < num; i++)
1726 if (table[i].reset)
1727 table[i].reset(vcpu, &table[i]);
1728}
1729
1730
1731
1732
1733
1734
/*
 * Exit handler for AArch64 system register traps: decode the ESR into
 * sys_reg_params and hand off to emulate_sys_reg().
 *
 * ESR ISS fields as extracted below: bit 0 = direction (0 == write),
 * CRm = [4:1], Rt = [9:5], CRn = [13:10], Op1 = [16:14], Op2 = [19:17],
 * Op0 = [21:20].
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (esr >> 5) & 0x1f;
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	/* Propagate the read result back to Rt. */
	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
1760
1761
1762
1763
1764
/*
 * Decode a userspace KVM_GET/SET_ONE_REG id into sys_reg_params.
 * Only 64-bit sysreg ids are accepted; any bit outside the known
 * arch/size/coproc/Op0/Op1/CRn/CRm/Op2 masks makes the id invalid.
 * Returns true on success.
 */
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unknown bit set -> reject. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
1793
1794
/*
 * Translate a userspace register id into its table descriptor.
 * Returns NULL when the id is not a valid sysreg id, has no table
 * entry, or the entry has no ->reg (no vCPU shadow storage) - callers
 * then fall back to the invariant-register path.
 */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	/* Target-specific entries take precedence over generic ones. */
	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}
1820
1821
1822
1823
1824
1825
1826
1827
1828
/*
 * Generate a get_<reg>() helper that reads the named register from the
 * host with MRS and stores the value in the descriptor's ->val field.
 * Note it casts away const on @r to do so (the invariant_sys_regs
 * table below is deliberately non-const).
 */
#define FUNCTION_INVARIANT(reg) \
	static void get_##reg(struct kvm_vcpu *v, \
			      const struct sys_reg_desc *r) \
	{ \
		u64 val; \
 \
		asm volatile("mrs %0, " __stringify(reg) "\n" \
			     : "=r" (val)); \
		((struct sys_reg_desc *)r)->val = val; \
	}

/* Readers for every register exposed as host-invariant. */
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
1859
1860
/*
 * Registers exposed to userspace as invariant: their ->val is filled
 * in from the host by the get_* reset hooks (positionally the second
 * trailing field; ->access is NULL) at kvm_sys_reg_table_init() time,
 * and set_invariant_sys_reg() rejects any differing value. Must stay
 * sorted by encoding.
 */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};
1901
1902static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
1903{
1904 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
1905 return -EFAULT;
1906 return 0;
1907}
1908
1909static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
1910{
1911 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
1912 return -EFAULT;
1913 return 0;
1914}
1915
/*
 * GET_ONE_REG path for invariant registers: copy the cached host
 * value out to userspace. -ENOENT if the id doesn't name one.
 */
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
1930
/*
 * SET_ONE_REG path for invariant registers: "invariant" means
 * userspace may only write back the exact host value. -EINVAL on any
 * mismatch, -ENOENT if the id doesn't name an invariant register.
 */
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
1954
/*
 * Is @val a CSSELR value naming a cache that actually exists on this
 * host, according to the cached CLIDR (cache_levels)?
 */
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction-or-Data bit, next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache at this level */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
1980
/*
 * GET_ONE_REG path for the "demux" id space (currently CCSIDR only):
 * read the selected cache's CCSIDR from the host and copy it out as a
 * 32-bit value. -ENOENT for malformed ids or invalid cache selectors.
 */
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if any unknown bit is set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
2005
/*
 * SET_ONE_REG path for the "demux" id space: CCSIDR values are
 * read-only in practice - only the exact host value is accepted
 * (-EINVAL otherwise). -ENOENT for malformed ids or invalid selectors.
 */
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if any unknown bit is set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
2036
/*
 * KVM_GET_ONE_REG ioctl backend: route demux ids to demux_c15_get(),
 * unknown sysreg ids to the invariant path, per-descriptor ->get_user
 * hooks when present, and otherwise copy the vCPU's shadow value out.
 */
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	/* All non-demux registers are 64-bit. */
	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
2057
/*
 * KVM_SET_ONE_REG ioctl backend: mirror of kvm_arm_sys_reg_get_reg(),
 * using ->set_user hooks or writing straight into the shadow value.
 */
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	/* All non-demux registers are 64-bit. */
	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
2078
2079static unsigned int num_demux_regs(void)
2080{
2081 unsigned int i, count = 0;
2082
2083 for (i = 0; i < CSSELR_MAX; i++)
2084 if (is_valid_cache(i))
2085 count++;
2086
2087 return count;
2088}
2089
/*
 * Copy the register ids of every valid demux (CCSIDR) register out to
 * userspace, one u64 id per valid CSSELR value. -EFAULT on copy error.
 */
static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
2105
/* Build the userspace KVM_REG_* id for a sys_reg descriptor. */
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
2116
/*
 * Emit @reg's id to *uind and advance it. A NULL *uind means "count
 * only" (see walk_sys_regs()) and trivially succeeds. Returns false
 * only on a userspace copy fault.
 */
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
2128
2129
/*
 * Walk the target-specific (i1) and generic (i2) tables in a single
 * sorted merge, emitting the id of every register with shadow storage
 * (->reg != 0) to @uind, or just counting when @uind is NULL.
 * On an equal encoding (cmp == 0) the target-specific entry wins and
 * both iterators advance, so each register is emitted exactly once.
 * Returns the number of registers emitted, or -EFAULT.
 */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);

		if (cmp <= 0) {
			/* target-specific overrides generic entry. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* generic entry comes first in encoding order. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		/* Exhausted iterators become NULL (cmp_sys_reg handles it). */
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
2171
/*
 * Total number of register ids KVM_GET_REG_LIST will report:
 * invariant + demux + table-backed (walk with NULL just counts).
 */
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
2178
/*
 * KVM_GET_REG_LIST backend: copy out the ids of the invariant
 * registers, then the table-backed ones, then the demux registers.
 * Returns 0 or a negative error from one of the copy stages.
 */
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
2198
2199static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2200{
2201 unsigned int i;
2202
2203 for (i = 1; i < n; i++) {
2204 if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2205 kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2206 return 1;
2207 }
2208 }
2209
2210 return 0;
2211}
2212
/*
 * One-time init: sanity-check all register tables, snapshot the host's
 * invariant registers, and derive cache_levels from CLIDR_EL1.
 */
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to read the host register values. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * Cache CLIDR_EL1 in cache_levels, truncated at the first level
	 * whose 3-bit Ctype field is 0 - once a level reports no cache,
	 * all further-out levels must be ignored (ARM ARM CLIDR rules).
	 */
	get_clidr_el1(NULL, &clidr); /* borrow a descriptor to hold the value */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
2248
2249
2250
2251
2252
2253
2254
2255
/*
 * Reset all of a vCPU's system registers via the tables' reset hooks.
 * The shadow file is first poisoned with 0x42 bytes so any register
 * that slipped in without a reset entry is caught by the final scan
 * (index 0 is deliberately skipped) - we panic rather than run a guest
 * with an uninitialised register.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Poison so a missing reset entry is detectable below. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic reset first, so the target table can override. */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}
2274