// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

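/*
 * Accessors for the guest's EL1 sysreg state. While the vcpu is loaded
 * on a physical CPU (sysregs_loaded_on_cpu), a register may live in the
 * hardware rather than in the shadow copy, so the CPU view is tried
 * first; otherwise the in-memory copy backs the access. The 0x8badf00d
 * pattern below is a poison value that should never reach the guest.
 */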
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to identify a cache when reading CCSIDR */
#define CSSELR_MAX 14

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
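
/*
 * CSSELR_EL1 encodes { Level[3:1], InD[0] }: for example, csselr == 0
 * selects the L1 data/unified cache, csselr == 1 the L1 instruction
 * cache, csselr == 2 the L2 data/unified cache, and so on, up to
 * CSSELR_MAX.
 */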

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations are only supposed
	 * to be used for maintenance purposes).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}
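
/*
 * Example: an AArch32 register tagged AA32_HI (such as DBGBXVRn, or
 * TTBCR2 mapping onto the top half of TCR_EL1) accesses bits [63:32]
 * of the 64bit register, so the mask/shift pair returned above
 * extracts or deposits the 32bit value at bit 32.
 */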

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep DIR in mind */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep DIR in mind */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follow:
 *
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leave top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

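/*
 * Example: an AArch32 write to DBGBVR2 (tagged AA32_LO) deposits the
 * 32bit value into bits [31:0] of dbg_bvr[2] while preserving bits
 * [63:32], which remain reachable via DBGBXVR2 (tagged AA32_HI).
 */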
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;
}
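
/*
 * Example: with PMCR_EL0.N == 6 event counters, the reset mask is
 * GENMASK(5, 0) | BIT(31), i.e. the six event counter bits plus the
 * cycle counter bit (ARMV8_PMU_CYCLE_IDX == 31).
 */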

static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!system_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
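
/*
 * PMUSERENR_EL0 gates EL0 access to the PMU: EN allows full access,
 * while SW, CR and ER open up PMSWINC, the cycle counter and the
 * event counters respectively. A privileged (EL1) access always
 * succeeds; anything else makes the helpers above inject an UNDEF.
 */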

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!system_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding failure */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
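
/*
 * Example: PMEVCNTR19_EL0 encodes as CRn == 14, CRm == 0b1010,
 * Op2 == 3, so the decode above yields ((0b10 & 3) << 3) | 3 == 19.
 */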

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(r)							\
	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr,					\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	default:
		BUG();
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r, bool raz)
{
	u32 id = reg_to_encoding(r);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
		break;
	case SYS_ID_AA64PFR1_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
		if (kvm_has_mte(vcpu->kvm)) {
			u64 pfr, mte;

			pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
			mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), mte);
		}
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_AA64DFR0_PMUVER_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
		/* Hide SPE from guests */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
		break;
	case SYS_ID_DFR0_EL1:
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_DFR0_PERFMON_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
		break;
	}

	return val;
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool raz = sysreg_visible_as_raz(vcpu, r);

	return __access_id_reg(vcpu, p, r, raz);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	u8 csv2, csv3;
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/*
	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
	 * it doesn't promise more than what is actually provided (the
	 * guest could otherwise be covered in ectoplasmic residue).
	 */
	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
	if (csv2 > 1 ||
	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* Same thing for CSV3 */
	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
	if (csv3 > 1 ||
	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* We can only differ with CSV[23], and anything else is an error */
	val ^= read_id_reg(vcpu, rd, false);
	val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
		 (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
	if (val)
		return -EINVAL;

	vcpu->kvm->arch.pfr0_csv2 = csv2;
	vcpu->kvm->arch.pfr0_csv3 = csv3;

	return 0;
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(vcpu, rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	bool raz = sysreg_visible_as_raz(vcpu, rd);

	return __get_id_reg(vcpu, rd, uaddr, raz);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	bool raz = sysreg_visible_as_raz(vcpu, rd);

	return __set_id_reg(vcpu, rd, uaddr, raz);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, true);
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	int err;
	u64 val;

	/* Perform the access even if we are going to ignore the value */
	err = reg_from_user(&val, uaddr, sys_reg_to_index(rd));
	if (err)
		return err;

	return 0;
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sysreg(clidr_el1);
	return true;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	p->regval = get_ccsidr(csselr);

	/*
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time.
	 * To prevent this trapping from causing performance problems, let's
	 * expose the geometry of all data and unified caches (which are
	 * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
	 */
	if (!(csselr & 1))
		p->regval &= ~GENMASK(27, 3);
	return true;
}

static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * These are exposed to userspace as RAZ/WI.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	/* DBGDTR[TR]X_EL0 share the same encoding */
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },

	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
	  .get_user = get_raz_id_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),

	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};

static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
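
/*
 * The synthesized DBGDIDR value packs the WRPs, BRPs and CTX_CMPs
 * fields from ID_AA64DFR0_EL1 into the legacy 32bit layout, and
 * advertises debug architecture version 6 (ARMv8) in bits [19:16].
 * The EL3-dependent bits are only set when the host implements EL3.
 */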

/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really matter to a
 * guest. Revisit this one day, would this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
	{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
	{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
	/* PMMIR */
	{ Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch Timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),

	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			      bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %p entry %d lacks reset\n",
				table, i);
			return 1;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}
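
/*
 * The ordering check above matters because find_reg() looks registers
 * up with bsearch(): an out-of-order table would make some encodings
 * silently unreachable.
 */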

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}

/*
 * emulate_cp --  tries to match a sys_reg access in a handling table,
 *                and call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: pointer to the trap table to search
 * @nr_global: size of the trap table
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u32 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get the same value.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
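/*
 * For reference, the ISS layout the 64-bit decode above relies on
 * (MCRR/MRRC, per the ARM ARM ESR_ELx description): Opc1 in bits
 * [19:16], Rt2 in [14:10], Rt in [9:5], CRm in [4:1], and bit [0]
 * clear for a write.
 */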

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: pointer to the trap table to search
 * @nr_global: size of the trap table
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u32 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.CRm = (esr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((esr & 1) == 0);
	params.CRn = (esr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (esr >> 14) & 0x7;
	params.Op2 = (esr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
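/*
 * The corresponding 32-bit (MCR/MRC) ISS layout: Opc2 in bits [19:17],
 * Opc1 in [16:14], CRn in [13:10], Rt in [9:5], CRm in [4:1],
 * Direction in bit [0].
 */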

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
}

static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	/* Op0 == 3, CRn == 11 or 15: implementation defined encoding space */
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
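/*
 * E.g. an access to S3_0_C15_C0_0 matches the predicate above, so the
 * guest takes an UNDEF instead of triggering the "unsupported sysreg"
 * message in emulate_sys_reg() below.
 */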

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function walks the table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
		if (sys_reg_descs[i].reset)
			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
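/*
 * The KVM_GET/SET_ONE_REG index decoded above packs the Op0/Op1/CRn/
 * CRm/Op2 tuple into the low bits of a KVM_REG_ARM64 | KVM_REG_SIZE_U64
 * | KVM_REG_ARM64_SYSREG id; sys_reg_to_index() further down builds
 * exactly the inverse encoding.
 */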

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}
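/*
 * CTR_EL0 can differ between CPUs, so unlike the other invariant
 * registers it is read from the system-wide sanitised feature value
 * rather than from whichever CPU this happens to run on.
 */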

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
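/*
 * Worked example: CSSELR value 3 (0b011) selects level 2 (bits [3:1]
 * == 1) on the instruction side (bit [0] set), and is therefore only
 * valid when Ctype2 reads as 0b001 or 0b011.
 */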

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}
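/*
 * Lookup order for userspace register accesses: demux (cache geometry)
 * ids first, then the main sys_reg table, falling back to the
 * invariant registers; hidden registers are not exposed at all.
 */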

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	/* A NULL uindices pointer turns the walk into a pure count. */
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Invariant registers' indices go first. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr);
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
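/*
 * Worked example for the truncation above: with Ctype1 == 0b011,
 * Ctype2 == 0b100 and Ctype3 == 0b000, the loop stops at i == 2, so
 * cache_levels keeps bits [5:0] and every further-out level reads as
 * "no cache" for is_valid_cache().
 */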