1
2
3
4
5
6
7
8
9
10
11
12#include <linux/bsearch.h>
13#include <linux/kvm_host.h>
14#include <linux/mm.h>
15#include <linux/printk.h>
16#include <linux/uaccess.h>
17
18#include <asm/cacheflush.h>
19#include <asm/cputype.h>
20#include <asm/debug-monitors.h>
21#include <asm/esr.h>
22#include <asm/kvm_arm.h>
23#include <asm/kvm_coproc.h>
24#include <asm/kvm_emulate.h>
25#include <asm/kvm_host.h>
26#include <asm/kvm_hyp.h>
27#include <asm/kvm_mmu.h>
28#include <asm/perf_event.h>
29#include <asm/sysreg.h>
30
31#include <trace/events/kvm.h>
32
33#include "sys_regs.h"
34
35#include "trace.h"
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * Handler for a guest read of a write-only register: warn once on the
 * host, dump the trapped instruction, and inject an UNDEF into the
 * guest.  Returns false so the access is not completed.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
57
/*
 * Handler for a guest write to a read-only register: warn once on the
 * host, dump the trapped instruction, and inject an UNDEF into the
 * guest.  Returns false so the access is not completed.
 */
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
67
/*
 * Read the current value of a guest system register.
 *
 * While the vcpu's sysregs are loaded on the physical CPU
 * (sysregs_loaded_on_cpu), the in-memory shadow copy is stale, so the
 * registers below must be read from hardware.  EL1 registers go
 * through their *_EL12 encodings (NOTE(review): this implies the host
 * is running with the EL1 aliases available, i.e. VHE — confirm);
 * EL0/EL2-mapped ones are read directly.  Anything else, or any read
 * while the sysregs are not loaded, is served from the shadow copy.
 */
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/* Registers that are live on the CPU while the guest runs. */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(SYS_SCTLR_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(SYS_CPACR_EL12);
	case TTBR0_EL1:		return read_sysreg_s(SYS_TTBR0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(SYS_TTBR1_EL12);
	case TCR_EL1:		return read_sysreg_s(SYS_TCR_EL12);
	case ESR_EL1:		return read_sysreg_s(SYS_ESR_EL12);
	case AFSR0_EL1:		return read_sysreg_s(SYS_AFSR0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(SYS_AFSR1_EL12);
	case FAR_EL1:		return read_sysreg_s(SYS_FAR_EL12);
	case MAIR_EL1:		return read_sysreg_s(SYS_MAIR_EL12);
	case VBAR_EL1:		return read_sysreg_s(SYS_VBAR_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(SYS_CONTEXTIDR_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(SYS_AMAIR_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(SYS_CNTKCTL_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}
111
/*
 * Write a guest system register.
 *
 * Mirror image of vcpu_read_sys_reg(): while the sysregs are loaded on
 * the physical CPU the value must go straight to hardware (EL1
 * registers via their *_EL12 encodings), otherwise it lands in the
 * in-memory shadow copy.  Registers written to hardware are NOT also
 * written to the shadow copy; they are synced back when the sysregs
 * are unloaded (NOTE(review): sync path not visible in this chunk).
 */
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/* Registers that are live on the CPU while the guest runs. */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	return;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	return;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	return;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	return;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
154
155
/* Host cache hierarchy description; populated elsewhere (not in this chunk). */
static u32 cache_levels;

/* Largest CSSELR value accepted when selecting a cache for CCSIDR reads. */
#define CSSELR_MAX 12
160
161
/*
 * Read the host's CCSIDR_EL1 for the cache selected by @csselr.
 * CSSELR/CCSIDR are a select/read pair, so the sequence must not be
 * interleaved with another user: run it with interrupts disabled and
 * an ISB between the select write and the read.
 */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR_EL1 behind our back */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
175
176
177
178
/*
 * Trap handler for the set/way data cache maintenance instructions
 * (DC ISW/CSW/CISW).  These are execute(write)-only; a read injects
 * an UNDEF.  Without FWB the maintenance is deferred to
 * kvm_set_way_flush(); with stage-2 FWB the trap is a no-op.
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB; it still tells us
	 * that the guest is playing with caches.
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
198
199
200
201
202
203
/*
 * Generic accessor for VM control registers (SCTLR_EL1, TCR_EL1...).
 * Only writes trap here (enforced by the BUG_ON).  For an AArch32
 * guest, two 32-bit registers are banked in one 64-bit register: an
 * odd reg index writes the upper half, an even one the lower half,
 * with the other half preserved.  The cache-enable state is sampled
 * before the write so kvm_toggle_cache() can react to a change.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* AArch32 pairs share one 64-bit backing register. */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		/* 32-bit write: merge into the correct half. */
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}
	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
233
234
235
236
237
238
239
/*
 * Trap handler for the ICC SGI generation registers: forward the SGI
 * to the vGIC.  Write-only; reads inject an UNDEF.
 *
 * The targeted interrupt group is derived from the encoding:
 *  - AArch32: Op1 == 0 -> group 1; Op1 == 1 or 2 -> group 0
 *  - AArch64: Op2 == 5 -> group 1; Op2 == 6 or 7 -> group 0
 * (NOTE(review): presumably SGI1R vs ASGI1R/SGI0R encodings — confirm
 * against the GICv3 spec.)  Unrecognised encodings default to group 1.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* unknown: treat as group 1 */
		case 0:
			g1 = true;
			break;
		case 1:
		case 2:
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* unknown: treat as group 1 */
		case 5:
			g1 = true;
			break;
		case 6:
		case 7:
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
284
/*
 * Trap handler for ICC_SRE_EL1: writes are ignored (the guest cannot
 * change the system-register-enable state), reads return the value
 * cached in the vcpu's vGICv3 state.
 */
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
295
296static bool trap_raz_wi(struct kvm_vcpu *vcpu,
297 struct sys_reg_params *p,
298 const struct sys_reg_desc *r)
299{
300 if (p->is_write)
301 return ignore_write(vcpu, p);
302 else
303 return read_zero(vcpu, p);
304}
305
306
307
308
309
310
311
/*
 * Trap handler for the LORegion registers.  If the sanitised
 * ID_AA64MMFR1_EL1 reports no LORegions support on the host, the
 * access injects an UNDEF.  Otherwise the registers are RAZ/WI,
 * except that LORID_EL1 is read-only (a write gets the
 * write_to_read_only treatment).
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
330
331static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
332 struct sys_reg_params *p,
333 const struct sys_reg_desc *r)
334{
335 if (p->is_write) {
336 return ignore_write(vcpu, p);
337 } else {
338 p->regval = (1 << 3);
339 return true;
340 }
341}
342
343static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
344 struct sys_reg_params *p,
345 const struct sys_reg_desc *r)
346{
347 if (p->is_write) {
348 return ignore_write(vcpu, p);
349 } else {
350 p->regval = read_sysreg(dbgauthstatus_el1);
351 return true;
352 }
353}
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
/*
 * Generic trap handler for debug registers backed by the vcpu sysreg
 * file (MDSCR_EL1, MDCCINT_EL1...).  A write also sets
 * KVM_ARM64_DEBUG_DIRTY so the updated debug state is propagated to
 * hardware (NOTE(review): flag is consumed outside this chunk).
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
397
398
399
400
401
402
403
404
405
406
/*
 * Store a trapped guest write into a breakpoint/watchpoint shadow
 * register.  A 32-bit access only replaces the low word and preserves
 * the upper 32 bits.  KVM_ARM64_DEBUG_DIRTY is set so the new state
 * reaches the hardware before the guest next runs (NOTE(review):
 * flag consumed outside this chunk).
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}
421
422static void dbg_to_reg(struct kvm_vcpu *vcpu,
423 struct sys_reg_params *p,
424 u64 *dbg_reg)
425{
426 p->regval = *dbg_reg;
427 if (p->is_32bit)
428 p->regval &= 0xffffffffUL;
429}
430
/* Guest trap handler for DBGBVRn_EL1, indexed by rd->reg. */
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

/* Userspace (KVM_SET_ONE_REG) write of a breakpoint value register. */
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* Userspace (KVM_GET_ONE_REG) read of a breakpoint value register. */
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* Reset a breakpoint value register to the descriptor's default. */
static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}
472
/* Guest trap handler for DBGBCRn_EL1, indexed by rd->reg. */
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

/* Userspace (KVM_SET_ONE_REG) write of a breakpoint control register. */
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

/* Userspace (KVM_GET_ONE_REG) read of a breakpoint control register. */
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* Reset a breakpoint control register to the descriptor's default. */
static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}
515
/* Guest trap handler for DBGWVRn_EL1, indexed by rd->reg. */
static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	/* Traces the same slot dbg_reg points at. */
	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

/* Userspace (KVM_SET_ONE_REG) write of a watchpoint value register. */
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* Userspace (KVM_GET_ONE_REG) read of a watchpoint value register. */
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* Reset a watchpoint value register to the descriptor's default. */
static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}
558
/* Guest trap handler for DBGWCRn_EL1, indexed by rd->reg. */
static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

/* Userspace (KVM_SET_ONE_REG) write of a watchpoint control register. */
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* Userspace (KVM_GET_ONE_REG) read of a watchpoint control register. */
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* Reset a watchpoint control register to the descriptor's default. */
static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}
600
601static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
602{
603 u64 amair = read_sysreg(amair_el1);
604 vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
605}
606
/*
 * Build a virtual MPIDR_EL1 from the vcpu id: bits [3:0] of the id go
 * to Aff0, bits [11:4] to Aff1 and bits [19:12] to Aff2.  Bit 31 is
 * forced on (architecturally RES1 — NOTE(review): confirm).
 */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
623
/*
 * Initialise the guest's PMCR_EL0: keep the host's non-writable bits
 * (notably the counter count), fill the writable bits with the
 * recognisable pattern 0xdecafbad, clear the enable bit, and force
 * 64-bit cycle counting (LC) when 32-bit EL0 is not supported.
 */
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);

	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!system_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}
639
640static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
641{
642 u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
643 bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
644
645 if (!enabled)
646 kvm_inject_undefined(vcpu);
647
648 return !enabled;
649}
650
/* EL0 access denied unless PMUSERENR.EN is set (or privileged). */
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

/* PMSWINC write denied unless PMUSERENR.SW or .EN is set. */
static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

/* Cycle counter access denied unless PMUSERENR.CR or .EN is set. */
static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

/* Event counter access denied unless PMUSERENR.ER or .EN is set. */
static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
670
/*
 * Trap handler for PMCR_EL0.  RAZ/WI when the vPMU is not set up.
 * Writes only affect the architecturally writable bits, with LC
 * forced on when 32-bit EL0 is unsupported; the new value is handed
 * to the PMU emulation.  Reads hide the write-only P and C bits.
 */
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!system_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
701
/*
 * Trap handler for PMSELR_EL0.  RAZ/WI without a vPMU.  Writes store
 * the raw value; reads return only the counter-index field.
 */
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
720
721static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
722 const struct sys_reg_desc *r)
723{
724 u64 pmceid;
725
726 if (!kvm_arm_pmu_v3_ready(vcpu))
727 return trap_raz_wi(vcpu, p, r);
728
729 BUG_ON(p->is_write);
730
731 if (pmu_access_el0_disabled(vcpu))
732 return false;
733
734 if (!(p->Op2 & 1))
735 pmceid = read_sysreg(pmceid0_el0);
736 else
737 pmceid = read_sysreg(pmceid1_el0);
738
739 p->regval = pmceid;
740
741 return true;
742}
743
/*
 * A counter index is valid if it is below the number of implemented
 * counters (PMCR_EL0.N) or is the dedicated cycle counter.  An
 * out-of-range index injects an UNDEF and returns false.
 */
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
757
/*
 * Shared trap handler for all PMU event counter accesses.  The
 * counter index is decoded from the register encoding:
 *  - CRn==9, CRm==13, Op2==2: PMXEVCNTR_EL0 (counter from PMSELR)
 *  - CRn==9, CRm==13, Op2==0: PMCCNTR_EL0 (cycle counter)
 *  - CRn==0, CRm==9:          PMCCNTR (AArch32 cycle counter)
 *  - CRn==14, CRm in {8..11}: PMEVCNTRn_EL0, n from CRm[1:0]:Op2[2:0]
 * RAZ/WI without a vPMU; invalid indices UNDEF.
 */
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR (AArch32) */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
814
/*
 * Trap handler for the event type registers (PMXEVTYPER_EL0,
 * PMEVTYPERn_EL0, PMCCFILTR_EL0).  Decodes the counter index from the
 * encoding (PMSELR-selected or direct), then writes program the event
 * type in the PMU emulation while reads return the stored value, both
 * masked to the valid event-type bits.
 */
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0: counter selected by PMSELR */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
854
/*
 * Trap handler for PMCNTENSET/PMCNTENCLR_EL0 (Op2 bit 0 distinguishes
 * them).  Writes enable or disable the selected counters, limited to
 * the implemented-counter mask; reads return the enable-set bitmap.
 */
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
885
/*
 * Trap handler for PMINTENSET/PMINTENCLR_EL1 (Op2 bit 0 distinguishes
 * them).  EL1-only: unprivileged access injects an UNDEF.  Writes set
 * or clear overflow-interrupt enables within the implemented-counter
 * mask; reads return the enable-set bitmap.
 */
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
914
/*
 * Trap handler for PMOVSSET/PMOVSCLR_EL0 (CRm bit 1 distinguishes
 * them).  Writes set or clear overflow flags within the
 * implemented-counter mask; reads return the overflow bitmap.
 */
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
939
/*
 * Trap handler for PMSWINC_EL0 (software increment).  Write-only:
 * reads inject an UNDEF.  The written bits, limited to implemented
 * counters, are forwarded to the PMU emulation.
 */
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
958
/*
 * Trap handler for PMUSERENR_EL0.  Writes are EL1-only (an EL0 write
 * injects an UNDEF) and are masked to the architected bits; reads
 * return the stored value under the same mask.
 */
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
980
/*
 * Build the 32-bit sysreg encoding from a sys_reg_desc.
 *
 * Fix: the previous definition ended with a stray ';', which expanded
 * into an extra empty statement at every use and would break any use
 * of the macro in expression context (if() conditions, function
 * arguments).  Drop the semicolon and parenthesize the expansion.
 */
#define reg_to_encoding(x)						\
	(sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		 (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2))
984
985
/*
 * Expand to four sys_reg_desc entries for the per-index hardware
 * breakpoint/watchpoint registers DBGBVRn/DBGBCRn/DBGWVRn/DBGWCRn_EL1.
 *
 * Fix: the reg field (third initializer after the access/reset
 * callbacks, cf. the MDCCINT_EL1 entry) must be n, not 0 — trap_bvr()
 * and friends, the get_/set_ userspace accessors and the reset
 * handlers all index vcpu_debug_state arrays with rd->reg, so a
 * constant 0 would make all 16 breakpoints/watchpoints alias slot 0.
 */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
995
996
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
1005
/*
 * Trap handler for the pointer-authentication key registers: enable
 * ptrauth for the vcpu lazily via kvm_arm_vcpu_ptrauth_trap() and
 * return false so the trapped instruction is re-executed with the
 * keys available rather than emulated (NOTE(review): relies on the
 * caller not advancing the PC when false is returned — confirm
 * against the exit handling path).
 */
static bool trap_ptrauth(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *rd)
{
	kvm_arm_vcpu_ptrauth_trap(vcpu);

	return false;
}
1022
1023static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1024 const struct sys_reg_desc *rd)
1025{
1026 return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1027}
1028
/* One sys_reg_desc entry for a single ptrauth key half-register. */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

/* Expand to the LO/HI pair of entries for a ptrauth key. */
#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
1036
/*
 * Trap handler for the EL0 physical timer registers (both AArch64 and
 * AArch32 encodings), forwarding reads and writes to the arch-timer
 * emulation.  Only the CNTP TVAL/CTL/CVAL encodings reach this
 * handler; anything else is a table bug (BUG()).
 */
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	default:
		BUG();
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
1072
1073
/*
 * Compute the value of an ID register as seen by the guest: the
 * host's sanitised value (or 0 when @raz), with the SVE field of
 * ID_AA64PFR0_EL1 and the ptrauth (APA/API/GPA/GPI) fields of
 * ID_AA64ISAR1_EL1 masked out when the vcpu lacks those features.
 */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
	}

	return val;
}
1092
1093
1094
/*
 * Common guest trap handler for ID registers: read-only (writes
 * UNDEF), returning read_id_reg()'s view (zero when @raz).
 */
static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r, raz);
	return true;
}

/* Trap handler for ID registers exposing the sanitised host value. */
static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

/* Trap handler for ID registers that must read as zero. */
static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}
1120
/* Userspace register marshalling helpers, defined later in this file. */
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
1124
1125
1126static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1127 const struct sys_reg_desc *rd)
1128{
1129 if (vcpu_has_sve(vcpu))
1130 return 0;
1131
1132 return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1133}
1134
1135
1136static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
1137 const struct sys_reg_desc *rd)
1138{
1139 if (vcpu_has_sve(vcpu))
1140 return 0;
1141
1142 return REG_HIDDEN_USER;
1143}
1144
1145
1146static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
1147{
1148 if (!vcpu_has_sve(vcpu))
1149 return 0;
1150
1151 return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
1152}
1153
/* Guest trap handler for ID_AA64ZFR0_EL1: read-only, SVE-gated view. */
static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *rd)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, rd);

	p->regval = guest_id_aa64zfr0_el1(vcpu);
	return true;
}
1164
/*
 * Userspace read of ID_AA64ZFR0_EL1.  Only reachable when the
 * register is visible, i.e. the vcpu has SVE (WARN otherwise).
 */
static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
		const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	u64 val;

	if (WARN_ON(!vcpu_has_sve(vcpu)))
		return -ENOENT;

	val = guest_id_aa64zfr0_el1(vcpu);
	return reg_to_user(uaddr, &val, reg->id);
}
1177
/*
 * Userspace write of ID_AA64ZFR0_EL1.  The register is immutable:
 * the write succeeds only if it matches the current value, otherwise
 * -EINVAL.  Only reachable when the vcpu has SVE (WARN otherwise).
 */
static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
		const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	if (WARN_ON(!vcpu_has_sve(vcpu)))
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != guest_id_aa64zfr0_el1(vcpu))
		return -EINVAL;

	return 0;
}
1199
1200
1201
1202
1203
1204
1205
1206
/*
 * Userspace read of an ID register: return read_id_reg()'s view
 * (zero when @raz).
 */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(vcpu, rd, raz);

	return reg_to_user(uaddr, &val, id);
}

/*
 * Userspace write of an ID register.  ID registers are invariant:
 * the write is accepted only when the value matches the current view,
 * otherwise -EINVAL.
 */
static int __set_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd, raz))
		return -EINVAL;

	return 0;
}
1235
/* Userspace read of a sanitised ID register. */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, false);
}

/* Userspace write of a sanitised ID register (must match; invariant). */
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, false);
}

/* Userspace read of a RAZ ID register (always zero). */
static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(vcpu, rd, uaddr, true);
}

/* Userspace write of a RAZ ID register (must be zero; invariant). */
static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, true);
}
1259
/* CTR_EL0 trap handler: read-only, returns the sanitised host value. */
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}
1269
/* CLIDR_EL1 trap handler: read-only, passes through the host value. */
static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sysreg(clidr_el1);
	return true;
}
1279
1280static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1281 const struct sys_reg_desc *r)
1282{
1283 if (p->is_write)
1284 vcpu_write_sys_reg(vcpu, p->regval, r->reg);
1285 else
1286 p->regval = vcpu_read_sys_reg(vcpu, r->reg);
1287 return true;
1288}
1289
/*
 * CCSIDR_EL1 trap handler: read-only.  Returns the host CCSIDR for
 * the cache level/type the guest selected via CSSELR_EL1.  For data
 * or unified caches (CSSELR bit 0 clear — NOTE(review): presumably
 * the InD bit; confirm) bits [27:3] are cleared, hiding the
 * sets/associativity geometry from the guest.
 */
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	p->regval = get_ccsidr(csselr);

	if (!(csselr & 1))
		p->regval &= ~GENMASK(27, 3);
	return true;
}
1317
1318
/* ID register exposing the host's sanitised value; invariant to userspace. */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * Placeholder for an unallocated ID register slot in the CRm=0 space:
 * reads as zero for guest and userspace alike.
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * An allocated ID register that is deliberately hidden: reads as zero
 * for guest and userspace.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2.
 *
 * Debug handling: we trap most, if not all, debug related system
 * registers; external debug and the OSlock protocol are not
 * implemented (most debug registers below are RAZ/WI).
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW/CSW/CISW: set/way cache maintenance, emulated */
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	/* DBGDTR[TR]X_EL0 share the same encoding */
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	/* ZFR0 needs SVE-aware accessors and conditional visibility */
	{ SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	/* Pointer authentication keys */
	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	/* RAS error records: RAZ/WI for the guest */
	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	/* SET/CLR pair share the same backing register (PMINTENSET_EL1) */
	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* LORegions */
	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	/* GICv3 CPU interface: sysreg accesses handled by the vgic code */
	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as
	 * zero in 32bit mode. Here we choose to reset it as zero for
	 * consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),

	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),

	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as
	 * zero in 32bit mode. Here we choose to reset it as zero for
	 * consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
1646
1647static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1648 struct sys_reg_params *p,
1649 const struct sys_reg_desc *r)
1650{
1651 if (p->is_write) {
1652 return ignore_write(vcpu, p);
1653 } else {
1654 u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1655 u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1656 u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1657
1658 p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1659 (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1660 (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1661 | (6 << 16) | (el3 << 14) | (el3 << 12));
1662 return true;
1663 }
1664}
1665
1666static bool trap_debug32(struct kvm_vcpu *vcpu,
1667 struct sys_reg_params *p,
1668 const struct sys_reg_desc *r)
1669{
1670 if (p->is_write) {
1671 vcpu_cp14(vcpu, r->reg) = p->regval;
1672 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1673 } else {
1674 p->regval = vcpu_cp14(vcpu, r->reg);
1675 }
1676
1677 return true;
1678}
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691static bool trap_xvr(struct kvm_vcpu *vcpu,
1692 struct sys_reg_params *p,
1693 const struct sys_reg_desc *rd)
1694{
1695 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1696
1697 if (p->is_write) {
1698 u64 val = *dbg_reg;
1699
1700 val &= 0xffffffffUL;
1701 val |= p->regval << 32;
1702 *dbg_reg = val;
1703
1704 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1705 } else {
1706 p->regval = *dbg_reg >> 32;
1707 }
1708
1709 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1710
1711 return true;
1712}
1713
/* AArch32 breakpoint/watchpoint register quadruple for debug slot n */
#define DBG_BCR_BVR_WCR_WVR(n) \
	/* DBGBVRn */ \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */ \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
	/* DBGWVRn */ \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
	/* DBGWCRn */ \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1723
/* AArch32 DBGBXVRn: top half of the 64-bit breakpoint value register n */
#define DBGBXVR(n) \
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1726
1727
1728
1729
1730
1731
/*
 * Trapped cp14 (AArch32 debug) registers. Most external debug registers
 * are presented RAZ/WI to the guest.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
1815
1816
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
1824
1825
/* Macro to expand the AArch32 PMEVCNTRn register */
#define PMU_PMEVCNTR(n) \
	/* PMEVCNTRn */ \
	{ Op1(0), CRn(0b1110), \
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
	  access_pmu_evcntr }
1831
1832
/* Macro to expand the AArch32 PMEVTYPERn register */
#define PMU_PMEVTYPER(n) \
	/* PMEVTYPERn */ \
	{ Op1(0), CRn(0b1110), \
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
	  access_pmu_evtyper }
1838
1839
1840
1841
1842
1843
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),

	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),

	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },

	/* Cache ID registers via the aliased (Op1 != 0) encodings */
	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
};
1967
/* Trapped cp15 64bit (MCRR/MRRC) registers */
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};
1977
1978
/* Target specific emulation tables, registered at vcpu-target init time */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1980
/*
 * Register the target-specific sys_reg table for @target.
 * NOTE(review): no bounds check on @target; callers are presumably
 * trusted to pass a value below KVM_ARM_NUM_TARGETS.
 */
void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}
1986
1987
1988static const struct sys_reg_desc *get_target_table(unsigned target,
1989 bool mode_is_64,
1990 size_t *num)
1991{
1992 struct kvm_sys_reg_target_table *table;
1993
1994 table = target_tables[target];
1995 if (mode_is_64) {
1996 *num = table->table64.num;
1997 return table->table64.table;
1998 } else {
1999 *num = table->table32.num;
2000 return table->table32.table;
2001 }
2002}
2003
/*
 * bsearch() comparator: @key is the packed Op0/Op1/CRn/CRm/Op2 encoding
 * being looked up (smuggled through the pointer), @elt the table entry.
 * NOTE(review): relies on encodings being small enough that the
 * unsigned difference truncated to int keeps its sign.
 */
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_encoding(r);
}
2011
/*
 * Binary-search @table (which must be sorted by encoding) for the entry
 * matching @params. Returns NULL if no entry matches.
 */
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_encoding(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
2020
/* Guest LDC/STC access to cp14 is not supported: inject an UNDEF. */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
2026
/*
 * Run the trap handler attached to @r for the access described by
 * @params, advancing the guest PC on success.
 */
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_guest(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
/*
 * emulate_cp -- try to match a coprocessor access against a handling
 * table and call the corresponding trap handler.
 *
 * @params: descriptor of the access
 * @table:  array of trap descriptors (may be NULL)
 * @num:    size of the trap descriptor array
 *
 * Returns 0 if the access has been handled, -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}
2081
2082static void unhandled_cp_access(struct kvm_vcpu *vcpu,
2083 struct sys_reg_params *params)
2084{
2085 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
2086 int cp = -1;
2087
2088 switch(hsr_ec) {
2089 case ESR_ELx_EC_CP15_32:
2090 case ESR_ELx_EC_CP15_64:
2091 cp = 15;
2092 break;
2093 case ESR_ELx_EC_CP14_MR:
2094 case ESR_ELx_EC_CP14_64:
2095 cp = 14;
2096 break;
2097 default:
2098 WARN_ON(1);
2099 }
2100
2101 print_sys_reg_msg(params,
2102 "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2103 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2104 kvm_inject_undefined(vcpu);
2105}
2106
2107
2108
2109
2110
2111
/*
 * kvm_handle_cp_64 -- handles a 64-bit (MRRC/MCRR) trap on a guest
 * CP14/CP15 access. The target-specific table is searched before the
 * global one.
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	/* MRRC/MCRR only encode Op1 and CRm; the rest stays zero. */
	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
2163
2164
2165
2166
2167
2168
2169static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2170 const struct sys_reg_desc *global,
2171 size_t nr_global,
2172 const struct sys_reg_desc *target_specific,
2173 size_t nr_specific)
2174{
2175 struct sys_reg_params params;
2176 u32 hsr = kvm_vcpu_get_hsr(vcpu);
2177 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2178
2179 params.is_aarch32 = true;
2180 params.is_32bit = true;
2181 params.CRm = (hsr >> 1) & 0xf;
2182 params.regval = vcpu_get_reg(vcpu, Rt);
2183 params.is_write = ((hsr & 1) == 0);
2184 params.CRn = (hsr >> 10) & 0xf;
2185 params.Op0 = 0;
2186 params.Op1 = (hsr >> 14) & 0x7;
2187 params.Op2 = (hsr >> 17) & 0x7;
2188
2189 if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) ||
2190 !emulate_cp(vcpu, ¶ms, global, nr_global)) {
2191 if (!params.is_write)
2192 vcpu_set_reg(vcpu, Rt, params.regval);
2193 return 1;
2194 }
2195
2196 unhandled_cp_access(vcpu, ¶ms);
2197 return 1;
2198}
2199
/* Exit handler for 64-bit cp15 traps (global + target-specific tables). */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}
2210
/* Exit handler for 32-bit cp15 traps (global + target-specific tables). */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}
2221
/* Exit handler for 64-bit cp14 traps (no target-specific table). */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}
2228
/* Exit handler for 32-bit cp14 traps (no target-specific table). */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}
2235
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	/*
	 * Op0 == 3 with CRn in {11, 15} is the IMPLEMENTATION DEFINED
	 * sysreg space; CRn & 0b1011 == 0b1011 matches exactly those two.
	 */
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
2241
/*
 * Emulate a trapped AArch64 sysreg access: look the register up in the
 * target-specific table first, then the architected table. Unknown
 * registers get an UNDEF (silently for the IMPLEMENTATION DEFINED
 * space, with a diagnostic otherwise). Always returns 1 (handled).
 */
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}
	return 1;
}
2267
2268static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
2269 const struct sys_reg_desc *table, size_t num,
2270 unsigned long *bmap)
2271{
2272 unsigned long i;
2273
2274 for (i = 0; i < num; i++)
2275 if (table[i].reset) {
2276 int reg = table[i].reg;
2277
2278 table[i].reset(vcpu, &table[i]);
2279 if (reg > 0 && reg < NR_SYS_REGS)
2280 set_bit(reg, bmap);
2281 }
2282}
2283
2284
2285
2286
2287
2288
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	/* Decode the ISS fields of the ESR into a sysreg encoding. */
	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	/* Propagate the emulated value back to the destination register. */
	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
2314
2315
2316
2317
2318
2319static bool index_to_params(u64 id, struct sys_reg_params *params)
2320{
2321 switch (id & KVM_REG_SIZE_MASK) {
2322 case KVM_REG_SIZE_U64:
2323
2324 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2325 | KVM_REG_ARM_COPROC_MASK
2326 | KVM_REG_ARM64_SYSREG_OP0_MASK
2327 | KVM_REG_ARM64_SYSREG_OP1_MASK
2328 | KVM_REG_ARM64_SYSREG_CRN_MASK
2329 | KVM_REG_ARM64_SYSREG_CRM_MASK
2330 | KVM_REG_ARM64_SYSREG_OP2_MASK))
2331 return false;
2332 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2333 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2334 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2335 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2336 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2337 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2338 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2339 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2340 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2341 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2342 return true;
2343 default:
2344 return false;
2345 }
2346}
2347
/*
 * Decode a userspace register index into @params and look it up in
 * @table. Returns NULL if the index is malformed or has no entry.
 */
const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}
2358
2359
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}
2385
2386
2387
2388
2389
2390
2391
2392
2393
/*
 * Invariant registers: the guest sees the host's values, captured once
 * into the descriptor's ->val by these getter hooks (the const is cast
 * away deliberately for that one-time fill).
 */
#define FUNCTION_INVARIANT(reg) \
	static void get_##reg(struct kvm_vcpu *v, \
			      const struct sys_reg_desc *r) \
	{ \
		((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
2405
/* CTR_EL0 uses the system-wide sanitised value, not this CPU's copy. */
static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}
2410
2411
/* ->val is filled in by the get_* hooks above at table init time */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
2419
2420static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2421{
2422 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2423 return -EFAULT;
2424 return 0;
2425}
2426
2427static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2428{
2429 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2430 return -EFAULT;
2431 return 0;
2432}
2433
/* Read an invariant register's captured host value out to userspace. */
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
2446
/*
 * Userspace "write" to an invariant register: only the current value
 * is accepted.
 */
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
2469
/*
 * Validate a userspace CCSIDR demux selector against the cache
 * hierarchy advertised in cache_levels.
 */
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit, next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache at this level */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
2495
2496static int demux_c15_get(u64 id, void __user *uaddr)
2497{
2498 u32 val;
2499 u32 __user *uval = uaddr;
2500
2501
2502 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2503 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2504 return -ENOENT;
2505
2506 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2507 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2508 if (KVM_REG_SIZE(id) != 4)
2509 return -ENOENT;
2510 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2511 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2512 if (!is_valid_cache(val))
2513 return -ENOENT;
2514
2515 return put_user(get_ccsidr(val), uval);
2516 default:
2517 return -ENOENT;
2518 }
2519}
2520
2521static int demux_c15_set(u64 id, void __user *uaddr)
2522{
2523 u32 val, newval;
2524 u32 __user *uval = uaddr;
2525
2526
2527 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2528 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2529 return -ENOENT;
2530
2531 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2532 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2533 if (KVM_REG_SIZE(id) != 4)
2534 return -ENOENT;
2535 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2536 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2537 if (!is_valid_cache(val))
2538 return -ENOENT;
2539
2540 if (get_user(newval, uval))
2541 return -EFAULT;
2542
2543
2544 if (newval != get_ccsidr(val))
2545 return -EINVAL;
2546 return 0;
2547 default:
2548 return -ENOENT;
2549 }
2550}
2551
2552int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2553{
2554 const struct sys_reg_desc *r;
2555 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2556
2557 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2558 return demux_c15_get(reg->id, uaddr);
2559
2560 if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2561 return -ENOENT;
2562
2563 r = index_to_sys_reg_desc(vcpu, reg->id);
2564 if (!r)
2565 return get_invariant_sys_reg(reg->id, uaddr);
2566
2567
2568 if (sysreg_hidden_from_user(vcpu, r))
2569 return -ENOENT;
2570
2571 if (r->get_user)
2572 return (r->get_user)(vcpu, r, reg, uaddr);
2573
2574 return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
2575}
2576
2577int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2578{
2579 const struct sys_reg_desc *r;
2580 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2581
2582 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2583 return demux_c15_set(reg->id, uaddr);
2584
2585 if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2586 return -ENOENT;
2587
2588 r = index_to_sys_reg_desc(vcpu, reg->id);
2589 if (!r)
2590 return set_invariant_sys_reg(reg->id, uaddr);
2591
2592
2593 if (sysreg_hidden_from_user(vcpu, r))
2594 return -ENOENT;
2595
2596 if (r->set_user)
2597 return (r->set_user)(vcpu, r, reg, uaddr);
2598
2599 return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2600}
2601
2602static unsigned int num_demux_regs(void)
2603{
2604 unsigned int i, count = 0;
2605
2606 for (i = 0; i < CSSELR_MAX; i++)
2607 if (is_valid_cache(i))
2608 count++;
2609
2610 return count;
2611}
2612
2613static int write_demux_regids(u64 __user *uindices)
2614{
2615 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2616 unsigned int i;
2617
2618 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2619 for (i = 0; i < CSSELR_MAX; i++) {
2620 if (!is_valid_cache(i))
2621 continue;
2622 if (put_user(val | i, uindices))
2623 return -EFAULT;
2624 uindices++;
2625 }
2626 return 0;
2627}
2628
2629static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2630{
2631 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2632 KVM_REG_ARM64_SYSREG |
2633 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2634 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2635 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2636 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2637 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2638}
2639
2640static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2641{
2642 if (!*uind)
2643 return true;
2644
2645 if (put_user(sys_reg_to_index(reg), *uind))
2646 return false;
2647
2648 (*uind)++;
2649 return true;
2650}
2651
2652static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
2653 const struct sys_reg_desc *rd,
2654 u64 __user **uind,
2655 unsigned int *total)
2656{
2657
2658
2659
2660
2661 if (!(rd->reg || rd->get_user))
2662 return 0;
2663
2664 if (sysreg_hidden_from_user(vcpu, rd))
2665 return 0;
2666
2667 if (!copy_reg_to_user(rd, uind))
2668 return -EFAULT;
2669
2670 (*total)++;
2671 return 0;
2672}
2673
2674
/*
 * Walk the target-specific table and the generic sys_reg_descs table in
 * lockstep (both are sorted), visiting each register once and preferring
 * the target-specific entry when both tables describe the same register
 * (the cmp == 0 case advances both cursors but only emits i1).
 *
 * Returns the number of registers visited, or a negative error from
 * walk_one_sys_reg().  With a NULL @uind this only counts.
 *
 * NOTE(review): once a table is exhausted its cursor is set to NULL and
 * cmp_sys_reg() is still called with it — this relies on cmp_sys_reg()
 * ordering a NULL descriptor after any real one; confirm in the helper.
 */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	/* Both tables must be non-empty for the merge below to start. */
	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);

		/* target-specific overrides generic entry */
		if (cmp <= 0)
			err = walk_one_sys_reg(vcpu, i1, &uind, &total);
		else
			err = walk_one_sys_reg(vcpu, i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
2709
2710unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2711{
2712 return ARRAY_SIZE(invariant_sys_regs)
2713 + num_demux_regs()
2714 + walk_sys_regs(vcpu, (u64 __user *)NULL);
2715}
2716
2717int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2718{
2719 unsigned int i;
2720 int err;
2721
2722
2723 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2724 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2725 return -EFAULT;
2726 uindices++;
2727 }
2728
2729 err = walk_sys_regs(vcpu, uindices);
2730 if (err < 0)
2731 return err;
2732 uindices += err;
2733
2734 return write_demux_regids(uindices);
2735}
2736
2737static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2738{
2739 unsigned int i;
2740
2741 for (i = 1; i < n; i++) {
2742 if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2743 kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2744 return 1;
2745 }
2746 }
2747
2748 return 0;
2749}
2750
/*
 * One-time init: validate every trap table, snapshot the invariant
 * registers, and record the host cache hierarchy in cache_levels.
 */
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure every table is sorted (and thus duplicate-free). */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* Run each reset hook to fill in ->val of the invariant regs. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * Snapshot CLIDR_EL1 via its reset hook (which writes clidr.val),
	 * then truncate the per-level 3-bit cache type fields at the
	 * first level whose type is 0 — levels beyond the first empty
	 * one carry no caches, so their fields are cleared below.
	 */
	get_clidr_el1(NULL, &clidr);
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Keep only the fields up to (excluding) the first empty level. */
	cache_levels &= (1 << (i*3))-1;
}
2786
2787
2788
2789
2790
2791
2792
2793
2794void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2795{
2796 size_t num;
2797 const struct sys_reg_desc *table;
2798 DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
2799
2800
2801 reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
2802
2803 table = get_target_table(vcpu->arch.target, true, &num);
2804 reset_sys_reg_descs(vcpu, table, num, bmap);
2805
2806 for (num = 1; num < NR_SYS_REGS; num++) {
2807 if (WARN(!test_bit(num, bmap),
2808 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2809 break;
2810 }
2811}
2812