1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/bsearch.h>
24#include <linux/kvm_host.h>
25#include <linux/mm.h>
26#include <linux/printk.h>
27#include <linux/uaccess.h>
28
29#include <asm/cacheflush.h>
30#include <asm/cputype.h>
31#include <asm/debug-monitors.h>
32#include <asm/esr.h>
33#include <asm/kvm_arm.h>
34#include <asm/kvm_coproc.h>
35#include <asm/kvm_emulate.h>
36#include <asm/kvm_host.h>
37#include <asm/kvm_hyp.h>
38#include <asm/kvm_mmu.h>
39#include <asm/perf_event.h>
40#include <asm/sysreg.h>
41
42#include <trace/events/kvm.h>
43
44#include "sys_regs.h"
45
46#include "trace.h"
47
48
49
50
51
52
53
54
55
56
57
58
/*
 * Trap handler invoked when the guest reads a register our tables mark
 * write-only: warn once (a hit here means a broken trap table entry or a
 * misbehaving guest), log the trapped instruction and inject an UNDEF.
 * Returns false so the caller does not advance PC past the instruction.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
68
/*
 * Counterpart of read_from_write_only(): handles a guest write to a
 * register marked read-only. Warns once, logs the instruction and
 * injects an UNDEF into the guest; returns false (emulation failed).
 */
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
78
/*
 * Read a guest system register, honouring VHE register deferral.
 *
 * When the guest's sysregs are currently loaded on the physical CPU
 * (sysregs_loaded_on_cpu), the registers below live in hardware (EL1
 * state accessed via the _EL12 aliases under VHE) and must be read
 * from there; otherwise the in-memory shadow copy is authoritative.
 */
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * NOTE(review): MPIDR_EL1 must never appear below — it is set via
	 * VMPIDR_EL2, is immutable to the guest, and may be read for VCPU A
	 * from VCPU B's thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(sctlr_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(cpacr_EL12);
	case TTBR0_EL1:		return read_sysreg_s(ttbr0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(ttbr1_EL12);
	case TCR_EL1:		return read_sysreg_s(tcr_EL12);
	case ESR_EL1:		return read_sysreg_s(esr_EL12);
	case AFSR0_EL1:		return read_sysreg_s(afsr0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(afsr1_EL12);
	case FAR_EL1:		return read_sysreg_s(far_EL12);
	case MAIR_EL1:		return read_sysreg_s(mair_EL12);
	case VBAR_EL1:		return read_sysreg_s(vbar_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(contextidr_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(amair_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(cntkctl_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}
122
/*
 * Write a guest system register, honouring VHE register deferral.
 *
 * Mirror image of vcpu_read_sys_reg(): while the guest's sysregs are
 * loaded on the CPU, the registers below must be written to hardware
 * (via the _EL12 aliases under VHE); they are only restored from the
 * in-memory copy on vcpu_load, so updating only the shadow would lose
 * the write. Otherwise the shadow copy is updated directly.
 */
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, sctlr_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, cpacr_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, ttbr0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, ttbr1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, tcr_EL12);		return;
	case ESR_EL1:		write_sysreg_s(val, esr_EL12);		return;
	case AFSR0_EL1:		write_sysreg_s(val, afsr0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, afsr1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, far_EL12);		return;
	case MAIR_EL1:		write_sysreg_s(val, mair_EL12);		return;
	case VBAR_EL1:		write_sysreg_s(val, vbar_EL12);		return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, contextidr_EL12);	return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, amair_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, cntkctl_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
165
166
167static u32 cache_levels;
168
169
170#define CSSELR_MAX 12
171
172
173static u32 get_ccsidr(u32 csselr)
174{
175 u32 ccsidr;
176
177
178 local_irq_disable();
179 write_sysreg(csselr, csselr_el1);
180 isb();
181 ccsidr = read_sysreg(ccsidr_el1);
182 local_irq_enable();
183
184 return ccsidr;
185}
186
187
188
189
/*
 * Trap handler for cache maintenance by Set/Way (DC ISW/CSW/CISW).
 * These are write-only sys instructions; a read is an error.
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, guests should never use the cache
	 * maintenance by Set/Way operations).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
209
210
211
212
213
214
/*
 * Generic accessor for VM registers. Only called for writes while
 * HCR_TVM-style trapping is in effect (a read can never get here —
 * see the BUG_ON below). Propagates the write into the vcpu's
 * register state and lets kvm_toggle_cache() react to MMU/cache
 * enable-state changes.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* AArch32 registers map onto half of a 64bit register pair. */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		/*
		 * 32bit access: merge the written half with the current
		 * value, keeping the other 32 bits intact. Odd r->reg
		 * selects the upper half, even the lower half.
		 */
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}
	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
244
245
246
247
248
249
250
/*
 * Trap handler for the GICv3 SGI generation registers
 * (ICC_SGI0R_EL1, ICC_SGI1R_EL1, ICC_ASGI1R_EL1 and their AArch32
 * counterparts). These are write-only; the write is forwarded to the
 * vgic, tagged with the SGI group it targets.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Which group the SGI targets depends on which of the three
	 * registers was written. The encodings differ between the
	 * AArch32 (distinguished by Op1) and AArch64 (distinguished by
	 * Op2) views, so decode them separately. Unknown encodings
	 * conservatively default to group 1.
	 */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
295
/*
 * Trap handler for ICC_SRE_EL1: writes are ignored (the SRE setting is
 * fixed by the vgic), reads return the value the vgic decided on.
 */
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
306
307static bool trap_raz_wi(struct kvm_vcpu *vcpu,
308 struct sys_reg_params *p,
309 const struct sys_reg_desc *r)
310{
311 if (p->is_write)
312 return ignore_write(vcpu, p);
313 else
314 return read_zero(vcpu, p);
315}
316
317
318
319
320
321
322
/*
 * Trap handler for the LORegion registers.
 * If the CPU does not implement LORegions (ID_AA64MMFR1_EL1.LO == 0),
 * any access UNDEFs. Otherwise, LORID_EL1 is read-only and everything
 * else is treated as RAZ/WI (we don't emulate actual LORegions).
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
341
342static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
343 struct sys_reg_params *p,
344 const struct sys_reg_desc *r)
345{
346 if (p->is_write) {
347 return ignore_write(vcpu, p);
348 } else {
349 p->regval = (1 << 3);
350 return true;
351 }
352}
353
354static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
355 struct sys_reg_params *p,
356 const struct sys_reg_desc *r)
357{
358 if (p->is_write) {
359 return ignore_write(vcpu, p);
360 } else {
361 p->regval = read_sysreg(dbgauthstatus_el1);
362 return true;
363 }
364}
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
/*
 * Generic handler for the lazily-switched debug registers.
 * A write updates the vcpu's shadow copy and sets KVM_ARM64_DEBUG_DIRTY
 * so the hypervisor knows the debug state must be synced to/from
 * hardware from now on; a read just returns the shadow copy.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
408
409
410
411
412
413
414
415
416
417
418static void reg_to_dbg(struct kvm_vcpu *vcpu,
419 struct sys_reg_params *p,
420 u64 *dbg_reg)
421{
422 u64 val = p->regval;
423
424 if (p->is_32bit) {
425 val &= 0xffffffffUL;
426 val |= ((*dbg_reg >> 32) << 32);
427 }
428
429 *dbg_reg = val;
430 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
431}
432
433static void dbg_to_reg(struct kvm_vcpu *vcpu,
434 struct sys_reg_params *p,
435 u64 *dbg_reg)
436{
437 p->regval = *dbg_reg;
438 if (p->is_32bit)
439 p->regval &= 0xffffffffUL;
440}
441
/* Guest trap handler for DBGBVRn_EL1 (breakpoint value register n). */
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

/* Userspace write of DBGBVRn_EL1 via KVM_SET_ONE_REG. */
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* Userspace read of DBGBVRn_EL1 via KVM_GET_ONE_REG. */
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

/* vcpu reset: load the reset value from the register descriptor. */
static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}
483
484static bool trap_bcr(struct kvm_vcpu *vcpu,
485 struct sys_reg_params *p,
486 const struct sys_reg_desc *rd)
487{
488 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
489
490 if (p->is_write)
491 reg_to_dbg(vcpu, p, dbg_reg);
492 else
493 dbg_to_reg(vcpu, p, dbg_reg);
494
495 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
496
497 return true;
498}
499
500static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
501 const struct kvm_one_reg *reg, void __user *uaddr)
502{
503 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
504
505 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
506 return -EFAULT;
507
508 return 0;
509}
510
511static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
512 const struct kvm_one_reg *reg, void __user *uaddr)
513{
514 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
515
516 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
517 return -EFAULT;
518 return 0;
519}
520
521static void reset_bcr(struct kvm_vcpu *vcpu,
522 const struct sys_reg_desc *rd)
523{
524 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
525}
526
527static bool trap_wvr(struct kvm_vcpu *vcpu,
528 struct sys_reg_params *p,
529 const struct sys_reg_desc *rd)
530{
531 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
532
533 if (p->is_write)
534 reg_to_dbg(vcpu, p, dbg_reg);
535 else
536 dbg_to_reg(vcpu, p, dbg_reg);
537
538 trace_trap_reg(__func__, rd->reg, p->is_write,
539 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
540
541 return true;
542}
543
544static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
545 const struct kvm_one_reg *reg, void __user *uaddr)
546{
547 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
548
549 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
550 return -EFAULT;
551 return 0;
552}
553
554static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
555 const struct kvm_one_reg *reg, void __user *uaddr)
556{
557 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
558
559 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
560 return -EFAULT;
561 return 0;
562}
563
564static void reset_wvr(struct kvm_vcpu *vcpu,
565 const struct sys_reg_desc *rd)
566{
567 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
568}
569
570static bool trap_wcr(struct kvm_vcpu *vcpu,
571 struct sys_reg_params *p,
572 const struct sys_reg_desc *rd)
573{
574 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
575
576 if (p->is_write)
577 reg_to_dbg(vcpu, p, dbg_reg);
578 else
579 dbg_to_reg(vcpu, p, dbg_reg);
580
581 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
582
583 return true;
584}
585
586static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
587 const struct kvm_one_reg *reg, void __user *uaddr)
588{
589 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
590
591 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
592 return -EFAULT;
593 return 0;
594}
595
596static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
597 const struct kvm_one_reg *reg, void __user *uaddr)
598{
599 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
600
601 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
602 return -EFAULT;
603 return 0;
604}
605
606static void reset_wcr(struct kvm_vcpu *vcpu,
607 const struct sys_reg_desc *rd)
608{
609 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
610}
611
612static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
613{
614 u64 amair = read_sysreg(amair_el1);
615 vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
616}
617
/* vcpu reset: synthesise MPIDR_EL1 from the vcpu id. */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity levels:
	 * Aff0 gets only 4 bits (16 CPUs) so that each CPU can be
	 * addressed individually by the 16-bit per-Aff0-cluster target
	 * list of the GICv3 ICC_SGIxR registers; the remaining id bits
	 * spill into Aff1 and Aff2. Bit 31 (RES1/"U" view) is set.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
634
/* vcpu reset for PMCR_EL0. */
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Keep the read-only bits (e.g. PMCR.N) from the host value,
	 * reset the writable bits (ARMV8_PMU_PMCR_MASK) to an arbitrary
	 * UNKNOWN pattern (0xdecafbad), and force PMCR.E (counting
	 * enable) to 0 as the architecture requires on reset.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
648
/*
 * Check whether an EL0 PMU access must be denied: access is allowed if
 * the guest runs in a privileged mode, or if PMUSERENR_EL0 has any of
 * the given enable @flags set. When denied, an UNDEF is injected.
 * Returns true if the access is disabled (i.e. was refused).
 */
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

/* EL0 general PMU access: gated by PMUSERENR_EL0.EN. */
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

/* EL0 software-increment writes: gated by PMUSERENR_EL0.{SW,EN}. */
static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

/* EL0 cycle counter access: gated by PMUSERENR_EL0.{CR,EN}. */
static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

/* EL0 event counter access: gated by PMUSERENR_EL0.{ER,EN}. */
static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
679
/* Trap handler for PMCR_EL0. */
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	/* Without an in-kernel PMU, the register is RAZ/WI. */
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
707
/* Trap handler for PMSELR_EL0 (event counter selection). */
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
726
/*
 * Trap handler for PMCEID0_EL0/PMCEID1_EL0: pass through the host's
 * common event identification registers. These are read-only; the
 * trap table must never route a write here (hence the BUG_ON).
 */
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	/* Op2 bit 0 distinguishes PMCEID1 (set) from PMCEID0 (clear). */
	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}
749
750static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
751{
752 u64 pmcr, val;
753
754 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
755 val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
756 if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
757 kvm_inject_undefined(vcpu);
758 return false;
759 }
760
761 return true;
762}
763
/*
 * Trap handler for the event counter value registers, in all their
 * encodings: PMXEVCNTR_EL0 (indirect, via PMSELR_EL0.SEL),
 * PMCCNTR_EL0/PMCCNTR (cycle counter) and PMEVCNTRn_EL0 (direct).
 */
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0: counter selected by PMSELR_EL0.SEL */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR (AArch32 64bit view) */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0: index packed into CRm[1:0]:Op2[2:0] */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
820
/*
 * Trap handler for the event type registers: PMXEVTYPER_EL0
 * (indirect, via PMSELR_EL0.SEL), PMEVTYPERn_EL0 (direct) and
 * PMCCFILTR_EL0 (the cycle counter's filter register).
 */
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		/* Trap table routed an unexpected encoding here. */
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
859
/*
 * Trap handler for PMCNTENSET_EL0/PMCNTENCLR_EL0. Both registers share
 * the PMCNTENSET_EL0 backing store; Op2 bit 0 distinguishes SET from
 * CLR semantics on write. Reads of either return the enabled-counter
 * mask.
 */
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
889
/*
 * Trap handler for PMINTENSET_EL1/PMINTENCLR_EL1 (overflow interrupt
 * enables). EL1 registers: any unprivileged access UNDEFs. Both share
 * the PMINTENSET_EL1 backing store; Op2 bit 0 selects SET vs CLR.
 */
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
918
/*
 * Trap handler for PMOVSSET_EL0/PMOVSCLR_EL0 (overflow status). Both
 * share the PMOVSSET_EL0 backing store; CRm bit 1 selects SET vs CLR.
 */
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
943
/*
 * Trap handler for PMSWINC_EL0 (software increment). Write-only;
 * forwards the masked counter set to the PMU emulation.
 */
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
962
/*
 * Trap handler for PMUSERENR_EL0. Only privileged mode may write
 * (an EL0 write UNDEFs); reads return the stored enable bits,
 * masked to the architected ARMV8_PMU_USERENR_MASK.
 */
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
984
985
/* Silly macro to expand the DBG{BCR,BVR,WCR,WVR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
995
996
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }


/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
1005
/*
 * Trap handler for CNTP_TVAL_EL0. TVAL is defined relative to the
 * current physical count: a write sets CVAL = now + value, a read
 * returns CVAL - now.
 */
static bool access_cntp_tval(struct kvm_vcpu *vcpu,
		struct sys_reg_params *p,
		const struct sys_reg_desc *r)
{
	u64 now = kvm_phys_timer_read();
	u64 cval;

	if (p->is_write) {
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
				      p->regval + now);
	} else {
		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		p->regval = cval - now;
	}

	return true;
}
1023
1024static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
1025 struct sys_reg_params *p,
1026 const struct sys_reg_desc *r)
1027{
1028 if (p->is_write)
1029 kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
1030 else
1031 p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
1032
1033 return true;
1034}
1035
1036static bool access_cntp_cval(struct kvm_vcpu *vcpu,
1037 struct sys_reg_params *p,
1038 const struct sys_reg_desc *r)
1039{
1040 if (p->is_write)
1041 kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
1042 else
1043 p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
1044
1045 return true;
1046}
1047
1048
/*
 * Read a sanitised cpufeature ID register by sys_reg_desc, or zero
 * when @raz is set. Features KVM does not support for guests (SVE,
 * pointer authentication) are masked out of the returned value.
 */
static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
		/* Hide SVE from the guest. */
		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
			kvm_debug("SVE unsupported for guests, suppressing\n");

		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64ISAR1_EL1) {
		/* Hide all four pointer-authentication feature fields. */
		const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) |
					 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
					 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
					 (0xfUL << ID_AA64ISAR1_GPI_SHIFT);
		if (val & ptrauth_mask)
			kvm_debug("ptrauth unsupported for guests, suppressing\n");
		val &= ~ptrauth_mask;
	}

	return val;
}
1072
1073
1074
/* cpufeature ID register access trap handlers */

/*
 * Common handler for guest reads of ID registers: ID registers are
 * read-only, so a write is an error; reads return the (possibly RAZ)
 * sanitised value.
 */
static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(r, raz);
	return true;
}

/* Visible ID register: expose the sanitised host value. */
static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

/* Hidden/unallocated ID register: reads as zero. */
static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}
1100
1101static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
1102static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
1103static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
1104
1105
1106
1107
1108
1109
1110
1111
/*
 * cpufeature ID register user accessors
 *
 * These registers are immutable for userspace: nothing is stored on
 * set, and a set is only accepted if it writes back the exact value
 * a get would return.
 */
static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(rd, raz))
		return -EINVAL;

	return 0;
}
1138
/* KVM_GET_ONE_REG accessor for a visible ID register. */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, false);
}

/* KVM_SET_ONE_REG accessor for a visible ID register (value immutable). */
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, false);
}

/* KVM_GET_ONE_REG accessor for a RAZ (hidden/unallocated) ID register. */
static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, true);
}

/* KVM_SET_ONE_REG accessor for a RAZ ID register (only zero accepted). */
static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, true);
}
1162
1163
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2.
 * Such registers are RAZ for the guest and immutable-zero to userspace.
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206static const struct sys_reg_desc sys_reg_descs[] = {
1207 { SYS_DESC(SYS_DC_ISW), access_dcsw },
1208 { SYS_DESC(SYS_DC_CSW), access_dcsw },
1209 { SYS_DESC(SYS_DC_CISW), access_dcsw },
1210
1211 DBG_BCR_BVR_WCR_WVR_EL1(0),
1212 DBG_BCR_BVR_WCR_WVR_EL1(1),
1213 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1214 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
1215 DBG_BCR_BVR_WCR_WVR_EL1(2),
1216 DBG_BCR_BVR_WCR_WVR_EL1(3),
1217 DBG_BCR_BVR_WCR_WVR_EL1(4),
1218 DBG_BCR_BVR_WCR_WVR_EL1(5),
1219 DBG_BCR_BVR_WCR_WVR_EL1(6),
1220 DBG_BCR_BVR_WCR_WVR_EL1(7),
1221 DBG_BCR_BVR_WCR_WVR_EL1(8),
1222 DBG_BCR_BVR_WCR_WVR_EL1(9),
1223 DBG_BCR_BVR_WCR_WVR_EL1(10),
1224 DBG_BCR_BVR_WCR_WVR_EL1(11),
1225 DBG_BCR_BVR_WCR_WVR_EL1(12),
1226 DBG_BCR_BVR_WCR_WVR_EL1(13),
1227 DBG_BCR_BVR_WCR_WVR_EL1(14),
1228 DBG_BCR_BVR_WCR_WVR_EL1(15),
1229
1230 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
1231 { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
1232 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
1233 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
1234 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
1235 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
1236 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
1237 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
1238
1239 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
1240 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
1241
1242 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
1243
1244 { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
1245
1246 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
1247
1248
1249
1250
1251
1252
1253
1254
1255 ID_SANITISED(ID_PFR0_EL1),
1256 ID_SANITISED(ID_PFR1_EL1),
1257 ID_SANITISED(ID_DFR0_EL1),
1258 ID_HIDDEN(ID_AFR0_EL1),
1259 ID_SANITISED(ID_MMFR0_EL1),
1260 ID_SANITISED(ID_MMFR1_EL1),
1261 ID_SANITISED(ID_MMFR2_EL1),
1262 ID_SANITISED(ID_MMFR3_EL1),
1263
1264
1265 ID_SANITISED(ID_ISAR0_EL1),
1266 ID_SANITISED(ID_ISAR1_EL1),
1267 ID_SANITISED(ID_ISAR2_EL1),
1268 ID_SANITISED(ID_ISAR3_EL1),
1269 ID_SANITISED(ID_ISAR4_EL1),
1270 ID_SANITISED(ID_ISAR5_EL1),
1271 ID_SANITISED(ID_MMFR4_EL1),
1272 ID_UNALLOCATED(2,7),
1273
1274
1275 ID_SANITISED(MVFR0_EL1),
1276 ID_SANITISED(MVFR1_EL1),
1277 ID_SANITISED(MVFR2_EL1),
1278 ID_UNALLOCATED(3,3),
1279 ID_UNALLOCATED(3,4),
1280 ID_UNALLOCATED(3,5),
1281 ID_UNALLOCATED(3,6),
1282 ID_UNALLOCATED(3,7),
1283
1284
1285
1286 ID_SANITISED(ID_AA64PFR0_EL1),
1287 ID_SANITISED(ID_AA64PFR1_EL1),
1288 ID_UNALLOCATED(4,2),
1289 ID_UNALLOCATED(4,3),
1290 ID_UNALLOCATED(4,4),
1291 ID_UNALLOCATED(4,5),
1292 ID_UNALLOCATED(4,6),
1293 ID_UNALLOCATED(4,7),
1294
1295
1296 ID_SANITISED(ID_AA64DFR0_EL1),
1297 ID_SANITISED(ID_AA64DFR1_EL1),
1298 ID_UNALLOCATED(5,2),
1299 ID_UNALLOCATED(5,3),
1300 ID_HIDDEN(ID_AA64AFR0_EL1),
1301 ID_HIDDEN(ID_AA64AFR1_EL1),
1302 ID_UNALLOCATED(5,6),
1303 ID_UNALLOCATED(5,7),
1304
1305
1306 ID_SANITISED(ID_AA64ISAR0_EL1),
1307 ID_SANITISED(ID_AA64ISAR1_EL1),
1308 ID_UNALLOCATED(6,2),
1309 ID_UNALLOCATED(6,3),
1310 ID_UNALLOCATED(6,4),
1311 ID_UNALLOCATED(6,5),
1312 ID_UNALLOCATED(6,6),
1313 ID_UNALLOCATED(6,7),
1314
1315
1316 ID_SANITISED(ID_AA64MMFR0_EL1),
1317 ID_SANITISED(ID_AA64MMFR1_EL1),
1318 ID_SANITISED(ID_AA64MMFR2_EL1),
1319 ID_UNALLOCATED(7,3),
1320 ID_UNALLOCATED(7,4),
1321 ID_UNALLOCATED(7,5),
1322 ID_UNALLOCATED(7,6),
1323 ID_UNALLOCATED(7,7),
1324
1325 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
1326 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
1327 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
1328 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
1329 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
1330
1331 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
1332 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
1333 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
1334
1335 { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
1336 { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
1337 { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
1338 { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
1339 { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
1340 { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
1341 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
1342 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
1343
1344 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
1345 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
1346
1347 { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
1348 { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
1349
1350 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1351 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1352
1353 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1354 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1355 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1356 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1357 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1358
1359 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1360 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1361
1362 { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1363 { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
1364 { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1365 { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
1366 { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1367 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
1368 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
1369 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
1370 { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1371 { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
1372 { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1373 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1374
1375 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1376 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
1377
1378 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
1379
1380 { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
1381
1382 { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
1383 { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
1384 { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
1385 { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
1386 { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
1387 { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
1388 { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
1389 { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
1390 { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
1391 { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
1392 { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
1393
1394
1395
1396
1397 { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
1398 { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
1399
1400 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
1401 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
1402
1403 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
1404 { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
1405 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
1406
1407
1408 PMU_PMEVCNTR_EL0(0),
1409 PMU_PMEVCNTR_EL0(1),
1410 PMU_PMEVCNTR_EL0(2),
1411 PMU_PMEVCNTR_EL0(3),
1412 PMU_PMEVCNTR_EL0(4),
1413 PMU_PMEVCNTR_EL0(5),
1414 PMU_PMEVCNTR_EL0(6),
1415 PMU_PMEVCNTR_EL0(7),
1416 PMU_PMEVCNTR_EL0(8),
1417 PMU_PMEVCNTR_EL0(9),
1418 PMU_PMEVCNTR_EL0(10),
1419 PMU_PMEVCNTR_EL0(11),
1420 PMU_PMEVCNTR_EL0(12),
1421 PMU_PMEVCNTR_EL0(13),
1422 PMU_PMEVCNTR_EL0(14),
1423 PMU_PMEVCNTR_EL0(15),
1424 PMU_PMEVCNTR_EL0(16),
1425 PMU_PMEVCNTR_EL0(17),
1426 PMU_PMEVCNTR_EL0(18),
1427 PMU_PMEVCNTR_EL0(19),
1428 PMU_PMEVCNTR_EL0(20),
1429 PMU_PMEVCNTR_EL0(21),
1430 PMU_PMEVCNTR_EL0(22),
1431 PMU_PMEVCNTR_EL0(23),
1432 PMU_PMEVCNTR_EL0(24),
1433 PMU_PMEVCNTR_EL0(25),
1434 PMU_PMEVCNTR_EL0(26),
1435 PMU_PMEVCNTR_EL0(27),
1436 PMU_PMEVCNTR_EL0(28),
1437 PMU_PMEVCNTR_EL0(29),
1438 PMU_PMEVCNTR_EL0(30),
1439
1440 PMU_PMEVTYPER_EL0(0),
1441 PMU_PMEVTYPER_EL0(1),
1442 PMU_PMEVTYPER_EL0(2),
1443 PMU_PMEVTYPER_EL0(3),
1444 PMU_PMEVTYPER_EL0(4),
1445 PMU_PMEVTYPER_EL0(5),
1446 PMU_PMEVTYPER_EL0(6),
1447 PMU_PMEVTYPER_EL0(7),
1448 PMU_PMEVTYPER_EL0(8),
1449 PMU_PMEVTYPER_EL0(9),
1450 PMU_PMEVTYPER_EL0(10),
1451 PMU_PMEVTYPER_EL0(11),
1452 PMU_PMEVTYPER_EL0(12),
1453 PMU_PMEVTYPER_EL0(13),
1454 PMU_PMEVTYPER_EL0(14),
1455 PMU_PMEVTYPER_EL0(15),
1456 PMU_PMEVTYPER_EL0(16),
1457 PMU_PMEVTYPER_EL0(17),
1458 PMU_PMEVTYPER_EL0(18),
1459 PMU_PMEVTYPER_EL0(19),
1460 PMU_PMEVTYPER_EL0(20),
1461 PMU_PMEVTYPER_EL0(21),
1462 PMU_PMEVTYPER_EL0(22),
1463 PMU_PMEVTYPER_EL0(23),
1464 PMU_PMEVTYPER_EL0(24),
1465 PMU_PMEVTYPER_EL0(25),
1466 PMU_PMEVTYPER_EL0(26),
1467 PMU_PMEVTYPER_EL0(27),
1468 PMU_PMEVTYPER_EL0(28),
1469 PMU_PMEVTYPER_EL0(29),
1470 PMU_PMEVTYPER_EL0(30),
1471
1472
1473
1474
1475 { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1476
1477 { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1478 { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1479 { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
1480};
1481
1482static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1483 struct sys_reg_params *p,
1484 const struct sys_reg_desc *r)
1485{
1486 if (p->is_write) {
1487 return ignore_write(vcpu, p);
1488 } else {
1489 u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1490 u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1491 u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1492
1493 p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1494 (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1495 (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1496 | (6 << 16) | (el3 << 14) | (el3 << 12));
1497 return true;
1498 }
1499}
1500
1501static bool trap_debug32(struct kvm_vcpu *vcpu,
1502 struct sys_reg_params *p,
1503 const struct sys_reg_desc *r)
1504{
1505 if (p->is_write) {
1506 vcpu_cp14(vcpu, r->reg) = p->regval;
1507 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1508 } else {
1509 p->regval = vcpu_cp14(vcpu, r->reg);
1510 }
1511
1512 return true;
1513}
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526static bool trap_xvr(struct kvm_vcpu *vcpu,
1527 struct sys_reg_params *p,
1528 const struct sys_reg_desc *rd)
1529{
1530 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1531
1532 if (p->is_write) {
1533 u64 val = *dbg_reg;
1534
1535 val &= 0xffffffffUL;
1536 val |= p->regval << 32;
1537 *dbg_reg = val;
1538
1539 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1540 } else {
1541 p->regval = *dbg_reg >> 32;
1542 }
1543
1544 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1545
1546 return true;
1547}
1548
/* Expands to the four AArch32 debug break/watchpoint registers for slot n. */
#define DBG_BCR_BVR_WCR_WVR(n)					\
	/* DBGBVRn */						\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */						\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */						\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */						\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1558
/* DBGBXVRn: extended (upper-half) breakpoint value register for slot n. */
#define DBGBXVR(n)						\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1561
1562
1563
1564
1565
1566
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, would this principle change.
 *
 * NOTE: this table must stay sorted per cmp_sys_reg() —
 * kvm_sys_reg_table_init() BUG()s otherwise.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
1650
1651
/* Trapped cp14 64bit registers. Must stay sorted (see cmp_sys_reg). */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
1659
1660
/* Macro to expand the AArch32 PMEVCNTRn register */
#define PMU_PMEVCNTR(n)						\
	/* PMEVCNTRn */						\
	{ Op1(0), CRn(0b1110),					\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),	\
	  access_pmu_evcntr }
1666
1667
/* Macro to expand the AArch32 PMEVTYPERn register */
#define PMU_PMEVTYPER(n)					\
	/* PMEVTYPERn */					\
	{ Op1(0), CRn(0b1110),					\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),	\
	  access_pmu_evtyper }
1673
1674
1675
1676
1677
1678
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 *
 * NOTE: this table must stay sorted per cmp_sys_reg() —
 * kvm_sys_reg_table_init() BUG()s otherwise.
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations (cache maintenance by set/way):
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* CNTP_TVAL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
	/* CNTP_CTL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),

	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),

	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};
1798
/* Trapped cp15 64bit (MCRR/MRRC) registers. Must stay sorted. */
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
};
1808
1809
/* Per-target register table overrides, registered at init time. */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

/* Hook a target-specific sys_reg table into the lookup machinery. */
void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}
1817
1818
1819static const struct sys_reg_desc *get_target_table(unsigned target,
1820 bool mode_is_64,
1821 size_t *num)
1822{
1823 struct kvm_sys_reg_target_table *table;
1824
1825 table = target_tables[target];
1826 if (mode_is_64) {
1827 *num = table->table64.num;
1828 return table->table64.table;
1829 } else {
1830 *num = table->table32.num;
1831 return table->table32.table;
1832 }
1833}
1834
/*
 * Pack Op0/Op1/CRn/CRm/Op2 of a register description (or of trapped
 * access parameters) into one scalar whose numeric order matches the
 * table sort order, so it can drive bsearch()/cmp.
 */
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val = (x)->Op0 << 14;					\
		/* Maximum of 14 bits of Op0..Op2 fields below. */	\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	})
1845
/* bsearch() comparator: key is a packed encoding, elt a table entry. */
static int match_sys_reg(const void *key, const void *elt)
{
	const struct sys_reg_desc *desc = elt;

	return (unsigned long)key - reg_to_match_value(desc);
}
1853
1854static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
1855 const struct sys_reg_desc table[],
1856 unsigned int num)
1857{
1858 unsigned long pval = reg_to_match_value(params);
1859
1860 return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
1861}
1862
/*
 * LDC/STC accesses to the CP14 debug registers are not emulated:
 * inject an UNDEF into the guest and resume.
 */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
1868
/*
 * Run the access handler of a matched register descriptor, skipping
 * the trapped instruction when the handler reports success.
 */
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897static int emulate_cp(struct kvm_vcpu *vcpu,
1898 struct sys_reg_params *params,
1899 const struct sys_reg_desc *table,
1900 size_t num)
1901{
1902 const struct sys_reg_desc *r;
1903
1904 if (!table)
1905 return -1;
1906
1907 r = find_reg(params, table, num);
1908
1909 if (r) {
1910 perform_access(vcpu, params, r);
1911 return 0;
1912 }
1913
1914
1915 return -1;
1916}
1917
1918static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1919 struct sys_reg_params *params)
1920{
1921 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
1922 int cp = -1;
1923
1924 switch(hsr_ec) {
1925 case ESR_ELx_EC_CP15_32:
1926 case ESR_ELx_EC_CP15_64:
1927 cp = 15;
1928 break;
1929 case ESR_ELx_EC_CP14_MR:
1930 case ESR_ELx_EC_CP14_64:
1931 cp = 14;
1932 break;
1933 default:
1934 WARN_ON(1);
1935 }
1936
1937 kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
1938 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
1939 print_sys_reg_instr(params);
1940 kvm_inject_undefined(vcpu);
1941}
1942
1943
1944
1945
1946
1947
/*
 * Handle a 64-bit (MCRR/MRRC) coprocessor trap: decode the HSR into
 * sys_reg coordinates, try the target-specific table before the global
 * one, and transfer the 64-bit value via the Rt/Rt2 GPR pair.
 * Always returns 1 (resume the guest).
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
1999
2000
2001
2002
2003
2004
2005static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2006 const struct sys_reg_desc *global,
2007 size_t nr_global,
2008 const struct sys_reg_desc *target_specific,
2009 size_t nr_specific)
2010{
2011 struct sys_reg_params params;
2012 u32 hsr = kvm_vcpu_get_hsr(vcpu);
2013 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2014
2015 params.is_aarch32 = true;
2016 params.is_32bit = true;
2017 params.CRm = (hsr >> 1) & 0xf;
2018 params.regval = vcpu_get_reg(vcpu, Rt);
2019 params.is_write = ((hsr & 1) == 0);
2020 params.CRn = (hsr >> 10) & 0xf;
2021 params.Op0 = 0;
2022 params.Op1 = (hsr >> 14) & 0x7;
2023 params.Op2 = (hsr >> 17) & 0x7;
2024
2025 if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) ||
2026 !emulate_cp(vcpu, ¶ms, global, nr_global)) {
2027 if (!params.is_write)
2028 vcpu_set_reg(vcpu, Rt, params.regval);
2029 return 1;
2030 }
2031
2032 unhandled_cp_access(vcpu, ¶ms);
2033 return 1;
2034}
2035
2036int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2037{
2038 const struct sys_reg_desc *target_specific;
2039 size_t num;
2040
2041 target_specific = get_target_table(vcpu->arch.target, false, &num);
2042 return kvm_handle_cp_64(vcpu,
2043 cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
2044 target_specific, num);
2045}
2046
2047int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2048{
2049 const struct sys_reg_desc *target_specific;
2050 size_t num;
2051
2052 target_specific = get_target_table(vcpu->arch.target, false, &num);
2053 return kvm_handle_cp_32(vcpu,
2054 cp15_regs, ARRAY_SIZE(cp15_regs),
2055 target_specific, num);
2056}
2057
2058int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2059{
2060 return kvm_handle_cp_64(vcpu,
2061 cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
2062 NULL, 0);
2063}
2064
2065int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2066{
2067 return kvm_handle_cp_32(vcpu,
2068 cp14_regs, ARRAY_SIZE(cp14_regs),
2069 NULL, 0);
2070}
2071
2072static int emulate_sys_reg(struct kvm_vcpu *vcpu,
2073 struct sys_reg_params *params)
2074{
2075 size_t num;
2076 const struct sys_reg_desc *table, *r;
2077
2078 table = get_target_table(vcpu->arch.target, true, &num);
2079
2080
2081 r = find_reg(params, table, num);
2082 if (!r)
2083 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2084
2085 if (likely(r)) {
2086 perform_access(vcpu, params, r);
2087 } else {
2088 kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
2089 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2090 print_sys_reg_instr(params);
2091 kvm_inject_undefined(vcpu);
2092 }
2093 return 1;
2094}
2095
2096static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
2097 const struct sys_reg_desc *table, size_t num)
2098{
2099 unsigned long i;
2100
2101 for (i = 0; i < num; i++)
2102 if (table[i].reset)
2103 table[i].reset(vcpu, &table[i]);
2104}
2105
2106
2107
2108
2109
2110
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	/* Decode the ISS fields of the syndrome into register coordinates. */
	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	/* A read puts whatever the handler produced back into Rt. */
	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
2136
2137
2138
2139
2140
/*
 * Decode a userspace KVM_{GET,SET}_ONE_REG index into sys_reg
 * coordinates. Returns false for unsupported sizes or if any bit
 * outside the known field masks is set.
 */
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		/* Only 64-bit indices are supported here. */
		return false;
	}
}
2169
2170const struct sys_reg_desc *find_reg_by_id(u64 id,
2171 struct sys_reg_params *params,
2172 const struct sys_reg_desc table[],
2173 unsigned int num)
2174{
2175 if (!index_to_params(id, params))
2176 return NULL;
2177
2178 return find_reg(params, table, num);
2179}
2180
2181
2182static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
2183 u64 id)
2184{
2185 size_t num;
2186 const struct sys_reg_desc *table, *r;
2187 struct sys_reg_params params;
2188
2189
2190 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2191 return NULL;
2192
2193 table = get_target_table(vcpu->arch.target, true, &num);
2194 r = find_reg_by_id(id, ¶ms, table, num);
2195 if (!r)
2196 r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2197
2198
2199 if (r && !(r->reg || r->get_user))
2200 r = NULL;
2201
2202 return r;
2203}
2204
2205
2206
2207
2208
2209
2210
2211
2212
/*
 * Generate a get_<reg>() helper that captures the host's value of the
 * named system register into the descriptor's ->val field. These are
 * the "invariant" registers: the guest sees the host values.
 */
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
2225
2226
/* ->val is filled in by kvm_sys_reg_table_init() via the reset hooks. */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
2234
2235static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2236{
2237 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2238 return -EFAULT;
2239 return 0;
2240}
2241
2242static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2243{
2244 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2245 return -EFAULT;
2246 return 0;
2247}
2248
2249static int get_invariant_sys_reg(u64 id, void __user *uaddr)
2250{
2251 struct sys_reg_params params;
2252 const struct sys_reg_desc *r;
2253
2254 r = find_reg_by_id(id, ¶ms, invariant_sys_regs,
2255 ARRAY_SIZE(invariant_sys_regs));
2256 if (!r)
2257 return -ENOENT;
2258
2259 return reg_to_user(uaddr, &r->val, id);
2260}
2261
2262static int set_invariant_sys_reg(u64 id, void __user *uaddr)
2263{
2264 struct sys_reg_params params;
2265 const struct sys_reg_desc *r;
2266 int err;
2267 u64 val = 0;
2268
2269 r = find_reg_by_id(id, ¶ms, invariant_sys_regs,
2270 ARRAY_SIZE(invariant_sys_regs));
2271 if (!r)
2272 return -ENOENT;
2273
2274 err = reg_from_user(&val, uaddr, id);
2275 if (err)
2276 return err;
2277
2278
2279 if (r->val != val)
2280 return -EINVAL;
2281
2282 return 0;
2283}
2284
2285static bool is_valid_cache(u32 val)
2286{
2287 u32 level, ctype;
2288
2289 if (val >= CSSELR_MAX)
2290 return false;
2291
2292
2293 level = (val >> 1);
2294 ctype = (cache_levels >> (level * 3)) & 7;
2295
2296 switch (ctype) {
2297 case 0:
2298 return false;
2299 case 1:
2300 return (val & 1);
2301 case 2:
2302 case 4:
2303 return !(val & 1);
2304 case 3:
2305 return true;
2306 default:
2307 return false;
2308 }
2309}
2310
2311static int demux_c15_get(u64 id, void __user *uaddr)
2312{
2313 u32 val;
2314 u32 __user *uval = uaddr;
2315
2316
2317 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2318 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2319 return -ENOENT;
2320
2321 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2322 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2323 if (KVM_REG_SIZE(id) != 4)
2324 return -ENOENT;
2325 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2326 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2327 if (!is_valid_cache(val))
2328 return -ENOENT;
2329
2330 return put_user(get_ccsidr(val), uval);
2331 default:
2332 return -ENOENT;
2333 }
2334}
2335
2336static int demux_c15_set(u64 id, void __user *uaddr)
2337{
2338 u32 val, newval;
2339 u32 __user *uval = uaddr;
2340
2341
2342 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2343 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2344 return -ENOENT;
2345
2346 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2347 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2348 if (KVM_REG_SIZE(id) != 4)
2349 return -ENOENT;
2350 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2351 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2352 if (!is_valid_cache(val))
2353 return -ENOENT;
2354
2355 if (get_user(newval, uval))
2356 return -EFAULT;
2357
2358
2359 if (newval != get_ccsidr(val))
2360 return -EINVAL;
2361 return 0;
2362 default:
2363 return -ENOENT;
2364 }
2365}
2366
2367int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2368{
2369 const struct sys_reg_desc *r;
2370 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2371
2372 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2373 return demux_c15_get(reg->id, uaddr);
2374
2375 if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2376 return -ENOENT;
2377
2378 r = index_to_sys_reg_desc(vcpu, reg->id);
2379 if (!r)
2380 return get_invariant_sys_reg(reg->id, uaddr);
2381
2382 if (r->get_user)
2383 return (r->get_user)(vcpu, r, reg, uaddr);
2384
2385 return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
2386}
2387
2388int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2389{
2390 const struct sys_reg_desc *r;
2391 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2392
2393 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2394 return demux_c15_set(reg->id, uaddr);
2395
2396 if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2397 return -ENOENT;
2398
2399 r = index_to_sys_reg_desc(vcpu, reg->id);
2400 if (!r)
2401 return set_invariant_sys_reg(reg->id, uaddr);
2402
2403 if (r->set_user)
2404 return (r->set_user)(vcpu, r, reg, uaddr);
2405
2406 return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2407}
2408
2409static unsigned int num_demux_regs(void)
2410{
2411 unsigned int i, count = 0;
2412
2413 for (i = 0; i < CSSELR_MAX; i++)
2414 if (is_valid_cache(i))
2415 count++;
2416
2417 return count;
2418}
2419
2420static int write_demux_regids(u64 __user *uindices)
2421{
2422 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2423 unsigned int i;
2424
2425 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2426 for (i = 0; i < CSSELR_MAX; i++) {
2427 if (!is_valid_cache(i))
2428 continue;
2429 if (put_user(val | i, uindices))
2430 return -EFAULT;
2431 uindices++;
2432 }
2433 return 0;
2434}
2435
2436static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2437{
2438 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2439 KVM_REG_ARM64_SYSREG |
2440 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2441 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2442 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2443 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2444 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2445}
2446
2447static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2448{
2449 if (!*uind)
2450 return true;
2451
2452 if (put_user(sys_reg_to_index(reg), *uind))
2453 return false;
2454
2455 (*uind)++;
2456 return true;
2457}
2458
2459static int walk_one_sys_reg(const struct sys_reg_desc *rd,
2460 u64 __user **uind,
2461 unsigned int *total)
2462{
2463
2464
2465
2466
2467 if (!(rd->reg || rd->get_user))
2468 return 0;
2469
2470 if (!copy_reg_to_user(rd, uind))
2471 return -EFAULT;
2472
2473 (*total)++;
2474 return 0;
2475}
2476
2477
/*
 * Merge-walk the target-specific and generic tables (both assumed
 * sorted, see kvm_sys_reg_table_init), emitting each user-visible
 * register index exactly once. A NULL uind just counts. Relies on
 * cmp_sys_reg() treating a NULL argument as "greater" so the other
 * table can drain after one is exhausted.
 */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(i1, &uind, &total);
		else
			err = walk_one_sys_reg(i2, &uind, &total);

		if (err)
			return err;

		/* On a tie (cmp == 0) both iterators advance: no duplicates. */
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
2512
2513unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2514{
2515 return ARRAY_SIZE(invariant_sys_regs)
2516 + num_demux_regs()
2517 + walk_sys_regs(vcpu, (u64 __user *)NULL);
2518}
2519
2520int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2521{
2522 unsigned int i;
2523 int err;
2524
2525
2526 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2527 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2528 return -EFAULT;
2529 uindices++;
2530 }
2531
2532 err = walk_sys_regs(vcpu, uindices);
2533 if (err < 0)
2534 return err;
2535 uindices += err;
2536
2537 return write_demux_regids(uindices);
2538}
2539
2540static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2541{
2542 unsigned int i;
2543
2544 for (i = 1; i < n; i++) {
2545 if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2546 kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2547 return 1;
2548 }
2549 }
2550
2551 return 0;
2552}
2553
/*
 * One-time init: sanity-check every sys_reg table, snapshot the host's
 * invariant registers, and compute the usable cache hierarchy mask.
 */
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure every table is sorted; the lookups depend on it. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/*
	 * Latch the host values of the invariant registers: each reset
	 * handler stores the current hardware value into its table entry
	 * (NULL vcpu — these are per-host, not per-vcpu).
	 */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * Derive cache_levels from the host CLIDR.  CLIDR holds seven
	 * 3-bit Ctype fields; per the architecture, once a Ctype field
	 * reads as 0 no caches exist at further-out levels, so everything
	 * above the first zero field must be ignored.  Find that first
	 * zero field and mask off all higher bits.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Keep only the Ctype fields below the first zero one. */
	cache_levels &= (1 << (i*3))-1;
}
2589
2590
2591
2592
2593
2594
2595
2596
2597void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2598{
2599 size_t num;
2600 const struct sys_reg_desc *table;
2601
2602
2603 memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
2604
2605
2606 reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2607
2608 table = get_target_table(vcpu->arch.target, true, &num);
2609 reset_sys_reg_descs(vcpu, table, num);
2610
2611 for (num = 1; num < NR_SYS_REGS; num++) {
2612 if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
2613 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2614 break;
2615 }
2616}
2617