linux/arch/arm64/kvm/sys_regs.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2012,2013 - ARM Ltd
   4 * Author: Marc Zyngier <marc.zyngier@arm.com>
   5 *
   6 * Derived from arch/arm/kvm/coproc.c:
   7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   8 * Authors: Rusty Russell <rusty@rustcorp.com.au>
   9 *          Christoffer Dall <c.dall@virtualopensystems.com>
  10 */
  11
  12#include <linux/bsearch.h>
  13#include <linux/kvm_host.h>
  14#include <linux/mm.h>
  15#include <linux/printk.h>
  16#include <linux/uaccess.h>
  17
  18#include <asm/cacheflush.h>
  19#include <asm/cputype.h>
  20#include <asm/debug-monitors.h>
  21#include <asm/esr.h>
  22#include <asm/kvm_arm.h>
  23#include <asm/kvm_coproc.h>
  24#include <asm/kvm_emulate.h>
  25#include <asm/kvm_host.h>
  26#include <asm/kvm_hyp.h>
  27#include <asm/kvm_mmu.h>
  28#include <asm/perf_event.h>
  29#include <asm/sysreg.h>
  30
  31#include <trace/events/kvm.h>
  32
  33#include "sys_regs.h"
  34
  35#include "trace.h"
  36
  37/*
   38 * All of this file is extremely similar to the ARM coproc.c, but the
  39 * types are different. My gut feeling is that it should be pretty
  40 * easy to merge, but that would be an ABI breakage -- again. VFP
  41 * would also need to be abstracted.
  42 *
  43 * For AArch32, we only take care of what is being trapped. Anything
  44 * that has to do with init and userspace access has to go via the
  45 * 64bit interface.
  46 */
  47
  48static bool read_from_write_only(struct kvm_vcpu *vcpu,
  49                                 struct sys_reg_params *params,
  50                                 const struct sys_reg_desc *r)
  51{
  52        WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
  53        print_sys_reg_instr(params);
  54        kvm_inject_undefined(vcpu);
  55        return false;
  56}
  57
  58static bool write_to_read_only(struct kvm_vcpu *vcpu,
  59                               struct sys_reg_params *params,
  60                               const struct sys_reg_desc *r)
  61{
  62        WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
  63        print_sys_reg_instr(params);
  64        kvm_inject_undefined(vcpu);
  65        return false;
  66}
  67
  68u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
  69{
  70        if (!vcpu->arch.sysregs_loaded_on_cpu)
  71                goto immediate_read;
  72
  73        /*
  74         * System registers listed in the switch are not saved on every
  75         * exit from the guest but are only saved on vcpu_put.
  76         *
  77         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
  78         * should never be listed below, because the guest cannot modify its
  79         * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
  80         * thread when emulating cross-VCPU communication.
  81         */
  82        switch (reg) {
  83        case CSSELR_EL1:        return read_sysreg_s(SYS_CSSELR_EL1);
  84        case SCTLR_EL1:         return read_sysreg_s(SYS_SCTLR_EL12);
  85        case ACTLR_EL1:         return read_sysreg_s(SYS_ACTLR_EL1);
  86        case CPACR_EL1:         return read_sysreg_s(SYS_CPACR_EL12);
  87        case TTBR0_EL1:         return read_sysreg_s(SYS_TTBR0_EL12);
  88        case TTBR1_EL1:         return read_sysreg_s(SYS_TTBR1_EL12);
  89        case TCR_EL1:           return read_sysreg_s(SYS_TCR_EL12);
  90        case ESR_EL1:           return read_sysreg_s(SYS_ESR_EL12);
  91        case AFSR0_EL1:         return read_sysreg_s(SYS_AFSR0_EL12);
  92        case AFSR1_EL1:         return read_sysreg_s(SYS_AFSR1_EL12);
  93        case FAR_EL1:           return read_sysreg_s(SYS_FAR_EL12);
  94        case MAIR_EL1:          return read_sysreg_s(SYS_MAIR_EL12);
  95        case VBAR_EL1:          return read_sysreg_s(SYS_VBAR_EL12);
  96        case CONTEXTIDR_EL1:    return read_sysreg_s(SYS_CONTEXTIDR_EL12);
  97        case TPIDR_EL0:         return read_sysreg_s(SYS_TPIDR_EL0);
  98        case TPIDRRO_EL0:       return read_sysreg_s(SYS_TPIDRRO_EL0);
  99        case TPIDR_EL1:         return read_sysreg_s(SYS_TPIDR_EL1);
 100        case AMAIR_EL1:         return read_sysreg_s(SYS_AMAIR_EL12);
 101        case CNTKCTL_EL1:       return read_sysreg_s(SYS_CNTKCTL_EL12);
 102        case PAR_EL1:           return read_sysreg_s(SYS_PAR_EL1);
 103        case DACR32_EL2:        return read_sysreg_s(SYS_DACR32_EL2);
 104        case IFSR32_EL2:        return read_sysreg_s(SYS_IFSR32_EL2);
 105        case DBGVCR32_EL2:      return read_sysreg_s(SYS_DBGVCR32_EL2);
 106        }
 107
 108immediate_read:
 109        return __vcpu_sys_reg(vcpu, reg);
 110}
 111
 112void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 113{
 114        if (!vcpu->arch.sysregs_loaded_on_cpu)
 115                goto immediate_write;
 116
 117        /*
 118         * System registers listed in the switch are not restored on every
 119         * entry to the guest but are only restored on vcpu_load.
 120         *
 121         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
  122         * should never be listed below, because the MPIDR should only be
 123         * set once, before running the VCPU, and never changed later.
 124         */
 125        switch (reg) {
 126        case CSSELR_EL1:        write_sysreg_s(val, SYS_CSSELR_EL1);    return;
 127        case SCTLR_EL1:         write_sysreg_s(val, SYS_SCTLR_EL12);    return;
 128        case ACTLR_EL1:         write_sysreg_s(val, SYS_ACTLR_EL1);     return;
 129        case CPACR_EL1:         write_sysreg_s(val, SYS_CPACR_EL12);    return;
 130        case TTBR0_EL1:         write_sysreg_s(val, SYS_TTBR0_EL12);    return;
 131        case TTBR1_EL1:         write_sysreg_s(val, SYS_TTBR1_EL12);    return;
 132        case TCR_EL1:           write_sysreg_s(val, SYS_TCR_EL12);      return;
 133        case ESR_EL1:           write_sysreg_s(val, SYS_ESR_EL12);      return;
 134        case AFSR0_EL1:         write_sysreg_s(val, SYS_AFSR0_EL12);    return;
 135        case AFSR1_EL1:         write_sysreg_s(val, SYS_AFSR1_EL12);    return;
 136        case FAR_EL1:           write_sysreg_s(val, SYS_FAR_EL12);      return;
 137        case MAIR_EL1:          write_sysreg_s(val, SYS_MAIR_EL12);     return;
 138        case VBAR_EL1:          write_sysreg_s(val, SYS_VBAR_EL12);     return;
 139        case CONTEXTIDR_EL1:    write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
 140        case TPIDR_EL0:         write_sysreg_s(val, SYS_TPIDR_EL0);     return;
 141        case TPIDRRO_EL0:       write_sysreg_s(val, SYS_TPIDRRO_EL0);   return;
 142        case TPIDR_EL1:         write_sysreg_s(val, SYS_TPIDR_EL1);     return;
 143        case AMAIR_EL1:         write_sysreg_s(val, SYS_AMAIR_EL12);    return;
 144        case CNTKCTL_EL1:       write_sysreg_s(val, SYS_CNTKCTL_EL12);  return;
 145        case PAR_EL1:           write_sysreg_s(val, SYS_PAR_EL1);       return;
 146        case DACR32_EL2:        write_sysreg_s(val, SYS_DACR32_EL2);    return;
 147        case IFSR32_EL2:        write_sysreg_s(val, SYS_IFSR32_EL2);    return;
 148        case DBGVCR32_EL2:      write_sysreg_s(val, SYS_DBGVCR32_EL2);  return;
 149        }
 150
 151immediate_write:
 152         __vcpu_sys_reg(vcpu, reg) = val;
 153}
 154
 155/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
 156static u32 cache_levels;
 157
 158/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
 159#define CSSELR_MAX 12
 160
 161/* Which cache CCSIDR represents depends on CSSELR value. */
 162static u32 get_ccsidr(u32 csselr)
 163{
 164        u32 ccsidr;
 165
  166        /* Make sure no one else changes CSSELR during this! */
 167        local_irq_disable();
 168        write_sysreg(csselr, csselr_el1);
 169        isb();
 170        ccsidr = read_sysreg(ccsidr_el1);
 171        local_irq_enable();
 172
 173        return ccsidr;
 174}
 175
 176/*
 177 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 178 */
 179static bool access_dcsw(struct kvm_vcpu *vcpu,
 180                        struct sys_reg_params *p,
 181                        const struct sys_reg_desc *r)
 182{
 183        if (!p->is_write)
 184                return read_from_write_only(vcpu, p, r);
 185
 186        /*
 187         * Only track S/W ops if we don't have FWB. It still indicates
 188         * that the guest is a bit broken (S/W operations should only
 189         * be done by firmware, knowing that there is only a single
 190         * CPU left in the system, and certainly not from non-secure
 191         * software).
 192         */
 193        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
 194                kvm_set_way_flush(vcpu);
 195
 196        return true;
 197}
 198
 199/*
 200 * Generic accessor for VM registers. Only called as long as HCR_TVM
 201 * is set. If the guest enables the MMU, we stop trapping the VM
 202 * sys_regs and leave it in complete control of the caches.
 203 */
 204static bool access_vm_reg(struct kvm_vcpu *vcpu,
 205                          struct sys_reg_params *p,
 206                          const struct sys_reg_desc *r)
 207{
 208        bool was_enabled = vcpu_has_cache_enabled(vcpu);
 209        u64 val;
 210        int reg = r->reg;
 211
 212        BUG_ON(!p->is_write);
 213
 214        /* See the 32bit mapping in kvm_host.h */
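             /*
              * AArch32 cp15 registers are mapped in pairs onto the 64-bit
              * sysreg array: r->reg / 2 picks the backing register, an odd
              * r->reg is its upper 32 bits and an even one its lower 32 bits.
              * A pure 32-bit access therefore only replaces the addressed
              * half below; AArch64 (and 64-bit cp15) accesses take p->regval
              * as-is.
              */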
 215        if (p->is_aarch32)
 216                reg = r->reg / 2;
 217
 218        if (!p->is_aarch32 || !p->is_32bit) {
 219                val = p->regval;
 220        } else {
 221                val = vcpu_read_sys_reg(vcpu, reg);
 222                if (r->reg % 2)
 223                        val = (p->regval << 32) | (u64)lower_32_bits(val);
 224                else
 225                        val = ((u64)upper_32_bits(val) << 32) |
 226                                lower_32_bits(p->regval);
 227        }
 228        vcpu_write_sys_reg(vcpu, val, reg);
 229
 230        kvm_toggle_cache(vcpu, was_enabled);
 231        return true;
 232}
 233
 234/*
 235 * Trap handler for the GICv3 SGI generation system register.
 236 * Forward the request to the VGIC emulation.
 237 * The cp15_64 code makes sure this automatically works
 238 * for both AArch64 and AArch32 accesses.
 239 */
 240static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 241                           struct sys_reg_params *p,
 242                           const struct sys_reg_desc *r)
 243{
 244        bool g1;
 245
 246        if (!p->is_write)
 247                return read_from_write_only(vcpu, p, r);
 248
 249        /*
  250         * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
 251         * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
 252         * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
 253         * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
 254         * group.
 255         */
 256        if (p->is_aarch32) {
 257                switch (p->Op1) {
 258                default:                /* Keep GCC quiet */
 259                case 0:                 /* ICC_SGI1R */
 260                        g1 = true;
 261                        break;
 262                case 1:                 /* ICC_ASGI1R */
 263                case 2:                 /* ICC_SGI0R */
 264                        g1 = false;
 265                        break;
 266                }
 267        } else {
 268                switch (p->Op2) {
 269                default:                /* Keep GCC quiet */
 270                case 5:                 /* ICC_SGI1R_EL1 */
 271                        g1 = true;
 272                        break;
 273                case 6:                 /* ICC_ASGI1R_EL1 */
 274                case 7:                 /* ICC_SGI0R_EL1 */
 275                        g1 = false;
 276                        break;
 277                }
 278        }
 279
 280        vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
 281
 282        return true;
 283}
 284
 285static bool access_gic_sre(struct kvm_vcpu *vcpu,
 286                           struct sys_reg_params *p,
 287                           const struct sys_reg_desc *r)
 288{
 289        if (p->is_write)
 290                return ignore_write(vcpu, p);
 291
 292        p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
 293        return true;
 294}
 295
 296static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 297                        struct sys_reg_params *p,
 298                        const struct sys_reg_desc *r)
 299{
 300        if (p->is_write)
 301                return ignore_write(vcpu, p);
 302        else
 303                return read_zero(vcpu, p);
 304}
 305
 306/*
 307 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 308 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 309 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 310 * treat it separately.
 311 */
 312static bool trap_loregion(struct kvm_vcpu *vcpu,
 313                          struct sys_reg_params *p,
 314                          const struct sys_reg_desc *r)
 315{
 316        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 317        u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
 318                         (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 319
 320        if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
 321                kvm_inject_undefined(vcpu);
 322                return false;
 323        }
 324
 325        if (p->is_write && sr == SYS_LORID_EL1)
 326                return write_to_read_only(vcpu, p, r);
 327
 328        return trap_raz_wi(vcpu, p, r);
 329}
 330
 331static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
 332                           struct sys_reg_params *p,
 333                           const struct sys_reg_desc *r)
 334{
 335        if (p->is_write) {
 336                return ignore_write(vcpu, p);
 337        } else {
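                     /* OSLSR_EL1.OSLM == 0b10 (OS Lock implemented), OSLK == 0 (not locked) */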
 338                p->regval = (1 << 3);
 339                return true;
 340        }
 341}
 342
 343static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 344                                   struct sys_reg_params *p,
 345                                   const struct sys_reg_desc *r)
 346{
 347        if (p->is_write) {
 348                return ignore_write(vcpu, p);
 349        } else {
 350                p->regval = read_sysreg(dbgauthstatus_el1);
 351                return true;
 352        }
 353}
 354
 355/*
 356 * We want to avoid world-switching all the DBG registers all the
 357 * time:
 358 * 
 359 * - If we've touched any debug register, it is likely that we're
 360 *   going to touch more of them. It then makes sense to disable the
 361 *   traps and start doing the save/restore dance
 362 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 363 *   then mandatory to save/restore the registers, as the guest
 364 *   depends on them.
 365 * 
 366 * For this, we use a DIRTY bit, indicating the guest has modified the
  367 * debug registers, used as follows:
 368 *
 369 * On guest entry:
 370 * - If the dirty bit is set (because we're coming back from trapping),
 371 *   disable the traps, save host registers, restore guest registers.
 372 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 373 *   set the dirty bit, disable the traps, save host registers,
 374 *   restore guest registers.
 375 * - Otherwise, enable the traps
 376 *
 377 * On guest exit:
 378 * - If the dirty bit is set, save guest registers, restore host
  379 *   registers and clear the dirty bit. This ensures that the host can
 380 *   now use the debug registers.
 381 */
 382static bool trap_debug_regs(struct kvm_vcpu *vcpu,
 383                            struct sys_reg_params *p,
 384                            const struct sys_reg_desc *r)
 385{
 386        if (p->is_write) {
 387                vcpu_write_sys_reg(vcpu, p->regval, r->reg);
 388                vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
 389        } else {
 390                p->regval = vcpu_read_sys_reg(vcpu, r->reg);
 391        }
 392
 393        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
 394
 395        return true;
 396}
 397
 398/*
 399 * reg_to_dbg/dbg_to_reg
 400 *
  401 * A 32-bit write to a debug register leaves the top bits alone.
  402 * A 32-bit read from a debug register returns only the bottom bits.
  403 *
  404 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
  405 * hyp.S code switches between host and guest values from then on.
 406 */
 407static void reg_to_dbg(struct kvm_vcpu *vcpu,
 408                       struct sys_reg_params *p,
 409                       u64 *dbg_reg)
 410{
 411        u64 val = p->regval;
 412
 413        if (p->is_32bit) {
 414                val &= 0xffffffffUL;
 415                val |= ((*dbg_reg >> 32) << 32);
 416        }
 417
 418        *dbg_reg = val;
 419        vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
 420}
 421
 422static void dbg_to_reg(struct kvm_vcpu *vcpu,
 423                       struct sys_reg_params *p,
 424                       u64 *dbg_reg)
 425{
 426        p->regval = *dbg_reg;
 427        if (p->is_32bit)
 428                p->regval &= 0xffffffffUL;
 429}
 430
 431static bool trap_bvr(struct kvm_vcpu *vcpu,
 432                     struct sys_reg_params *p,
 433                     const struct sys_reg_desc *rd)
 434{
 435        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 436
 437        if (p->is_write)
 438                reg_to_dbg(vcpu, p, dbg_reg);
 439        else
 440                dbg_to_reg(vcpu, p, dbg_reg);
 441
 442        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 443
 444        return true;
 445}
 446
 447static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 448                const struct kvm_one_reg *reg, void __user *uaddr)
 449{
 450        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 451
 452        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 453                return -EFAULT;
 454        return 0;
 455}
 456
 457static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 458        const struct kvm_one_reg *reg, void __user *uaddr)
 459{
 460        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 461
 462        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 463                return -EFAULT;
 464        return 0;
 465}
 466
 467static void reset_bvr(struct kvm_vcpu *vcpu,
 468                      const struct sys_reg_desc *rd)
 469{
 470        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
 471}
 472
 473static bool trap_bcr(struct kvm_vcpu *vcpu,
 474                     struct sys_reg_params *p,
 475                     const struct sys_reg_desc *rd)
 476{
 477        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 478
 479        if (p->is_write)
 480                reg_to_dbg(vcpu, p, dbg_reg);
 481        else
 482                dbg_to_reg(vcpu, p, dbg_reg);
 483
 484        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 485
 486        return true;
 487}
 488
 489static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 490                const struct kvm_one_reg *reg, void __user *uaddr)
 491{
 492        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 493
 494        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 495                return -EFAULT;
 496
 497        return 0;
 498}
 499
 500static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 501        const struct kvm_one_reg *reg, void __user *uaddr)
 502{
 503        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 504
 505        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 506                return -EFAULT;
 507        return 0;
 508}
 509
 510static void reset_bcr(struct kvm_vcpu *vcpu,
 511                      const struct sys_reg_desc *rd)
 512{
 513        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
 514}
 515
 516static bool trap_wvr(struct kvm_vcpu *vcpu,
 517                     struct sys_reg_params *p,
 518                     const struct sys_reg_desc *rd)
 519{
 520        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 521
 522        if (p->is_write)
 523                reg_to_dbg(vcpu, p, dbg_reg);
 524        else
 525                dbg_to_reg(vcpu, p, dbg_reg);
 526
 527        trace_trap_reg(__func__, rd->reg, p->is_write,
 528                vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
 529
 530        return true;
 531}
 532
 533static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 534                const struct kvm_one_reg *reg, void __user *uaddr)
 535{
 536        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 537
 538        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 539                return -EFAULT;
 540        return 0;
 541}
 542
 543static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 544        const struct kvm_one_reg *reg, void __user *uaddr)
 545{
 546        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 547
 548        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 549                return -EFAULT;
 550        return 0;
 551}
 552
 553static void reset_wvr(struct kvm_vcpu *vcpu,
 554                      const struct sys_reg_desc *rd)
 555{
 556        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
 557}
 558
 559static bool trap_wcr(struct kvm_vcpu *vcpu,
 560                     struct sys_reg_params *p,
 561                     const struct sys_reg_desc *rd)
 562{
 563        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 564
 565        if (p->is_write)
 566                reg_to_dbg(vcpu, p, dbg_reg);
 567        else
 568                dbg_to_reg(vcpu, p, dbg_reg);
 569
 570        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 571
 572        return true;
 573}
 574
 575static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 576                const struct kvm_one_reg *reg, void __user *uaddr)
 577{
 578        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 579
 580        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 581                return -EFAULT;
 582        return 0;
 583}
 584
 585static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 586        const struct kvm_one_reg *reg, void __user *uaddr)
 587{
 588        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 589
 590        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 591                return -EFAULT;
 592        return 0;
 593}
 594
 595static void reset_wcr(struct kvm_vcpu *vcpu,
 596                      const struct sys_reg_desc *rd)
 597{
 598        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
 599}
 600
 601static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 602{
 603        u64 amair = read_sysreg(amair_el1);
 604        vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
 605}
 606
 607static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 608{
 609        u64 mpidr;
 610
 611        /*
 612         * Map the vcpu_id into the first three affinity level fields of
  613         * the MPIDR. We limit the number of VCPUs at level 0 to 16 because
  614         * the ICC_SGIxR registers of the GICv3 can address at most 16 CPUs
  615         * per level-0 affinity group, and we want to be able to target each
  616         * CPU directly when sending IPIs.
 617         */
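             /*
              * e.g. vcpu_id 0x12345 gives Aff0 = 0x05, Aff1 = 0x34, Aff2 = 0x12;
              * the (1ULL << 31) ORed in below is the RES1 bit of MPIDR_EL1.
              */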
 618        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
 619        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
 620        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
 621        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 622}
 623
 624static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 625{
 626        u64 pmcr, val;
 627
 628        pmcr = read_sysreg(pmcr_el0);
 629        /*
 630         * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
 631         * except PMCR.E resetting to zero.
 632         */
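             /* 0xdecafbad is simply an arbitrary bit pattern standing in for UNKNOWN */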
 633        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 634               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
 635        __vcpu_sys_reg(vcpu, r->reg) = val;
 636}
 637
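     /*
      * Check whether a PMU access from the guest is permitted: it is if the
      * vcpu is running at EL1, or if any of the PMUSERENR_EL0 enable bits in
      * @flags is set. Otherwise an UNDEF is injected and true ("access
      * disabled") is returned.
      */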
 638static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 639{
 640        u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
 641        bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 642
 643        if (!enabled)
 644                kvm_inject_undefined(vcpu);
 645
 646        return !enabled;
 647}
 648
 649static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
 650{
 651        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
 652}
 653
 654static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
 655{
 656        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
 657}
 658
 659static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
 660{
 661        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
 662}
 663
 664static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
 665{
 666        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
 667}
 668
 669static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 670                        const struct sys_reg_desc *r)
 671{
 672        u64 val;
 673
 674        if (!kvm_arm_pmu_v3_ready(vcpu))
 675                return trap_raz_wi(vcpu, p, r);
 676
 677        if (pmu_access_el0_disabled(vcpu))
 678                return false;
 679
 680        if (p->is_write) {
 681                /* Only update writeable bits of PMCR */
 682                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 683                val &= ~ARMV8_PMU_PMCR_MASK;
 684                val |= p->regval & ARMV8_PMU_PMCR_MASK;
 685                __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 686                kvm_pmu_handle_pmcr(vcpu, val);
 687                kvm_vcpu_pmu_restore_guest(vcpu);
 688        } else {
 689                /* PMCR.P & PMCR.C are RAZ */
 690                val = __vcpu_sys_reg(vcpu, PMCR_EL0)
 691                      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
 692                p->regval = val;
 693        }
 694
 695        return true;
 696}
 697
 698static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 699                          const struct sys_reg_desc *r)
 700{
 701        if (!kvm_arm_pmu_v3_ready(vcpu))
 702                return trap_raz_wi(vcpu, p, r);
 703
 704        if (pmu_access_event_counter_el0_disabled(vcpu))
 705                return false;
 706
 707        if (p->is_write)
 708                __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
 709        else
 710                /* return PMSELR.SEL field */
 711                p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
 712                            & ARMV8_PMU_COUNTER_MASK;
 713
 714        return true;
 715}
 716
 717static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 718                          const struct sys_reg_desc *r)
 719{
 720        u64 pmceid;
 721
 722        if (!kvm_arm_pmu_v3_ready(vcpu))
 723                return trap_raz_wi(vcpu, p, r);
 724
 725        BUG_ON(p->is_write);
 726
 727        if (pmu_access_el0_disabled(vcpu))
 728                return false;
 729
 730        if (!(p->Op2 & 1))
 731                pmceid = read_sysreg(pmceid0_el0);
 732        else
 733                pmceid = read_sysreg(pmceid1_el0);
 734
 735        p->regval = pmceid;
 736
 737        return true;
 738}
 739
 740static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 741{
 742        u64 pmcr, val;
 743
 744        pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
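             /* PMCR_EL0.N is the number of event counters; index 31 is the cycle counter */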
 745        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 746        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
 747                kvm_inject_undefined(vcpu);
 748                return false;
 749        }
 750
 751        return true;
 752}
 753
 754static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 755                              struct sys_reg_params *p,
 756                              const struct sys_reg_desc *r)
 757{
 758        u64 idx;
 759
 760        if (!kvm_arm_pmu_v3_ready(vcpu))
 761                return trap_raz_wi(vcpu, p, r);
 762
 763        if (r->CRn == 9 && r->CRm == 13) {
 764                if (r->Op2 == 2) {
 765                        /* PMXEVCNTR_EL0 */
 766                        if (pmu_access_event_counter_el0_disabled(vcpu))
 767                                return false;
 768
 769                        idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
 770                              & ARMV8_PMU_COUNTER_MASK;
 771                } else if (r->Op2 == 0) {
 772                        /* PMCCNTR_EL0 */
 773                        if (pmu_access_cycle_counter_el0_disabled(vcpu))
 774                                return false;
 775
 776                        idx = ARMV8_PMU_CYCLE_IDX;
 777                } else {
 778                        return false;
 779                }
 780        } else if (r->CRn == 0 && r->CRm == 9) {
 781                /* PMCCNTR */
 782                if (pmu_access_event_counter_el0_disabled(vcpu))
 783                        return false;
 784
 785                idx = ARMV8_PMU_CYCLE_IDX;
 786        } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
 787                /* PMEVCNTRn_EL0 */
 788                if (pmu_access_event_counter_el0_disabled(vcpu))
 789                        return false;
 790
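                     /* the counter index n is encoded as CRm[1:0]:Op2[2:0], with CRm in 8-11 */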
 791                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 792        } else {
 793                return false;
 794        }
 795
 796        if (!pmu_counter_idx_valid(vcpu, idx))
 797                return false;
 798
 799        if (p->is_write) {
 800                if (pmu_access_el0_disabled(vcpu))
 801                        return false;
 802
 803                kvm_pmu_set_counter_value(vcpu, idx, p->regval);
 804        } else {
 805                p->regval = kvm_pmu_get_counter_value(vcpu, idx);
 806        }
 807
 808        return true;
 809}
 810
 811static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 812                               const struct sys_reg_desc *r)
 813{
 814        u64 idx, reg;
 815
 816        if (!kvm_arm_pmu_v3_ready(vcpu))
 817                return trap_raz_wi(vcpu, p, r);
 818
 819        if (pmu_access_el0_disabled(vcpu))
 820                return false;
 821
 822        if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
 823                /* PMXEVTYPER_EL0 */
 824                idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
 825                reg = PMEVTYPER0_EL0 + idx;
 826        } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
 827                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 828                if (idx == ARMV8_PMU_CYCLE_IDX)
 829                        reg = PMCCFILTR_EL0;
 830                else
 831                        /* PMEVTYPERn_EL0 */
 832                        reg = PMEVTYPER0_EL0 + idx;
 833        } else {
 834                BUG();
 835        }
 836
 837        if (!pmu_counter_idx_valid(vcpu, idx))
 838                return false;
 839
 840        if (p->is_write) {
 841                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
 842                __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
 843                kvm_vcpu_pmu_restore_guest(vcpu);
 844        } else {
 845                p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
 846        }
 847
 848        return true;
 849}
 850
 851static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 852                           const struct sys_reg_desc *r)
 853{
 854        u64 val, mask;
 855
 856        if (!kvm_arm_pmu_v3_ready(vcpu))
 857                return trap_raz_wi(vcpu, p, r);
 858
 859        if (pmu_access_el0_disabled(vcpu))
 860                return false;
 861
 862        mask = kvm_pmu_valid_counter_mask(vcpu);
 863        if (p->is_write) {
 864                val = p->regval & mask;
 865                if (r->Op2 & 0x1) {
 866                        /* accessing PMCNTENSET_EL0 */
 867                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
 868                        kvm_pmu_enable_counter_mask(vcpu, val);
 869                        kvm_vcpu_pmu_restore_guest(vcpu);
 870                } else {
 871                        /* accessing PMCNTENCLR_EL0 */
 872                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
 873                        kvm_pmu_disable_counter_mask(vcpu, val);
 874                }
 875        } else {
 876                p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
 877        }
 878
 879        return true;
 880}
 881
 882static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 883                           const struct sys_reg_desc *r)
 884{
 885        u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 886
 887        if (!kvm_arm_pmu_v3_ready(vcpu))
 888                return trap_raz_wi(vcpu, p, r);
 889
 890        if (!vcpu_mode_priv(vcpu)) {
 891                kvm_inject_undefined(vcpu);
 892                return false;
 893        }
 894
 895        if (p->is_write) {
 896                u64 val = p->regval & mask;
 897
 898                if (r->Op2 & 0x1)
 899                        /* accessing PMINTENSET_EL1 */
 900                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
 901                else
 902                        /* accessing PMINTENCLR_EL1 */
 903                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
 904        } else {
 905                p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
 906        }
 907
 908        return true;
 909}
 910
 911static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 912                         const struct sys_reg_desc *r)
 913{
 914        u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 915
 916        if (!kvm_arm_pmu_v3_ready(vcpu))
 917                return trap_raz_wi(vcpu, p, r);
 918
 919        if (pmu_access_el0_disabled(vcpu))
 920                return false;
 921
 922        if (p->is_write) {
 923                if (r->CRm & 0x2)
 924                        /* accessing PMOVSSET_EL0 */
 925                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
 926                else
 927                        /* accessing PMOVSCLR_EL0 */
 928                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
 929        } else {
 930                p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
 931        }
 932
 933        return true;
 934}
 935
 936static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 937                           const struct sys_reg_desc *r)
 938{
 939        u64 mask;
 940
 941        if (!kvm_arm_pmu_v3_ready(vcpu))
 942                return trap_raz_wi(vcpu, p, r);
 943
 944        if (!p->is_write)
 945                return read_from_write_only(vcpu, p, r);
 946
 947        if (pmu_write_swinc_el0_disabled(vcpu))
 948                return false;
 949
 950        mask = kvm_pmu_valid_counter_mask(vcpu);
 951        kvm_pmu_software_increment(vcpu, p->regval & mask);
 952        return true;
 953}
 954
 955static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 956                             const struct sys_reg_desc *r)
 957{
 958        if (!kvm_arm_pmu_v3_ready(vcpu))
 959                return trap_raz_wi(vcpu, p, r);
 960
 961        if (p->is_write) {
 962                if (!vcpu_mode_priv(vcpu)) {
 963                        kvm_inject_undefined(vcpu);
 964                        return false;
 965                }
 966
 967                __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
 968                               p->regval & ARMV8_PMU_USERENR_MASK;
 969        } else {
 970                p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
 971                            & ARMV8_PMU_USERENR_MASK;
 972        }
 973
 974        return true;
 975}
 976
 977#define reg_to_encoding(x)                                              \
 978        sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
 979                (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
 980
 981/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 982#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
 983        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
 984          trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },                \
 985        { SYS_DESC(SYS_DBGBCRn_EL1(n)),                                 \
 986          trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },                \
 987        { SYS_DESC(SYS_DBGWVRn_EL1(n)),                                 \
 988          trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },               \
 989        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
 990          trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
 991
 992/* Macro to expand the PMEVCNTRn_EL0 register */
 993#define PMU_PMEVCNTR_EL0(n)                                             \
 994        { SYS_DESC(SYS_PMEVCNTRn_EL0(n)),                                       \
 995          access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
 996
 997/* Macro to expand the PMEVTYPERn_EL0 register */
 998#define PMU_PMEVTYPER_EL0(n)                                            \
 999        { SYS_DESC(SYS_PMEVTYPERn_EL0(n)),                                      \
1000          access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
1001
1002static bool trap_ptrauth(struct kvm_vcpu *vcpu,
1003                         struct sys_reg_params *p,
1004                         const struct sys_reg_desc *rd)
1005{
1006        kvm_arm_vcpu_ptrauth_trap(vcpu);
1007
1008        /*
1009         * Return false for both cases as we never skip the trapped
1010         * instruction:
1011         *
1012         * - Either we re-execute the same key register access instruction
1013         *   after enabling ptrauth.
1014         * - Or an UNDEF is injected as ptrauth is not supported/enabled.
1015         */
1016        return false;
1017}
1018
1019static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1020                        const struct sys_reg_desc *rd)
1021{
1022        return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1023}
1024
1025#define __PTRAUTH_KEY(k)                                                \
1026        { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,           \
1027        .visibility = ptrauth_visibility}
1028
1029#define PTRAUTH_KEY(k)                                                  \
1030        __PTRAUTH_KEY(k ## KEYLO_EL1),                                  \
1031        __PTRAUTH_KEY(k ## KEYHI_EL1)
1032
1033static bool access_arch_timer(struct kvm_vcpu *vcpu,
1034                              struct sys_reg_params *p,
1035                              const struct sys_reg_desc *r)
1036{
1037        enum kvm_arch_timers tmr;
1038        enum kvm_arch_timer_regs treg;
1039        u64 reg = reg_to_encoding(r);
1040
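             /*
              * Only the EL1 physical timer registers trap here; the virtual
              * timer is accessed directly by the guest.
              */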
1041        switch (reg) {
1042        case SYS_CNTP_TVAL_EL0:
1043        case SYS_AARCH32_CNTP_TVAL:
1044                tmr = TIMER_PTIMER;
1045                treg = TIMER_REG_TVAL;
1046                break;
1047        case SYS_CNTP_CTL_EL0:
1048        case SYS_AARCH32_CNTP_CTL:
1049                tmr = TIMER_PTIMER;
1050                treg = TIMER_REG_CTL;
1051                break;
1052        case SYS_CNTP_CVAL_EL0:
1053        case SYS_AARCH32_CNTP_CVAL:
1054                tmr = TIMER_PTIMER;
1055                treg = TIMER_REG_CVAL;
1056                break;
1057        default:
1058                BUG();
1059        }
1060
1061        if (p->is_write)
1062                kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1063        else
1064                p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1065
1066        return true;
1067}
1068
1069/* Read a sanitised cpufeature ID register by sys_reg_desc */
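     /* When @raz is set, the register reads as zero (hidden/unallocated IDs). */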
1070static u64 read_id_reg(const struct kvm_vcpu *vcpu,
1071                struct sys_reg_desc const *r, bool raz)
1072{
1073        u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
1074                         (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
1075        u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
1076
1077        if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
1078                val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
1079        } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
1080                val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
1081                         (0xfUL << ID_AA64ISAR1_API_SHIFT) |
1082                         (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
1083                         (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
1084        }
1085
1086        return val;
1087}
1088
1089/* cpufeature ID register access trap handlers */
1090
1091static bool __access_id_reg(struct kvm_vcpu *vcpu,
1092                            struct sys_reg_params *p,
1093                            const struct sys_reg_desc *r,
1094                            bool raz)
1095{
1096        if (p->is_write)
1097                return write_to_read_only(vcpu, p, r);
1098
1099        p->regval = read_id_reg(vcpu, r, raz);
1100        return true;
1101}
1102
1103static bool access_id_reg(struct kvm_vcpu *vcpu,
1104                          struct sys_reg_params *p,
1105                          const struct sys_reg_desc *r)
1106{
1107        return __access_id_reg(vcpu, p, r, false);
1108}
1109
1110static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
1111                              struct sys_reg_params *p,
1112                              const struct sys_reg_desc *r)
1113{
1114        return __access_id_reg(vcpu, p, r, true);
1115}
1116
1117static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
1118static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
1119static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
1120
1121/* Visibility overrides for SVE-specific control registers */
1122static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1123                                   const struct sys_reg_desc *rd)
1124{
1125        if (vcpu_has_sve(vcpu))
1126                return 0;
1127
1128        return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1129}
1130
1131/* Visibility overrides for SVE-specific ID registers */
1132static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
1133                                      const struct sys_reg_desc *rd)
1134{
1135        if (vcpu_has_sve(vcpu))
1136                return 0;
1137
1138        return REG_HIDDEN_USER;
1139}
1140
1141/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
1142static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
1143{
1144        if (!vcpu_has_sve(vcpu))
1145                return 0;
1146
1147        return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
1148}
1149
1150static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1151                                   struct sys_reg_params *p,
1152                                   const struct sys_reg_desc *rd)
1153{
1154        if (p->is_write)
1155                return write_to_read_only(vcpu, p, rd);
1156
1157        p->regval = guest_id_aa64zfr0_el1(vcpu);
1158        return true;
1159}
1160
1161static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1162                const struct sys_reg_desc *rd,
1163                const struct kvm_one_reg *reg, void __user *uaddr)
1164{
1165        u64 val;
1166
1167        if (WARN_ON(!vcpu_has_sve(vcpu)))
1168                return -ENOENT;
1169
1170        val = guest_id_aa64zfr0_el1(vcpu);
1171        return reg_to_user(uaddr, &val, reg->id);
1172}
1173
1174static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1175                const struct sys_reg_desc *rd,
1176                const struct kvm_one_reg *reg, void __user *uaddr)
1177{
1178        const u64 id = sys_reg_to_index(rd);
1179        int err;
1180        u64 val;
1181
1182        if (WARN_ON(!vcpu_has_sve(vcpu)))
1183                return -ENOENT;
1184
1185        err = reg_from_user(&val, uaddr, id);
1186        if (err)
1187                return err;
1188
1189        /* This is what we mean by invariant: you can't change it. */
1190        if (val != guest_id_aa64zfr0_el1(vcpu))
1191                return -EINVAL;
1192
1193        return 0;
1194}
1195
1196/*
1197 * cpufeature ID register user accessors
1198 *
1199 * For now, these registers are immutable for userspace, so no values
1200 * are stored, and for set_id_reg() we don't allow the effective value
1201 * to be changed.
1202 */
1203static int __get_id_reg(const struct kvm_vcpu *vcpu,
1204                        const struct sys_reg_desc *rd, void __user *uaddr,
1205                        bool raz)
1206{
1207        const u64 id = sys_reg_to_index(rd);
1208        const u64 val = read_id_reg(vcpu, rd, raz);
1209
1210        return reg_to_user(uaddr, &val, id);
1211}
1212
1213static int __set_id_reg(const struct kvm_vcpu *vcpu,
1214                        const struct sys_reg_desc *rd, void __user *uaddr,
1215                        bool raz)
1216{
1217        const u64 id = sys_reg_to_index(rd);
1218        int err;
1219        u64 val;
1220
1221        err = reg_from_user(&val, uaddr, id);
1222        if (err)
1223                return err;
1224
1225        /* This is what we mean by invariant: you can't change it. */
1226        if (val != read_id_reg(vcpu, rd, raz))
1227                return -EINVAL;
1228
1229        return 0;
1230}
1231
1232static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1233                      const struct kvm_one_reg *reg, void __user *uaddr)
1234{
1235        return __get_id_reg(vcpu, rd, uaddr, false);
1236}
1237
1238static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1239                      const struct kvm_one_reg *reg, void __user *uaddr)
1240{
1241        return __set_id_reg(vcpu, rd, uaddr, false);
1242}
1243
1244static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1245                          const struct kvm_one_reg *reg, void __user *uaddr)
1246{
1247        return __get_id_reg(vcpu, rd, uaddr, true);
1248}
1249
1250static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1251                          const struct kvm_one_reg *reg, void __user *uaddr)
1252{
1253        return __set_id_reg(vcpu, rd, uaddr, true);
1254}
1255
1256static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1257                       const struct sys_reg_desc *r)
1258{
1259        if (p->is_write)
1260                return write_to_read_only(vcpu, p, r);
1261
1262        p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
1263        return true;
1264}
1265
1266static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1267                         const struct sys_reg_desc *r)
1268{
1269        if (p->is_write)
1270                return write_to_read_only(vcpu, p, r);
1271
1272        p->regval = read_sysreg(clidr_el1);
1273        return true;
1274}
1275
1276static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1277                          const struct sys_reg_desc *r)
1278{
1279        if (p->is_write)
1280                vcpu_write_sys_reg(vcpu, p->regval, r->reg);
1281        else
1282                p->regval = vcpu_read_sys_reg(vcpu, r->reg);
1283        return true;
1284}
1285
1286static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1287                          const struct sys_reg_desc *r)
1288{
1289        u32 csselr;
1290
1291        if (p->is_write)
1292                return write_to_read_only(vcpu, p, r);
1293
1294        csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
1295        p->regval = get_ccsidr(csselr);
1296
1297        /*
1298         * Guests should not be doing cache operations by set/way at all, and
1299         * for this reason, we trap them and attempt to infer the intent, so
1300         * that we can flush the entire guest's address space at the appropriate
1301         * time.
1302         * To prevent this trapping from causing performance problems, let's
1303         * expose the geometry of all data and unified caches (which are
1304         * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
1305         * [If guests should attempt to infer aliasing properties from the
1306         * geometry (which is not permitted by the architecture), they would
1307         * only do so for virtually indexed caches.]
1308         */
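             /* bits [27:3] are NumSets and Associativity; clearing them reports 1 set, 1 way */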
1309        if (!(csselr & 1)) // data or unified cache
1310                p->regval &= ~GENMASK(27, 3);
1311        return true;
1312}
1313
1314/* sys_reg_desc initialiser for known cpufeature ID registers */
1315#define ID_SANITISED(name) {                    \
1316        SYS_DESC(SYS_##name),                   \
1317        .access = access_id_reg,                \
1318        .get_user = get_id_reg,                 \
1319        .set_user = set_id_reg,                 \
1320}
1321
1322/*
1323 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
1324 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
1325 * (1 <= crm < 8, 0 <= Op2 < 8).
1326 */
1327#define ID_UNALLOCATED(crm, op2) {                      \
1328        Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),     \
1329        .access = access_raz_id_reg,                    \
1330        .get_user = get_raz_id_reg,                     \
1331        .set_user = set_raz_id_reg,                     \
1332}
1333
1334/*
1335 * sys_reg_desc initialiser for known ID registers that we hide from guests.
1336 * For now, these are exposed just like unallocated ID regs: they appear
1337 * RAZ for the guest.
1338 */
1339#define ID_HIDDEN(name) {                       \
1340        SYS_DESC(SYS_##name),                   \
1341        .access = access_raz_id_reg,            \
1342        .get_user = get_raz_id_reg,             \
1343        .set_user = set_raz_id_reg,             \
1344}
1345
1346/*
1347 * Architected system registers.
1348 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
1349 *
 1350 * Debug handling: We do trap most, if not all, debug-related system
 1351 * registers. The implementation is good enough to ensure that a guest
 1352 * can use these with minimal performance degradation. The drawback is
 1353 * that we don't implement any of the external debug architecture, nor
 1354 * the OSLock protocol. This should be revisited if we ever encounter a
 1355 * more demanding guest...
1356 */
1357static const struct sys_reg_desc sys_reg_descs[] = {
1358        { SYS_DESC(SYS_DC_ISW), access_dcsw },
1359        { SYS_DESC(SYS_DC_CSW), access_dcsw },
1360        { SYS_DESC(SYS_DC_CISW), access_dcsw },
1361
1362        DBG_BCR_BVR_WCR_WVR_EL1(0),
1363        DBG_BCR_BVR_WCR_WVR_EL1(1),
1364        { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1365        { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
1366        DBG_BCR_BVR_WCR_WVR_EL1(2),
1367        DBG_BCR_BVR_WCR_WVR_EL1(3),
1368        DBG_BCR_BVR_WCR_WVR_EL1(4),
1369        DBG_BCR_BVR_WCR_WVR_EL1(5),
1370        DBG_BCR_BVR_WCR_WVR_EL1(6),
1371        DBG_BCR_BVR_WCR_WVR_EL1(7),
1372        DBG_BCR_BVR_WCR_WVR_EL1(8),
1373        DBG_BCR_BVR_WCR_WVR_EL1(9),
1374        DBG_BCR_BVR_WCR_WVR_EL1(10),
1375        DBG_BCR_BVR_WCR_WVR_EL1(11),
1376        DBG_BCR_BVR_WCR_WVR_EL1(12),
1377        DBG_BCR_BVR_WCR_WVR_EL1(13),
1378        DBG_BCR_BVR_WCR_WVR_EL1(14),
1379        DBG_BCR_BVR_WCR_WVR_EL1(15),
1380
1381        { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
1382        { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
1383        { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
1384        { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
1385        { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
1386        { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
1387        { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
1388        { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
1389
1390        { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
1391        { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
1392        // DBGDTR[TR]X_EL0 share the same encoding
1393        { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
1394
1395        { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
1396
1397        { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
1398
1399        /*
1400         * ID regs: all ID_SANITISED() entries here must have corresponding
1401         * entries in arm64_ftr_regs[].
1402         */
1403
1404        /* AArch64 mappings of the AArch32 ID registers */
1405        /* CRm=1 */
1406        ID_SANITISED(ID_PFR0_EL1),
1407        ID_SANITISED(ID_PFR1_EL1),
1408        ID_SANITISED(ID_DFR0_EL1),
1409        ID_HIDDEN(ID_AFR0_EL1),
1410        ID_SANITISED(ID_MMFR0_EL1),
1411        ID_SANITISED(ID_MMFR1_EL1),
1412        ID_SANITISED(ID_MMFR2_EL1),
1413        ID_SANITISED(ID_MMFR3_EL1),
1414
1415        /* CRm=2 */
1416        ID_SANITISED(ID_ISAR0_EL1),
1417        ID_SANITISED(ID_ISAR1_EL1),
1418        ID_SANITISED(ID_ISAR2_EL1),
1419        ID_SANITISED(ID_ISAR3_EL1),
1420        ID_SANITISED(ID_ISAR4_EL1),
1421        ID_SANITISED(ID_ISAR5_EL1),
1422        ID_SANITISED(ID_MMFR4_EL1),
1423        ID_UNALLOCATED(2,7),
1424
1425        /* CRm=3 */
1426        ID_SANITISED(MVFR0_EL1),
1427        ID_SANITISED(MVFR1_EL1),
1428        ID_SANITISED(MVFR2_EL1),
1429        ID_UNALLOCATED(3,3),
1430        ID_UNALLOCATED(3,4),
1431        ID_UNALLOCATED(3,5),
1432        ID_UNALLOCATED(3,6),
1433        ID_UNALLOCATED(3,7),
1434
1435        /* AArch64 ID registers */
1436        /* CRm=4 */
1437        ID_SANITISED(ID_AA64PFR0_EL1),
1438        ID_SANITISED(ID_AA64PFR1_EL1),
1439        ID_UNALLOCATED(4,2),
1440        ID_UNALLOCATED(4,3),
1441        { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
1442        ID_UNALLOCATED(4,5),
1443        ID_UNALLOCATED(4,6),
1444        ID_UNALLOCATED(4,7),
1445
1446        /* CRm=5 */
1447        ID_SANITISED(ID_AA64DFR0_EL1),
1448        ID_SANITISED(ID_AA64DFR1_EL1),
1449        ID_UNALLOCATED(5,2),
1450        ID_UNALLOCATED(5,3),
1451        ID_HIDDEN(ID_AA64AFR0_EL1),
1452        ID_HIDDEN(ID_AA64AFR1_EL1),
1453        ID_UNALLOCATED(5,6),
1454        ID_UNALLOCATED(5,7),
1455
1456        /* CRm=6 */
1457        ID_SANITISED(ID_AA64ISAR0_EL1),
1458        ID_SANITISED(ID_AA64ISAR1_EL1),
1459        ID_UNALLOCATED(6,2),
1460        ID_UNALLOCATED(6,3),
1461        ID_UNALLOCATED(6,4),
1462        ID_UNALLOCATED(6,5),
1463        ID_UNALLOCATED(6,6),
1464        ID_UNALLOCATED(6,7),
1465
1466        /* CRm=7 */
1467        ID_SANITISED(ID_AA64MMFR0_EL1),
1468        ID_SANITISED(ID_AA64MMFR1_EL1),
1469        ID_SANITISED(ID_AA64MMFR2_EL1),
1470        ID_UNALLOCATED(7,3),
1471        ID_UNALLOCATED(7,4),
1472        ID_UNALLOCATED(7,5),
1473        ID_UNALLOCATED(7,6),
1474        ID_UNALLOCATED(7,7),
1475
1476        { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
1477        { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
1478        { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
1479        { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
1480        { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
1481        { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
1482
1483        PTRAUTH_KEY(APIA),
1484        PTRAUTH_KEY(APIB),
1485        PTRAUTH_KEY(APDA),
1486        PTRAUTH_KEY(APDB),
1487        PTRAUTH_KEY(APGA),
1488
1489        { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
1490        { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
1491        { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
1492
1493        { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
1494        { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
1495        { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
1496        { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
1497        { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
1498        { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
1499        { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
1500        { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
1501
1502        { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
1503        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
1504
1505        { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
1506        { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
1507
1508        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1509        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1510
1511        { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1512        { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1513        { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1514        { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1515        { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1516
1517        { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1518        { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1519
1520        { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1521        { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
1522        { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1523        { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
1524        { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1525        { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
1526        { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
1527        { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
1528        { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1529        { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
1530        { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1531        { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1532
1533        { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1534        { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
1535
1536        { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0 },
1537
1538        { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
1539        { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
1540        { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
1541        { SYS_DESC(SYS_CTR_EL0), access_ctr },
1542
1543        { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
1544        { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
1545        { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
1546        { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
1547        { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
1548        { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
1549        { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
1550        { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
1551        { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
1552        { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
1553        { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
1554        /*
1555         * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
1556         * in 32bit mode. Here we choose to reset it as zero for consistency.
1557         */
1558        { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
1559        { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
1560
1561        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
1562        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
1563
1564        { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
1565        { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
1566        { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
1567
1568        /* PMEVCNTRn_EL0 */
1569        PMU_PMEVCNTR_EL0(0),
1570        PMU_PMEVCNTR_EL0(1),
1571        PMU_PMEVCNTR_EL0(2),
1572        PMU_PMEVCNTR_EL0(3),
1573        PMU_PMEVCNTR_EL0(4),
1574        PMU_PMEVCNTR_EL0(5),
1575        PMU_PMEVCNTR_EL0(6),
1576        PMU_PMEVCNTR_EL0(7),
1577        PMU_PMEVCNTR_EL0(8),
1578        PMU_PMEVCNTR_EL0(9),
1579        PMU_PMEVCNTR_EL0(10),
1580        PMU_PMEVCNTR_EL0(11),
1581        PMU_PMEVCNTR_EL0(12),
1582        PMU_PMEVCNTR_EL0(13),
1583        PMU_PMEVCNTR_EL0(14),
1584        PMU_PMEVCNTR_EL0(15),
1585        PMU_PMEVCNTR_EL0(16),
1586        PMU_PMEVCNTR_EL0(17),
1587        PMU_PMEVCNTR_EL0(18),
1588        PMU_PMEVCNTR_EL0(19),
1589        PMU_PMEVCNTR_EL0(20),
1590        PMU_PMEVCNTR_EL0(21),
1591        PMU_PMEVCNTR_EL0(22),
1592        PMU_PMEVCNTR_EL0(23),
1593        PMU_PMEVCNTR_EL0(24),
1594        PMU_PMEVCNTR_EL0(25),
1595        PMU_PMEVCNTR_EL0(26),
1596        PMU_PMEVCNTR_EL0(27),
1597        PMU_PMEVCNTR_EL0(28),
1598        PMU_PMEVCNTR_EL0(29),
1599        PMU_PMEVCNTR_EL0(30),
1600        /* PMEVTYPERn_EL0 */
1601        PMU_PMEVTYPER_EL0(0),
1602        PMU_PMEVTYPER_EL0(1),
1603        PMU_PMEVTYPER_EL0(2),
1604        PMU_PMEVTYPER_EL0(3),
1605        PMU_PMEVTYPER_EL0(4),
1606        PMU_PMEVTYPER_EL0(5),
1607        PMU_PMEVTYPER_EL0(6),
1608        PMU_PMEVTYPER_EL0(7),
1609        PMU_PMEVTYPER_EL0(8),
1610        PMU_PMEVTYPER_EL0(9),
1611        PMU_PMEVTYPER_EL0(10),
1612        PMU_PMEVTYPER_EL0(11),
1613        PMU_PMEVTYPER_EL0(12),
1614        PMU_PMEVTYPER_EL0(13),
1615        PMU_PMEVTYPER_EL0(14),
1616        PMU_PMEVTYPER_EL0(15),
1617        PMU_PMEVTYPER_EL0(16),
1618        PMU_PMEVTYPER_EL0(17),
1619        PMU_PMEVTYPER_EL0(18),
1620        PMU_PMEVTYPER_EL0(19),
1621        PMU_PMEVTYPER_EL0(20),
1622        PMU_PMEVTYPER_EL0(21),
1623        PMU_PMEVTYPER_EL0(22),
1624        PMU_PMEVTYPER_EL0(23),
1625        PMU_PMEVTYPER_EL0(24),
1626        PMU_PMEVTYPER_EL0(25),
1627        PMU_PMEVTYPER_EL0(26),
1628        PMU_PMEVTYPER_EL0(27),
1629        PMU_PMEVTYPER_EL0(28),
1630        PMU_PMEVTYPER_EL0(29),
1631        PMU_PMEVTYPER_EL0(30),
1632        /*
1633         * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
1634         * in 32bit mode. Here we choose to reset it as zero for consistency.
1635         */
1636        { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1637
1638        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1639        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1640        { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
1641};
1642
1643static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1644                        struct sys_reg_params *p,
1645                        const struct sys_reg_desc *r)
1646{
1647        if (p->is_write) {
1648                return ignore_write(vcpu, p);
1649        } else {
1650                u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1651                u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1652                u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1653
1654        p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1655                     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1656                     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
1657                     (6 << 16) | (el3 << 14) | (el3 << 12));
1658                return true;
1659        }
1660}
1661
1662static bool trap_debug32(struct kvm_vcpu *vcpu,
1663                         struct sys_reg_params *p,
1664                         const struct sys_reg_desc *r)
1665{
1666        if (p->is_write) {
1667                vcpu_cp14(vcpu, r->reg) = p->regval;
1668                vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1669        } else {
1670                p->regval = vcpu_cp14(vcpu, r->reg);
1671        }
1672
1673        return true;
1674}
1675
1676/* AArch32 debug register mappings
1677 *
1678 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1679 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1680 *
1681 * All control registers and watchpoint value registers are mapped to
1682 * the lower 32 bits of their AArch64 equivalents. We share the trap
1683 * handlers with the above AArch64 code which checks what mode the
1684 * system is in.
1685 */
1686
1687static bool trap_xvr(struct kvm_vcpu *vcpu,
1688                     struct sys_reg_params *p,
1689                     const struct sys_reg_desc *rd)
1690{
1691        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1692
1693        if (p->is_write) {
1694                u64 val = *dbg_reg;
1695
1696                val &= 0xffffffffUL;
1697                val |= p->regval << 32;
1698                *dbg_reg = val;
1699
1700                vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1701        } else {
1702                p->regval = *dbg_reg >> 32;
1703        }
1704
1705        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1706
1707        return true;
1708}
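
/*
 * Worked example for trap_xvr(): DBGBXVRn only carries the top half of the
 * 64-bit DBGBVRn_EL1. If dbg_bvr[n] currently holds 0x00000000_89abcdef and
 * the guest writes 0x12345678 to DBGBXVRn, the register becomes
 * 0x12345678_89abcdef; a subsequent DBGBXVRn read returns 0x12345678.
 */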
1709
1710#define DBG_BCR_BVR_WCR_WVR(n)                                          \
1711        /* DBGBVRn */                                                   \
1712        { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },     \
1713        /* DBGBCRn */                                                   \
1714        { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },     \
1715        /* DBGWVRn */                                                   \
1716        { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },     \
1717        /* DBGWCRn */                                                   \
1718        { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1719
1720#define DBGBXVR(n)                                                      \
1721        { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1722
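/*
 * For instance, DBG_BCR_BVR_WCR_WVR(1) expands to four cp14 entries at
 * CRn=0, CRm=1 with Op2 4..7 (the DBGBVR1, DBGBCR1, DBGWVR1 and DBGWCR1
 * encodings), while DBGBXVR(1) lands at CRn=1, CRm=1, Op2=1. All of them
 * forward to the shared trap handlers above with n as the register index.
 */
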
1723/*
1724 * Trapped cp14 registers. We generally ignore most of the external
1725 * debug registers, on the principle that they don't really make sense
1726 * to a guest. Revisit this one day, should this principle ever change.
1727 */
1728static const struct sys_reg_desc cp14_regs[] = {
1729        /* DBGIDR */
1730        { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1731        /* DBGDTRRXext */
1732        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1733
1734        DBG_BCR_BVR_WCR_WVR(0),
1735        /* DBGDSCRint */
1736        { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1737        DBG_BCR_BVR_WCR_WVR(1),
1738        /* DBGDCCINT */
1739        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
1740        /* DBGDSCRext */
1741        { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
1742        DBG_BCR_BVR_WCR_WVR(2),
1743        /* DBGDTR[RT]Xint */
1744        { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1745        /* DBGDTR[RT]Xext */
1746        { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1747        DBG_BCR_BVR_WCR_WVR(3),
1748        DBG_BCR_BVR_WCR_WVR(4),
1749        DBG_BCR_BVR_WCR_WVR(5),
1750        /* DBGWFAR */
1751        { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1752        /* DBGOSECCR */
1753        { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1754        DBG_BCR_BVR_WCR_WVR(6),
1755        /* DBGVCR */
1756        { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
1757        DBG_BCR_BVR_WCR_WVR(7),
1758        DBG_BCR_BVR_WCR_WVR(8),
1759        DBG_BCR_BVR_WCR_WVR(9),
1760        DBG_BCR_BVR_WCR_WVR(10),
1761        DBG_BCR_BVR_WCR_WVR(11),
1762        DBG_BCR_BVR_WCR_WVR(12),
1763        DBG_BCR_BVR_WCR_WVR(13),
1764        DBG_BCR_BVR_WCR_WVR(14),
1765        DBG_BCR_BVR_WCR_WVR(15),
1766
1767        /* DBGDRAR (32bit) */
1768        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1769
1770        DBGBXVR(0),
1771        /* DBGOSLAR */
1772        { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1773        DBGBXVR(1),
1774        /* DBGOSLSR */
1775        { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1776        DBGBXVR(2),
1777        DBGBXVR(3),
1778        /* DBGOSDLR */
1779        { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1780        DBGBXVR(4),
1781        /* DBGPRCR */
1782        { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1783        DBGBXVR(5),
1784        DBGBXVR(6),
1785        DBGBXVR(7),
1786        DBGBXVR(8),
1787        DBGBXVR(9),
1788        DBGBXVR(10),
1789        DBGBXVR(11),
1790        DBGBXVR(12),
1791        DBGBXVR(13),
1792        DBGBXVR(14),
1793        DBGBXVR(15),
1794
1795        /* DBGDSAR (32bit) */
1796        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1797
1798        /* DBGDEVID2 */
1799        { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1800        /* DBGDEVID1 */
1801        { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1802        /* DBGDEVID */
1803        { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1804        /* DBGCLAIMSET */
1805        { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1806        /* DBGCLAIMCLR */
1807        { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1808        /* DBGAUTHSTATUS */
1809        { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1810};
1811
1812/* Trapped cp14 64bit registers */
1813static const struct sys_reg_desc cp14_64_regs[] = {
1814        /* DBGDRAR (64bit) */
1815        { Op1( 0), CRm( 1), .access = trap_raz_wi },
1816
1817        /* DBGDSAR (64bit) */
1818        { Op1( 0), CRm( 2), .access = trap_raz_wi },
1819};
1820
1821/* Macro to expand the PMEVCNTRn register */
1822#define PMU_PMEVCNTR(n)                                                 \
1823        /* PMEVCNTRn */                                                 \
1824        { Op1(0), CRn(0b1110),                                          \
1825          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
1826          access_pmu_evcntr }
1827
1828/* Macro to expand the PMEVTYPERn register */
1829#define PMU_PMEVTYPER(n)                                                \
1830        /* PMEVTYPERn */                                                \
1831        { Op1(0), CRn(0b1110),                                          \
1832          CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
1833          access_pmu_evtyper }
1834
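/*
 * Example expansion: PMU_PMEVCNTR(10) yields CRm = 0b1000 | 1 = 0b1001 and
 * Op2 = 10 & 0x7 = 2, i.e. { Op1(0), CRn(14), CRm(9), Op2(2) }, matching the
 * cp15 PMEVCNTR10 encoding. PMU_PMEVTYPER(n) uses the same scheme with a
 * CRm base of 0b1100.
 */
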
1835/*
1836 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1837 * depending on the way they are accessed (as a 32bit or a 64bit
1838 * register).
1839 */
1840static const struct sys_reg_desc cp15_regs[] = {
1841        { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
1842        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
1843        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1844        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1845        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1846        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1847        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1848        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1849        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
1850        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
1851        { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
1852        { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
1853
1854        /*
1855         * DC{C,I,CI}SW operations:
1856         */
1857        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
1858        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
1859        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
1860
1861        /* PMU */
1862        { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
1863        { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
1864        { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
1865        { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
1866        { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
1867        { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
1868        { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
1869        { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
1870        { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
1871        { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
1872        { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
1873        { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
1874        { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
1875        { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
1876        { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
1877
1878        { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
1879        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
1880        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
1881        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
1882
1883        /* ICC_SRE */
1884        { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
1885
1886        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
1887
1888        /* Arch Timers */
1889        { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
1890        { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
1891
1892        /* PMEVCNTRn */
1893        PMU_PMEVCNTR(0),
1894        PMU_PMEVCNTR(1),
1895        PMU_PMEVCNTR(2),
1896        PMU_PMEVCNTR(3),
1897        PMU_PMEVCNTR(4),
1898        PMU_PMEVCNTR(5),
1899        PMU_PMEVCNTR(6),
1900        PMU_PMEVCNTR(7),
1901        PMU_PMEVCNTR(8),
1902        PMU_PMEVCNTR(9),
1903        PMU_PMEVCNTR(10),
1904        PMU_PMEVCNTR(11),
1905        PMU_PMEVCNTR(12),
1906        PMU_PMEVCNTR(13),
1907        PMU_PMEVCNTR(14),
1908        PMU_PMEVCNTR(15),
1909        PMU_PMEVCNTR(16),
1910        PMU_PMEVCNTR(17),
1911        PMU_PMEVCNTR(18),
1912        PMU_PMEVCNTR(19),
1913        PMU_PMEVCNTR(20),
1914        PMU_PMEVCNTR(21),
1915        PMU_PMEVCNTR(22),
1916        PMU_PMEVCNTR(23),
1917        PMU_PMEVCNTR(24),
1918        PMU_PMEVCNTR(25),
1919        PMU_PMEVCNTR(26),
1920        PMU_PMEVCNTR(27),
1921        PMU_PMEVCNTR(28),
1922        PMU_PMEVCNTR(29),
1923        PMU_PMEVCNTR(30),
1924        /* PMEVTYPERn */
1925        PMU_PMEVTYPER(0),
1926        PMU_PMEVTYPER(1),
1927        PMU_PMEVTYPER(2),
1928        PMU_PMEVTYPER(3),
1929        PMU_PMEVTYPER(4),
1930        PMU_PMEVTYPER(5),
1931        PMU_PMEVTYPER(6),
1932        PMU_PMEVTYPER(7),
1933        PMU_PMEVTYPER(8),
1934        PMU_PMEVTYPER(9),
1935        PMU_PMEVTYPER(10),
1936        PMU_PMEVTYPER(11),
1937        PMU_PMEVTYPER(12),
1938        PMU_PMEVTYPER(13),
1939        PMU_PMEVTYPER(14),
1940        PMU_PMEVTYPER(15),
1941        PMU_PMEVTYPER(16),
1942        PMU_PMEVTYPER(17),
1943        PMU_PMEVTYPER(18),
1944        PMU_PMEVTYPER(19),
1945        PMU_PMEVTYPER(20),
1946        PMU_PMEVTYPER(21),
1947        PMU_PMEVTYPER(22),
1948        PMU_PMEVTYPER(23),
1949        PMU_PMEVTYPER(24),
1950        PMU_PMEVTYPER(25),
1951        PMU_PMEVTYPER(26),
1952        PMU_PMEVTYPER(27),
1953        PMU_PMEVTYPER(28),
1954        PMU_PMEVTYPER(29),
1955        PMU_PMEVTYPER(30),
1956        /* PMCCFILTR */
1957        { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
1958
1959        { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
1960        { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
1961        { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
1962};
1963
1964static const struct sys_reg_desc cp15_64_regs[] = {
1965        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1966        { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
1967        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
1968        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
1969        { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
1970        { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
1971        { SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
1972};
1973
1974/* Target specific emulation tables */
1975static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1976
1977void kvm_register_target_sys_reg_table(unsigned int target,
1978                                       struct kvm_sys_reg_target_table *table)
1979{
1980        target_tables[target] = table;
1981}
1982
1983/* Get specific register table for this target. */
1984static const struct sys_reg_desc *get_target_table(unsigned target,
1985                                                   bool mode_is_64,
1986                                                   size_t *num)
1987{
1988        struct kvm_sys_reg_target_table *table;
1989
1990        table = target_tables[target];
1991        if (mode_is_64) {
1992                *num = table->table64.num;
1993                return table->table64.table;
1994        } else {
1995                *num = table->table32.num;
1996                return table->table32.table;
1997        }
1998}
1999
2000static int match_sys_reg(const void *key, const void *elt)
2001{
2002        const unsigned long pval = (unsigned long)key;
2003        const struct sys_reg_desc *r = elt;
2004
2005        return pval - reg_to_encoding(r);
2006}
2007
2008static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
2009                                         const struct sys_reg_desc table[],
2010                                         unsigned int num)
2011{
2012        unsigned long pval = reg_to_encoding(params);
2013
2014        return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
2015}
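
/*
 * Note: the bsearch() above only works because every table handed to
 * find_reg() is sorted by encoding; kvm_sys_reg_table_init() enforces this
 * at boot time via check_sysreg_table().
 */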
2016
2017int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
2018{
2019        kvm_inject_undefined(vcpu);
2020        return 1;
2021}
2022
2023static void perform_access(struct kvm_vcpu *vcpu,
2024                           struct sys_reg_params *params,
2025                           const struct sys_reg_desc *r)
2026{
2027        trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2028
2029        /* Check for regs disabled by runtime config */
2030        if (sysreg_hidden_from_guest(vcpu, r)) {
2031                kvm_inject_undefined(vcpu);
2032                return;
2033        }
2034
2035        /*
2036         * Not having an accessor means that we have configured a trap
2037         * that we don't know how to handle. This certainly qualifies
2038         * as a gross bug that should be fixed right away.
2039         */
2040        BUG_ON(!r->access);
2041
2042        /* Skip the trapped instruction if the handler emulated it */
2043        if (likely(r->access(vcpu, params, r)))
2044                kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
2045}
2046
2047/*
2048 * emulate_cp -- tries to match a sys_reg access in a handling table, and
2049 *               calls the corresponding trap handler.
2050 *
2051 * @params: pointer to the descriptor of the access
2052 * @table: array of trap descriptors
2053 * @num: size of the trap descriptor array
2054 *
2055 * Returns 0 if the access has been handled, and -1 if not.
2056 */
2057static int emulate_cp(struct kvm_vcpu *vcpu,
2058                      struct sys_reg_params *params,
2059                      const struct sys_reg_desc *table,
2060                      size_t num)
2061{
2062        const struct sys_reg_desc *r;
2063
2064        if (!table)
2065                return -1;      /* Not handled */
2066
2067        r = find_reg(params, table, num);
2068
2069        if (r) {
2070                perform_access(vcpu, params, r);
2071                return 0;
2072        }
2073
2074        /* Not handled */
2075        return -1;
2076}
2077
2078static void unhandled_cp_access(struct kvm_vcpu *vcpu,
2079                                struct sys_reg_params *params)
2080{
2081        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
2082        int cp = -1;
2083
2084        switch (hsr_ec) {
2085        case ESR_ELx_EC_CP15_32:
2086        case ESR_ELx_EC_CP15_64:
2087                cp = 15;
2088                break;
2089        case ESR_ELx_EC_CP14_MR:
2090        case ESR_ELx_EC_CP14_64:
2091                cp = 14;
2092                break;
2093        default:
2094                WARN_ON(1);
2095        }
2096
2097        kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
2098                cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2099        print_sys_reg_instr(params);
2100        kvm_inject_undefined(vcpu);
2101}
2102
2103/**
2104 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
2105 * @vcpu: The VCPU pointer
2106 * @global, @nr_global, @target_specific, @nr_specific: trap tables and sizes
2107 */
2108static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
2109                            const struct sys_reg_desc *global,
2110                            size_t nr_global,
2111                            const struct sys_reg_desc *target_specific,
2112                            size_t nr_specific)
2113{
2114        struct sys_reg_params params;
2115        u32 hsr = kvm_vcpu_get_hsr(vcpu);
2116        int Rt = kvm_vcpu_sys_get_rt(vcpu);
2117        int Rt2 = (hsr >> 10) & 0x1f;
2118
2119        params.is_aarch32 = true;
2120        params.is_32bit = false;
2121        params.CRm = (hsr >> 1) & 0xf;
2122        params.is_write = ((hsr & 1) == 0);
2123
2124        params.Op0 = 0;
2125        params.Op1 = (hsr >> 16) & 0xf;
2126        params.Op2 = 0;
2127        params.CRn = 0;
2128
2129        /*
2130         * Make a 64-bit value out of Rt and Rt2. As we use the same trap
2131         * backends between AArch32 and AArch64, we get away with it.
2132         */
2133        if (params.is_write) {
2134                params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
2135                params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
2136        }
2137
2138        /*
2139         * Try to emulate the coprocessor access using the
2140         * target-specific table first, and the global table afterwards.
2141         * If either table handles the access, copy any result back to
2142         * the guest registers in the case of a read, and return with
2143         * success.
2144         */
2145        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
2146            !emulate_cp(vcpu, &params, global, nr_global)) {
2147                /* Split up the value between registers for the read side */
2148                if (!params.is_write) {
2149                        vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
2150                        vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
2151                }
2152
2153                return 1;
2154        }
2155
2156        unhandled_cp_access(vcpu, &params);
2157        return 1;
2158}
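
/*
 * Worked example: for an mcrr trap with Rt = r2 and Rt2 = r3, params.regval
 * is assembled as (r3 << 32) | (r2 & 0xffffffff) before the tables are
 * consulted. For the mrrc case the handler fills in params.regval and the
 * code above splits it back, r2 receiving the low word and r3 the high word.
 */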
2159
2160/**
2161 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
2162 * @vcpu: The VCPU pointer
2163 * @global, @nr_global, @target_specific, @nr_specific: trap tables and sizes
2164 */
2165static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2166                            const struct sys_reg_desc *global,
2167                            size_t nr_global,
2168                            const struct sys_reg_desc *target_specific,
2169                            size_t nr_specific)
2170{
2171        struct sys_reg_params params;
2172        u32 hsr = kvm_vcpu_get_hsr(vcpu);
2173        int Rt  = kvm_vcpu_sys_get_rt(vcpu);
2174
2175        params.is_aarch32 = true;
2176        params.is_32bit = true;
2177        params.CRm = (hsr >> 1) & 0xf;
2178        params.regval = vcpu_get_reg(vcpu, Rt);
2179        params.is_write = ((hsr & 1) == 0);
2180        params.CRn = (hsr >> 10) & 0xf;
2181        params.Op0 = 0;
2182        params.Op1 = (hsr >> 14) & 0x7;
2183        params.Op2 = (hsr >> 17) & 0x7;
2184
2185        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
2186            !emulate_cp(vcpu, &params, global, nr_global)) {
2187                if (!params.is_write)
2188                        vcpu_set_reg(vcpu, Rt, params.regval);
2189                return 1;
2190        }
2191
2192        unhandled_cp_access(vcpu, &params);
2193        return 1;
2194}
2195
2196int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2197{
2198        const struct sys_reg_desc *target_specific;
2199        size_t num;
2200
2201        target_specific = get_target_table(vcpu->arch.target, false, &num);
2202        return kvm_handle_cp_64(vcpu,
2203                                cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
2204                                target_specific, num);
2205}
2206
2207int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2208{
2209        const struct sys_reg_desc *target_specific;
2210        size_t num;
2211
2212        target_specific = get_target_table(vcpu->arch.target, false, &num);
2213        return kvm_handle_cp_32(vcpu,
2214                                cp15_regs, ARRAY_SIZE(cp15_regs),
2215                                target_specific, num);
2216}
2217
2218int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2219{
2220        return kvm_handle_cp_64(vcpu,
2221                                cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
2222                                NULL, 0);
2223}
2224
2225int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2226{
2227        return kvm_handle_cp_32(vcpu,
2228                                cp14_regs, ARRAY_SIZE(cp14_regs),
2229                                NULL, 0);
2230}
2231
2232static int emulate_sys_reg(struct kvm_vcpu *vcpu,
2233                           struct sys_reg_params *params)
2234{
2235        size_t num;
2236        const struct sys_reg_desc *table, *r;
2237
2238        table = get_target_table(vcpu->arch.target, true, &num);
2239
2240        /* Search target-specific then generic table. */
2241        r = find_reg(params, table, num);
2242        if (!r)
2243                r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2244
2245        if (likely(r)) {
2246                perform_access(vcpu, params, r);
2247        } else {
2248                kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
2249                        *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2250                print_sys_reg_instr(params);
2251                kvm_inject_undefined(vcpu);
2252        }
2253        return 1;
2254}
2255
2256static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
2257                                const struct sys_reg_desc *table, size_t num,
2258                                unsigned long *bmap)
2259{
2260        unsigned long i;
2261
2262        for (i = 0; i < num; i++)
2263                if (table[i].reset) {
2264                        int reg = table[i].reg;
2265
2266                        table[i].reset(vcpu, &table[i]);
2267                        if (reg > 0 && reg < NR_SYS_REGS)
2268                                set_bit(reg, bmap);
2269                }
2270}
2271
2272/**
2273 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2274 * @vcpu: The VCPU pointer
2275 * @run:  The kvm_run struct
2276 */
2277int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
2278{
2279        struct sys_reg_params params;
2280        unsigned long esr = kvm_vcpu_get_hsr(vcpu);
2281        int Rt = kvm_vcpu_sys_get_rt(vcpu);
2282        int ret;
2283
2284        trace_kvm_handle_sys_reg(esr);
2285
2286        params.is_aarch32 = false;
2287        params.is_32bit = false;
2288        params.Op0 = (esr >> 20) & 3;
2289        params.Op1 = (esr >> 14) & 0x7;
2290        params.CRn = (esr >> 10) & 0xf;
2291        params.CRm = (esr >> 1) & 0xf;
2292        params.Op2 = (esr >> 17) & 0x7;
2293        params.regval = vcpu_get_reg(vcpu, Rt);
2294        params.is_write = !(esr & 1);
2295
2296        ret = emulate_sys_reg(vcpu, &params);
2297
2298        if (!params.is_write)
2299                vcpu_set_reg(vcpu, Rt, params.regval);
2300        return ret;
2301}
2302
2303/******************************************************************************
2304 * Userspace API
2305 *****************************************************************************/
2306
2307static bool index_to_params(u64 id, struct sys_reg_params *params)
2308{
2309        switch (id & KVM_REG_SIZE_MASK) {
2310        case KVM_REG_SIZE_U64:
2311                /* Any unused index bits means it's not valid. */
2312                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2313                              | KVM_REG_ARM_COPROC_MASK
2314                              | KVM_REG_ARM64_SYSREG_OP0_MASK
2315                              | KVM_REG_ARM64_SYSREG_OP1_MASK
2316                              | KVM_REG_ARM64_SYSREG_CRN_MASK
2317                              | KVM_REG_ARM64_SYSREG_CRM_MASK
2318                              | KVM_REG_ARM64_SYSREG_OP2_MASK))
2319                        return false;
2320                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2321                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2322                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2323                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2324                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2325                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2326                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2327                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2328                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2329                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2330                return true;
2331        default:
2332                return false;
2333        }
2334}
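
/*
 * Example: the userspace index for SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0,
 * Op2=0) is KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 * (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | (1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT).
 * index_to_params() recovers the five fields from such an index, and
 * sys_reg_to_index() below rebuilds it, so valid sysreg indices round-trip.
 */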
2335
2336const struct sys_reg_desc *find_reg_by_id(u64 id,
2337                                          struct sys_reg_params *params,
2338                                          const struct sys_reg_desc table[],
2339                                          unsigned int num)
2340{
2341        if (!index_to_params(id, params))
2342                return NULL;
2343
2344        return find_reg(params, table, num);
2345}
2346
2347/* Decode an index value, and find the sys_reg_desc entry. */
2348static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
2349                                                    u64 id)
2350{
2351        size_t num;
2352        const struct sys_reg_desc *table, *r;
2353        struct sys_reg_params params;
2354
2355        /* We only do sys_reg for now. */
2356        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2357                return NULL;
2358
2359        table = get_target_table(vcpu->arch.target, true, &num);
2360        r = find_reg_by_id(id, &params, table, num);
2361        if (!r)
2362                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2363
2364        /* Not saved in the sys_reg array and not otherwise accessible? */
2365        if (r && !(r->reg || r->get_user))
2366                r = NULL;
2367
2368        return r;
2369}
2370
2371/*
2372 * These are the invariant sys_reg registers: we let the guest see the
2373 * host versions of these, so they're part of the guest state.
2374 *
2375 * A future CPU may provide a mechanism to present different values to
2376 * the guest, or a future kvm may trap them.
2377 */
2378
2379#define FUNCTION_INVARIANT(reg)                                         \
2380        static void get_##reg(struct kvm_vcpu *v,                       \
2381                              const struct sys_reg_desc *r)             \
2382        {                                                               \
2383                ((struct sys_reg_desc *)r)->val = read_sysreg(reg);     \
2384        }
2385
2386FUNCTION_INVARIANT(midr_el1)
2387FUNCTION_INVARIANT(revidr_el1)
2388FUNCTION_INVARIANT(clidr_el1)
2389FUNCTION_INVARIANT(aidr_el1)
2390
2391static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
2392{
2393        ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
2394}
2395
2396/* ->val is filled in by kvm_sys_reg_table_init() */
2397static struct sys_reg_desc invariant_sys_regs[] = {
2398        { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2399        { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
2400        { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2401        { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2402        { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
2403};
2404
2405static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2406{
2407        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2408                return -EFAULT;
2409        return 0;
2410}
2411
2412static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2413{
2414        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2415                return -EFAULT;
2416        return 0;
2417}
2418
2419static int get_invariant_sys_reg(u64 id, void __user *uaddr)
2420{
2421        struct sys_reg_params params;
2422        const struct sys_reg_desc *r;
2423
2424        r = find_reg_by_id(id, &params, invariant_sys_regs,
2425                           ARRAY_SIZE(invariant_sys_regs));
2426        if (!r)
2427                return -ENOENT;
2428
2429        return reg_to_user(uaddr, &r->val, id);
2430}
2431
2432static int set_invariant_sys_reg(u64 id, void __user *uaddr)
2433{
2434        struct sys_reg_params params;
2435        const struct sys_reg_desc *r;
2436        int err;
2437        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
2438
2439        r = find_reg_by_id(id, &params, invariant_sys_regs,
2440                           ARRAY_SIZE(invariant_sys_regs));
2441        if (!r)
2442                return -ENOENT;
2443
2444        err = reg_from_user(&val, uaddr, id);
2445        if (err)
2446                return err;
2447
2448        /* This is what we mean by invariant: you can't change it. */
2449        if (r->val != val)
2450                return -EINVAL;
2451
2452        return 0;
2453}
2454
2455static bool is_valid_cache(u32 val)
2456{
2457        u32 level, ctype;
2458
2459        if (val >= CSSELR_MAX)
2460                return false;
2461
2462        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
2463        level = (val >> 1);
2464        ctype = (cache_levels >> (level * 3)) & 7;
2465
2466        switch (ctype) {
2467        case 0: /* No cache */
2468                return false;
2469        case 1: /* Instruction cache only */
2470                return (val & 1);
2471        case 2: /* Data cache only */
2472        case 4: /* Unified cache */
2473                return !(val & 1);
2474        case 3: /* Separate instruction and data caches */
2475                return true;
2476        default: /* Reserved: we can't know instruction or data. */
2477                return false;
2478        }
2479}
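
/*
 * Example: a CSSELR value of 1 selects the level 1 instruction cache
 * (level = 1 >> 1 = 0, InD bit set), which is only valid when Ctype1 in
 * cache_levels reads 1 (instruction only) or 3 (separate I and D caches).
 */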
2480
2481static int demux_c15_get(u64 id, void __user *uaddr)
2482{
2483        u32 val;
2484        u32 __user *uval = uaddr;
2485
2486        /* Fail if we have unknown bits set. */
2487        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2488                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2489                return -ENOENT;
2490
2491        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2492        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2493                if (KVM_REG_SIZE(id) != 4)
2494                        return -ENOENT;
2495                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2496                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2497                if (!is_valid_cache(val))
2498                        return -ENOENT;
2499
2500                return put_user(get_ccsidr(val), uval);
2501        default:
2502                return -ENOENT;
2503        }
2504}
2505
2506static int demux_c15_set(u64 id, void __user *uaddr)
2507{
2508        u32 val, newval;
2509        u32 __user *uval = uaddr;
2510
2511        /* Fail if we have unknown bits set. */
2512        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2513                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2514                return -ENOENT;
2515
2516        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2517        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2518                if (KVM_REG_SIZE(id) != 4)
2519                        return -ENOENT;
2520                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2521                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2522                if (!is_valid_cache(val))
2523                        return -ENOENT;
2524
2525                if (get_user(newval, uval))
2526                        return -EFAULT;
2527
2528                /* This is also invariant: you can't change it. */
2529                if (newval != get_ccsidr(val))
2530                        return -EINVAL;
2531                return 0;
2532        default:
2533                return -ENOENT;
2534        }
2535}
2536
2537int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2538{
2539        const struct sys_reg_desc *r;
2540        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2541
2542        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2543                return demux_c15_get(reg->id, uaddr);
2544
2545        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2546                return -ENOENT;
2547
2548        r = index_to_sys_reg_desc(vcpu, reg->id);
2549        if (!r)
2550                return get_invariant_sys_reg(reg->id, uaddr);
2551
2552        /* Check for regs disabled by runtime config */
2553        if (sysreg_hidden_from_user(vcpu, r))
2554                return -ENOENT;
2555
2556        if (r->get_user)
2557                return (r->get_user)(vcpu, r, reg, uaddr);
2558
2559        return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
2560}
2561
2562int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2563{
2564        const struct sys_reg_desc *r;
2565        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2566
2567        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2568                return demux_c15_set(reg->id, uaddr);
2569
2570        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2571                return -ENOENT;
2572
2573        r = index_to_sys_reg_desc(vcpu, reg->id);
2574        if (!r)
2575                return set_invariant_sys_reg(reg->id, uaddr);
2576
2577        /* Check for regs disabled by runtime config */
2578        if (sysreg_hidden_from_user(vcpu, r))
2579                return -ENOENT;
2580
2581        if (r->set_user)
2582                return (r->set_user)(vcpu, r, reg, uaddr);
2583
2584        return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2585}
2586
2587static unsigned int num_demux_regs(void)
2588{
2589        unsigned int i, count = 0;
2590
2591        for (i = 0; i < CSSELR_MAX; i++)
2592                if (is_valid_cache(i))
2593                        count++;
2594
2595        return count;
2596}
2597
2598static int write_demux_regids(u64 __user *uindices)
2599{
2600        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2601        unsigned int i;
2602
2603        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2604        for (i = 0; i < CSSELR_MAX; i++) {
2605                if (!is_valid_cache(i))
2606                        continue;
2607                if (put_user(val | i, uindices))
2608                        return -EFAULT;
2609                uindices++;
2610        }
2611        return 0;
2612}
2613
2614static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2615{
2616        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2617                KVM_REG_ARM64_SYSREG |
2618                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2619                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2620                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2621                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2622                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2623}
2624
2625static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2626{
2627        if (!*uind)
2628                return true;
2629
2630        if (put_user(sys_reg_to_index(reg), *uind))
2631                return false;
2632
2633        (*uind)++;
2634        return true;
2635}
2636
2637static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
2638                            const struct sys_reg_desc *rd,
2639                            u64 __user **uind,
2640                            unsigned int *total)
2641{
2642        /*
2643         * Ignore registers we trap but don't save,
2644         * and for which no custom user accessor is provided.
2645         */
2646        if (!(rd->reg || rd->get_user))
2647                return 0;
2648
2649        if (sysreg_hidden_from_user(vcpu, rd))
2650                return 0;
2651
2652        if (!copy_reg_to_user(rd, uind))
2653                return -EFAULT;
2654
2655        (*total)++;
2656        return 0;
2657}
2658
2659/* Assumes ordered tables; see kvm_sys_reg_table_init(). */
2660static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2661{
2662        const struct sys_reg_desc *i1, *i2, *end1, *end2;
2663        unsigned int total = 0;
2664        size_t num;
2665        int err;
2666
2667        /* We check for duplicates here, to allow arch-specific overrides. */
2668        i1 = get_target_table(vcpu->arch.target, true, &num);
2669        end1 = i1 + num;
2670        i2 = sys_reg_descs;
2671        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2672
2673        BUG_ON(i1 == end1 || i2 == end2);
2674
2675        /* Walk carefully, as both tables may refer to the same register. */
2676        while (i1 || i2) {
2677                int cmp = cmp_sys_reg(i1, i2);
2678                /* target-specific overrides generic entry. */
2679                if (cmp <= 0)
2680                        err = walk_one_sys_reg(vcpu, i1, &uind, &total);
2681                else
2682                        err = walk_one_sys_reg(vcpu, i2, &uind, &total);
2683
2684                if (err)
2685                        return err;
2686
2687                if (cmp <= 0 && ++i1 == end1)
2688                        i1 = NULL;
2689                if (cmp >= 0 && ++i2 == end2)
2690                        i2 = NULL;
2691        }
2692        return total;
2693}
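
/*
 * The loop above is the merge step of a mergesort over two already sorted
 * tables: when both tables describe the same encoding (cmp == 0), only the
 * target-specific entry i1 is reported to userspace, and both cursors then
 * advance so the generic duplicate is skipped.
 */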
2694
2695unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2696{
2697        return ARRAY_SIZE(invariant_sys_regs)
2698                + num_demux_regs()
2699                + walk_sys_regs(vcpu, (u64 __user *)NULL);
2700}
2701
2702int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2703{
2704        unsigned int i;
2705        int err;
2706
2707        /* Start by giving userspace all the invariant registers' indices. */
2708        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2709                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2710                        return -EFAULT;
2711                uindices++;
2712        }
2713
2714        err = walk_sys_regs(vcpu, uindices);
2715        if (err < 0)
2716                return err;
2717        uindices += err;
2718
2719        return write_demux_regids(uindices);
2720}
2721
2722static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2723{
2724        unsigned int i;
2725
2726        for (i = 1; i < n; i++) {
2727                if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2728                        kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2729                        return 1;
2730                }
2731        }
2732
2733        return 0;
2734}
2735
2736void kvm_sys_reg_table_init(void)
2737{
2738        unsigned int i;
2739        struct sys_reg_desc clidr;
2740
2741        /* Make sure tables are unique and in order. */
2742        BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
2743        BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
2744        BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
2745        BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
2746        BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
2747        BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
2748
2749        /* We abuse the reset function to overwrite the table itself. */
2750        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2751                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2752
2753        /*
2754         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
2755         *
2756         *   If software reads the Cache Type fields from Ctype1
2757         *   upwards, once it has seen a value of 0b000, no caches
2758         *   exist at further-out levels of the hierarchy. So, for
2759         *   example, if Ctype3 is the first Cache Type field with a
2760         *   value of 0b000, the values of Ctype4 to Ctype7 must be
2761         *   ignored.
2762         */
2763        get_clidr_el1(NULL, &clidr); /* Ugly... */
2764        cache_levels = clidr.val;
2765        for (i = 0; i < 7; i++)
2766                if (((cache_levels >> (i*3)) & 7) == 0)
2767                        break;
2768        /* Clear all higher bits. */
2769        cache_levels &= (1 << (i*3))-1;
2770}
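
/*
 * Example of the truncation above: with Ctype1 = 0b011 (separate I and D
 * caches), Ctype2 = 0b100 (unified) and Ctype3 = 0b000, the loop breaks at
 * i = 2 and cache_levels is masked down to bits [5:0], keeping only the two
 * levels that actually exist.
 */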
2771
2772/**
2773 * kvm_reset_sys_regs - sets system registers to reset value
2774 * @vcpu: The VCPU pointer
2775 *
2776 * This function finds the right table above and sets the registers on the
2777 * virtual CPU struct to their architecturally defined reset values.
2778 */
2779void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2780{
2781        size_t num;
2782        const struct sys_reg_desc *table;
2783        DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
2784
2785        /* Generic chip reset first (so target could override). */
2786        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
2787
2788        table = get_target_table(vcpu->arch.target, true, &num);
2789        reset_sys_reg_descs(vcpu, table, num, bmap);
2790
2791        for (num = 1; num < NR_SYS_REGS; num++) {
2792                if (WARN(!test_bit(num, bmap),
2793                         "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2794                        break;
2795        }
2796}
2797