linux/arch/arm64/kvm/sys_regs.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2012,2013 - ARM Ltd
   4 * Author: Marc Zyngier <marc.zyngier@arm.com>
   5 *
   6 * Derived from arch/arm/kvm/coproc.c:
   7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   8 * Authors: Rusty Russell <rusty@rustcorp.com.au>
   9 *          Christoffer Dall <c.dall@virtualopensystems.com>
  10 */
  11
  12#include <linux/bsearch.h>
  13#include <linux/kvm_host.h>
  14#include <linux/mm.h>
  15#include <linux/printk.h>
  16#include <linux/uaccess.h>
  17
  18#include <asm/cacheflush.h>
  19#include <asm/cputype.h>
  20#include <asm/debug-monitors.h>
  21#include <asm/esr.h>
  22#include <asm/kvm_arm.h>
  23#include <asm/kvm_coproc.h>
  24#include <asm/kvm_emulate.h>
  25#include <asm/kvm_host.h>
  26#include <asm/kvm_hyp.h>
  27#include <asm/kvm_mmu.h>
  28#include <asm/perf_event.h>
  29#include <asm/sysreg.h>
  30
  31#include <trace/events/kvm.h>
  32
  33#include "sys_regs.h"
  34
  35#include "trace.h"
  36
  37/*
  38 * All of this file is extremly similar to the ARM coproc.c, but the
  39 * types are different. My gut feeling is that it should be pretty
  40 * easy to merge, but that would be an ABI breakage -- again. VFP
  41 * would also need to be abstracted.
  42 *
  43 * For AArch32, we only take care of what is being trapped. Anything
  44 * that has to do with init and userspace access has to go via the
  45 * 64bit interface.
  46 */
  47
  48static bool read_from_write_only(struct kvm_vcpu *vcpu,
  49                                 struct sys_reg_params *params,
  50                                 const struct sys_reg_desc *r)
  51{
  52        WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
  53        print_sys_reg_instr(params);
  54        kvm_inject_undefined(vcpu);
  55        return false;
  56}
  57
  58static bool write_to_read_only(struct kvm_vcpu *vcpu,
  59                               struct sys_reg_params *params,
  60                               const struct sys_reg_desc *r)
  61{
  62        WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
  63        print_sys_reg_instr(params);
  64        kvm_inject_undefined(vcpu);
  65        return false;
  66}
  67
  68u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
  69{
  70        if (!vcpu->arch.sysregs_loaded_on_cpu)
  71                goto immediate_read;
  72
  73        /*
  74         * System registers listed in the switch are not saved on every
  75         * exit from the guest but are only saved on vcpu_put.
  76         *
  77         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
  78         * should never be listed below, because the guest cannot modify its
  79         * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
  80         * thread when emulating cross-VCPU communication.
  81         */
  82        switch (reg) {
  83        case CSSELR_EL1:        return read_sysreg_s(SYS_CSSELR_EL1);
  84        case SCTLR_EL1:         return read_sysreg_s(SYS_SCTLR_EL12);
  85        case ACTLR_EL1:         return read_sysreg_s(SYS_ACTLR_EL1);
  86        case CPACR_EL1:         return read_sysreg_s(SYS_CPACR_EL12);
  87        case TTBR0_EL1:         return read_sysreg_s(SYS_TTBR0_EL12);
  88        case TTBR1_EL1:         return read_sysreg_s(SYS_TTBR1_EL12);
  89        case TCR_EL1:           return read_sysreg_s(SYS_TCR_EL12);
  90        case ESR_EL1:           return read_sysreg_s(SYS_ESR_EL12);
  91        case AFSR0_EL1:         return read_sysreg_s(SYS_AFSR0_EL12);
  92        case AFSR1_EL1:         return read_sysreg_s(SYS_AFSR1_EL12);
  93        case FAR_EL1:           return read_sysreg_s(SYS_FAR_EL12);
  94        case MAIR_EL1:          return read_sysreg_s(SYS_MAIR_EL12);
  95        case VBAR_EL1:          return read_sysreg_s(SYS_VBAR_EL12);
  96        case CONTEXTIDR_EL1:    return read_sysreg_s(SYS_CONTEXTIDR_EL12);
  97        case TPIDR_EL0:         return read_sysreg_s(SYS_TPIDR_EL0);
  98        case TPIDRRO_EL0:       return read_sysreg_s(SYS_TPIDRRO_EL0);
  99        case TPIDR_EL1:         return read_sysreg_s(SYS_TPIDR_EL1);
 100        case AMAIR_EL1:         return read_sysreg_s(SYS_AMAIR_EL12);
 101        case CNTKCTL_EL1:       return read_sysreg_s(SYS_CNTKCTL_EL12);
 102        case PAR_EL1:           return read_sysreg_s(SYS_PAR_EL1);
 103        case DACR32_EL2:        return read_sysreg_s(SYS_DACR32_EL2);
 104        case IFSR32_EL2:        return read_sysreg_s(SYS_IFSR32_EL2);
 105        case DBGVCR32_EL2:      return read_sysreg_s(SYS_DBGVCR32_EL2);
 106        }
 107
 108immediate_read:
 109        return __vcpu_sys_reg(vcpu, reg);
 110}
 111
 112void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 113{
 114        if (!vcpu->arch.sysregs_loaded_on_cpu)
 115                goto immediate_write;
 116
 117        /*
 118         * System registers listed in the switch are not restored on every
 119         * entry to the guest but are only restored on vcpu_load.
 120         *
 121         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
 122         * should never be listed below, because the the MPIDR should only be
 123         * set once, before running the VCPU, and never changed later.
 124         */
 125        switch (reg) {
 126        case CSSELR_EL1:        write_sysreg_s(val, SYS_CSSELR_EL1);    return;
 127        case SCTLR_EL1:         write_sysreg_s(val, SYS_SCTLR_EL12);    return;
 128        case ACTLR_EL1:         write_sysreg_s(val, SYS_ACTLR_EL1);     return;
 129        case CPACR_EL1:         write_sysreg_s(val, SYS_CPACR_EL12);    return;
 130        case TTBR0_EL1:         write_sysreg_s(val, SYS_TTBR0_EL12);    return;
 131        case TTBR1_EL1:         write_sysreg_s(val, SYS_TTBR1_EL12);    return;
 132        case TCR_EL1:           write_sysreg_s(val, SYS_TCR_EL12);      return;
 133        case ESR_EL1:           write_sysreg_s(val, SYS_ESR_EL12);      return;
 134        case AFSR0_EL1:         write_sysreg_s(val, SYS_AFSR0_EL12);    return;
 135        case AFSR1_EL1:         write_sysreg_s(val, SYS_AFSR1_EL12);    return;
 136        case FAR_EL1:           write_sysreg_s(val, SYS_FAR_EL12);      return;
 137        case MAIR_EL1:          write_sysreg_s(val, SYS_MAIR_EL12);     return;
 138        case VBAR_EL1:          write_sysreg_s(val, SYS_VBAR_EL12);     return;
 139        case CONTEXTIDR_EL1:    write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
 140        case TPIDR_EL0:         write_sysreg_s(val, SYS_TPIDR_EL0);     return;
 141        case TPIDRRO_EL0:       write_sysreg_s(val, SYS_TPIDRRO_EL0);   return;
 142        case TPIDR_EL1:         write_sysreg_s(val, SYS_TPIDR_EL1);     return;
 143        case AMAIR_EL1:         write_sysreg_s(val, SYS_AMAIR_EL12);    return;
 144        case CNTKCTL_EL1:       write_sysreg_s(val, SYS_CNTKCTL_EL12);  return;
 145        case PAR_EL1:           write_sysreg_s(val, SYS_PAR_EL1);       return;
 146        case DACR32_EL2:        write_sysreg_s(val, SYS_DACR32_EL2);    return;
 147        case IFSR32_EL2:        write_sysreg_s(val, SYS_IFSR32_EL2);    return;
 148        case DBGVCR32_EL2:      write_sysreg_s(val, SYS_DBGVCR32_EL2);  return;
 149        }
 150
 151immediate_write:
 152         __vcpu_sys_reg(vcpu, reg) = val;
 153}
 154
 155/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
 156static u32 cache_levels;
 157
 158/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
 159#define CSSELR_MAX 12
 160
 161/* Which cache CCSIDR represents depends on CSSELR value. */
 162static u32 get_ccsidr(u32 csselr)
 163{
 164        u32 ccsidr;
 165
 166        /* Make sure noone else changes CSSELR during this! */
 167        local_irq_disable();
 168        write_sysreg(csselr, csselr_el1);
 169        isb();
 170        ccsidr = read_sysreg(ccsidr_el1);
 171        local_irq_enable();
 172
 173        return ccsidr;
 174}
 175
 176/*
 177 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 178 */
 179static bool access_dcsw(struct kvm_vcpu *vcpu,
 180                        struct sys_reg_params *p,
 181                        const struct sys_reg_desc *r)
 182{
 183        if (!p->is_write)
 184                return read_from_write_only(vcpu, p, r);
 185
 186        /*
 187         * Only track S/W ops if we don't have FWB. It still indicates
 188         * that the guest is a bit broken (S/W operations should only
 189         * be done by firmware, knowing that there is only a single
 190         * CPU left in the system, and certainly not from non-secure
 191         * software).
 192         */
 193        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
 194                kvm_set_way_flush(vcpu);
 195
 196        return true;
 197}
 198
 199/*
 200 * Generic accessor for VM registers. Only called as long as HCR_TVM
 201 * is set. If the guest enables the MMU, we stop trapping the VM
 202 * sys_regs and leave it in complete control of the caches.
 203 */
 204static bool access_vm_reg(struct kvm_vcpu *vcpu,
 205                          struct sys_reg_params *p,
 206                          const struct sys_reg_desc *r)
 207{
 208        bool was_enabled = vcpu_has_cache_enabled(vcpu);
 209        u64 val;
 210        int reg = r->reg;
 211
 212        BUG_ON(!p->is_write);
 213
 214        /* See the 32bit mapping in kvm_host.h */
 215        if (p->is_aarch32)
 216                reg = r->reg / 2;
 217
 218        if (!p->is_aarch32 || !p->is_32bit) {
 219                val = p->regval;
 220        } else {
 221                val = vcpu_read_sys_reg(vcpu, reg);
 222                if (r->reg % 2)
 223                        val = (p->regval << 32) | (u64)lower_32_bits(val);
 224                else
 225                        val = ((u64)upper_32_bits(val) << 32) |
 226                                lower_32_bits(p->regval);
 227        }
 228        vcpu_write_sys_reg(vcpu, val, reg);
 229
 230        kvm_toggle_cache(vcpu, was_enabled);
 231        return true;
 232}
 233
 234/*
 235 * Trap handler for the GICv3 SGI generation system register.
 236 * Forward the request to the VGIC emulation.
 237 * The cp15_64 code makes sure this automatically works
 238 * for both AArch64 and AArch32 accesses.
 239 */
 240static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 241                           struct sys_reg_params *p,
 242                           const struct sys_reg_desc *r)
 243{
 244        bool g1;
 245
 246        if (!p->is_write)
 247                return read_from_write_only(vcpu, p, r);
 248
 249        /*
 250         * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
 251         * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
 252         * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
 253         * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
 254         * group.
 255         */
 256        if (p->is_aarch32) {
 257                switch (p->Op1) {
 258                default:                /* Keep GCC quiet */
 259                case 0:                 /* ICC_SGI1R */
 260                        g1 = true;
 261                        break;
 262                case 1:                 /* ICC_ASGI1R */
 263                case 2:                 /* ICC_SGI0R */
 264                        g1 = false;
 265                        break;
 266                }
 267        } else {
 268                switch (p->Op2) {
 269                default:                /* Keep GCC quiet */
 270                case 5:                 /* ICC_SGI1R_EL1 */
 271                        g1 = true;
 272                        break;
 273                case 6:                 /* ICC_ASGI1R_EL1 */
 274                case 7:                 /* ICC_SGI0R_EL1 */
 275                        g1 = false;
 276                        break;
 277                }
 278        }
 279
 280        vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
 281
 282        return true;
 283}
 284
 285static bool access_gic_sre(struct kvm_vcpu *vcpu,
 286                           struct sys_reg_params *p,
 287                           const struct sys_reg_desc *r)
 288{
 289        if (p->is_write)
 290                return ignore_write(vcpu, p);
 291
 292        p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
 293        return true;
 294}
 295
 296static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 297                        struct sys_reg_params *p,
 298                        const struct sys_reg_desc *r)
 299{
 300        if (p->is_write)
 301                return ignore_write(vcpu, p);
 302        else
 303                return read_zero(vcpu, p);
 304}
 305
 306/*
 307 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 308 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 309 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 310 * treat it separately.
 311 */
 312static bool trap_loregion(struct kvm_vcpu *vcpu,
 313                          struct sys_reg_params *p,
 314                          const struct sys_reg_desc *r)
 315{
 316        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 317        u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
 318                         (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 319
 320        if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
 321                kvm_inject_undefined(vcpu);
 322                return false;
 323        }
 324
 325        if (p->is_write && sr == SYS_LORID_EL1)
 326                return write_to_read_only(vcpu, p, r);
 327
 328        return trap_raz_wi(vcpu, p, r);
 329}
 330
 331static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
 332                           struct sys_reg_params *p,
 333                           const struct sys_reg_desc *r)
 334{
 335        if (p->is_write) {
 336                return ignore_write(vcpu, p);
 337        } else {
 338                p->regval = (1 << 3);
 339                return true;
 340        }
 341}
 342
 343static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 344                                   struct sys_reg_params *p,
 345                                   const struct sys_reg_desc *r)
 346{
 347        if (p->is_write) {
 348                return ignore_write(vcpu, p);
 349        } else {
 350                p->regval = read_sysreg(dbgauthstatus_el1);
 351                return true;
 352        }
 353}
 354
 355/*
 356 * We want to avoid world-switching all the DBG registers all the
 357 * time:
 358 * 
 359 * - If we've touched any debug register, it is likely that we're
 360 *   going to touch more of them. It then makes sense to disable the
 361 *   traps and start doing the save/restore dance
 362 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 363 *   then mandatory to save/restore the registers, as the guest
 364 *   depends on them.
 365 * 
 366 * For this, we use a DIRTY bit, indicating the guest has modified the
 367 * debug registers, used as follow:
 368 *
 369 * On guest entry:
 370 * - If the dirty bit is set (because we're coming back from trapping),
 371 *   disable the traps, save host registers, restore guest registers.
 372 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 373 *   set the dirty bit, disable the traps, save host registers,
 374 *   restore guest registers.
 375 * - Otherwise, enable the traps
 376 *
 377 * On guest exit:
 378 * - If the dirty bit is set, save guest registers, restore host
 379 *   registers and clear the dirty bit. This ensure that the host can
 380 *   now use the debug registers.
 381 */
 382static bool trap_debug_regs(struct kvm_vcpu *vcpu,
 383                            struct sys_reg_params *p,
 384                            const struct sys_reg_desc *r)
 385{
 386        if (p->is_write) {
 387                vcpu_write_sys_reg(vcpu, p->regval, r->reg);
 388                vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
 389        } else {
 390                p->regval = vcpu_read_sys_reg(vcpu, r->reg);
 391        }
 392
 393        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
 394
 395        return true;
 396}
 397
 398/*
 399 * reg_to_dbg/dbg_to_reg
 400 *
 401 * A 32 bit write to a debug register leave top bits alone
 402 * A 32 bit read from a debug register only returns the bottom bits
 403 *
 404 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 405 * hyp.S code switches between host and guest values in future.
 406 */
 407static void reg_to_dbg(struct kvm_vcpu *vcpu,
 408                       struct sys_reg_params *p,
 409                       u64 *dbg_reg)
 410{
 411        u64 val = p->regval;
 412
 413        if (p->is_32bit) {
 414                val &= 0xffffffffUL;
 415                val |= ((*dbg_reg >> 32) << 32);
 416        }
 417
 418        *dbg_reg = val;
 419        vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
 420}
 421
 422static void dbg_to_reg(struct kvm_vcpu *vcpu,
 423                       struct sys_reg_params *p,
 424                       u64 *dbg_reg)
 425{
 426        p->regval = *dbg_reg;
 427        if (p->is_32bit)
 428                p->regval &= 0xffffffffUL;
 429}
 430
 431static bool trap_bvr(struct kvm_vcpu *vcpu,
 432                     struct sys_reg_params *p,
 433                     const struct sys_reg_desc *rd)
 434{
 435        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 436
 437        if (p->is_write)
 438                reg_to_dbg(vcpu, p, dbg_reg);
 439        else
 440                dbg_to_reg(vcpu, p, dbg_reg);
 441
 442        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 443
 444        return true;
 445}
 446
 447static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 448                const struct kvm_one_reg *reg, void __user *uaddr)
 449{
 450        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 451
 452        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 453                return -EFAULT;
 454        return 0;
 455}
 456
 457static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 458        const struct kvm_one_reg *reg, void __user *uaddr)
 459{
 460        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 461
 462        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 463                return -EFAULT;
 464        return 0;
 465}
 466
 467static void reset_bvr(struct kvm_vcpu *vcpu,
 468                      const struct sys_reg_desc *rd)
 469{
 470        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
 471}
 472
 473static bool trap_bcr(struct kvm_vcpu *vcpu,
 474                     struct sys_reg_params *p,
 475                     const struct sys_reg_desc *rd)
 476{
 477        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 478
 479        if (p->is_write)
 480                reg_to_dbg(vcpu, p, dbg_reg);
 481        else
 482                dbg_to_reg(vcpu, p, dbg_reg);
 483
 484        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 485
 486        return true;
 487}
 488
 489static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 490                const struct kvm_one_reg *reg, void __user *uaddr)
 491{
 492        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 493
 494        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 495                return -EFAULT;
 496
 497        return 0;
 498}
 499
 500static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 501        const struct kvm_one_reg *reg, void __user *uaddr)
 502{
 503        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 504
 505        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 506                return -EFAULT;
 507        return 0;
 508}
 509
 510static void reset_bcr(struct kvm_vcpu *vcpu,
 511                      const struct sys_reg_desc *rd)
 512{
 513        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
 514}
 515
 516static bool trap_wvr(struct kvm_vcpu *vcpu,
 517                     struct sys_reg_params *p,
 518                     const struct sys_reg_desc *rd)
 519{
 520        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 521
 522        if (p->is_write)
 523                reg_to_dbg(vcpu, p, dbg_reg);
 524        else
 525                dbg_to_reg(vcpu, p, dbg_reg);
 526
 527        trace_trap_reg(__func__, rd->reg, p->is_write,
 528                vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
 529
 530        return true;
 531}
 532
 533static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 534                const struct kvm_one_reg *reg, void __user *uaddr)
 535{
 536        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 537
 538        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 539                return -EFAULT;
 540        return 0;
 541}
 542
 543static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 544        const struct kvm_one_reg *reg, void __user *uaddr)
 545{
 546        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 547
 548        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 549                return -EFAULT;
 550        return 0;
 551}
 552
 553static void reset_wvr(struct kvm_vcpu *vcpu,
 554                      const struct sys_reg_desc *rd)
 555{
 556        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
 557}
 558
 559static bool trap_wcr(struct kvm_vcpu *vcpu,
 560                     struct sys_reg_params *p,
 561                     const struct sys_reg_desc *rd)
 562{
 563        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 564
 565        if (p->is_write)
 566                reg_to_dbg(vcpu, p, dbg_reg);
 567        else
 568                dbg_to_reg(vcpu, p, dbg_reg);
 569
 570        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 571
 572        return true;
 573}
 574
 575static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 576                const struct kvm_one_reg *reg, void __user *uaddr)
 577{
 578        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 579
 580        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 581                return -EFAULT;
 582        return 0;
 583}
 584
 585static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 586        const struct kvm_one_reg *reg, void __user *uaddr)
 587{
 588        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 589
 590        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 591                return -EFAULT;
 592        return 0;
 593}
 594
 595static void reset_wcr(struct kvm_vcpu *vcpu,
 596                      const struct sys_reg_desc *rd)
 597{
 598        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
 599}
 600
 601static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 602{
 603        u64 amair = read_sysreg(amair_el1);
 604        vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
 605}
 606
 607static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 608{
 609        u64 mpidr;
 610
 611        /*
 612         * Map the vcpu_id into the first three affinity level fields of
 613         * the MPIDR. We limit the number of VCPUs in level 0 due to a
 614         * limitation to 16 CPUs in that level in the ICC_SGIxR registers
 615         * of the GICv3 to be able to address each CPU directly when
 616         * sending IPIs.
 617         */
 618        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
 619        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
 620        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
 621        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 622}
 623
 624static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 625{
 626        u64 pmcr, val;
 627
 628        pmcr = read_sysreg(pmcr_el0);
 629        /*
 630         * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
 631         * except PMCR.E resetting to zero.
 632         */
 633        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 634               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
 635        if (!system_supports_32bit_el0())
 636                val |= ARMV8_PMU_PMCR_LC;
 637        __vcpu_sys_reg(vcpu, r->reg) = val;
 638}
 639
 640static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 641{
 642        u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
 643        bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 644
 645        if (!enabled)
 646                kvm_inject_undefined(vcpu);
 647
 648        return !enabled;
 649}
 650
 651static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
 652{
 653        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
 654}
 655
 656static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
 657{
 658        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
 659}
 660
 661static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
 662{
 663        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
 664}
 665
 666static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
 667{
 668        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
 669}
 670
 671static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 672                        const struct sys_reg_desc *r)
 673{
 674        u64 val;
 675
 676        if (!kvm_arm_pmu_v3_ready(vcpu))
 677                return trap_raz_wi(vcpu, p, r);
 678
 679        if (pmu_access_el0_disabled(vcpu))
 680                return false;
 681
 682        if (p->is_write) {
 683                /* Only update writeable bits of PMCR */
 684                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 685                val &= ~ARMV8_PMU_PMCR_MASK;
 686                val |= p->regval & ARMV8_PMU_PMCR_MASK;
 687                if (!system_supports_32bit_el0())
 688                        val |= ARMV8_PMU_PMCR_LC;
 689                __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 690                kvm_pmu_handle_pmcr(vcpu, val);
 691                kvm_vcpu_pmu_restore_guest(vcpu);
 692        } else {
 693                /* PMCR.P & PMCR.C are RAZ */
 694                val = __vcpu_sys_reg(vcpu, PMCR_EL0)
 695                      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
 696                p->regval = val;
 697        }
 698
 699        return true;
 700}
 701
 702static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 703                          const struct sys_reg_desc *r)
 704{
 705        if (!kvm_arm_pmu_v3_ready(vcpu))
 706                return trap_raz_wi(vcpu, p, r);
 707
 708        if (pmu_access_event_counter_el0_disabled(vcpu))
 709                return false;
 710
 711        if (p->is_write)
 712                __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
 713        else
 714                /* return PMSELR.SEL field */
 715                p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
 716                            & ARMV8_PMU_COUNTER_MASK;
 717
 718        return true;
 719}
 720
 721static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 722                          const struct sys_reg_desc *r)
 723{
 724        u64 pmceid;
 725
 726        if (!kvm_arm_pmu_v3_ready(vcpu))
 727                return trap_raz_wi(vcpu, p, r);
 728
 729        BUG_ON(p->is_write);
 730
 731        if (pmu_access_el0_disabled(vcpu))
 732                return false;
 733
 734        if (!(p->Op2 & 1))
 735                pmceid = read_sysreg(pmceid0_el0);
 736        else
 737                pmceid = read_sysreg(pmceid1_el0);
 738
 739        p->regval = pmceid;
 740
 741        return true;
 742}
 743
 744static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 745{
 746        u64 pmcr, val;
 747
 748        pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
 749        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 750        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
 751                kvm_inject_undefined(vcpu);
 752                return false;
 753        }
 754
 755        return true;
 756}
 757
 758static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 759                              struct sys_reg_params *p,
 760                              const struct sys_reg_desc *r)
 761{
 762        u64 idx;
 763
 764        if (!kvm_arm_pmu_v3_ready(vcpu))
 765                return trap_raz_wi(vcpu, p, r);
 766
 767        if (r->CRn == 9 && r->CRm == 13) {
 768                if (r->Op2 == 2) {
 769                        /* PMXEVCNTR_EL0 */
 770                        if (pmu_access_event_counter_el0_disabled(vcpu))
 771                                return false;
 772
 773                        idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
 774                              & ARMV8_PMU_COUNTER_MASK;
 775                } else if (r->Op2 == 0) {
 776                        /* PMCCNTR_EL0 */
 777                        if (pmu_access_cycle_counter_el0_disabled(vcpu))
 778                                return false;
 779
 780                        idx = ARMV8_PMU_CYCLE_IDX;
 781                } else {
 782                        return false;
 783                }
 784        } else if (r->CRn == 0 && r->CRm == 9) {
 785                /* PMCCNTR */
 786                if (pmu_access_event_counter_el0_disabled(vcpu))
 787                        return false;
 788
 789                idx = ARMV8_PMU_CYCLE_IDX;
 790        } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
 791                /* PMEVCNTRn_EL0 */
 792                if (pmu_access_event_counter_el0_disabled(vcpu))
 793                        return false;
 794
 795                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 796        } else {
 797                return false;
 798        }
 799
 800        if (!pmu_counter_idx_valid(vcpu, idx))
 801                return false;
 802
 803        if (p->is_write) {
 804                if (pmu_access_el0_disabled(vcpu))
 805                        return false;
 806
 807                kvm_pmu_set_counter_value(vcpu, idx, p->regval);
 808        } else {
 809                p->regval = kvm_pmu_get_counter_value(vcpu, idx);
 810        }
 811
 812        return true;
 813}
 814
 815static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 816                               const struct sys_reg_desc *r)
 817{
 818        u64 idx, reg;
 819
 820        if (!kvm_arm_pmu_v3_ready(vcpu))
 821                return trap_raz_wi(vcpu, p, r);
 822
 823        if (pmu_access_el0_disabled(vcpu))
 824                return false;
 825
 826        if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
 827                /* PMXEVTYPER_EL0 */
 828                idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
 829                reg = PMEVTYPER0_EL0 + idx;
 830        } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
 831                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 832                if (idx == ARMV8_PMU_CYCLE_IDX)
 833                        reg = PMCCFILTR_EL0;
 834                else
 835                        /* PMEVTYPERn_EL0 */
 836                        reg = PMEVTYPER0_EL0 + idx;
 837        } else {
 838                BUG();
 839        }
 840
 841        if (!pmu_counter_idx_valid(vcpu, idx))
 842                return false;
 843
 844        if (p->is_write) {
 845                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
 846                __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
 847                kvm_vcpu_pmu_restore_guest(vcpu);
 848        } else {
 849                p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
 850        }
 851
 852        return true;
 853}
 854
 855static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 856                           const struct sys_reg_desc *r)
 857{
 858        u64 val, mask;
 859
 860        if (!kvm_arm_pmu_v3_ready(vcpu))
 861                return trap_raz_wi(vcpu, p, r);
 862
 863        if (pmu_access_el0_disabled(vcpu))
 864                return false;
 865
 866        mask = kvm_pmu_valid_counter_mask(vcpu);
 867        if (p->is_write) {
 868                val = p->regval & mask;
 869                if (r->Op2 & 0x1) {
 870                        /* accessing PMCNTENSET_EL0 */
 871                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
 872                        kvm_pmu_enable_counter_mask(vcpu, val);
 873                        kvm_vcpu_pmu_restore_guest(vcpu);
 874                } else {
 875                        /* accessing PMCNTENCLR_EL0 */
 876                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
 877                        kvm_pmu_disable_counter_mask(vcpu, val);
 878                }
 879        } else {
 880                p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
 881        }
 882
 883        return true;
 884}
 885
 886static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 887                           const struct sys_reg_desc *r)
 888{
 889        u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 890
 891        if (!kvm_arm_pmu_v3_ready(vcpu))
 892                return trap_raz_wi(vcpu, p, r);
 893
 894        if (!vcpu_mode_priv(vcpu)) {
 895                kvm_inject_undefined(vcpu);
 896                return false;
 897        }
 898
 899        if (p->is_write) {
 900                u64 val = p->regval & mask;
 901
 902                if (r->Op2 & 0x1)
 903                        /* accessing PMINTENSET_EL1 */
 904                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
 905                else
 906                        /* accessing PMINTENCLR_EL1 */
 907                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
 908        } else {
 909                p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
 910        }
 911
 912        return true;
 913}
 914
 915static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 916                         const struct sys_reg_desc *r)
 917{
 918        u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 919
 920        if (!kvm_arm_pmu_v3_ready(vcpu))
 921                return trap_raz_wi(vcpu, p, r);
 922
 923        if (pmu_access_el0_disabled(vcpu))
 924                return false;
 925
 926        if (p->is_write) {
 927                if (r->CRm & 0x2)
 928                        /* accessing PMOVSSET_EL0 */
 929                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
 930                else
 931                        /* accessing PMOVSCLR_EL0 */
 932                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
 933        } else {
 934                p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
 935        }
 936
 937        return true;
 938}
 939
 940static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 941                           const struct sys_reg_desc *r)
 942{
 943        u64 mask;
 944
 945        if (!kvm_arm_pmu_v3_ready(vcpu))
 946                return trap_raz_wi(vcpu, p, r);
 947
 948        if (!p->is_write)
 949                return read_from_write_only(vcpu, p, r);
 950
 951        if (pmu_write_swinc_el0_disabled(vcpu))
 952                return false;
 953
 954        mask = kvm_pmu_valid_counter_mask(vcpu);
 955        kvm_pmu_software_increment(vcpu, p->regval & mask);
 956        return true;
 957}
 958
 959static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 960                             const struct sys_reg_desc *r)
 961{
 962        if (!kvm_arm_pmu_v3_ready(vcpu))
 963                return trap_raz_wi(vcpu, p, r);
 964
 965        if (p->is_write) {
 966                if (!vcpu_mode_priv(vcpu)) {
 967                        kvm_inject_undefined(vcpu);
 968                        return false;
 969                }
 970
 971                __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
 972                               p->regval & ARMV8_PMU_USERENR_MASK;
 973        } else {
 974                p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
 975                            & ARMV8_PMU_USERENR_MASK;
 976        }
 977
 978        return true;
 979}
 980
 981#define reg_to_encoding(x)                                              \
 982        sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
 983                (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
 984
 985/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 986#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
 987        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
 988          trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },                \
 989        { SYS_DESC(SYS_DBGBCRn_EL1(n)),                                 \
 990          trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },                \
 991        { SYS_DESC(SYS_DBGWVRn_EL1(n)),                                 \
 992          trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },               \
 993        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
 994          trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
 995
 996/* Macro to expand the PMEVCNTRn_EL0 register */
 997#define PMU_PMEVCNTR_EL0(n)                                             \
 998        { SYS_DESC(SYS_PMEVCNTRn_EL0(n)),                                       \
 999          access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
1000
1001/* Macro to expand the PMEVTYPERn_EL0 register */
1002#define PMU_PMEVTYPER_EL0(n)                                            \
1003        { SYS_DESC(SYS_PMEVTYPERn_EL0(n)),                                      \
1004          access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
1005
1006static bool trap_ptrauth(struct kvm_vcpu *vcpu,
1007                         struct sys_reg_params *p,
1008                         const struct sys_reg_desc *rd)
1009{
1010        kvm_arm_vcpu_ptrauth_trap(vcpu);
1011
1012        /*
1013         * Return false for both cases as we never skip the trapped
1014         * instruction:
1015         *
1016         * - Either we re-execute the same key register access instruction
1017         *   after enabling ptrauth.
1018         * - Or an UNDEF is injected as ptrauth is not supported/enabled.
1019         */
1020        return false;
1021}
1022
1023static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1024                        const struct sys_reg_desc *rd)
1025{
1026        return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1027}
1028
1029#define __PTRAUTH_KEY(k)                                                \
1030        { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,           \
1031        .visibility = ptrauth_visibility}
1032
1033#define PTRAUTH_KEY(k)                                                  \
1034        __PTRAUTH_KEY(k ## KEYLO_EL1),                                  \
1035        __PTRAUTH_KEY(k ## KEYHI_EL1)
1036
1037static bool access_arch_timer(struct kvm_vcpu *vcpu,
1038                              struct sys_reg_params *p,
1039                              const struct sys_reg_desc *r)
1040{
1041        enum kvm_arch_timers tmr;
1042        enum kvm_arch_timer_regs treg;
1043        u64 reg = reg_to_encoding(r);
1044
1045        switch (reg) {
1046        case SYS_CNTP_TVAL_EL0:
1047        case SYS_AARCH32_CNTP_TVAL:
1048                tmr = TIMER_PTIMER;
1049                treg = TIMER_REG_TVAL;
1050                break;
1051        case SYS_CNTP_CTL_EL0:
1052        case SYS_AARCH32_CNTP_CTL:
1053                tmr = TIMER_PTIMER;
1054                treg = TIMER_REG_CTL;
1055                break;
1056        case SYS_CNTP_CVAL_EL0:
1057        case SYS_AARCH32_CNTP_CVAL:
1058                tmr = TIMER_PTIMER;
1059                treg = TIMER_REG_CVAL;
1060                break;
1061        default:
1062                BUG();
1063        }
1064
1065        if (p->is_write)
1066                kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1067        else
1068                p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1069
1070        return true;
1071}
1072
1073/* Read a sanitised cpufeature ID register by sys_reg_desc */
1074static u64 read_id_reg(const struct kvm_vcpu *vcpu,
1075                struct sys_reg_desc const *r, bool raz)
1076{
1077        u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
1078                         (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
1079        u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
1080
1081        if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
1082                val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
1083        } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
1084                val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
1085                         (0xfUL << ID_AA64ISAR1_API_SHIFT) |
1086                         (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
1087                         (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
1088        }
1089
1090        return val;
1091}
1092
1093/* cpufeature ID register access trap handlers */
1094
1095static bool __access_id_reg(struct kvm_vcpu *vcpu,
1096                            struct sys_reg_params *p,
1097                            const struct sys_reg_desc *r,
1098                            bool raz)
1099{
1100        if (p->is_write)
1101                return write_to_read_only(vcpu, p, r);
1102
1103        p->regval = read_id_reg(vcpu, r, raz);
1104        return true;
1105}
1106
1107static bool access_id_reg(struct kvm_vcpu *vcpu,
1108                          struct sys_reg_params *p,
1109                          const struct sys_reg_desc *r)
1110{
1111        return __access_id_reg(vcpu, p, r, false);
1112}
1113
1114static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
1115                              struct sys_reg_params *p,
1116                              const struct sys_reg_desc *r)
1117{
1118        return __access_id_reg(vcpu, p, r, true);
1119}
1120
1121static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
1122static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
1123static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
1124
1125/* Visibility overrides for SVE-specific control registers */
1126static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1127                                   const struct sys_reg_desc *rd)
1128{
1129        if (vcpu_has_sve(vcpu))
1130                return 0;
1131
1132        return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1133}
1134
1135/* Visibility overrides for SVE-specific ID registers */
1136static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
1137                                      const struct sys_reg_desc *rd)
1138{
1139        if (vcpu_has_sve(vcpu))
1140                return 0;
1141
1142        return REG_HIDDEN_USER;
1143}
1144
1145/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
1146static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
1147{
1148        if (!vcpu_has_sve(vcpu))
1149                return 0;
1150
1151        return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
1152}
1153
1154static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1155                                   struct sys_reg_params *p,
1156                                   const struct sys_reg_desc *rd)
1157{
1158        if (p->is_write)
1159                return write_to_read_only(vcpu, p, rd);
1160
1161        p->regval = guest_id_aa64zfr0_el1(vcpu);
1162        return true;
1163}
1164
1165static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1166                const struct sys_reg_desc *rd,
1167                const struct kvm_one_reg *reg, void __user *uaddr)
1168{
1169        u64 val;
1170
1171        if (WARN_ON(!vcpu_has_sve(vcpu)))
1172                return -ENOENT;
1173
1174        val = guest_id_aa64zfr0_el1(vcpu);
1175        return reg_to_user(uaddr, &val, reg->id);
1176}
1177
1178static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1179                const struct sys_reg_desc *rd,
1180                const struct kvm_one_reg *reg, void __user *uaddr)
1181{
1182        const u64 id = sys_reg_to_index(rd);
1183        int err;
1184        u64 val;
1185
1186        if (WARN_ON(!vcpu_has_sve(vcpu)))
1187                return -ENOENT;
1188
1189        err = reg_from_user(&val, uaddr, id);
1190        if (err)
1191                return err;
1192
1193        /* This is what we mean by invariant: you can't change it. */
1194        if (val != guest_id_aa64zfr0_el1(vcpu))
1195                return -EINVAL;
1196
1197        return 0;
1198}
1199
1200/*
1201 * cpufeature ID register user accessors
1202 *
1203 * For now, these registers are immutable for userspace, so no values
1204 * are stored, and for set_id_reg() we don't allow the effective value
1205 * to be changed.
1206 */
1207static int __get_id_reg(const struct kvm_vcpu *vcpu,
1208                        const struct sys_reg_desc *rd, void __user *uaddr,
1209                        bool raz)
1210{
1211        const u64 id = sys_reg_to_index(rd);
1212        const u64 val = read_id_reg(vcpu, rd, raz);
1213
1214        return reg_to_user(uaddr, &val, id);
1215}
1216
1217static int __set_id_reg(const struct kvm_vcpu *vcpu,
1218                        const struct sys_reg_desc *rd, void __user *uaddr,
1219                        bool raz)
1220{
1221        const u64 id = sys_reg_to_index(rd);
1222        int err;
1223        u64 val;
1224
1225        err = reg_from_user(&val, uaddr, id);
1226        if (err)
1227                return err;
1228
1229        /* This is what we mean by invariant: you can't change it. */
1230        if (val != read_id_reg(vcpu, rd, raz))
1231                return -EINVAL;
1232
1233        return 0;
1234}
1235
1236static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1237                      const struct kvm_one_reg *reg, void __user *uaddr)
1238{
1239        return __get_id_reg(vcpu, rd, uaddr, false);
1240}
1241
1242static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1243                      const struct kvm_one_reg *reg, void __user *uaddr)
1244{
1245        return __set_id_reg(vcpu, rd, uaddr, false);
1246}
1247
1248static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1249                          const struct kvm_one_reg *reg, void __user *uaddr)
1250{
1251        return __get_id_reg(vcpu, rd, uaddr, true);
1252}
1253
1254static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1255                          const struct kvm_one_reg *reg, void __user *uaddr)
1256{
1257        return __set_id_reg(vcpu, rd, uaddr, true);
1258}
1259
1260static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1261                       const struct sys_reg_desc *r)
1262{
1263        if (p->is_write)
1264                return write_to_read_only(vcpu, p, r);
1265
1266        p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
1267        return true;
1268}
1269
1270static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1271                         const struct sys_reg_desc *r)
1272{
1273        if (p->is_write)
1274                return write_to_read_only(vcpu, p, r);
1275
1276        p->regval = read_sysreg(clidr_el1);
1277        return true;
1278}
1279
1280static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1281                          const struct sys_reg_desc *r)
1282{
1283        if (p->is_write)
1284                vcpu_write_sys_reg(vcpu, p->regval, r->reg);
1285        else
1286                p->regval = vcpu_read_sys_reg(vcpu, r->reg);
1287        return true;
1288}
1289
1290static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1291                          const struct sys_reg_desc *r)
1292{
1293        u32 csselr;
1294
1295        if (p->is_write)
1296                return write_to_read_only(vcpu, p, r);
1297
1298        csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
1299        p->regval = get_ccsidr(csselr);
1300
1301        /*
1302         * Guests should not be doing cache operations by set/way at all, and
1303         * for this reason, we trap them and attempt to infer the intent, so
1304         * that we can flush the entire guest's address space at the appropriate
1305         * time.
1306         * To prevent this trapping from causing performance problems, let's
1307         * expose the geometry of all data and unified caches (which are
1308         * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
1309         * [If guests should attempt to infer aliasing properties from the
1310         * geometry (which is not permitted by the architecture), they would
1311         * only do so for virtually indexed caches.]
1312         */
1313        if (!(csselr & 1)) // data or unified cache
1314                p->regval &= ~GENMASK(27, 3);
1315        return true;
1316}
1317
1318/* sys_reg_desc initialiser for known cpufeature ID registers */
1319#define ID_SANITISED(name) {                    \
1320        SYS_DESC(SYS_##name),                   \
1321        .access = access_id_reg,                \
1322        .get_user = get_id_reg,                 \
1323        .set_user = set_id_reg,                 \
1324}
1325
1326/*
1327 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
1328 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
1329 * (1 <= crm < 8, 0 <= Op2 < 8).
1330 */
1331#define ID_UNALLOCATED(crm, op2) {                      \
1332        Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),     \
1333        .access = access_raz_id_reg,                    \
1334        .get_user = get_raz_id_reg,                     \
1335        .set_user = set_raz_id_reg,                     \
1336}
1337
1338/*
1339 * sys_reg_desc initialiser for known ID registers that we hide from guests.
1340 * For now, these are exposed just like unallocated ID regs: they appear
1341 * RAZ for the guest.
1342 */
1343#define ID_HIDDEN(name) {                       \
1344        SYS_DESC(SYS_##name),                   \
1345        .access = access_raz_id_reg,            \
1346        .get_user = get_raz_id_reg,             \
1347        .set_user = set_raz_id_reg,             \
1348}
1349
1350/*
1351 * Architected system registers.
1352 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
1353 *
1354 * Debug handling: We do trap most, if not all debug related system
1355 * registers. The implementation is good enough to ensure that a guest
1356 * can use these with minimal performance degradation. The drawback is
1357 * that we don't implement any of the external debug, none of the
1358 * OSlock protocol. This should be revisited if we ever encounter a
1359 * more demanding guest...
1360 */
1361static const struct sys_reg_desc sys_reg_descs[] = {
1362        { SYS_DESC(SYS_DC_ISW), access_dcsw },
1363        { SYS_DESC(SYS_DC_CSW), access_dcsw },
1364        { SYS_DESC(SYS_DC_CISW), access_dcsw },
1365
1366        DBG_BCR_BVR_WCR_WVR_EL1(0),
1367        DBG_BCR_BVR_WCR_WVR_EL1(1),
1368        { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1369        { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
1370        DBG_BCR_BVR_WCR_WVR_EL1(2),
1371        DBG_BCR_BVR_WCR_WVR_EL1(3),
1372        DBG_BCR_BVR_WCR_WVR_EL1(4),
1373        DBG_BCR_BVR_WCR_WVR_EL1(5),
1374        DBG_BCR_BVR_WCR_WVR_EL1(6),
1375        DBG_BCR_BVR_WCR_WVR_EL1(7),
1376        DBG_BCR_BVR_WCR_WVR_EL1(8),
1377        DBG_BCR_BVR_WCR_WVR_EL1(9),
1378        DBG_BCR_BVR_WCR_WVR_EL1(10),
1379        DBG_BCR_BVR_WCR_WVR_EL1(11),
1380        DBG_BCR_BVR_WCR_WVR_EL1(12),
1381        DBG_BCR_BVR_WCR_WVR_EL1(13),
1382        DBG_BCR_BVR_WCR_WVR_EL1(14),
1383        DBG_BCR_BVR_WCR_WVR_EL1(15),
1384
1385        { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
1386        { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
1387        { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
1388        { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
1389        { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
1390        { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
1391        { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
1392        { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
1393
1394        { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
1395        { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
1396        // DBGDTR[TR]X_EL0 share the same encoding
1397        { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
1398
1399        { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
1400
1401        { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
1402
1403        /*
1404         * ID regs: all ID_SANITISED() entries here must have corresponding
1405         * entries in arm64_ftr_regs[].
1406         */
1407
1408        /* AArch64 mappings of the AArch32 ID registers */
1409        /* CRm=1 */
1410        ID_SANITISED(ID_PFR0_EL1),
1411        ID_SANITISED(ID_PFR1_EL1),
1412        ID_SANITISED(ID_DFR0_EL1),
1413        ID_HIDDEN(ID_AFR0_EL1),
1414        ID_SANITISED(ID_MMFR0_EL1),
1415        ID_SANITISED(ID_MMFR1_EL1),
1416        ID_SANITISED(ID_MMFR2_EL1),
1417        ID_SANITISED(ID_MMFR3_EL1),
1418
1419        /* CRm=2 */
1420        ID_SANITISED(ID_ISAR0_EL1),
1421        ID_SANITISED(ID_ISAR1_EL1),
1422        ID_SANITISED(ID_ISAR2_EL1),
1423        ID_SANITISED(ID_ISAR3_EL1),
1424        ID_SANITISED(ID_ISAR4_EL1),
1425        ID_SANITISED(ID_ISAR5_EL1),
1426        ID_SANITISED(ID_MMFR4_EL1),
1427        ID_SANITISED(ID_ISAR6_EL1),
1428
1429        /* CRm=3 */
1430        ID_SANITISED(MVFR0_EL1),
1431        ID_SANITISED(MVFR1_EL1),
1432        ID_SANITISED(MVFR2_EL1),
1433        ID_UNALLOCATED(3,3),
1434        ID_UNALLOCATED(3,4),
1435        ID_UNALLOCATED(3,5),
1436        ID_UNALLOCATED(3,6),
1437        ID_UNALLOCATED(3,7),
1438
1439        /* AArch64 ID registers */
1440        /* CRm=4 */
1441        ID_SANITISED(ID_AA64PFR0_EL1),
1442        ID_SANITISED(ID_AA64PFR1_EL1),
1443        ID_UNALLOCATED(4,2),
1444        ID_UNALLOCATED(4,3),
1445        { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
1446        ID_UNALLOCATED(4,5),
1447        ID_UNALLOCATED(4,6),
1448        ID_UNALLOCATED(4,7),
1449
1450        /* CRm=5 */
1451        ID_SANITISED(ID_AA64DFR0_EL1),
1452        ID_SANITISED(ID_AA64DFR1_EL1),
1453        ID_UNALLOCATED(5,2),
1454        ID_UNALLOCATED(5,3),
1455        ID_HIDDEN(ID_AA64AFR0_EL1),
1456        ID_HIDDEN(ID_AA64AFR1_EL1),
1457        ID_UNALLOCATED(5,6),
1458        ID_UNALLOCATED(5,7),
1459
1460        /* CRm=6 */
1461        ID_SANITISED(ID_AA64ISAR0_EL1),
1462        ID_SANITISED(ID_AA64ISAR1_EL1),
1463        ID_UNALLOCATED(6,2),
1464        ID_UNALLOCATED(6,3),
1465        ID_UNALLOCATED(6,4),
1466        ID_UNALLOCATED(6,5),
1467        ID_UNALLOCATED(6,6),
1468        ID_UNALLOCATED(6,7),
1469
1470        /* CRm=7 */
1471        ID_SANITISED(ID_AA64MMFR0_EL1),
1472        ID_SANITISED(ID_AA64MMFR1_EL1),
1473        ID_SANITISED(ID_AA64MMFR2_EL1),
1474        ID_UNALLOCATED(7,3),
1475        ID_UNALLOCATED(7,4),
1476        ID_UNALLOCATED(7,5),
1477        ID_UNALLOCATED(7,6),
1478        ID_UNALLOCATED(7,7),
1479
1480        { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
1481        { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
1482        { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
1483        { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
1484        { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
1485        { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
1486
1487        PTRAUTH_KEY(APIA),
1488        PTRAUTH_KEY(APIB),
1489        PTRAUTH_KEY(APDA),
1490        PTRAUTH_KEY(APDB),
1491        PTRAUTH_KEY(APGA),
1492
1493        { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
1494        { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
1495        { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
1496
1497        { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
1498        { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
1499        { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
1500        { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
1501        { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
1502        { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
1503        { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
1504        { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
1505
1506        { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
1507        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
1508
1509        { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
1510        { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
1511
1512        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1513        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1514
1515        { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1516        { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1517        { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1518        { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1519        { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1520
1521        { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1522        { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1523
1524        { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1525        { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
1526        { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1527        { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
1528        { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1529        { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
1530        { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
1531        { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
1532        { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1533        { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
1534        { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1535        { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1536
1537        { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1538        { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
1539
1540        { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
1541
1542        { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
1543        { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
1544        { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
1545        { SYS_DESC(SYS_CTR_EL0), access_ctr },
1546
1547        { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
1548        { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
1549        { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
1550        { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
1551        { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
1552        { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
1553        { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
1554        { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
1555        { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
1556        { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
1557        { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
1558        /*
1559         * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
1560         * in 32bit mode. Here we choose to reset it as zero for consistency.
1561         */
1562        { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
1563        { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
1564
1565        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
1566        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
1567
1568        { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
1569        { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
1570        { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
1571
1572        /* PMEVCNTRn_EL0 */
1573        PMU_PMEVCNTR_EL0(0),
1574        PMU_PMEVCNTR_EL0(1),
1575        PMU_PMEVCNTR_EL0(2),
1576        PMU_PMEVCNTR_EL0(3),
1577        PMU_PMEVCNTR_EL0(4),
1578        PMU_PMEVCNTR_EL0(5),
1579        PMU_PMEVCNTR_EL0(6),
1580        PMU_PMEVCNTR_EL0(7),
1581        PMU_PMEVCNTR_EL0(8),
1582        PMU_PMEVCNTR_EL0(9),
1583        PMU_PMEVCNTR_EL0(10),
1584        PMU_PMEVCNTR_EL0(11),
1585        PMU_PMEVCNTR_EL0(12),
1586        PMU_PMEVCNTR_EL0(13),
1587        PMU_PMEVCNTR_EL0(14),
1588        PMU_PMEVCNTR_EL0(15),
1589        PMU_PMEVCNTR_EL0(16),
1590        PMU_PMEVCNTR_EL0(17),
1591        PMU_PMEVCNTR_EL0(18),
1592        PMU_PMEVCNTR_EL0(19),
1593        PMU_PMEVCNTR_EL0(20),
1594        PMU_PMEVCNTR_EL0(21),
1595        PMU_PMEVCNTR_EL0(22),
1596        PMU_PMEVCNTR_EL0(23),
1597        PMU_PMEVCNTR_EL0(24),
1598        PMU_PMEVCNTR_EL0(25),
1599        PMU_PMEVCNTR_EL0(26),
1600        PMU_PMEVCNTR_EL0(27),
1601        PMU_PMEVCNTR_EL0(28),
1602        PMU_PMEVCNTR_EL0(29),
1603        PMU_PMEVCNTR_EL0(30),
1604        /* PMEVTYPERn_EL0 */
1605        PMU_PMEVTYPER_EL0(0),
1606        PMU_PMEVTYPER_EL0(1),
1607        PMU_PMEVTYPER_EL0(2),
1608        PMU_PMEVTYPER_EL0(3),
1609        PMU_PMEVTYPER_EL0(4),
1610        PMU_PMEVTYPER_EL0(5),
1611        PMU_PMEVTYPER_EL0(6),
1612        PMU_PMEVTYPER_EL0(7),
1613        PMU_PMEVTYPER_EL0(8),
1614        PMU_PMEVTYPER_EL0(9),
1615        PMU_PMEVTYPER_EL0(10),
1616        PMU_PMEVTYPER_EL0(11),
1617        PMU_PMEVTYPER_EL0(12),
1618        PMU_PMEVTYPER_EL0(13),
1619        PMU_PMEVTYPER_EL0(14),
1620        PMU_PMEVTYPER_EL0(15),
1621        PMU_PMEVTYPER_EL0(16),
1622        PMU_PMEVTYPER_EL0(17),
1623        PMU_PMEVTYPER_EL0(18),
1624        PMU_PMEVTYPER_EL0(19),
1625        PMU_PMEVTYPER_EL0(20),
1626        PMU_PMEVTYPER_EL0(21),
1627        PMU_PMEVTYPER_EL0(22),
1628        PMU_PMEVTYPER_EL0(23),
1629        PMU_PMEVTYPER_EL0(24),
1630        PMU_PMEVTYPER_EL0(25),
1631        PMU_PMEVTYPER_EL0(26),
1632        PMU_PMEVTYPER_EL0(27),
1633        PMU_PMEVTYPER_EL0(28),
1634        PMU_PMEVTYPER_EL0(29),
1635        PMU_PMEVTYPER_EL0(30),
1636        /*
1637         * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
1638         * in 32bit mode. Here we choose to reset it as zero for consistency.
1639         */
1640        { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1641
1642        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1643        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1644        { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
1645};
1646
1647static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1648                        struct sys_reg_params *p,
1649                        const struct sys_reg_desc *r)
1650{
1651        if (p->is_write) {
1652                return ignore_write(vcpu, p);
1653        } else {
1654                u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1655                u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1656                u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1657
1658                p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1659                             (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1660                             (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1661                             | (6 << 16) | (el3 << 14) | (el3 << 12));
1662                return true;
1663        }
1664}
1665
1666static bool trap_debug32(struct kvm_vcpu *vcpu,
1667                         struct sys_reg_params *p,
1668                         const struct sys_reg_desc *r)
1669{
1670        if (p->is_write) {
1671                vcpu_cp14(vcpu, r->reg) = p->regval;
1672                vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1673        } else {
1674                p->regval = vcpu_cp14(vcpu, r->reg);
1675        }
1676
1677        return true;
1678}
1679
1680/* AArch32 debug register mappings
1681 *
1682 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1683 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1684 *
1685 * All control registers and watchpoint value registers are mapped to
1686 * the lower 32 bits of their AArch64 equivalents. We share the trap
1687 * handlers with the above AArch64 code which checks what mode the
1688 * system is in.
1689 */
1690
1691static bool trap_xvr(struct kvm_vcpu *vcpu,
1692                     struct sys_reg_params *p,
1693                     const struct sys_reg_desc *rd)
1694{
1695        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1696
1697        if (p->is_write) {
1698                u64 val = *dbg_reg;
1699
1700                val &= 0xffffffffUL;
1701                val |= p->regval << 32;
1702                *dbg_reg = val;
1703
1704                vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1705        } else {
1706                p->regval = *dbg_reg >> 32;
1707        }
1708
1709        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1710
1711        return true;
1712}
1713
1714#define DBG_BCR_BVR_WCR_WVR(n)                                          \
1715        /* DBGBVRn */                                                   \
1716        { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },     \
1717        /* DBGBCRn */                                                   \
1718        { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },     \
1719        /* DBGWVRn */                                                   \
1720        { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },     \
1721        /* DBGWCRn */                                                   \
1722        { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1723
1724#define DBGBXVR(n)                                                      \
1725        { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1726
1727/*
1728 * Trapped cp14 registers. We generally ignore most of the external
1729 * debug, on the principle that they don't really make sense to a
1730 * guest. Revisit this one day, would this principle change.
1731 */
1732static const struct sys_reg_desc cp14_regs[] = {
1733        /* DBGIDR */
1734        { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1735        /* DBGDTRRXext */
1736        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1737
1738        DBG_BCR_BVR_WCR_WVR(0),
1739        /* DBGDSCRint */
1740        { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1741        DBG_BCR_BVR_WCR_WVR(1),
1742        /* DBGDCCINT */
1743        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
1744        /* DBGDSCRext */
1745        { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
1746        DBG_BCR_BVR_WCR_WVR(2),
1747        /* DBGDTR[RT]Xint */
1748        { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1749        /* DBGDTR[RT]Xext */
1750        { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1751        DBG_BCR_BVR_WCR_WVR(3),
1752        DBG_BCR_BVR_WCR_WVR(4),
1753        DBG_BCR_BVR_WCR_WVR(5),
1754        /* DBGWFAR */
1755        { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1756        /* DBGOSECCR */
1757        { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1758        DBG_BCR_BVR_WCR_WVR(6),
1759        /* DBGVCR */
1760        { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
1761        DBG_BCR_BVR_WCR_WVR(7),
1762        DBG_BCR_BVR_WCR_WVR(8),
1763        DBG_BCR_BVR_WCR_WVR(9),
1764        DBG_BCR_BVR_WCR_WVR(10),
1765        DBG_BCR_BVR_WCR_WVR(11),
1766        DBG_BCR_BVR_WCR_WVR(12),
1767        DBG_BCR_BVR_WCR_WVR(13),
1768        DBG_BCR_BVR_WCR_WVR(14),
1769        DBG_BCR_BVR_WCR_WVR(15),
1770
1771        /* DBGDRAR (32bit) */
1772        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1773
1774        DBGBXVR(0),
1775        /* DBGOSLAR */
1776        { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1777        DBGBXVR(1),
1778        /* DBGOSLSR */
1779        { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1780        DBGBXVR(2),
1781        DBGBXVR(3),
1782        /* DBGOSDLR */
1783        { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1784        DBGBXVR(4),
1785        /* DBGPRCR */
1786        { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1787        DBGBXVR(5),
1788        DBGBXVR(6),
1789        DBGBXVR(7),
1790        DBGBXVR(8),
1791        DBGBXVR(9),
1792        DBGBXVR(10),
1793        DBGBXVR(11),
1794        DBGBXVR(12),
1795        DBGBXVR(13),
1796        DBGBXVR(14),
1797        DBGBXVR(15),
1798
1799        /* DBGDSAR (32bit) */
1800        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1801
1802        /* DBGDEVID2 */
1803        { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1804        /* DBGDEVID1 */
1805        { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1806        /* DBGDEVID */
1807        { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1808        /* DBGCLAIMSET */
1809        { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1810        /* DBGCLAIMCLR */
1811        { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1812        /* DBGAUTHSTATUS */
1813        { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1814};
1815
1816/* Trapped cp14 64bit registers */
1817static const struct sys_reg_desc cp14_64_regs[] = {
1818        /* DBGDRAR (64bit) */
1819        { Op1( 0), CRm( 1), .access = trap_raz_wi },
1820
1821        /* DBGDSAR (64bit) */
1822        { Op1( 0), CRm( 2), .access = trap_raz_wi },
1823};
1824
1825/* Macro to expand the PMEVCNTRn register */
1826#define PMU_PMEVCNTR(n)                                                 \
1827        /* PMEVCNTRn */                                                 \
1828        { Op1(0), CRn(0b1110),                                          \
1829          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
1830          access_pmu_evcntr }
1831
1832/* Macro to expand the PMEVTYPERn register */
1833#define PMU_PMEVTYPER(n)                                                \
1834        /* PMEVTYPERn */                                                \
1835        { Op1(0), CRn(0b1110),                                          \
1836          CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
1837          access_pmu_evtyper }
1838
1839/*
1840 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1841 * depending on the way they are accessed (as a 32bit or a 64bit
1842 * register).
1843 */
1844static const struct sys_reg_desc cp15_regs[] = {
1845        { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
1846        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
1847        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1848        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1849        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1850        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1851        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1852        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1853        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
1854        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
1855        { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
1856        { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
1857
1858        /*
1859         * DC{C,I,CI}SW operations:
1860         */
1861        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
1862        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
1863        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
1864
1865        /* PMU */
1866        { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
1867        { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
1868        { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
1869        { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
1870        { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
1871        { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
1872        { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
1873        { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
1874        { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
1875        { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
1876        { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
1877        { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
1878        { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
1879        { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
1880        { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
1881
1882        { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
1883        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
1884        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
1885        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
1886
1887        /* ICC_SRE */
1888        { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
1889
1890        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
1891
1892        /* Arch Tmers */
1893        { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
1894        { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
1895
1896        /* PMEVCNTRn */
1897        PMU_PMEVCNTR(0),
1898        PMU_PMEVCNTR(1),
1899        PMU_PMEVCNTR(2),
1900        PMU_PMEVCNTR(3),
1901        PMU_PMEVCNTR(4),
1902        PMU_PMEVCNTR(5),
1903        PMU_PMEVCNTR(6),
1904        PMU_PMEVCNTR(7),
1905        PMU_PMEVCNTR(8),
1906        PMU_PMEVCNTR(9),
1907        PMU_PMEVCNTR(10),
1908        PMU_PMEVCNTR(11),
1909        PMU_PMEVCNTR(12),
1910        PMU_PMEVCNTR(13),
1911        PMU_PMEVCNTR(14),
1912        PMU_PMEVCNTR(15),
1913        PMU_PMEVCNTR(16),
1914        PMU_PMEVCNTR(17),
1915        PMU_PMEVCNTR(18),
1916        PMU_PMEVCNTR(19),
1917        PMU_PMEVCNTR(20),
1918        PMU_PMEVCNTR(21),
1919        PMU_PMEVCNTR(22),
1920        PMU_PMEVCNTR(23),
1921        PMU_PMEVCNTR(24),
1922        PMU_PMEVCNTR(25),
1923        PMU_PMEVCNTR(26),
1924        PMU_PMEVCNTR(27),
1925        PMU_PMEVCNTR(28),
1926        PMU_PMEVCNTR(29),
1927        PMU_PMEVCNTR(30),
1928        /* PMEVTYPERn */
1929        PMU_PMEVTYPER(0),
1930        PMU_PMEVTYPER(1),
1931        PMU_PMEVTYPER(2),
1932        PMU_PMEVTYPER(3),
1933        PMU_PMEVTYPER(4),
1934        PMU_PMEVTYPER(5),
1935        PMU_PMEVTYPER(6),
1936        PMU_PMEVTYPER(7),
1937        PMU_PMEVTYPER(8),
1938        PMU_PMEVTYPER(9),
1939        PMU_PMEVTYPER(10),
1940        PMU_PMEVTYPER(11),
1941        PMU_PMEVTYPER(12),
1942        PMU_PMEVTYPER(13),
1943        PMU_PMEVTYPER(14),
1944        PMU_PMEVTYPER(15),
1945        PMU_PMEVTYPER(16),
1946        PMU_PMEVTYPER(17),
1947        PMU_PMEVTYPER(18),
1948        PMU_PMEVTYPER(19),
1949        PMU_PMEVTYPER(20),
1950        PMU_PMEVTYPER(21),
1951        PMU_PMEVTYPER(22),
1952        PMU_PMEVTYPER(23),
1953        PMU_PMEVTYPER(24),
1954        PMU_PMEVTYPER(25),
1955        PMU_PMEVTYPER(26),
1956        PMU_PMEVTYPER(27),
1957        PMU_PMEVTYPER(28),
1958        PMU_PMEVTYPER(29),
1959        PMU_PMEVTYPER(30),
1960        /* PMCCFILTR */
1961        { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
1962
1963        { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
1964        { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
1965        { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
1966};
1967
1968static const struct sys_reg_desc cp15_64_regs[] = {
1969        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1970        { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
1971        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
1972        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
1973        { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
1974        { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
1975        { SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
1976};
1977
1978/* Target specific emulation tables */
1979static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1980
1981void kvm_register_target_sys_reg_table(unsigned int target,
1982                                       struct kvm_sys_reg_target_table *table)
1983{
1984        target_tables[target] = table;
1985}
1986
1987/* Get specific register table for this target. */
1988static const struct sys_reg_desc *get_target_table(unsigned target,
1989                                                   bool mode_is_64,
1990                                                   size_t *num)
1991{
1992        struct kvm_sys_reg_target_table *table;
1993
1994        table = target_tables[target];
1995        if (mode_is_64) {
1996                *num = table->table64.num;
1997                return table->table64.table;
1998        } else {
1999                *num = table->table32.num;
2000                return table->table32.table;
2001        }
2002}
2003
2004static int match_sys_reg(const void *key, const void *elt)
2005{
2006        const unsigned long pval = (unsigned long)key;
2007        const struct sys_reg_desc *r = elt;
2008
2009        return pval - reg_to_encoding(r);
2010}
2011
2012static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
2013                                         const struct sys_reg_desc table[],
2014                                         unsigned int num)
2015{
2016        unsigned long pval = reg_to_encoding(params);
2017
2018        return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
2019}
2020
2021int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
2022{
2023        kvm_inject_undefined(vcpu);
2024        return 1;
2025}
2026
2027static void perform_access(struct kvm_vcpu *vcpu,
2028                           struct sys_reg_params *params,
2029                           const struct sys_reg_desc *r)
2030{
2031        trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2032
2033        /* Check for regs disabled by runtime config */
2034        if (sysreg_hidden_from_guest(vcpu, r)) {
2035                kvm_inject_undefined(vcpu);
2036                return;
2037        }
2038
2039        /*
2040         * Not having an accessor means that we have configured a trap
2041         * that we don't know how to handle. This certainly qualifies
2042         * as a gross bug that should be fixed right away.
2043         */
2044        BUG_ON(!r->access);
2045
2046        /* Skip instruction if instructed so */
2047        if (likely(r->access(vcpu, params, r)))
2048                kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
2049}
2050
2051/*
2052 * emulate_cp --  tries to match a sys_reg access in a handling table, and
2053 *                call the corresponding trap handler.
2054 *
2055 * @params: pointer to the descriptor of the access
2056 * @table: array of trap descriptors
2057 * @num: size of the trap descriptor array
2058 *
2059 * Return 0 if the access has been handled, and -1 if not.
2060 */
2061static int emulate_cp(struct kvm_vcpu *vcpu,
2062                      struct sys_reg_params *params,
2063                      const struct sys_reg_desc *table,
2064                      size_t num)
2065{
2066        const struct sys_reg_desc *r;
2067
2068        if (!table)
2069                return -1;      /* Not handled */
2070
2071        r = find_reg(params, table, num);
2072
2073        if (r) {
2074                perform_access(vcpu, params, r);
2075                return 0;
2076        }
2077
2078        /* Not handled */
2079        return -1;
2080}
2081
2082static void unhandled_cp_access(struct kvm_vcpu *vcpu,
2083                                struct sys_reg_params *params)
2084{
2085        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
2086        int cp = -1;
2087
2088        switch(hsr_ec) {
2089        case ESR_ELx_EC_CP15_32:
2090        case ESR_ELx_EC_CP15_64:
2091                cp = 15;
2092                break;
2093        case ESR_ELx_EC_CP14_MR:
2094        case ESR_ELx_EC_CP14_64:
2095                cp = 14;
2096                break;
2097        default:
2098                WARN_ON(1);
2099        }
2100
2101        print_sys_reg_msg(params,
2102                          "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2103                          cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2104        kvm_inject_undefined(vcpu);
2105}
2106
2107/**
2108 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
2109 * @vcpu: The VCPU pointer
2110 * @run:  The kvm_run struct
2111 */
2112static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
2113                            const struct sys_reg_desc *global,
2114                            size_t nr_global,
2115                            const struct sys_reg_desc *target_specific,
2116                            size_t nr_specific)
2117{
2118        struct sys_reg_params params;
2119        u32 hsr = kvm_vcpu_get_hsr(vcpu);
2120        int Rt = kvm_vcpu_sys_get_rt(vcpu);
2121        int Rt2 = (hsr >> 10) & 0x1f;
2122
2123        params.is_aarch32 = true;
2124        params.is_32bit = false;
2125        params.CRm = (hsr >> 1) & 0xf;
2126        params.is_write = ((hsr & 1) == 0);
2127
2128        params.Op0 = 0;
2129        params.Op1 = (hsr >> 16) & 0xf;
2130        params.Op2 = 0;
2131        params.CRn = 0;
2132
2133        /*
2134         * Make a 64-bit value out of Rt and Rt2. As we use the same trap
2135         * backends between AArch32 and AArch64, we get away with it.
2136         */
2137        if (params.is_write) {
2138                params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
2139                params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
2140        }
2141
2142        /*
2143         * Try to emulate the coprocessor access using the target
2144         * specific table first, and using the global table afterwards.
2145         * If either of the tables contains a handler, handle the
2146         * potential register operation in the case of a read and return
2147         * with success.
2148         */
2149        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
2150            !emulate_cp(vcpu, &params, global, nr_global)) {
2151                /* Split up the value between registers for the read side */
2152                if (!params.is_write) {
2153                        vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
2154                        vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
2155                }
2156
2157                return 1;
2158        }
2159
2160        unhandled_cp_access(vcpu, &params);
2161        return 1;
2162}
2163
2164/**
2165 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
2166 * @vcpu: The VCPU pointer
2167 * @run:  The kvm_run struct
2168 */
2169static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2170                            const struct sys_reg_desc *global,
2171                            size_t nr_global,
2172                            const struct sys_reg_desc *target_specific,
2173                            size_t nr_specific)
2174{
2175        struct sys_reg_params params;
2176        u32 hsr = kvm_vcpu_get_hsr(vcpu);
2177        int Rt  = kvm_vcpu_sys_get_rt(vcpu);
2178
2179        params.is_aarch32 = true;
2180        params.is_32bit = true;
2181        params.CRm = (hsr >> 1) & 0xf;
2182        params.regval = vcpu_get_reg(vcpu, Rt);
2183        params.is_write = ((hsr & 1) == 0);
2184        params.CRn = (hsr >> 10) & 0xf;
2185        params.Op0 = 0;
2186        params.Op1 = (hsr >> 14) & 0x7;
2187        params.Op2 = (hsr >> 17) & 0x7;
2188
2189        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
2190            !emulate_cp(vcpu, &params, global, nr_global)) {
2191                if (!params.is_write)
2192                        vcpu_set_reg(vcpu, Rt, params.regval);
2193                return 1;
2194        }
2195
2196        unhandled_cp_access(vcpu, &params);
2197        return 1;
2198}
2199
2200int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2201{
2202        const struct sys_reg_desc *target_specific;
2203        size_t num;
2204
2205        target_specific = get_target_table(vcpu->arch.target, false, &num);
2206        return kvm_handle_cp_64(vcpu,
2207                                cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
2208                                target_specific, num);
2209}
2210
2211int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2212{
2213        const struct sys_reg_desc *target_specific;
2214        size_t num;
2215
2216        target_specific = get_target_table(vcpu->arch.target, false, &num);
2217        return kvm_handle_cp_32(vcpu,
2218                                cp15_regs, ARRAY_SIZE(cp15_regs),
2219                                target_specific, num);
2220}
2221
2222int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2223{
2224        return kvm_handle_cp_64(vcpu,
2225                                cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
2226                                NULL, 0);
2227}
2228
2229int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2230{
2231        return kvm_handle_cp_32(vcpu,
2232                                cp14_regs, ARRAY_SIZE(cp14_regs),
2233                                NULL, 0);
2234}
2235
2236static bool is_imp_def_sys_reg(struct sys_reg_params *params)
2237{
2238        // See ARM DDI 0487E.a, section D12.3.2
2239        return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
2240}
2241
2242static int emulate_sys_reg(struct kvm_vcpu *vcpu,
2243                           struct sys_reg_params *params)
2244{
2245        size_t num;
2246        const struct sys_reg_desc *table, *r;
2247
2248        table = get_target_table(vcpu->arch.target, true, &num);
2249
2250        /* Search target-specific then generic table. */
2251        r = find_reg(params, table, num);
2252        if (!r)
2253                r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2254
2255        if (likely(r)) {
2256                perform_access(vcpu, params, r);
2257        } else if (is_imp_def_sys_reg(params)) {
2258                kvm_inject_undefined(vcpu);
2259        } else {
2260                print_sys_reg_msg(params,
2261                                  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
2262                                  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2263                kvm_inject_undefined(vcpu);
2264        }
2265        return 1;
2266}
2267
2268static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
2269                                const struct sys_reg_desc *table, size_t num,
2270                                unsigned long *bmap)
2271{
2272        unsigned long i;
2273
2274        for (i = 0; i < num; i++)
2275                if (table[i].reset) {
2276                        int reg = table[i].reg;
2277
2278                        table[i].reset(vcpu, &table[i]);
2279                        if (reg > 0 && reg < NR_SYS_REGS)
2280                                set_bit(reg, bmap);
2281                }
2282}
2283
2284/**
2285 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2286 * @vcpu: The VCPU pointer
2287 * @run:  The kvm_run struct
2288 */
2289int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
2290{
2291        struct sys_reg_params params;
2292        unsigned long esr = kvm_vcpu_get_hsr(vcpu);
2293        int Rt = kvm_vcpu_sys_get_rt(vcpu);
2294        int ret;
2295
2296        trace_kvm_handle_sys_reg(esr);
2297
2298        params.is_aarch32 = false;
2299        params.is_32bit = false;
2300        params.Op0 = (esr >> 20) & 3;
2301        params.Op1 = (esr >> 14) & 0x7;
2302        params.CRn = (esr >> 10) & 0xf;
2303        params.CRm = (esr >> 1) & 0xf;
2304        params.Op2 = (esr >> 17) & 0x7;
2305        params.regval = vcpu_get_reg(vcpu, Rt);
2306        params.is_write = !(esr & 1);
2307
2308        ret = emulate_sys_reg(vcpu, &params);
2309
2310        if (!params.is_write)
2311                vcpu_set_reg(vcpu, Rt, params.regval);
2312        return ret;
2313}
2314
2315/******************************************************************************
2316 * Userspace API
2317 *****************************************************************************/
2318
2319static bool index_to_params(u64 id, struct sys_reg_params *params)
2320{
2321        switch (id & KVM_REG_SIZE_MASK) {
2322        case KVM_REG_SIZE_U64:
2323                /* Any unused index bits means it's not valid. */
2324                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2325                              | KVM_REG_ARM_COPROC_MASK
2326                              | KVM_REG_ARM64_SYSREG_OP0_MASK
2327                              | KVM_REG_ARM64_SYSREG_OP1_MASK
2328                              | KVM_REG_ARM64_SYSREG_CRN_MASK
2329                              | KVM_REG_ARM64_SYSREG_CRM_MASK
2330                              | KVM_REG_ARM64_SYSREG_OP2_MASK))
2331                        return false;
2332                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2333                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2334                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2335                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2336                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2337                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2338                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2339                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2340                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2341                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2342                return true;
2343        default:
2344                return false;
2345        }
2346}
2347
2348const struct sys_reg_desc *find_reg_by_id(u64 id,
2349                                          struct sys_reg_params *params,
2350                                          const struct sys_reg_desc table[],
2351                                          unsigned int num)
2352{
2353        if (!index_to_params(id, params))
2354                return NULL;
2355
2356        return find_reg(params, table, num);
2357}
2358
2359/* Decode an index value, and find the sys_reg_desc entry. */
2360static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
2361                                                    u64 id)
2362{
2363        size_t num;
2364        const struct sys_reg_desc *table, *r;
2365        struct sys_reg_params params;
2366
2367        /* We only do sys_reg for now. */
2368        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2369                return NULL;
2370
2371        if (!index_to_params(id, &params))
2372                return NULL;
2373
2374        table = get_target_table(vcpu->arch.target, true, &num);
2375        r = find_reg(&params, table, num);
2376        if (!r)
2377                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2378
2379        /* Not saved in the sys_reg array and not otherwise accessible? */
2380        if (r && !(r->reg || r->get_user))
2381                r = NULL;
2382
2383        return r;
2384}
2385
2386/*
2387 * These are the invariant sys_reg registers: we let the guest see the
2388 * host versions of these, so they're part of the guest state.
2389 *
2390 * A future CPU may provide a mechanism to present different values to
2391 * the guest, or a future kvm may trap them.
2392 */
2393
2394#define FUNCTION_INVARIANT(reg)                                         \
2395        static void get_##reg(struct kvm_vcpu *v,                       \
2396                              const struct sys_reg_desc *r)             \
2397        {                                                               \
2398                ((struct sys_reg_desc *)r)->val = read_sysreg(reg);     \
2399        }
2400
2401FUNCTION_INVARIANT(midr_el1)
2402FUNCTION_INVARIANT(revidr_el1)
2403FUNCTION_INVARIANT(clidr_el1)
2404FUNCTION_INVARIANT(aidr_el1)
2405
2406static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
2407{
2408        ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
2409}
2410
2411/* ->val is filled in by kvm_sys_reg_table_init() */
2412static struct sys_reg_desc invariant_sys_regs[] = {
2413        { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2414        { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
2415        { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2416        { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2417        { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
2418};
2419
2420static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2421{
2422        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2423                return -EFAULT;
2424        return 0;
2425}
2426
2427static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2428{
2429        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2430                return -EFAULT;
2431        return 0;
2432}
2433
2434static int get_invariant_sys_reg(u64 id, void __user *uaddr)
2435{
2436        struct sys_reg_params params;
2437        const struct sys_reg_desc *r;
2438
2439        r = find_reg_by_id(id, &params, invariant_sys_regs,
2440                           ARRAY_SIZE(invariant_sys_regs));
2441        if (!r)
2442                return -ENOENT;
2443
2444        return reg_to_user(uaddr, &r->val, id);
2445}
2446
2447static int set_invariant_sys_reg(u64 id, void __user *uaddr)
2448{
2449        struct sys_reg_params params;
2450        const struct sys_reg_desc *r;
2451        int err;
2452        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
2453
2454        r = find_reg_by_id(id, &params, invariant_sys_regs,
2455                           ARRAY_SIZE(invariant_sys_regs));
2456        if (!r)
2457                return -ENOENT;
2458
2459        err = reg_from_user(&val, uaddr, id);
2460        if (err)
2461                return err;
2462
2463        /* This is what we mean by invariant: you can't change it. */
2464        if (r->val != val)
2465                return -EINVAL;
2466
2467        return 0;
2468}
2469
2470static bool is_valid_cache(u32 val)
2471{
2472        u32 level, ctype;
2473
2474        if (val >= CSSELR_MAX)
2475                return false;
2476
2477        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
2478        level = (val >> 1);
2479        ctype = (cache_levels >> (level * 3)) & 7;
2480
2481        switch (ctype) {
2482        case 0: /* No cache */
2483                return false;
2484        case 1: /* Instruction cache only */
2485                return (val & 1);
2486        case 2: /* Data cache only */
2487        case 4: /* Unified cache */
2488                return !(val & 1);
2489        case 3: /* Separate instruction and data caches */
2490                return true;
2491        default: /* Reserved: we can't know instruction or data. */
2492                return false;
2493        }
2494}
2495
2496static int demux_c15_get(u64 id, void __user *uaddr)
2497{
2498        u32 val;
2499        u32 __user *uval = uaddr;
2500
2501        /* Fail if we have unknown bits set. */
2502        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2503                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2504                return -ENOENT;
2505
2506        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2507        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2508                if (KVM_REG_SIZE(id) != 4)
2509                        return -ENOENT;
2510                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2511                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2512                if (!is_valid_cache(val))
2513                        return -ENOENT;
2514
2515                return put_user(get_ccsidr(val), uval);
2516        default:
2517                return -ENOENT;
2518        }
2519}
2520
2521static int demux_c15_set(u64 id, void __user *uaddr)
2522{
2523        u32 val, newval;
2524        u32 __user *uval = uaddr;
2525
2526        /* Fail if we have unknown bits set. */
2527        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2528                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2529                return -ENOENT;
2530
2531        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2532        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2533                if (KVM_REG_SIZE(id) != 4)
2534                        return -ENOENT;
2535                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2536                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2537                if (!is_valid_cache(val))
2538                        return -ENOENT;
2539
2540                if (get_user(newval, uval))
2541                        return -EFAULT;
2542
2543                /* This is also invariant: you can't change it. */
2544                if (newval != get_ccsidr(val))
2545                        return -EINVAL;
2546                return 0;
2547        default:
2548                return -ENOENT;
2549        }
2550}
2551
2552int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2553{
2554        const struct sys_reg_desc *r;
2555        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2556
2557        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2558                return demux_c15_get(reg->id, uaddr);
2559
2560        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2561                return -ENOENT;
2562
2563        r = index_to_sys_reg_desc(vcpu, reg->id);
2564        if (!r)
2565                return get_invariant_sys_reg(reg->id, uaddr);
2566
2567        /* Check for regs disabled by runtime config */
2568        if (sysreg_hidden_from_user(vcpu, r))
2569                return -ENOENT;
2570
2571        if (r->get_user)
2572                return (r->get_user)(vcpu, r, reg, uaddr);
2573
2574        return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
2575}
2576
2577int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2578{
2579        const struct sys_reg_desc *r;
2580        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2581
2582        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2583                return demux_c15_set(reg->id, uaddr);
2584
2585        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2586                return -ENOENT;
2587
2588        r = index_to_sys_reg_desc(vcpu, reg->id);
2589        if (!r)
2590                return set_invariant_sys_reg(reg->id, uaddr);
2591
2592        /* Check for regs disabled by runtime config */
2593        if (sysreg_hidden_from_user(vcpu, r))
2594                return -ENOENT;
2595
2596        if (r->set_user)
2597                return (r->set_user)(vcpu, r, reg, uaddr);
2598
2599        return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2600}
2601
2602static unsigned int num_demux_regs(void)
2603{
2604        unsigned int i, count = 0;
2605
2606        for (i = 0; i < CSSELR_MAX; i++)
2607                if (is_valid_cache(i))
2608                        count++;
2609
2610        return count;
2611}
2612
2613static int write_demux_regids(u64 __user *uindices)
2614{
2615        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2616        unsigned int i;
2617
2618        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2619        for (i = 0; i < CSSELR_MAX; i++) {
2620                if (!is_valid_cache(i))
2621                        continue;
2622                if (put_user(val | i, uindices))
2623                        return -EFAULT;
2624                uindices++;
2625        }
2626        return 0;
2627}
2628
2629static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2630{
2631        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2632                KVM_REG_ARM64_SYSREG |
2633                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2634                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2635                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2636                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2637                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2638}
2639
2640static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2641{
2642        if (!*uind)
2643                return true;
2644
2645        if (put_user(sys_reg_to_index(reg), *uind))
2646                return false;
2647
2648        (*uind)++;
2649        return true;
2650}
2651
2652static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
2653                            const struct sys_reg_desc *rd,
2654                            u64 __user **uind,
2655                            unsigned int *total)
2656{
2657        /*
2658         * Ignore registers we trap but don't save,
2659         * and for which no custom user accessor is provided.
2660         */
2661        if (!(rd->reg || rd->get_user))
2662                return 0;
2663
2664        if (sysreg_hidden_from_user(vcpu, rd))
2665                return 0;
2666
2667        if (!copy_reg_to_user(rd, uind))
2668                return -EFAULT;
2669
2670        (*total)++;
2671        return 0;
2672}
2673
2674/* Assumed ordered tables, see kvm_sys_reg_table_init. */
2675static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2676{
2677        const struct sys_reg_desc *i1, *i2, *end1, *end2;
2678        unsigned int total = 0;
2679        size_t num;
2680        int err;
2681
2682        /* We check for duplicates here, to allow arch-specific overrides. */
2683        i1 = get_target_table(vcpu->arch.target, true, &num);
2684        end1 = i1 + num;
2685        i2 = sys_reg_descs;
2686        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2687
2688        BUG_ON(i1 == end1 || i2 == end2);
2689
2690        /* Walk carefully, as both tables may refer to the same register. */
2691        while (i1 || i2) {
2692                int cmp = cmp_sys_reg(i1, i2);
2693                /* target-specific overrides generic entry. */
2694                if (cmp <= 0)
2695                        err = walk_one_sys_reg(vcpu, i1, &uind, &total);
2696                else
2697                        err = walk_one_sys_reg(vcpu, i2, &uind, &total);
2698
2699                if (err)
2700                        return err;
2701
2702                if (cmp <= 0 && ++i1 == end1)
2703                        i1 = NULL;
2704                if (cmp >= 0 && ++i2 == end2)
2705                        i2 = NULL;
2706        }
2707        return total;
2708}
2709
2710unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2711{
2712        return ARRAY_SIZE(invariant_sys_regs)
2713                + num_demux_regs()
2714                + walk_sys_regs(vcpu, (u64 __user *)NULL);
2715}
2716
2717int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2718{
2719        unsigned int i;
2720        int err;
2721
2722        /* Then give them all the invariant registers' indices. */
2723        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2724                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2725                        return -EFAULT;
2726                uindices++;
2727        }
2728
2729        err = walk_sys_regs(vcpu, uindices);
2730        if (err < 0)
2731                return err;
2732        uindices += err;
2733
2734        return write_demux_regids(uindices);
2735}
2736
2737static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2738{
2739        unsigned int i;
2740
2741        for (i = 1; i < n; i++) {
2742                if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2743                        kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2744                        return 1;
2745                }
2746        }
2747
2748        return 0;
2749}
2750
2751void kvm_sys_reg_table_init(void)
2752{
2753        unsigned int i;
2754        struct sys_reg_desc clidr;
2755
2756        /* Make sure tables are unique and in order. */
2757        BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
2758        BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
2759        BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
2760        BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
2761        BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
2762        BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
2763
2764        /* We abuse the reset function to overwrite the table itself. */
2765        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2766                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2767
2768        /*
2769         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
2770         *
2771         *   If software reads the Cache Type fields from Ctype1
2772         *   upwards, once it has seen a value of 0b000, no caches
2773         *   exist at further-out levels of the hierarchy. So, for
2774         *   example, if Ctype3 is the first Cache Type field with a
2775         *   value of 0b000, the values of Ctype4 to Ctype7 must be
2776         *   ignored.
2777         */
2778        get_clidr_el1(NULL, &clidr); /* Ugly... */
2779        cache_levels = clidr.val;
2780        for (i = 0; i < 7; i++)
2781                if (((cache_levels >> (i*3)) & 7) == 0)
2782                        break;
2783        /* Clear all higher bits. */
2784        cache_levels &= (1 << (i*3))-1;
2785}
2786
2787/**
2788 * kvm_reset_sys_regs - sets system registers to reset value
2789 * @vcpu: The VCPU pointer
2790 *
2791 * This function finds the right table above and sets the registers on the
2792 * virtual CPU struct to their architecturally defined reset values.
2793 */
2794void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2795{
2796        size_t num;
2797        const struct sys_reg_desc *table;
2798        DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
2799
2800        /* Generic chip reset first (so target could override). */
2801        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
2802
2803        table = get_target_table(vcpu->arch.target, true, &num);
2804        reset_sys_reg_descs(vcpu, table, num, bmap);
2805
2806        for (num = 1; num < NR_SYS_REGS; num++) {
2807                if (WARN(!test_bit(num, bmap),
2808                         "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2809                        break;
2810        }
2811}
2812