linux/arch/arm64/kvm/sys_regs.c
   1/*
   2 * Copyright (C) 2012,2013 - ARM Ltd
   3 * Author: Marc Zyngier <marc.zyngier@arm.com>
   4 *
   5 * Derived from arch/arm/kvm/coproc.c:
   6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   7 * Authors: Rusty Russell <rusty@rustcorp.com.au>
   8 *          Christoffer Dall <c.dall@virtualopensystems.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License, version 2, as
  12 * published by the Free Software Foundation.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 * GNU General Public License for more details.
  18 *
  19 * You should have received a copy of the GNU General Public License
  20 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  21 */
  22
  23#include <linux/bsearch.h>
  24#include <linux/kvm_host.h>
  25#include <linux/mm.h>
  26#include <linux/uaccess.h>
  27
  28#include <asm/cacheflush.h>
  29#include <asm/cputype.h>
  30#include <asm/debug-monitors.h>
  31#include <asm/esr.h>
  32#include <asm/kvm_arm.h>
  33#include <asm/kvm_asm.h>
  34#include <asm/kvm_coproc.h>
  35#include <asm/kvm_emulate.h>
  36#include <asm/kvm_host.h>
  37#include <asm/kvm_mmu.h>
  38#include <asm/perf_event.h>
  39
  40#include <trace/events/kvm.h>
  41
  42#include "sys_regs.h"
  43
  44#include "trace.h"
  45
  46/*
   47 * All of this file is extremely similar to the ARM coproc.c, but the
  48 * types are different. My gut feeling is that it should be pretty
  49 * easy to merge, but that would be an ABI breakage -- again. VFP
  50 * would also need to be abstracted.
  51 *
  52 * For AArch32, we only take care of what is being trapped. Anything
  53 * that has to do with init and userspace access has to go via the
  54 * 64bit interface.
  55 */
  56
  57/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
  58static u32 cache_levels;
  59
  60/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
  61#define CSSELR_MAX 12
  62
  63/* Which cache CCSIDR represents depends on CSSELR value. */
  64static u32 get_ccsidr(u32 csselr)
  65{
  66        u32 ccsidr;
  67
   68        /* Make sure no one else changes CSSELR during this! */
  69        local_irq_disable();
  70        /* Put value into CSSELR */
  71        asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
  72        isb();
  73        /* Read result out of CCSIDR */
  74        asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
  75        local_irq_enable();
  76
  77        return ccsidr;
  78}
  79
  80/*
  81 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
  82 */
  83static bool access_dcsw(struct kvm_vcpu *vcpu,
  84                        struct sys_reg_params *p,
  85                        const struct sys_reg_desc *r)
  86{
  87        if (!p->is_write)
  88                return read_from_write_only(vcpu, p);
  89
  90        kvm_set_way_flush(vcpu);
  91        return true;
  92}
  93
  94/*
  95 * Generic accessor for VM registers. Only called as long as HCR_TVM
  96 * is set. If the guest enables the MMU, we stop trapping the VM
  97 * sys_regs and leave it in complete control of the caches.
  98 */
  99static bool access_vm_reg(struct kvm_vcpu *vcpu,
 100                          struct sys_reg_params *p,
 101                          const struct sys_reg_desc *r)
 102{
 103        bool was_enabled = vcpu_has_cache_enabled(vcpu);
 104
 105        BUG_ON(!p->is_write);
 106
 107        if (!p->is_aarch32) {
 108                vcpu_sys_reg(vcpu, r->reg) = p->regval;
 109        } else {
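                     /* A 64bit access writes both halves; a 32bit access only the low word. */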
 110                if (!p->is_32bit)
 111                        vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
 112                vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
 113        }
 114
 115        kvm_toggle_cache(vcpu, was_enabled);
 116        return true;
 117}
 118
 119/*
 120 * Trap handler for the GICv3 SGI generation system register.
 121 * Forward the request to the VGIC emulation.
 122 * The cp15_64 code makes sure this automatically works
 123 * for both AArch64 and AArch32 accesses.
 124 */
 125static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 126                           struct sys_reg_params *p,
 127                           const struct sys_reg_desc *r)
 128{
 129        if (!p->is_write)
 130                return read_from_write_only(vcpu, p);
 131
 132        vgic_v3_dispatch_sgi(vcpu, p->regval);
 133
 134        return true;
 135}
 136
 137static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 138                        struct sys_reg_params *p,
 139                        const struct sys_reg_desc *r)
 140{
 141        if (p->is_write)
 142                return ignore_write(vcpu, p);
 143        else
 144                return read_zero(vcpu, p);
 145}
 146
 147static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
 148                           struct sys_reg_params *p,
 149                           const struct sys_reg_desc *r)
 150{
 151        if (p->is_write) {
 152                return ignore_write(vcpu, p);
 153        } else {
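                     /* OSLSR_EL1: OSLM[1] set (OS lock implemented), OSLK clear (lock not held). */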
 154                p->regval = (1 << 3);
 155                return true;
 156        }
 157}
 158
 159static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 160                                   struct sys_reg_params *p,
 161                                   const struct sys_reg_desc *r)
 162{
 163        if (p->is_write) {
 164                return ignore_write(vcpu, p);
 165        } else {
 166                u32 val;
 167                asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
 168                p->regval = val;
 169                return true;
 170        }
 171}
 172
 173/*
 174 * We want to avoid world-switching all the DBG registers all the
 175 * time:
 176 * 
 177 * - If we've touched any debug register, it is likely that we're
 178 *   going to touch more of them. It then makes sense to disable the
 179 *   traps and start doing the save/restore dance
 180 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 181 *   then mandatory to save/restore the registers, as the guest
 182 *   depends on them.
 183 * 
 184 * For this, we use a DIRTY bit, indicating the guest has modified the
  185 * debug registers, used as follows:
 186 *
 187 * On guest entry:
 188 * - If the dirty bit is set (because we're coming back from trapping),
 189 *   disable the traps, save host registers, restore guest registers.
 190 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 191 *   set the dirty bit, disable the traps, save host registers,
 192 *   restore guest registers.
 193 * - Otherwise, enable the traps
 194 *
 195 * On guest exit:
 196 * - If the dirty bit is set, save guest registers, restore host
  197 *   registers and clear the dirty bit. This ensures that the host can
 198 *   now use the debug registers.
 199 */
 200static bool trap_debug_regs(struct kvm_vcpu *vcpu,
 201                            struct sys_reg_params *p,
 202                            const struct sys_reg_desc *r)
 203{
 204        if (p->is_write) {
 205                vcpu_sys_reg(vcpu, r->reg) = p->regval;
 206                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 207        } else {
 208                p->regval = vcpu_sys_reg(vcpu, r->reg);
 209        }
 210
 211        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
 212
 213        return true;
 214}
 215
 216/*
 217 * reg_to_dbg/dbg_to_reg
 218 *
  219 * A 32 bit write to a debug register leaves the top bits alone
 220 * A 32 bit read from a debug register only returns the bottom bits
 221 *
 222 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 223 * hyp.S code switches between host and guest values in future.
 224 */
 225static void reg_to_dbg(struct kvm_vcpu *vcpu,
 226                       struct sys_reg_params *p,
 227                       u64 *dbg_reg)
 228{
 229        u64 val = p->regval;
 230
 231        if (p->is_32bit) {
 232                val &= 0xffffffffUL;
 233                val |= ((*dbg_reg >> 32) << 32);
 234        }
 235
 236        *dbg_reg = val;
 237        vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 238}
 239
 240static void dbg_to_reg(struct kvm_vcpu *vcpu,
 241                       struct sys_reg_params *p,
 242                       u64 *dbg_reg)
 243{
 244        p->regval = *dbg_reg;
 245        if (p->is_32bit)
 246                p->regval &= 0xffffffffUL;
 247}
 248
 249static bool trap_bvr(struct kvm_vcpu *vcpu,
 250                     struct sys_reg_params *p,
 251                     const struct sys_reg_desc *rd)
 252{
 253        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 254
 255        if (p->is_write)
 256                reg_to_dbg(vcpu, p, dbg_reg);
 257        else
 258                dbg_to_reg(vcpu, p, dbg_reg);
 259
 260        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 261
 262        return true;
 263}
 264
 265static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 266                const struct kvm_one_reg *reg, void __user *uaddr)
 267{
 268        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 269
 270        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 271                return -EFAULT;
 272        return 0;
 273}
 274
 275static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 276        const struct kvm_one_reg *reg, void __user *uaddr)
 277{
 278        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 279
 280        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 281                return -EFAULT;
 282        return 0;
 283}
 284
 285static void reset_bvr(struct kvm_vcpu *vcpu,
 286                      const struct sys_reg_desc *rd)
 287{
 288        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
 289}
 290
 291static bool trap_bcr(struct kvm_vcpu *vcpu,
 292                     struct sys_reg_params *p,
 293                     const struct sys_reg_desc *rd)
 294{
 295        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 296
 297        if (p->is_write)
 298                reg_to_dbg(vcpu, p, dbg_reg);
 299        else
 300                dbg_to_reg(vcpu, p, dbg_reg);
 301
 302        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 303
 304        return true;
 305}
 306
 307static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 308                const struct kvm_one_reg *reg, void __user *uaddr)
 309{
 310        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 311
 312        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 313                return -EFAULT;
 314
 315        return 0;
 316}
 317
 318static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 319        const struct kvm_one_reg *reg, void __user *uaddr)
 320{
 321        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 322
 323        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 324                return -EFAULT;
 325        return 0;
 326}
 327
 328static void reset_bcr(struct kvm_vcpu *vcpu,
 329                      const struct sys_reg_desc *rd)
 330{
 331        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
 332}
 333
 334static bool trap_wvr(struct kvm_vcpu *vcpu,
 335                     struct sys_reg_params *p,
 336                     const struct sys_reg_desc *rd)
 337{
 338        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 339
 340        if (p->is_write)
 341                reg_to_dbg(vcpu, p, dbg_reg);
 342        else
 343                dbg_to_reg(vcpu, p, dbg_reg);
 344
 345        trace_trap_reg(__func__, rd->reg, p->is_write,
 346                vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
 347
 348        return true;
 349}
 350
 351static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 352                const struct kvm_one_reg *reg, void __user *uaddr)
 353{
 354        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 355
 356        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 357                return -EFAULT;
 358        return 0;
 359}
 360
 361static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 362        const struct kvm_one_reg *reg, void __user *uaddr)
 363{
 364        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 365
 366        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 367                return -EFAULT;
 368        return 0;
 369}
 370
 371static void reset_wvr(struct kvm_vcpu *vcpu,
 372                      const struct sys_reg_desc *rd)
 373{
 374        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
 375}
 376
 377static bool trap_wcr(struct kvm_vcpu *vcpu,
 378                     struct sys_reg_params *p,
 379                     const struct sys_reg_desc *rd)
 380{
 381        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 382
 383        if (p->is_write)
 384                reg_to_dbg(vcpu, p, dbg_reg);
 385        else
 386                dbg_to_reg(vcpu, p, dbg_reg);
 387
 388        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 389
 390        return true;
 391}
 392
 393static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 394                const struct kvm_one_reg *reg, void __user *uaddr)
 395{
 396        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 397
 398        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 399                return -EFAULT;
 400        return 0;
 401}
 402
 403static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 404        const struct kvm_one_reg *reg, void __user *uaddr)
 405{
 406        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 407
 408        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 409                return -EFAULT;
 410        return 0;
 411}
 412
 413static void reset_wcr(struct kvm_vcpu *vcpu,
 414                      const struct sys_reg_desc *rd)
 415{
 416        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
 417}
 418
 419static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 420{
 421        u64 amair;
 422
 423        asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
 424        vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
 425}
 426
 427static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 428{
 429        u64 mpidr;
 430
 431        /*
 432         * Map the vcpu_id into the first three affinity level fields of
 433         * the MPIDR. We limit the number of VCPUs in level 0 due to a
 434         * limitation to 16 CPUs in that level in the ICC_SGIxR registers
 435         * of the GICv3 to be able to address each CPU directly when
 436         * sending IPIs.
 437         */
 438        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
 439        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
 440        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
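             /* Bit 31 of MPIDR_EL1 is RES1. */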
 441        vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
 442}
 443
 444static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 445{
 446        u64 pmcr, val;
 447
 448        asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
  449        /* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
  450         * values, except PMCR.E which resets to zero.
 451         */
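             /* 0xdecafbad is just an arbitrary, recognisable "UNKNOWN" value. */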
 452        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 453               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
 454        vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 455}
 456
 457static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
 458{
 459        u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
 460
 461        return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
 462}
 463
 464static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
 465{
 466        u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
 467
 468        return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
 469                 || vcpu_mode_priv(vcpu));
 470}
 471
 472static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
 473{
 474        u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
 475
 476        return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
 477                 || vcpu_mode_priv(vcpu));
 478}
 479
 480static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
 481{
 482        u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
 483
 484        return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
 485                 || vcpu_mode_priv(vcpu));
 486}
 487
 488static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 489                        const struct sys_reg_desc *r)
 490{
 491        u64 val;
 492
 493        if (!kvm_arm_pmu_v3_ready(vcpu))
 494                return trap_raz_wi(vcpu, p, r);
 495
 496        if (pmu_access_el0_disabled(vcpu))
 497                return false;
 498
 499        if (p->is_write) {
 500                /* Only update writeable bits of PMCR */
 501                val = vcpu_sys_reg(vcpu, PMCR_EL0);
 502                val &= ~ARMV8_PMU_PMCR_MASK;
 503                val |= p->regval & ARMV8_PMU_PMCR_MASK;
 504                vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 505                kvm_pmu_handle_pmcr(vcpu, val);
 506        } else {
 507                /* PMCR.P & PMCR.C are RAZ */
 508                val = vcpu_sys_reg(vcpu, PMCR_EL0)
 509                      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
 510                p->regval = val;
 511        }
 512
 513        return true;
 514}
 515
 516static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 517                          const struct sys_reg_desc *r)
 518{
 519        if (!kvm_arm_pmu_v3_ready(vcpu))
 520                return trap_raz_wi(vcpu, p, r);
 521
 522        if (pmu_access_event_counter_el0_disabled(vcpu))
 523                return false;
 524
 525        if (p->is_write)
 526                vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
 527        else
 528                /* return PMSELR.SEL field */
 529                p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
 530                            & ARMV8_PMU_COUNTER_MASK;
 531
 532        return true;
 533}
 534
 535static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 536                          const struct sys_reg_desc *r)
 537{
 538        u64 pmceid;
 539
 540        if (!kvm_arm_pmu_v3_ready(vcpu))
 541                return trap_raz_wi(vcpu, p, r);
 542
 543        BUG_ON(p->is_write);
 544
 545        if (pmu_access_el0_disabled(vcpu))
 546                return false;
 547
 548        if (!(p->Op2 & 1))
 549                asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
 550        else
 551                asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));
 552
 553        p->regval = pmceid;
 554
 555        return true;
 556}
 557
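     /*
      * A counter index is valid if it is strictly below PMCR_EL0.N, or if
      * it is the special cycle counter index.
      */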
 558static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 559{
 560        u64 pmcr, val;
 561
 562        pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
 563        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 564        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
 565                return false;
 566
 567        return true;
 568}
 569
 570static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 571                              struct sys_reg_params *p,
 572                              const struct sys_reg_desc *r)
 573{
 574        u64 idx;
 575
 576        if (!kvm_arm_pmu_v3_ready(vcpu))
 577                return trap_raz_wi(vcpu, p, r);
 578
 579        if (r->CRn == 9 && r->CRm == 13) {
 580                if (r->Op2 == 2) {
 581                        /* PMXEVCNTR_EL0 */
 582                        if (pmu_access_event_counter_el0_disabled(vcpu))
 583                                return false;
 584
 585                        idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
 586                              & ARMV8_PMU_COUNTER_MASK;
 587                } else if (r->Op2 == 0) {
 588                        /* PMCCNTR_EL0 */
 589                        if (pmu_access_cycle_counter_el0_disabled(vcpu))
 590                                return false;
 591
 592                        idx = ARMV8_PMU_CYCLE_IDX;
 593                } else {
 594                        BUG();
 595                }
 596        } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
 597                /* PMEVCNTRn_EL0 */
 598                if (pmu_access_event_counter_el0_disabled(vcpu))
 599                        return false;
 600
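                     /* The counter number n is encoded as CRm[1:0]:Op2[2:0]. */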
 601                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 602        } else {
 603                BUG();
 604        }
 605
 606        if (!pmu_counter_idx_valid(vcpu, idx))
 607                return false;
 608
 609        if (p->is_write) {
 610                if (pmu_access_el0_disabled(vcpu))
 611                        return false;
 612
 613                kvm_pmu_set_counter_value(vcpu, idx, p->regval);
 614        } else {
 615                p->regval = kvm_pmu_get_counter_value(vcpu, idx);
 616        }
 617
 618        return true;
 619}
 620
 621static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 622                               const struct sys_reg_desc *r)
 623{
 624        u64 idx, reg;
 625
 626        if (!kvm_arm_pmu_v3_ready(vcpu))
 627                return trap_raz_wi(vcpu, p, r);
 628
 629        if (pmu_access_el0_disabled(vcpu))
 630                return false;
 631
 632        if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
 633                /* PMXEVTYPER_EL0 */
 634                idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
 635                reg = PMEVTYPER0_EL0 + idx;
 636        } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
 637                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 638                if (idx == ARMV8_PMU_CYCLE_IDX)
 639                        reg = PMCCFILTR_EL0;
 640                else
 641                        /* PMEVTYPERn_EL0 */
 642                        reg = PMEVTYPER0_EL0 + idx;
 643        } else {
 644                BUG();
 645        }
 646
 647        if (!pmu_counter_idx_valid(vcpu, idx))
 648                return false;
 649
 650        if (p->is_write) {
 651                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
 652                vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
 653        } else {
 654                p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
 655        }
 656
 657        return true;
 658}
 659
 660static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 661                           const struct sys_reg_desc *r)
 662{
 663        u64 val, mask;
 664
 665        if (!kvm_arm_pmu_v3_ready(vcpu))
 666                return trap_raz_wi(vcpu, p, r);
 667
 668        if (pmu_access_el0_disabled(vcpu))
 669                return false;
 670
 671        mask = kvm_pmu_valid_counter_mask(vcpu);
 672        if (p->is_write) {
 673                val = p->regval & mask;
 674                if (r->Op2 & 0x1) {
 675                        /* accessing PMCNTENSET_EL0 */
 676                        vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
 677                        kvm_pmu_enable_counter(vcpu, val);
 678                } else {
 679                        /* accessing PMCNTENCLR_EL0 */
 680                        vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
 681                        kvm_pmu_disable_counter(vcpu, val);
 682                }
 683        } else {
 684                p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
 685        }
 686
 687        return true;
 688}
 689
 690static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 691                           const struct sys_reg_desc *r)
 692{
 693        u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 694
 695        if (!kvm_arm_pmu_v3_ready(vcpu))
 696                return trap_raz_wi(vcpu, p, r);
 697
 698        if (!vcpu_mode_priv(vcpu))
 699                return false;
 700
 701        if (p->is_write) {
 702                u64 val = p->regval & mask;
 703
 704                if (r->Op2 & 0x1)
 705                        /* accessing PMINTENSET_EL1 */
 706                        vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
 707                else
 708                        /* accessing PMINTENCLR_EL1 */
 709                        vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
 710        } else {
 711                p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
 712        }
 713
 714        return true;
 715}
 716
 717static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 718                         const struct sys_reg_desc *r)
 719{
 720        u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 721
 722        if (!kvm_arm_pmu_v3_ready(vcpu))
 723                return trap_raz_wi(vcpu, p, r);
 724
 725        if (pmu_access_el0_disabled(vcpu))
 726                return false;
 727
 728        if (p->is_write) {
 729                if (r->CRm & 0x2)
 730                        /* accessing PMOVSSET_EL0 */
 731                        kvm_pmu_overflow_set(vcpu, p->regval & mask);
 732                else
 733                        /* accessing PMOVSCLR_EL0 */
 734                        vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
 735        } else {
 736                p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
 737        }
 738
 739        return true;
 740}
 741
 742static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 743                           const struct sys_reg_desc *r)
 744{
 745        u64 mask;
 746
 747        if (!kvm_arm_pmu_v3_ready(vcpu))
 748                return trap_raz_wi(vcpu, p, r);
 749
 750        if (pmu_write_swinc_el0_disabled(vcpu))
 751                return false;
 752
 753        if (p->is_write) {
 754                mask = kvm_pmu_valid_counter_mask(vcpu);
 755                kvm_pmu_software_increment(vcpu, p->regval & mask);
 756                return true;
 757        }
 758
 759        return false;
 760}
 761
 762static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 763                             const struct sys_reg_desc *r)
 764{
 765        if (!kvm_arm_pmu_v3_ready(vcpu))
 766                return trap_raz_wi(vcpu, p, r);
 767
 768        if (p->is_write) {
 769                if (!vcpu_mode_priv(vcpu))
 770                        return false;
 771
 772                vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
 773                                                    & ARMV8_PMU_USERENR_MASK;
 774        } else {
 775                p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
 776                            & ARMV8_PMU_USERENR_MASK;
 777        }
 778
 779        return true;
 780}
 781
 782/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 783#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
 784        /* DBGBVRn_EL1 */                                               \
 785        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),     \
 786          trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },                \
 787        /* DBGBCRn_EL1 */                                               \
 788        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),     \
 789          trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },                \
 790        /* DBGWVRn_EL1 */                                               \
 791        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),     \
 792          trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },               \
 793        /* DBGWCRn_EL1 */                                               \
 794        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),     \
 795          trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
 796
 797/* Macro to expand the PMEVCNTRn_EL0 register */
 798#define PMU_PMEVCNTR_EL0(n)                                             \
 799        /* PMEVCNTRn_EL0 */                                             \
 800        { Op0(0b11), Op1(0b011), CRn(0b1110),                           \
 801          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
 802          access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
 803
 804/* Macro to expand the PMEVTYPERn_EL0 register */
 805#define PMU_PMEVTYPER_EL0(n)                                            \
 806        /* PMEVTYPERn_EL0 */                                            \
 807        { Op0(0b11), Op1(0b011), CRn(0b1110),                           \
 808          CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
 809          access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
 810
 811/*
 812 * Architected system registers.
 813 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 814 *
 815 * We could trap ID_DFR0 and tell the guest we don't support performance
 816 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 817 * NAKed, so it will read the PMCR anyway.
 818 *
 819 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 820 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 821 * all PM registers, which doesn't crash the guest kernel at least.
 822 *
  823 * Debug handling: We do trap most, if not all, debug-related system
  824 * registers. The implementation is good enough to ensure that a guest
  825 * can use these with minimal performance degradation. The drawback is
  826 * that we don't implement any of the external debug architecture, nor
  827 * the OSLock protocol. This should be revisited if we ever encounter a
 828 * more demanding guest...
 829 */
 830static const struct sys_reg_desc sys_reg_descs[] = {
 831        /* DC ISW */
 832        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
 833          access_dcsw },
 834        /* DC CSW */
 835        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
 836          access_dcsw },
 837        /* DC CISW */
 838        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
 839          access_dcsw },
 840
 841        DBG_BCR_BVR_WCR_WVR_EL1(0),
 842        DBG_BCR_BVR_WCR_WVR_EL1(1),
 843        /* MDCCINT_EL1 */
 844        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
 845          trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
 846        /* MDSCR_EL1 */
 847        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
 848          trap_debug_regs, reset_val, MDSCR_EL1, 0 },
 849        DBG_BCR_BVR_WCR_WVR_EL1(2),
 850        DBG_BCR_BVR_WCR_WVR_EL1(3),
 851        DBG_BCR_BVR_WCR_WVR_EL1(4),
 852        DBG_BCR_BVR_WCR_WVR_EL1(5),
 853        DBG_BCR_BVR_WCR_WVR_EL1(6),
 854        DBG_BCR_BVR_WCR_WVR_EL1(7),
 855        DBG_BCR_BVR_WCR_WVR_EL1(8),
 856        DBG_BCR_BVR_WCR_WVR_EL1(9),
 857        DBG_BCR_BVR_WCR_WVR_EL1(10),
 858        DBG_BCR_BVR_WCR_WVR_EL1(11),
 859        DBG_BCR_BVR_WCR_WVR_EL1(12),
 860        DBG_BCR_BVR_WCR_WVR_EL1(13),
 861        DBG_BCR_BVR_WCR_WVR_EL1(14),
 862        DBG_BCR_BVR_WCR_WVR_EL1(15),
 863
 864        /* MDRAR_EL1 */
 865        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
 866          trap_raz_wi },
 867        /* OSLAR_EL1 */
 868        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
 869          trap_raz_wi },
 870        /* OSLSR_EL1 */
 871        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
 872          trap_oslsr_el1 },
 873        /* OSDLR_EL1 */
 874        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
 875          trap_raz_wi },
 876        /* DBGPRCR_EL1 */
 877        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
 878          trap_raz_wi },
 879        /* DBGCLAIMSET_EL1 */
 880        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
 881          trap_raz_wi },
 882        /* DBGCLAIMCLR_EL1 */
 883        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
 884          trap_raz_wi },
 885        /* DBGAUTHSTATUS_EL1 */
 886        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
 887          trap_dbgauthstatus_el1 },
 888
 889        /* MDCCSR_EL1 */
 890        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
 891          trap_raz_wi },
 892        /* DBGDTR_EL0 */
 893        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
 894          trap_raz_wi },
 895        /* DBGDTR[TR]X_EL0 */
 896        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
 897          trap_raz_wi },
 898
 899        /* DBGVCR32_EL2 */
 900        { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
 901          NULL, reset_val, DBGVCR32_EL2, 0 },
 902
 903        /* MPIDR_EL1 */
 904        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
 905          NULL, reset_mpidr, MPIDR_EL1 },
 906        /* SCTLR_EL1 */
 907        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
 908          access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
 909        /* CPACR_EL1 */
 910        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
 911          NULL, reset_val, CPACR_EL1, 0 },
 912        /* TTBR0_EL1 */
 913        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
 914          access_vm_reg, reset_unknown, TTBR0_EL1 },
 915        /* TTBR1_EL1 */
 916        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
 917          access_vm_reg, reset_unknown, TTBR1_EL1 },
 918        /* TCR_EL1 */
 919        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
 920          access_vm_reg, reset_val, TCR_EL1, 0 },
 921
 922        /* AFSR0_EL1 */
 923        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
 924          access_vm_reg, reset_unknown, AFSR0_EL1 },
 925        /* AFSR1_EL1 */
 926        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
 927          access_vm_reg, reset_unknown, AFSR1_EL1 },
 928        /* ESR_EL1 */
 929        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
 930          access_vm_reg, reset_unknown, ESR_EL1 },
 931        /* FAR_EL1 */
 932        { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
 933          access_vm_reg, reset_unknown, FAR_EL1 },
 934        /* PAR_EL1 */
 935        { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
 936          NULL, reset_unknown, PAR_EL1 },
 937
 938        /* PMINTENSET_EL1 */
 939        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
 940          access_pminten, reset_unknown, PMINTENSET_EL1 },
 941        /* PMINTENCLR_EL1 */
 942        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
 943          access_pminten, NULL, PMINTENSET_EL1 },
 944
 945        /* MAIR_EL1 */
 946        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
 947          access_vm_reg, reset_unknown, MAIR_EL1 },
 948        /* AMAIR_EL1 */
 949        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
 950          access_vm_reg, reset_amair_el1, AMAIR_EL1 },
 951
 952        /* VBAR_EL1 */
 953        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
 954          NULL, reset_val, VBAR_EL1, 0 },
 955
 956        /* ICC_SGI1R_EL1 */
 957        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
 958          access_gic_sgi },
 959        /* ICC_SRE_EL1 */
 960        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
 961          trap_raz_wi },
 962
 963        /* CONTEXTIDR_EL1 */
 964        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
 965          access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
 966        /* TPIDR_EL1 */
 967        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
 968          NULL, reset_unknown, TPIDR_EL1 },
 969
 970        /* CNTKCTL_EL1 */
 971        { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
 972          NULL, reset_val, CNTKCTL_EL1, 0},
 973
 974        /* CSSELR_EL1 */
 975        { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
 976          NULL, reset_unknown, CSSELR_EL1 },
 977
 978        /* PMCR_EL0 */
 979        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
 980          access_pmcr, reset_pmcr, },
 981        /* PMCNTENSET_EL0 */
 982        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
 983          access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 984        /* PMCNTENCLR_EL0 */
 985        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
 986          access_pmcnten, NULL, PMCNTENSET_EL0 },
 987        /* PMOVSCLR_EL0 */
 988        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
 989          access_pmovs, NULL, PMOVSSET_EL0 },
 990        /* PMSWINC_EL0 */
 991        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
 992          access_pmswinc, reset_unknown, PMSWINC_EL0 },
 993        /* PMSELR_EL0 */
 994        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
 995          access_pmselr, reset_unknown, PMSELR_EL0 },
 996        /* PMCEID0_EL0 */
 997        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
 998          access_pmceid },
 999        /* PMCEID1_EL0 */
1000        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
1001          access_pmceid },
1002        /* PMCCNTR_EL0 */
1003        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
1004          access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
1005        /* PMXEVTYPER_EL0 */
1006        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
1007          access_pmu_evtyper },
1008        /* PMXEVCNTR_EL0 */
1009        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
1010          access_pmu_evcntr },
1011        /* PMUSERENR_EL0
1012         * This register resets as unknown in 64bit mode while it resets as zero
1013         * in 32bit mode. Here we choose to reset it as zero for consistency.
1014         */
1015        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
1016          access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
1017        /* PMOVSSET_EL0 */
1018        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
1019          access_pmovs, reset_unknown, PMOVSSET_EL0 },
1020
1021        /* TPIDR_EL0 */
1022        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
1023          NULL, reset_unknown, TPIDR_EL0 },
1024        /* TPIDRRO_EL0 */
1025        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
1026          NULL, reset_unknown, TPIDRRO_EL0 },
1027
1028        /* PMEVCNTRn_EL0 */
1029        PMU_PMEVCNTR_EL0(0),
1030        PMU_PMEVCNTR_EL0(1),
1031        PMU_PMEVCNTR_EL0(2),
1032        PMU_PMEVCNTR_EL0(3),
1033        PMU_PMEVCNTR_EL0(4),
1034        PMU_PMEVCNTR_EL0(5),
1035        PMU_PMEVCNTR_EL0(6),
1036        PMU_PMEVCNTR_EL0(7),
1037        PMU_PMEVCNTR_EL0(8),
1038        PMU_PMEVCNTR_EL0(9),
1039        PMU_PMEVCNTR_EL0(10),
1040        PMU_PMEVCNTR_EL0(11),
1041        PMU_PMEVCNTR_EL0(12),
1042        PMU_PMEVCNTR_EL0(13),
1043        PMU_PMEVCNTR_EL0(14),
1044        PMU_PMEVCNTR_EL0(15),
1045        PMU_PMEVCNTR_EL0(16),
1046        PMU_PMEVCNTR_EL0(17),
1047        PMU_PMEVCNTR_EL0(18),
1048        PMU_PMEVCNTR_EL0(19),
1049        PMU_PMEVCNTR_EL0(20),
1050        PMU_PMEVCNTR_EL0(21),
1051        PMU_PMEVCNTR_EL0(22),
1052        PMU_PMEVCNTR_EL0(23),
1053        PMU_PMEVCNTR_EL0(24),
1054        PMU_PMEVCNTR_EL0(25),
1055        PMU_PMEVCNTR_EL0(26),
1056        PMU_PMEVCNTR_EL0(27),
1057        PMU_PMEVCNTR_EL0(28),
1058        PMU_PMEVCNTR_EL0(29),
1059        PMU_PMEVCNTR_EL0(30),
1060        /* PMEVTYPERn_EL0 */
1061        PMU_PMEVTYPER_EL0(0),
1062        PMU_PMEVTYPER_EL0(1),
1063        PMU_PMEVTYPER_EL0(2),
1064        PMU_PMEVTYPER_EL0(3),
1065        PMU_PMEVTYPER_EL0(4),
1066        PMU_PMEVTYPER_EL0(5),
1067        PMU_PMEVTYPER_EL0(6),
1068        PMU_PMEVTYPER_EL0(7),
1069        PMU_PMEVTYPER_EL0(8),
1070        PMU_PMEVTYPER_EL0(9),
1071        PMU_PMEVTYPER_EL0(10),
1072        PMU_PMEVTYPER_EL0(11),
1073        PMU_PMEVTYPER_EL0(12),
1074        PMU_PMEVTYPER_EL0(13),
1075        PMU_PMEVTYPER_EL0(14),
1076        PMU_PMEVTYPER_EL0(15),
1077        PMU_PMEVTYPER_EL0(16),
1078        PMU_PMEVTYPER_EL0(17),
1079        PMU_PMEVTYPER_EL0(18),
1080        PMU_PMEVTYPER_EL0(19),
1081        PMU_PMEVTYPER_EL0(20),
1082        PMU_PMEVTYPER_EL0(21),
1083        PMU_PMEVTYPER_EL0(22),
1084        PMU_PMEVTYPER_EL0(23),
1085        PMU_PMEVTYPER_EL0(24),
1086        PMU_PMEVTYPER_EL0(25),
1087        PMU_PMEVTYPER_EL0(26),
1088        PMU_PMEVTYPER_EL0(27),
1089        PMU_PMEVTYPER_EL0(28),
1090        PMU_PMEVTYPER_EL0(29),
1091        PMU_PMEVTYPER_EL0(30),
1092        /* PMCCFILTR_EL0
1093         * This register resets as unknown in 64bit mode while it resets as zero
1094         * in 32bit mode. Here we choose to reset it as zero for consistency.
1095         */
1096        { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
1097          access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1098
1099        /* DACR32_EL2 */
1100        { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
1101          NULL, reset_unknown, DACR32_EL2 },
1102        /* IFSR32_EL2 */
1103        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
1104          NULL, reset_unknown, IFSR32_EL2 },
1105        /* FPEXC32_EL2 */
1106        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
1107          NULL, reset_val, FPEXC32_EL2, 0x70 },
1108};
1109
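     /*
      * Synthesize a DBGIDR value from ID_AA64DFR0_EL1: the number of
      * watchpoints, breakpoints and context-matching comparators, a debug
      * architecture version of 6 (ARMv8), and security-related bits derived
      * from whether the host implements EL3.
      */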
1110static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1111                        struct sys_reg_params *p,
1112                        const struct sys_reg_desc *r)
1113{
1114        if (p->is_write) {
1115                return ignore_write(vcpu, p);
1116        } else {
1117                u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
1118                u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
1119                u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1120
1121                p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1122                             (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1123                             (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1124                             | (6 << 16) | (el3 << 14) | (el3 << 12));
1125                return true;
1126        }
1127}
1128
1129static bool trap_debug32(struct kvm_vcpu *vcpu,
1130                         struct sys_reg_params *p,
1131                         const struct sys_reg_desc *r)
1132{
1133        if (p->is_write) {
1134                vcpu_cp14(vcpu, r->reg) = p->regval;
1135                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
1136        } else {
1137                p->regval = vcpu_cp14(vcpu, r->reg);
1138        }
1139
1140        return true;
1141}
1142
1143/* AArch32 debug register mappings
1144 *
1145 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1146 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1147 *
1148 * All control registers and watchpoint value registers are mapped to
1149 * the lower 32 bits of their AArch64 equivalents. We share the trap
1150 * handlers with the above AArch64 code which checks what mode the
1151 * system is in.
1152 */
1153
1154static bool trap_xvr(struct kvm_vcpu *vcpu,
1155                     struct sys_reg_params *p,
1156                     const struct sys_reg_desc *rd)
1157{
1158        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1159
1160        if (p->is_write) {
1161                u64 val = *dbg_reg;
1162
1163                val &= 0xffffffffUL;
1164                val |= p->regval << 32;
1165                *dbg_reg = val;
1166
1167                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
1168        } else {
1169                p->regval = *dbg_reg >> 32;
1170        }
1171
1172        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1173
1174        return true;
1175}
1176
1177#define DBG_BCR_BVR_WCR_WVR(n)                                          \
1178        /* DBGBVRn */                                                   \
1179        { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },     \
1180        /* DBGBCRn */                                                   \
1181        { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },     \
1182        /* DBGWVRn */                                                   \
1183        { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },     \
1184        /* DBGWCRn */                                                   \
1185        { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1186
1187#define DBGBXVR(n)                                                      \
1188        { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1189
1190/*
1191 * Trapped cp14 registers. We generally ignore most of the external
1192 * debug, on the principle that they don't really make sense to a
 1193 * guest. Revisit this one day, should this principle change.
1194 */
1195static const struct sys_reg_desc cp14_regs[] = {
1196        /* DBGIDR */
1197        { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1198        /* DBGDTRRXext */
1199        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1200
1201        DBG_BCR_BVR_WCR_WVR(0),
1202        /* DBGDSCRint */
1203        { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1204        DBG_BCR_BVR_WCR_WVR(1),
1205        /* DBGDCCINT */
1206        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
1207        /* DBGDSCRext */
1208        { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
1209        DBG_BCR_BVR_WCR_WVR(2),
1210        /* DBGDTR[RT]Xint */
1211        { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1212        /* DBGDTR[RT]Xext */
1213        { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1214        DBG_BCR_BVR_WCR_WVR(3),
1215        DBG_BCR_BVR_WCR_WVR(4),
1216        DBG_BCR_BVR_WCR_WVR(5),
1217        /* DBGWFAR */
1218        { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1219        /* DBGOSECCR */
1220        { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1221        DBG_BCR_BVR_WCR_WVR(6),
1222        /* DBGVCR */
1223        { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
1224        DBG_BCR_BVR_WCR_WVR(7),
1225        DBG_BCR_BVR_WCR_WVR(8),
1226        DBG_BCR_BVR_WCR_WVR(9),
1227        DBG_BCR_BVR_WCR_WVR(10),
1228        DBG_BCR_BVR_WCR_WVR(11),
1229        DBG_BCR_BVR_WCR_WVR(12),
1230        DBG_BCR_BVR_WCR_WVR(13),
1231        DBG_BCR_BVR_WCR_WVR(14),
1232        DBG_BCR_BVR_WCR_WVR(15),
1233
1234        /* DBGDRAR (32bit) */
1235        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1236
1237        DBGBXVR(0),
1238        /* DBGOSLAR */
1239        { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1240        DBGBXVR(1),
1241        /* DBGOSLSR */
1242        { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1243        DBGBXVR(2),
1244        DBGBXVR(3),
1245        /* DBGOSDLR */
1246        { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1247        DBGBXVR(4),
1248        /* DBGPRCR */
1249        { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1250        DBGBXVR(5),
1251        DBGBXVR(6),
1252        DBGBXVR(7),
1253        DBGBXVR(8),
1254        DBGBXVR(9),
1255        DBGBXVR(10),
1256        DBGBXVR(11),
1257        DBGBXVR(12),
1258        DBGBXVR(13),
1259        DBGBXVR(14),
1260        DBGBXVR(15),
1261
1262        /* DBGDSAR (32bit) */
1263        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1264
1265        /* DBGDEVID2 */
1266        { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1267        /* DBGDEVID1 */
1268        { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1269        /* DBGDEVID */
1270        { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1271        /* DBGCLAIMSET */
1272        { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1273        /* DBGCLAIMCLR */
1274        { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1275        /* DBGAUTHSTATUS */
1276        { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1277};
1278
1279/* Trapped cp14 64bit registers */
1280static const struct sys_reg_desc cp14_64_regs[] = {
1281        /* DBGDRAR (64bit) */
1282        { Op1( 0), CRm( 1), .access = trap_raz_wi },
1283
1284        /* DBGDSAR (64bit) */
1285        { Op1( 0), CRm( 2), .access = trap_raz_wi },
1286};
1287
1288/* Macro to expand the PMEVCNTRn register */
1289#define PMU_PMEVCNTR(n)                                                 \
1290        /* PMEVCNTRn */                                                 \
1291        { Op1(0), CRn(0b1110),                                          \
1292          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
1293          access_pmu_evcntr }
1294
1295/* Macro to expand the PMEVTYPERn register */
1296#define PMU_PMEVTYPER(n)                                                \
1297        /* PMEVTYPERn */                                                \
1298        { Op1(0), CRn(0b1110),                                          \
1299          CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
1300          access_pmu_evtyper }
1301
1302/*
1303 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1304 * depending on the way they are accessed (as a 32bit or a 64bit
1305 * register).
1306 */
1307static const struct sys_reg_desc cp15_regs[] = {
1308        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
1309
1310        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
1311        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1312        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1313        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1314        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1315        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1316        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1317        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
1318        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
1319        { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
1320        { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
1321
1322        /*
1323         * DC{C,I,CI}SW operations:
1324         */
1325        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
1326        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
1327        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
1328
1329        /* PMU */
1330        { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
1331        { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
1332        { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
1333        { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
1334        { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
1335        { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
1336        { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
1337        { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
1338        { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
1339        { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
1340        { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
1341        { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
1342        { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
1343        { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
1344        { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
1345
1346        { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
1347        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
1348        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
1349        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
1350
1351        /* ICC_SRE */
1352        { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
1353
1354        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
1355
1356        /* PMEVCNTRn */
1357        PMU_PMEVCNTR(0),
1358        PMU_PMEVCNTR(1),
1359        PMU_PMEVCNTR(2),
1360        PMU_PMEVCNTR(3),
1361        PMU_PMEVCNTR(4),
1362        PMU_PMEVCNTR(5),
1363        PMU_PMEVCNTR(6),
1364        PMU_PMEVCNTR(7),
1365        PMU_PMEVCNTR(8),
1366        PMU_PMEVCNTR(9),
1367        PMU_PMEVCNTR(10),
1368        PMU_PMEVCNTR(11),
1369        PMU_PMEVCNTR(12),
1370        PMU_PMEVCNTR(13),
1371        PMU_PMEVCNTR(14),
1372        PMU_PMEVCNTR(15),
1373        PMU_PMEVCNTR(16),
1374        PMU_PMEVCNTR(17),
1375        PMU_PMEVCNTR(18),
1376        PMU_PMEVCNTR(19),
1377        PMU_PMEVCNTR(20),
1378        PMU_PMEVCNTR(21),
1379        PMU_PMEVCNTR(22),
1380        PMU_PMEVCNTR(23),
1381        PMU_PMEVCNTR(24),
1382        PMU_PMEVCNTR(25),
1383        PMU_PMEVCNTR(26),
1384        PMU_PMEVCNTR(27),
1385        PMU_PMEVCNTR(28),
1386        PMU_PMEVCNTR(29),
1387        PMU_PMEVCNTR(30),
1388        /* PMEVTYPERn */
1389        PMU_PMEVTYPER(0),
1390        PMU_PMEVTYPER(1),
1391        PMU_PMEVTYPER(2),
1392        PMU_PMEVTYPER(3),
1393        PMU_PMEVTYPER(4),
1394        PMU_PMEVTYPER(5),
1395        PMU_PMEVTYPER(6),
1396        PMU_PMEVTYPER(7),
1397        PMU_PMEVTYPER(8),
1398        PMU_PMEVTYPER(9),
1399        PMU_PMEVTYPER(10),
1400        PMU_PMEVTYPER(11),
1401        PMU_PMEVTYPER(12),
1402        PMU_PMEVTYPER(13),
1403        PMU_PMEVTYPER(14),
1404        PMU_PMEVTYPER(15),
1405        PMU_PMEVTYPER(16),
1406        PMU_PMEVTYPER(17),
1407        PMU_PMEVTYPER(18),
1408        PMU_PMEVTYPER(19),
1409        PMU_PMEVTYPER(20),
1410        PMU_PMEVTYPER(21),
1411        PMU_PMEVTYPER(22),
1412        PMU_PMEVTYPER(23),
1413        PMU_PMEVTYPER(24),
1414        PMU_PMEVTYPER(25),
1415        PMU_PMEVTYPER(26),
1416        PMU_PMEVTYPER(27),
1417        PMU_PMEVTYPER(28),
1418        PMU_PMEVTYPER(29),
1419        PMU_PMEVTYPER(30),
1420        /* PMCCFILTR */
1421        { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
1422};
1423
1424static const struct sys_reg_desc cp15_64_regs[] = {
1425        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1426        { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
1427        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
1428        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
1429};
1430
1431/* Target specific emulation tables */
1432static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1433
1434void kvm_register_target_sys_reg_table(unsigned int target,
1435                                       struct kvm_sys_reg_target_table *table)
1436{
1437        target_tables[target] = table;
1438}
1439
1440/* Get specific register table for this target. */
1441static const struct sys_reg_desc *get_target_table(unsigned target,
1442                                                   bool mode_is_64,
1443                                                   size_t *num)
1444{
1445        struct kvm_sys_reg_target_table *table;
1446
1447        table = target_tables[target];
1448        if (mode_is_64) {
1449                *num = table->table64.num;
1450                return table->table64.table;
1451        } else {
1452                *num = table->table32.num;
1453                return table->table32.table;
1454        }
1455}
1456
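     /*
      * Pack the (Op0, Op1, CRn, CRm, Op2) encoding of a descriptor into a
      * single value whose ordering matches the ascending sort order of the
      * tables, so that bsearch() can be used by find_reg().
      */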
1457#define reg_to_match_value(x)                                           \
1458        ({                                                              \
1459                unsigned long val;                                      \
1460                val  = (x)->Op0 << 14;                                  \
1461                val |= (x)->Op1 << 11;                                  \
1462                val |= (x)->CRn << 7;                                   \
1463                val |= (x)->CRm << 3;                                   \
1464                val |= (x)->Op2;                                        \
1465                val;                                                    \
1466         })
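    /*
     * reg_to_match_value() packs (Op0, Op1, CRn, CRm, Op2) into a single
     * integer: Op0 in bits [15:14], Op1 in [13:11], CRn in [10:7], CRm in
     * [6:3] and Op2 in [2:0]. For example SCTLR_EL1 (3, 0, 1, 0, 0) packs
     * to 0xc080. The trap tables are kept sorted on this value (see
     * check_sysreg_table()) so that find_reg() can simply bsearch().
     */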
1467
1468static int match_sys_reg(const void *key, const void *elt)
1469{
1470        const unsigned long pval = (unsigned long)key;
1471        const struct sys_reg_desc *r = elt;
1472
1473        return pval - reg_to_match_value(r);
1474}
1475
1476static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
1477                                         const struct sys_reg_desc table[],
1478                                         unsigned int num)
1479{
1480        unsigned long pval = reg_to_match_value(params);
1481
1482        return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
1483}
1484
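    /*
     * Trapped LDC/STC accesses to CP14 (debug transfers) are not emulated
     * at all: just inject an UNDEF into the guest.
     */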
1485int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
1486{
1487        kvm_inject_undefined(vcpu);
1488        return 1;
1489}
1490
1491/*
1492 * emulate_cp --  tries to match a sys_reg access in a handling table, and
1493 *                calls the corresponding trap handler.
1494 *
1495 * @params: pointer to the descriptor of the access
1496 * @table: array of trap descriptors
1497 * @num: size of the trap descriptor array
1498 *
1499 * Return 0 if the access has been handled, and -1 if not.
1500 */
1501static int emulate_cp(struct kvm_vcpu *vcpu,
1502                      struct sys_reg_params *params,
1503                      const struct sys_reg_desc *table,
1504                      size_t num)
1505{
1506        const struct sys_reg_desc *r;
1507
1508        if (!table)
1509                return -1;      /* Not handled */
1510
1511        r = find_reg(params, table, num);
1512
1513        if (r) {
1514                /*
1515                 * Not having an accessor means that we have
1516                 * configured a trap that we don't know how to
1517                 * handle. This certainly qualifies as a gross bug
1518                 * that should be fixed right away.
1519                 */
1520                BUG_ON(!r->access);
1521
1522                if (likely(r->access(vcpu, params, r))) {
1523                        /* Skip instruction, since it was emulated */
1524                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1525                        /* Handled */
1526                        return 0;
1527                }
1528        }
1529
1530        /* Not handled */
1531        return -1;
1532}
1533
1534static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1535                                struct sys_reg_params *params)
1536{
1537        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
1538        int cp;
1539
1540        switch(hsr_ec) {
1541        case ESR_ELx_EC_CP15_32:
1542        case ESR_ELx_EC_CP15_64:
1543                cp = 15;
1544                break;
1545        case ESR_ELx_EC_CP14_MR:
1546        case ESR_ELx_EC_CP14_64:
1547                cp = 14;
1548                break;
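            /* Should not happen: only CP14/CP15 traps are routed here. */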
1549        default:
1550                WARN_ON((cp = -1));
1551        }
1552
1553        kvm_err("Unsupported guest CP%d access at: %08lx\n",
1554                cp, *vcpu_pc(vcpu));
1555        print_sys_reg_instr(params);
1556        kvm_inject_undefined(vcpu);
1557}
1558
1559/**
1560 * kvm_handle_cp_64 -- handles an mrrc/mcrr trap on a guest CP14/CP15 access
1561 * @vcpu: The VCPU pointer
1562 * @global/@nr_global, @target_specific/@nr_specific: trap tables to search
1563 */
1564static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1565                            const struct sys_reg_desc *global,
1566                            size_t nr_global,
1567                            const struct sys_reg_desc *target_specific,
1568                            size_t nr_specific)
1569{
1570        struct sys_reg_params params;
1571        u32 hsr = kvm_vcpu_get_hsr(vcpu);
1572        int Rt = (hsr >> 5) & 0xf;
1573        int Rt2 = (hsr >> 10) & 0xf;
1574
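            /*
             * Decode the MCRR/MRRC ISS: Opc1 is in ESR[19:16], Rt2 in
             * [14:10], Rt in [9:5], CRm in [4:1] and the direction
             * (0 = write from the guest) in bit 0.
             */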
1575        params.is_aarch32 = true;
1576        params.is_32bit = false;
1577        params.CRm = (hsr >> 1) & 0xf;
1578        params.is_write = ((hsr & 1) == 0);
1579
1580        params.Op0 = 0;
1581        params.Op1 = (hsr >> 16) & 0xf;
1582        params.Op2 = 0;
1583        params.CRn = 0;
1584
1585        /*
1586         * Make a 64-bit value out of Rt and Rt2. As we use the same trap
1587         * backends between AArch32 and AArch64, we get away with it.
1588         */
1589        if (params.is_write) {
1590                params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1591                params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
1592        }
1593
1594        if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
1595                goto out;
1596        if (!emulate_cp(vcpu, &params, global, nr_global))
1597                goto out;
1598
1599        unhandled_cp_access(vcpu, &params);
1600
1601out:
1602        /* Split up the value between registers for the read side */
1603        if (!params.is_write) {
1604                vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1605                vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1606        }
1607
1608        return 1;
1609}
1610
1611/**
1612 * kvm_handle_cp_32 -- handles an mrc/mcr trap on a guest CP14/CP15 access
1613 * @vcpu: The VCPU pointer
1614 * @global/@nr_global, @target_specific/@nr_specific: trap tables to search
1615 */
1616static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1617                            const struct sys_reg_desc *global,
1618                            size_t nr_global,
1619                            const struct sys_reg_desc *target_specific,
1620                            size_t nr_specific)
1621{
1622        struct sys_reg_params params;
1623        u32 hsr = kvm_vcpu_get_hsr(vcpu);
1624        int Rt  = (hsr >> 5) & 0xf;
1625
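            /*
             * Decode the MCR/MRC ISS: Opc2 is in ESR[19:17], Opc1 in
             * [16:14], CRn in [13:10], Rt in [9:5], CRm in [4:1] and the
             * direction (0 = write from the guest) in bit 0.
             */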
1626        params.is_aarch32 = true;
1627        params.is_32bit = true;
1628        params.CRm = (hsr >> 1) & 0xf;
1629        params.regval = vcpu_get_reg(vcpu, Rt);
1630        params.is_write = ((hsr & 1) == 0);
1631        params.CRn = (hsr >> 10) & 0xf;
1632        params.Op0 = 0;
1633        params.Op1 = (hsr >> 14) & 0x7;
1634        params.Op2 = (hsr >> 17) & 0x7;
1635
1636        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1637            !emulate_cp(vcpu, &params, global, nr_global)) {
1638                if (!params.is_write)
1639                        vcpu_set_reg(vcpu, Rt, params.regval);
1640                return 1;
1641        }
1642
1643        unhandled_cp_access(vcpu, &params);
1644        return 1;
1645}
1646
1647int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1648{
1649        const struct sys_reg_desc *target_specific;
1650        size_t num;
1651
1652        target_specific = get_target_table(vcpu->arch.target, false, &num);
1653        return kvm_handle_cp_64(vcpu,
1654                                cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
1655                                target_specific, num);
1656}
1657
1658int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1659{
1660        const struct sys_reg_desc *target_specific;
1661        size_t num;
1662
1663        target_specific = get_target_table(vcpu->arch.target, false, &num);
1664        return kvm_handle_cp_32(vcpu,
1665                                cp15_regs, ARRAY_SIZE(cp15_regs),
1666                                target_specific, num);
1667}
1668
1669int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
1670{
1671        return kvm_handle_cp_64(vcpu,
1672                                cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
1673                                NULL, 0);
1674}
1675
1676int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1677{
1678        return kvm_handle_cp_32(vcpu,
1679                                cp14_regs, ARRAY_SIZE(cp14_regs),
1680                                NULL, 0);
1681}
1682
1683static int emulate_sys_reg(struct kvm_vcpu *vcpu,
1684                           struct sys_reg_params *params)
1685{
1686        size_t num;
1687        const struct sys_reg_desc *table, *r;
1688
1689        table = get_target_table(vcpu->arch.target, true, &num);
1690
1691        /* Search target-specific then generic table. */
1692        r = find_reg(params, table, num);
1693        if (!r)
1694                r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1695
1696        if (likely(r)) {
1697                /*
1698                 * Not having an accessor means that we have
1699                 * configured a trap that we don't know how to
1700                 * handle. This certainly qualifies as a gross bug
1701                 * that should be fixed right away.
1702                 */
1703                BUG_ON(!r->access);
1704
1705                if (likely(r->access(vcpu, params, r))) {
1706                        /* Skip instruction, since it was emulated */
1707                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1708                        return 1;
1709                }
1710                /* If access function fails, it should complain. */
1711        } else {
1712                kvm_err("Unsupported guest sys_reg access at: %lx\n",
1713                        *vcpu_pc(vcpu));
1714                print_sys_reg_instr(params);
1715        }
1716        kvm_inject_undefined(vcpu);
1717        return 1;
1718}
1719
1720static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
1721                              const struct sys_reg_desc *table, size_t num)
1722{
1723        unsigned long i;
1724
1725        for (i = 0; i < num; i++)
1726                if (table[i].reset)
1727                        table[i].reset(vcpu, &table[i]);
1728}
1729
1730/**
1731 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
1732 * @vcpu: The VCPU pointer
1733 * @run:  The kvm_run struct
1734 */
1735int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1736{
1737        struct sys_reg_params params;
1738        unsigned long esr = kvm_vcpu_get_hsr(vcpu);
1739        int Rt = (esr >> 5) & 0x1f;
1740        int ret;
1741
1742        trace_kvm_handle_sys_reg(esr);
1743
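            /*
             * Decode the MSR/MRS ISS: Op0 is in ESR[21:20], Op2 in [19:17],
             * Op1 in [16:14], CRn in [13:10], Rt in [9:5], CRm in [4:1] and
             * the direction (0 = write from the guest) in bit 0.
             */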
1744        params.is_aarch32 = false;
1745        params.is_32bit = false;
1746        params.Op0 = (esr >> 20) & 3;
1747        params.Op1 = (esr >> 14) & 0x7;
1748        params.CRn = (esr >> 10) & 0xf;
1749        params.CRm = (esr >> 1) & 0xf;
1750        params.Op2 = (esr >> 17) & 0x7;
1751        params.regval = vcpu_get_reg(vcpu, Rt);
1752        params.is_write = !(esr & 1);
1753
1754        ret = emulate_sys_reg(vcpu, &params);
1755
1756        if (!params.is_write)
1757                vcpu_set_reg(vcpu, Rt, params.regval);
1758        return ret;
1759}
1760
1761/******************************************************************************
1762 * Userspace API
1763 *****************************************************************************/
1764
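    /*
     * Userspace reaches the guest sys_regs through KVM_GET/SET_ONE_REG.
     * Three kinds of indices are handled below: "demux" registers
     * (currently only CCSIDR, selected by a CSSELR value), invariant
     * registers (read once from the host and then immutable), and the
     * ordinary sys_regs held in the vcpu context.
     */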
1765static bool index_to_params(u64 id, struct sys_reg_params *params)
1766{
1767        switch (id & KVM_REG_SIZE_MASK) {
1768        case KVM_REG_SIZE_U64:
1769                /* Any unused index bits mean it's not valid. */
1770                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
1771                              | KVM_REG_ARM_COPROC_MASK
1772                              | KVM_REG_ARM64_SYSREG_OP0_MASK
1773                              | KVM_REG_ARM64_SYSREG_OP1_MASK
1774                              | KVM_REG_ARM64_SYSREG_CRN_MASK
1775                              | KVM_REG_ARM64_SYSREG_CRM_MASK
1776                              | KVM_REG_ARM64_SYSREG_OP2_MASK))
1777                        return false;
1778                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
1779                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
1780                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
1781                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
1782                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
1783                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
1784                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
1785                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
1786                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
1787                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
1788                return true;
1789        default:
1790                return false;
1791        }
1792}
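    /*
     * Note that for KVM_REG_ARM64_SYSREG indices the Op0/Op1/CRn/CRm/Op2
     * shifts (14/11/7/3/0) mirror the packing used by reg_to_match_value(),
     * so the decoded params can be fed straight back into find_reg().
     */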
1793
1794/* Decode an index value, and find the sys_reg_desc entry. */
1795static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
1796                                                    u64 id)
1797{
1798        size_t num;
1799        const struct sys_reg_desc *table, *r;
1800        struct sys_reg_params params;
1801
1802        /* We only do sys_reg for now. */
1803        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
1804                return NULL;
1805
1806        if (!index_to_params(id, &params))
1807                return NULL;
1808
1809        table = get_target_table(vcpu->arch.target, true, &num);
1810        r = find_reg(&params, table, num);
1811        if (!r)
1812                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
1813
1814        /* Not saved in the sys_reg array? */
1815        if (r && !r->reg)
1816                r = NULL;
1817
1818        return r;
1819}
1820
1821/*
1822 * These are the invariant sys_reg registers: we let the guest see the
1823 * host versions of these, so they're part of the guest state.
1824 *
1825 * A future CPU may provide a mechanism to present different values to
1826 * the guest, or a future kvm may trap them.
1827 */
1828
1829#define FUNCTION_INVARIANT(reg)                                         \
1830        static void get_##reg(struct kvm_vcpu *v,                       \
1831                              const struct sys_reg_desc *r)             \
1832        {                                                               \
1833                u64 val;                                                \
1834                                                                        \
1835                asm volatile("mrs %0, " __stringify(reg) "\n"           \
1836                             : "=r" (val));                             \
1837                ((struct sys_reg_desc *)r)->val = val;                  \
1838        }
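    /*
     * Each get_<reg>() above reads the host's copy of the register and
     * stashes it in the descriptor's ->val. kvm_sys_reg_table_init() runs
     * them once at boot, and set_invariant_sys_reg() then rejects any
     * userspace write that doesn't match the recorded value.
     */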
1839
1840FUNCTION_INVARIANT(midr_el1)
1841FUNCTION_INVARIANT(ctr_el0)
1842FUNCTION_INVARIANT(revidr_el1)
1843FUNCTION_INVARIANT(id_pfr0_el1)
1844FUNCTION_INVARIANT(id_pfr1_el1)
1845FUNCTION_INVARIANT(id_dfr0_el1)
1846FUNCTION_INVARIANT(id_afr0_el1)
1847FUNCTION_INVARIANT(id_mmfr0_el1)
1848FUNCTION_INVARIANT(id_mmfr1_el1)
1849FUNCTION_INVARIANT(id_mmfr2_el1)
1850FUNCTION_INVARIANT(id_mmfr3_el1)
1851FUNCTION_INVARIANT(id_isar0_el1)
1852FUNCTION_INVARIANT(id_isar1_el1)
1853FUNCTION_INVARIANT(id_isar2_el1)
1854FUNCTION_INVARIANT(id_isar3_el1)
1855FUNCTION_INVARIANT(id_isar4_el1)
1856FUNCTION_INVARIANT(id_isar5_el1)
1857FUNCTION_INVARIANT(clidr_el1)
1858FUNCTION_INVARIANT(aidr_el1)
1859
1860/* ->val is filled in by kvm_sys_reg_table_init() */
1861static struct sys_reg_desc invariant_sys_regs[] = {
1862        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
1863          NULL, get_midr_el1 },
1864        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
1865          NULL, get_revidr_el1 },
1866        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
1867          NULL, get_id_pfr0_el1 },
1868        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
1869          NULL, get_id_pfr1_el1 },
1870        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
1871          NULL, get_id_dfr0_el1 },
1872        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
1873          NULL, get_id_afr0_el1 },
1874        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
1875          NULL, get_id_mmfr0_el1 },
1876        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
1877          NULL, get_id_mmfr1_el1 },
1878        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
1879          NULL, get_id_mmfr2_el1 },
1880        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
1881          NULL, get_id_mmfr3_el1 },
1882        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
1883          NULL, get_id_isar0_el1 },
1884        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
1885          NULL, get_id_isar1_el1 },
1886        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
1887          NULL, get_id_isar2_el1 },
1888        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
1889          NULL, get_id_isar3_el1 },
1890        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
1891          NULL, get_id_isar4_el1 },
1892        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
1893          NULL, get_id_isar5_el1 },
1894        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
1895          NULL, get_clidr_el1 },
1896        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
1897          NULL, get_aidr_el1 },
1898        { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
1899          NULL, get_ctr_el0 },
1900};
1901
1902static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
1903{
1904        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
1905                return -EFAULT;
1906        return 0;
1907}
1908
1909static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
1910{
1911        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
1912                return -EFAULT;
1913        return 0;
1914}
1915
1916static int get_invariant_sys_reg(u64 id, void __user *uaddr)
1917{
1918        struct sys_reg_params params;
1919        const struct sys_reg_desc *r;
1920
1921        if (!index_to_params(id, &params))
1922                return -ENOENT;
1923
1924        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
1925        if (!r)
1926                return -ENOENT;
1927
1928        return reg_to_user(uaddr, &r->val, id);
1929}
1930
1931static int set_invariant_sys_reg(u64 id, void __user *uaddr)
1932{
1933        struct sys_reg_params params;
1934        const struct sys_reg_desc *r;
1935        int err;
1936        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
1937
1938        if (!index_to_params(id, &params))
1939                return -ENOENT;
1940        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
1941        if (!r)
1942                return -ENOENT;
1943
1944        err = reg_from_user(&val, uaddr, id);
1945        if (err)
1946                return err;
1947
1948        /* This is what we mean by invariant: you can't change it. */
1949        if (r->val != val)
1950                return -EINVAL;
1951
1952        return 0;
1953}
1954
1955static bool is_valid_cache(u32 val)
1956{
1957        u32 level, ctype;
1958
1959        if (val >= CSSELR_MAX)
1960                return false;
1961
1962        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
1963        level = (val >> 1);
1964        ctype = (cache_levels >> (level * 3)) & 7;
1965
1966        switch (ctype) {
1967        case 0: /* No cache */
1968                return false;
1969        case 1: /* Instruction cache only */
1970                return (val & 1);
1971        case 2: /* Data cache only */
1972        case 4: /* Unified cache */
1973                return !(val & 1);
1974        case 3: /* Separate instruction and data caches */
1975                return true;
1976        default: /* Reserved: we can't know instruction or data. */
1977                return false;
1978        }
1979}
1980
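    /*
     * Demux registers: one CCSIDR value per valid CSSELR. The userspace
     * index is KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
     * KVM_REG_ARM_DEMUX_ID_CCSIDR | csselr (see write_demux_regids()),
     * where csselr bit 0 selects instruction vs data and bits [3:1] the
     * cache level.
     */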
1981static int demux_c15_get(u64 id, void __user *uaddr)
1982{
1983        u32 val;
1984        u32 __user *uval = uaddr;
1985
1986        /* Fail if we have unknown bits set. */
1987        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
1988                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
1989                return -ENOENT;
1990
1991        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
1992        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
1993                if (KVM_REG_SIZE(id) != 4)
1994                        return -ENOENT;
1995                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
1996                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
1997                if (!is_valid_cache(val))
1998                        return -ENOENT;
1999
2000                return put_user(get_ccsidr(val), uval);
2001        default:
2002                return -ENOENT;
2003        }
2004}
2005
2006static int demux_c15_set(u64 id, void __user *uaddr)
2007{
2008        u32 val, newval;
2009        u32 __user *uval = uaddr;
2010
2011        /* Fail if we have unknown bits set. */
2012        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2013                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2014                return -ENOENT;
2015
2016        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2017        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2018                if (KVM_REG_SIZE(id) != 4)
2019                        return -ENOENT;
2020                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2021                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2022                if (!is_valid_cache(val))
2023                        return -ENOENT;
2024
2025                if (get_user(newval, uval))
2026                        return -EFAULT;
2027
2028                /* This is also invariant: you can't change it. */
2029                if (newval != get_ccsidr(val))
2030                        return -EINVAL;
2031                return 0;
2032        default:
2033                return -ENOENT;
2034        }
2035}
2036
2037int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2038{
2039        const struct sys_reg_desc *r;
2040        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2041
2042        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2043                return demux_c15_get(reg->id, uaddr);
2044
2045        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2046                return -ENOENT;
2047
2048        r = index_to_sys_reg_desc(vcpu, reg->id);
2049        if (!r)
2050                return get_invariant_sys_reg(reg->id, uaddr);
2051
2052        if (r->get_user)
2053                return (r->get_user)(vcpu, r, reg, uaddr);
2054
2055        return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
2056}
2057
2058int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2059{
2060        const struct sys_reg_desc *r;
2061        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2062
2063        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2064                return demux_c15_set(reg->id, uaddr);
2065
2066        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2067                return -ENOENT;
2068
2069        r = index_to_sys_reg_desc(vcpu, reg->id);
2070        if (!r)
2071                return set_invariant_sys_reg(reg->id, uaddr);
2072
2073        if (r->set_user)
2074                return (r->set_user)(vcpu, r, reg, uaddr);
2075
2076        return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2077}
2078
2079static unsigned int num_demux_regs(void)
2080{
2081        unsigned int i, count = 0;
2082
2083        for (i = 0; i < CSSELR_MAX; i++)
2084                if (is_valid_cache(i))
2085                        count++;
2086
2087        return count;
2088}
2089
2090static int write_demux_regids(u64 __user *uindices)
2091{
2092        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2093        unsigned int i;
2094
2095        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2096        for (i = 0; i < CSSELR_MAX; i++) {
2097                if (!is_valid_cache(i))
2098                        continue;
2099                if (put_user(val | i, uindices))
2100                        return -EFAULT;
2101                uindices++;
2102        }
2103        return 0;
2104}
2105
2106static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2107{
2108        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2109                KVM_REG_ARM64_SYSREG |
2110                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2111                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2112                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2113                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2114                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2115}
2116
2117static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2118{
2119        if (!*uind)
2120                return true;
2121
2122        if (put_user(sys_reg_to_index(reg), *uind))
2123                return false;
2124
2125        (*uind)++;
2126        return true;
2127}
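    /*
     * A NULL uind pointer turns walk_sys_regs() into a pure count, which
     * is how kvm_arm_num_sys_reg_descs() sizes the index list.
     */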
2128
2129/* Assumed ordered tables, see kvm_sys_reg_table_init. */
2130static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2131{
2132        const struct sys_reg_desc *i1, *i2, *end1, *end2;
2133        unsigned int total = 0;
2134        size_t num;
2135
2136        /* We check for duplicates here, to allow arch-specific overrides. */
2137        i1 = get_target_table(vcpu->arch.target, true, &num);
2138        end1 = i1 + num;
2139        i2 = sys_reg_descs;
2140        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2141
2142        BUG_ON(i1 == end1 || i2 == end2);
2143
2144        /* Walk carefully, as both tables may refer to the same register. */
2145        while (i1 || i2) {
2146                int cmp = cmp_sys_reg(i1, i2);
2147                /* target-specific overrides generic entry. */
2148                if (cmp <= 0) {
2149                        /* Ignore registers we trap but don't save. */
2150                        if (i1->reg) {
2151                                if (!copy_reg_to_user(i1, &uind))
2152                                        return -EFAULT;
2153                                total++;
2154                        }
2155                } else {
2156                        /* Ignore registers we trap but don't save. */
2157                        if (i2->reg) {
2158                                if (!copy_reg_to_user(i2, &uind))
2159                                        return -EFAULT;
2160                                total++;
2161                        }
2162                }
2163
2164                if (cmp <= 0 && ++i1 == end1)
2165                        i1 = NULL;
2166                if (cmp >= 0 && ++i2 == end2)
2167                        i2 = NULL;
2168        }
2169        return total;
2170}
2171
2172unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2173{
2174        return ARRAY_SIZE(invariant_sys_regs)
2175                + num_demux_regs()
2176                + walk_sys_regs(vcpu, (u64 __user *)NULL);
2177}
2178
2179int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2180{
2181        unsigned int i;
2182        int err;
2183
2184        /* Give them all the invariant registers' indices first. */
2185        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2186                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2187                        return -EFAULT;
2188                uindices++;
2189        }
2190
2191        err = walk_sys_regs(vcpu, uindices);
2192        if (err < 0)
2193                return err;
2194        uindices += err;
2195
2196        return write_demux_regids(uindices);
2197}
2198
2199static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2200{
2201        unsigned int i;
2202
2203        for (i = 1; i < n; i++) {
2204                if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2205                        kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2206                        return 1;
2207                }
2208        }
2209
2210        return 0;
2211}
2212
2213void kvm_sys_reg_table_init(void)
2214{
2215        unsigned int i;
2216        struct sys_reg_desc clidr;
2217
2218        /* Make sure tables are unique and in order. */
2219        BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
2220        BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
2221        BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
2222        BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
2223        BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
2224        BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
2225
2226        /* We abuse the reset function to overwrite the table itself. */
2227        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2228                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2229
2230        /*
2231         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
2232         *
2233         *   If software reads the Cache Type fields from Ctype1
2234         *   upwards, once it has seen a value of 0b000, no caches
2235         *   exist at further-out levels of the hierarchy. So, for
2236         *   example, if Ctype3 is the first Cache Type field with a
2237         *   value of 0b000, the values of Ctype4 to Ctype7 must be
2238         *   ignored.
2239         */
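            /*
             * For example, Ctype1 = 0b011 (separate I/D), Ctype2 = 0b100
             * (unified) and Ctype3 = 0b000 leaves cache_levels = 0x23, with
             * everything above the first absent level masked off below.
             */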
2240        get_clidr_el1(NULL, &clidr); /* Ugly... */
2241        cache_levels = clidr.val;
2242        for (i = 0; i < 7; i++)
2243                if (((cache_levels >> (i*3)) & 7) == 0)
2244                        break;
2245        /* Clear all higher bits. */
2246        cache_levels &= (1 << (i*3))-1;
2247}
2248
2249/**
2250 * kvm_reset_sys_regs - sets system registers to reset value
2251 * @vcpu: The VCPU pointer
2252 *
2253 * This function finds the right table above and sets the registers on the
2254 * virtual CPU struct to their architecturally defined reset values.
2255 */
2256void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2257{
2258        size_t num;
2259        const struct sys_reg_desc *table;
2260
2261        /* Catch someone adding a register without putting in a reset entry. */
2262        memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
2263
2264        /* Generic chip reset first (so target could override). */
2265        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2266
2267        table = get_target_table(vcpu->arch.target, true, &num);
2268        reset_sys_reg_descs(vcpu, table, num);
2269
2270        for (num = 1; num < NR_SYS_REGS; num++)
2271                if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
2272                        panic("Didn't reset vcpu_sys_reg(%zi)", num);
2273}
2274