linux/arch/arm/kvm/coproc.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/bsearch.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

static bool write_to_read_only(struct kvm_vcpu *vcpu,
                               const struct coproc_params *params)
{
        WARN_ONCE(1, "CP15 write to read-only register\n");
        print_cp_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 const struct coproc_params *params)
{
        WARN_ONCE(1, "CP15 read from write-only register\n");
        print_cp_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/*
 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
 * of the cp15 registers can be viewed either as a pair of two u32
 * registers or as one u64 register. The current u64 register encoding
 * stores the least significant u32 word first, followed by the most
 * significant u32 word.
 */
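/*
 * Illustrative example (made-up value): vcpu_cp15_reg64_set() with
 * val = 0x1122334455667788ULL stores 0x55667788 in vcpu_cp15(vcpu, r->reg)
 * and 0x11223344 in vcpu_cp15(vcpu, r->reg + 1); vcpu_cp15_reg64_get()
 * reassembles the same u64 from the two words.
 */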
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
                                       const struct coproc_reg *r,
                                       u64 val)
{
        vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
        vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}

static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
                                      const struct coproc_reg *r)
{
        u64 val;

        val = vcpu_cp15(vcpu, r->reg + 1);
        val = val << 32;
        val = val | vcpu_cp15(vcpu, r->reg);
        return val;
}

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /*
         * We can get here if the host has been built without VFPv3
         * support, but the guest attempted a floating point operation.
         */
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        /*
         * Compute guest MPIDR. We build a virtual cluster out of the
         * vcpu_id, but we read the 'U' bit from the underlying
         * hardware directly.
         */
        vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
                                     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
                                     (vcpu->vcpu_id & 3));
}

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
                         const struct coproc_params *p,
                         const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
        return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p);
        return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
                          const struct coproc_params *p,
                          const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
        return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        u32 l2ctlr, ncores;

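        /*
         * Bits [25:24] of L2CTLR advertise the number of cores in the
         * cluster minus one (see the A7/A15 TRM entries referenced
         * above); recompute them below from the VM's VCPU topology
         * rather than leaking the host value.
         */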
        asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
        l2ctlr &= ~(3 << 24);
        ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
        /* How many cores in the current cluster and the next ones */
        ncores -= (vcpu->vcpu_id & ~3);
        /* Cap it to the maximum number of cores in a single cluster */
        ncores = min(ncores, 3U);
        l2ctlr |= (ncores & 3) << 24;

        vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        u32 actlr;

        /* ACTLR contains SMP bit: make sure you create all cpus first! */
        asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
        /* Make the SMP bit consistent with the guest configuration */
        if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
                actlr |= 1U << 6;
        else
                actlr &= ~(1U << 6);

        vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = 0;
        return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        kvm_set_way_flush(vcpu);
        return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.  If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
                   const struct coproc_params *p,
                   const struct coproc_reg *r)
{
        bool was_enabled = vcpu_has_cache_enabled(vcpu);

        BUG_ON(!p->is_write);

        vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
        if (p->is_64bit)
                vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

        kvm_toggle_cache(vcpu, was_enabled);
        return true;
}

static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        u64 reg;
        bool g1;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
        reg |= *vcpu_reg(vcpu, p->Rt1);

        /*
         * In a system where GICD_CTLR.DS=1, an ICC_SGI0R access generates
         * Group0 SGIs only, while ICC_SGI1R can generate either group,
         * depending on the SGI configuration. ICC_ASGI1R is effectively
         * equivalent to ICC_SGI0R, as there is no "alternative" secure
         * group.
         */
        switch (p->Op1) {
        default:                /* Keep GCC quiet */
        case 0:                 /* ICC_SGI1R */
                g1 = true;
                break;
        case 1:                 /* ICC_ASGI1R */
        case 2:                 /* ICC_SGI0R */
                g1 = false;
                break;
        }

        vgic_v3_dispatch_sgi(vcpu, reg, g1);

        return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;

        return true;
}

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
                             const struct coproc_params *p,
                             const struct coproc_reg *r)
{
        u64 now = kvm_phys_timer_read();
        u64 val;

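        /*
         * CNTP_TVAL is the timer value relative to now: a write
         * programs an absolute compare value (CVAL = now + TVAL),
         * while a read returns the remaining ticks (TVAL = CVAL - now).
         */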
        if (p->is_write) {
                val = *vcpu_reg(vcpu, p->Rt1);
                kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val + now);
        } else {
                val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
                *vcpu_reg(vcpu, p->Rt1) = val - now;
        }

        return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
                            const struct coproc_params *p,
                            const struct coproc_reg *r)
{
        u32 val;

        if (p->is_write) {
                val = *vcpu_reg(vcpu, p->Rt1);
                kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, val);
        } else {
                val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
                *vcpu_reg(vcpu, p->Rt1) = val;
        }

        return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
                             const struct coproc_params *p,
                             const struct coproc_reg *r)
{
        u64 val;

        if (p->is_write) {
                val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
                val |= *vcpu_reg(vcpu, p->Rt1);
                kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val);
        } else {
                val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
                *vcpu_reg(vcpu, p->Rt1) = val;
                *vcpu_reg(vcpu, p->Rt2) = val >> 32;
        }

        return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}

#define access_pmcr trap_raz_wi
#define access_pmcntenset trap_raz_wi
#define access_pmcntenclr trap_raz_wi
#define access_pmovsr trap_raz_wi
#define access_pmselr trap_raz_wi
#define access_pmceid0 trap_raz_wi
#define access_pmceid1 trap_raz_wi
#define access_pmccntr trap_raz_wi
#define access_pmxevtyper trap_raz_wi
#define access_pmxevcntr trap_raz_wi
#define access_pmuserenr trap_raz_wi
#define access_pmintenset trap_raz_wi
#define access_pmintenclr trap_raz_wi

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
        /* MPIDR: we use VMPIDR for guest access. */
        { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
                        NULL, reset_mpidr, c0_MPIDR },

        /* CSSELR: swapped by interrupt.S. */
        { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
                        NULL, reset_unknown, c0_CSSELR },

        /* ACTLR: trapped by HCR.TAC bit. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_actlr, reset_actlr, c1_ACTLR },

        /* CPACR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_val, c1_CPACR, 0x00000000 },

        /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
        { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
        { CRn( 2), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c2_TTBR0 },
        { CRn( 2), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c2_TTBR1 },
        { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
                        access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
        { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },

        /* DACR: swapped by interrupt.S. */
        { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c3_DACR },

        /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
        { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c5_DFSR },
        { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c5_IFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c5_ADFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c5_AIFSR },

        /* DFAR/IFAR: swapped by interrupt.S. */
        { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c6_DFAR },
        { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
                        access_vm_reg, reset_unknown, c6_IFAR },

        /* PAR swapped by interrupt.S */
        { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

        /*
         * DC{C,I,CI}SW operations:
         */
        { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
        /*
         * L2CTLR access (guest wants to know #CPUs).
         */
        { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
                        access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
        { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

        /*
         * Dummy performance monitor implementation.
         */
        { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
        { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
        { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
        { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
        { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
        { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
        { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

        /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
        { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c10_PRRR},
        { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c10_NMRR},

        /* AMAIR0/AMAIR1: swapped by interrupt.S. */
        { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c10_AMAIR0},
        { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c10_AMAIR1},

        /* ICC_SGI1R */
        { CRm64(12), Op1( 0), is64, access_gic_sgi},

        /* VBAR: swapped by interrupt.S. */
        { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c12_VBAR, 0x00000000 },

        /* ICC_ASGI1R */
        { CRm64(12), Op1( 1), is64, access_gic_sgi},
        /* ICC_SGI0R */
        { CRm64(12), Op1( 2), is64, access_gic_sgi},
        /* ICC_SRE */
        { CRn(12), CRm(12), Op1( 0), Op2( 5), is32, access_gic_sre },

        /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
        { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_val, c13_CID, 0x00000000 },
        { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c13_TID_URW },
        { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
                        NULL, reset_unknown, c13_TID_URO },
        { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
                        NULL, reset_unknown, c13_TID_PRIV },

        /* CNTP */
        { CRm64(14), Op1( 2), is64, access_cntp_cval},

        /* CNTKCTL: swapped by interrupt.S. */
        { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c14_CNTKCTL, 0x00000000 },

        /* CNTP */
        { CRn(14), CRm( 2), Op1( 0), Op2( 0), is32, access_cntp_tval },
        { CRn(14), CRm( 2), Op1( 0), Op2( 1), is32, access_cntp_ctl },

        /* The Configuration Base Address Register. */
        { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};

static int check_reg_table(const struct coproc_reg *table, unsigned int n)
{
        unsigned int i;

        for (i = 1; i < n; i++) {
                if (cmp_reg(&table[i-1], &table[i]) >= 0) {
                        kvm_err("reg table %p out of order (%d)\n", table, i - 1);
                        return 1;
                }
        }

        return 0;
}

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
        BUG_ON(check_reg_table(table->table, table->num));
        target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
        struct kvm_coproc_target_table *table;

        table = target_tables[target];
        *num = table->num;
        return table->table;
}

#define reg_to_match_value(x)                                           \
        ({                                                              \
                unsigned long val;                                      \
                val  = (x)->CRn << 11;                                  \
                val |= (x)->CRm << 7;                                   \
                val |= (x)->Op1 << 4;                                   \
                val |= (x)->Op2 << 1;                                   \
                val |= !(x)->is_64bit;                                  \
                val;                                                    \
        })
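
/*
 * Note that the !is_64bit term occupies the bottom bit of the match
 * value, so a 64-bit register sorts just before a 32-bit register with
 * the same CRn/CRm/Op1. This is why the sorted tables above must keep
 * 64-bit entries ahead of their 32-bit counterparts for bsearch() to
 * find them.
 */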

static int match_reg(const void *key, const void *elt)
{
        const unsigned long pval = (unsigned long)key;
        const struct coproc_reg *r = elt;

        return pval - reg_to_match_value(r);
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
                                         const struct coproc_reg table[],
                                         unsigned int num)
{
        unsigned long pval = reg_to_match_value(params);

        return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
                        const struct coproc_params *params)
{
        size_t num;
        const struct coproc_reg *table, *r;

        trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
                                   params->CRm, params->Op2, params->is_write);

        table = get_target_table(vcpu->arch.target, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

        if (likely(r)) {
                /* If we don't have an accessor, we should never get here! */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                }
                /* If the access function failed, it has already complained. */
        } else {
                /* Unknown register: complain and inject an undefined exception. */
                kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n",
                        *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
                print_cp_instr(params);
                kvm_inject_undefined(vcpu);
        }

        return 1;
}

static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
{
        struct coproc_params params;

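        /*
         * For a 64-bit (MCRR/MRRC) access, the instruction's CRm field
         * (HSR bits [4:1]) is stored in params.CRn, matching the 64-bit
         * register convention used by cp15_regs[]; the instruction has
         * no CRn or Op2 field, so params.CRm and params.Op2 are zeroed.
         */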
        params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = true;

        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
        params.Op2 = 0;
        params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.CRm = 0;

        return params;
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_64bit_hsr(vcpu);

        return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_64bit_hsr(vcpu);

        /* raz_wi cp14 */
        trap_raz_wi(vcpu, &params, NULL);

        /* handled */
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 1;
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
                              const struct coproc_reg *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}

static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
{
        struct coproc_params params;

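        /*
         * For a 32-bit (MCR/MRC) access, the HSR carries the individual
         * Op1/Op2/CRn/CRm/Rt instruction fields; bit 0 is the direction
         * bit, clear for a write (MCR) and set for a read (MRC).
         */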
        params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = false;

        params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
        params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
        params.Rt2 = 0;

        return params;
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_32bit_hsr(vcpu);
        return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_32bit_hsr(vcpu);

        /* raz_wi cp14 */
        trap_raz_wi(vcpu, &params, NULL);

        /* handled */
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct coproc_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
                /* Any unused index bits mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM_32_CRN_MASK
                           | KVM_REG_ARM_CRM_MASK
                           | KVM_REG_ARM_OPC1_MASK
                           | KVM_REG_ARM_32_OPC2_MASK))
                        return false;

                params->is_64bit = false;
                params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
                               >> KVM_REG_ARM_32_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
                               >> KVM_REG_ARM_32_OPC2_SHIFT);
                return true;
        case KVM_REG_SIZE_U64:
                /* Any unused index bits mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                              | KVM_REG_ARM_COPROC_MASK
                              | KVM_REG_ARM_CRM_MASK
                              | KVM_REG_ARM_OPC1_MASK))
                        return false;
                params->is_64bit = true;
                /* CRm to CRn: see cp15_to_index for details */
                params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = 0;
                params->CRm = 0;
                return true;
        default:
                return false;
        }
}

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
                                                    u64 id)
{
        size_t num;
        const struct coproc_reg *table, *r;
        struct coproc_params params;

        /* We only do cp15 for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

        /* Not saved in the cp15 array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)                        \
        static void get_##name(struct kvm_vcpu *v,                      \
                               const struct coproc_reg *r)              \
        {                                                               \
                u32 val;                                                \
                                                                        \
                asm volatile("mrc p15, " __stringify(op1)               \
                             ", %0, c" __stringify(crn)                 \
                             ", c" __stringify(crm)                     \
                             ", " __stringify(op2) "\n" : "=r" (val));  \
                ((struct coproc_reg *)r)->val = val;                    \
        }

FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
        { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

        { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
        { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

        { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

        { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};

/*
 * Reads a register value from a userspace address to a kernel
 * variable. Make sure that the register size matches the size of the
 * kernel variable.
 */
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

/*
 * Writes a register value to a userspace address from a kernel variable.
 * Make sure that the register size matches the size of the kernel variable.
 */
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;
        int ret;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        ret = -ENOENT;
        if (KVM_REG_SIZE(id) == 4) {
                u32 val = r->val;

                ret = reg_to_user(uaddr, &val, id);
        } else if (KVM_REG_SIZE(id) == 8) {
                ret = reg_to_user(uaddr, &r->val, id);
        }
        return ret;
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;
        int err;
        u64 val;

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        err = -ENOENT;
        if (KVM_REG_SIZE(id) == 4) {
                u32 val32;

                err = reg_from_user(&val32, uaddr, id);
                if (!err)
                        val = val32;
        } else if (KVM_REG_SIZE(id) == 8) {
                err = reg_from_user(&val, uaddr, id);
        }
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}

static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        /* Put value into CSSELR */
        asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
        isb();
        /* Read result out of CCSIDR */
        asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
        local_irq_enable();

        return ccsidr;
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
                                   KVM_REG_ARM_VFP_FPSCR,
                                   KVM_REG_ARM_VFP_FPINST,
                                   KVM_REG_ARM_VFP_FPINST2,
                                   KVM_REG_ARM_VFP_MVFR0,
                                   KVM_REG_ARM_VFP_MVFR1,
                                   KVM_REG_ARM_VFP_FPSID };

static unsigned int num_fp_regs(void)
{
        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
                return 32;
        else
                return 16;
}

static unsigned int num_vfp_regs(void)
{
        /* Normal FP regs + control regs. */
        return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

static int copy_vfp_regids(u64 __user *uindices)
{
        unsigned int i;
        const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
        const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

        for (i = 0; i < num_fp_regs(); i++) {
                if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
                             uindices))
                        return -EFAULT;
                uindices++;
        }

        for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
                if (put_user(u32reg | vfp_sysregs[i], uindices))
                        return -EFAULT;
                uindices++;
        }

        return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
                                   id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
        case KVM_REG_ARM_VFP_MVFR0:
                val = fmrx(MVFR0);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_MVFR1:
                val = fmrx(MVFR1);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_FPSID:
                val = fmrx(FPSID);
                return reg_to_user(uaddr, &val, id);
        default:
                return -ENOENT;
        }
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
                                     uaddr, id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
        /* These are invariant. */
        case KVM_REG_ARM_VFP_MVFR0:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR0))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_MVFR1:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR1))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_FPSID:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(FPSID))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
        return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
        return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;
        int ret;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_get_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return get_invariant_cp15(reg->id, uaddr);

        ret = -ENOENT;
        if (KVM_REG_SIZE(reg->id) == 8) {
                u64 val;

                val = vcpu_cp15_reg64_get(vcpu, r);
                ret = reg_to_user(uaddr, &val, reg->id);
        } else if (KVM_REG_SIZE(reg->id) == 4) {
                ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
        }

        return ret;
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;
        int ret;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_set_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return set_invariant_cp15(reg->id, uaddr);

        ret = -ENOENT;
        if (KVM_REG_SIZE(reg->id) == 8) {
                u64 val;

                ret = reg_from_user(&val, uaddr, reg->id);
                if (!ret)
                        vcpu_cp15_reg64_set(vcpu, r, val);
        } else if (KVM_REG_SIZE(reg->id) == 4) {
                ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
        }

        return ret;
}

static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
        u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);

        if (reg->is_64bit) {
                val |= KVM_REG_SIZE_U64;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                /*
                 * CRn always denotes the primary coproc. reg. nr. for the
                 * in-kernel representation, but the user space API uses the
                 * CRm for the encoding, because it is modelled after the
                 * MRRC/MCRR instructions: see the ARM ARM rev. c page
                 * B3-1445
                 */
                val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
        } else {
                val |= KVM_REG_SIZE_U32;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
                val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
                val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
        }
        return val;
}

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(cp15_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}

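/*
 * copy_reg_to_user() succeeds without writing anything when uind is
 * NULL, so walk_cp15() below can also be used purely as a counter
 * (see kvm_arm_num_coproc_regs()).
 */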
/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct coproc_reg *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, &num);
        end1 = i1 + num;
        i2 = cp15_regs;
        end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_reg(i1, i2);

                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}

unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_cp15)
                + num_demux_regs()
                + num_vfp_regs()
                + walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* First give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
                if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_cp15(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        err = copy_vfp_regids(uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
        unsigned int i;

        /* Make sure tables are unique and in order. */
        BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
        BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
                invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

        /*
         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
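        /*
         * Worked example with a hypothetical CLIDR: Ctype1 = 0b011
         * (separate I and D caches) and Ctype2 = 0b100 (unified) give
         * cache_levels = 0b100011. Ctype3 reads as 0b000, so the loop
         * stops at i = 2 and the mask (1 << 6) - 1 keeps exactly the
         * two valid levels.
         */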
}

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct coproc_reg *table;

        /* Catch someone adding a register without putting in reset entry. */
        memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));

        /* Generic chip reset first (so target could override). */
        reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

        table = get_target_table(vcpu->arch.target, &num);
        reset_coproc_regs(vcpu, table, num);

        for (num = 1; num < NR_CP15_REGS; num++)
                WARN(vcpu_cp15(vcpu, num) == 0x42424242,
                     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
}
1456