linux/arch/arm/kvm/coproc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bsearch.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

static bool write_to_read_only(struct kvm_vcpu *vcpu,
                               const struct coproc_params *params)
{
        WARN_ONCE(1, "CP15 write to read-only register\n");
        print_cp_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 const struct coproc_params *params)
{
        WARN_ONCE(1, "CP15 read from write-only register\n");
        print_cp_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/*
 * kvm_vcpu_arch.cp15 holds the cp15 registers as an array of u32, but
 * some cp15 registers can be viewed either as a pair of u32 registers
 * or as a single u64 register. The u64 encoding stores the least
 * significant u32 word first, followed by the most significant word.
 */
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
                                       const struct coproc_reg *r,
                                       u64 val)
{
        vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
        vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}

static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
                                      const struct coproc_reg *r)
{
        u64 val;

        val = vcpu_cp15(vcpu, r->reg + 1);
        val = val << 32;
        val = val | vcpu_cp15(vcpu, r->reg);
        return val;
}
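
/*
 * Example: vcpu_cp15_reg64_set() with val == 0x1122334455667788 places
 * 0x55667788 in vcpu_cp15(vcpu, r->reg) and 0x11223344 in
 * vcpu_cp15(vcpu, r->reg + 1); vcpu_cp15_reg64_get() reassembles the
 * same u64 from those two words.
 */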

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /*
         * We can get here if the host was built without VFPv3 support
         * but the guest attempted a floating point operation.
         */
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        /*
         * Compute guest MPIDR. We build a virtual cluster out of the
         * vcpu_id, but we read the 'U' bit from the underlying
         * hardware directly.
         */
        vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
                                     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
                                     (vcpu->vcpu_id & 3));
}
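
/*
 * Example (assuming MPIDR_LEVEL_BITS is 8, as defined for 32-bit ARM):
 * vcpu_id 5 yields Aff1 = 1 and Aff0 = 1, i.e. the second CPU of the
 * second virtual cluster, with the 'U' bit taken from the host.
 */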

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
                         const struct coproc_params *p,
                         const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
        return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p);
        return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
                          const struct coproc_params *p,
                          const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
        return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        u32 l2ctlr, ncores;

        asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
        l2ctlr &= ~(3 << 24);
        ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
        /* How many cores in the current cluster and the next ones */
        ncores -= (vcpu->vcpu_id & ~3);
        /* Cap it to the maximum number of cores in a single cluster */
        ncores = min(ncores, 3U);
        l2ctlr |= (ncores & 3) << 24;

        vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
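
/*
 * Worked example: with 6 online VCPUs, a VCPU with vcpu_id 4 computes
 * ncores = 5 - 4 = 1, so bits [25:24] of L2CTLR read back as 1. The
 * TRM documents that field as the number of cores minus one, so guests
 * running in the second virtual cluster see two cores there.
 */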

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        u32 actlr;

        /* ACTLR contains SMP bit: make sure you create all cpus first! */
        asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
        /* Make the SMP bit consistent with the guest configuration */
        if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
                actlr |= 1U << 6;
        else
                actlr &= ~(1U << 6);

        vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = 0;
        return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        kvm_set_way_flush(vcpu);
        return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.  If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
                   const struct coproc_params *p,
                   const struct coproc_reg *r)
{
        bool was_enabled = vcpu_has_cache_enabled(vcpu);

        BUG_ON(!p->is_write);

        vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
        if (p->is_64bit)
                vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

        kvm_toggle_cache(vcpu, was_enabled);
        return true;
}

static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        u64 reg;
        bool g1;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
        reg |= *vcpu_reg(vcpu, p->Rt1);

        /*
         * In a system where GICD_CTLR.DS=1, an ICC_SGI0R access generates
         * Group0 SGIs only, while ICC_SGI1R can generate either group,
         * depending on the SGI configuration. ICC_ASGI1R is effectively
         * equivalent to ICC_SGI0R, as there is no "alternative" secure
         * group.
         */
        switch (p->Op1) {
        default:                /* Keep GCC quiet */
        case 0:                 /* ICC_SGI1R */
                g1 = true;
                break;
        case 1:                 /* ICC_ASGI1R */
        case 2:                 /* ICC_SGI0R */
                g1 = false;
                break;
        }

        vgic_v3_dispatch_sgi(vcpu, reg, g1);

        return true;
}
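
/*
 * Note: the MCRR Rt1/Rt2 pair above is packed with Rt2 as the upper
 * word, matching the 64-bit ICC_SGI1R layout, so the assembled value
 * can be handed to vgic_v3_dispatch_sgi() unmodified.
 */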

static bool access_gic_sre(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;

        return true;
}

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
                             const struct coproc_params *p,
                             const struct coproc_reg *r)
{
        u32 val;

        if (p->is_write) {
                val = *vcpu_reg(vcpu, p->Rt1);
                kvm_arm_timer_write_sysreg(vcpu,
                                           TIMER_PTIMER, TIMER_REG_TVAL, val);
        } else {
                val = kvm_arm_timer_read_sysreg(vcpu,
                                                TIMER_PTIMER, TIMER_REG_TVAL);
                *vcpu_reg(vcpu, p->Rt1) = val;
        }

        return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
                            const struct coproc_params *p,
                            const struct coproc_reg *r)
{
        u32 val;

        if (p->is_write) {
                val = *vcpu_reg(vcpu, p->Rt1);
                kvm_arm_timer_write_sysreg(vcpu,
                                           TIMER_PTIMER, TIMER_REG_CTL, val);
        } else {
                val = kvm_arm_timer_read_sysreg(vcpu,
                                                TIMER_PTIMER, TIMER_REG_CTL);
                *vcpu_reg(vcpu, p->Rt1) = val;
        }

        return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
                             const struct coproc_params *p,
                             const struct coproc_reg *r)
{
        u64 val;

        if (p->is_write) {
                val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
                val |= *vcpu_reg(vcpu, p->Rt1);
                kvm_arm_timer_write_sysreg(vcpu,
                                           TIMER_PTIMER, TIMER_REG_CVAL, val);
        } else {
                val = kvm_arm_timer_read_sysreg(vcpu,
                                                TIMER_PTIMER, TIMER_REG_CVAL);
                *vcpu_reg(vcpu, p->Rt1) = val;
                *vcpu_reg(vcpu, p->Rt2) = val >> 32;
        }

        return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}

#define access_pmcr trap_raz_wi
#define access_pmcntenset trap_raz_wi
#define access_pmcntenclr trap_raz_wi
#define access_pmovsr trap_raz_wi
#define access_pmselr trap_raz_wi
#define access_pmceid0 trap_raz_wi
#define access_pmceid1 trap_raz_wi
#define access_pmccntr trap_raz_wi
#define access_pmxevtyper trap_raz_wi
#define access_pmxevcntr trap_raz_wi
#define access_pmuserenr trap_raz_wi
#define access_pmintenset trap_raz_wi
#define access_pmintenclr trap_raz_wi

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
        /* MPIDR: we use VMPIDR for guest access. */
        { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
                        NULL, reset_mpidr, c0_MPIDR },

        /* CSSELR: swapped by interrupt.S. */
        { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
                        NULL, reset_unknown, c0_CSSELR },

        /* ACTLR: trapped by HCR.TAC bit. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_actlr, reset_actlr, c1_ACTLR },

        /* CPACR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_val, c1_CPACR, 0x00000000 },

        /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
        { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
        { CRn( 2), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c2_TTBR0 },
        { CRn( 2), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c2_TTBR1 },
        { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
                        access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
        { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },

        /* DACR: swapped by interrupt.S. */
        { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c3_DACR },

        /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
        { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c5_DFSR },
        { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c5_IFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c5_ADFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c5_AIFSR },

        /* DFAR/IFAR: swapped by interrupt.S. */
        { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c6_DFAR },
        { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
                        access_vm_reg, reset_unknown, c6_IFAR },

        /* PAR swapped by interrupt.S */
        { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

        /*
         * DC{C,I,CI}SW operations:
         */
        { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
        /*
         * L2CTLR access (guest wants to know #CPUs).
         */
        { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
                        access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
        { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

        /*
         * Dummy performance monitor implementation.
         */
        { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
        { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
        { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
        { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
        { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
        { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
        { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

        /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
        { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c10_PRRR},
        { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c10_NMRR},

        /* AMAIR0/AMAIR1: swapped by interrupt.S. */
        { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c10_AMAIR0},
        { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c10_AMAIR1},

        /* ICC_SGI1R */
        { CRm64(12), Op1( 0), is64, access_gic_sgi},

        /* VBAR: swapped by interrupt.S. */
        { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c12_VBAR, 0x00000000 },

        /* ICC_ASGI1R */
        { CRm64(12), Op1( 1), is64, access_gic_sgi},
        /* ICC_SGI0R */
        { CRm64(12), Op1( 2), is64, access_gic_sgi},
        /* ICC_SRE */
        { CRn(12), CRm(12), Op1( 0), Op2( 5), is32, access_gic_sre },

        /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
        { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_val, c13_CID, 0x00000000 },
        { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c13_TID_URW },
        { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
                        NULL, reset_unknown, c13_TID_URO },
        { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
                        NULL, reset_unknown, c13_TID_PRIV },

        /* CNTP */
        { CRm64(14), Op1( 2), is64, access_cntp_cval},

        /* CNTKCTL: swapped by interrupt.S. */
        { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c14_CNTKCTL, 0x00000000 },

        /* CNTP */
        { CRn(14), CRm( 2), Op1( 0), Op2( 0), is32, access_cntp_tval },
        { CRn(14), CRm( 2), Op1( 0), Op2( 1), is32, access_cntp_ctl },

        /* The Configuration Base Address Register. */
        { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};

static int check_reg_table(const struct coproc_reg *table, unsigned int n)
{
        unsigned int i;

        for (i = 1; i < n; i++) {
                if (cmp_reg(&table[i-1], &table[i]) >= 0) {
                        kvm_err("reg table %p out of order (%d)\n", table, i - 1);
                        return 1;
                }
        }

        return 0;
}

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
        BUG_ON(check_reg_table(table->table, table->num));
        target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
        struct kvm_coproc_target_table *table;

        table = target_tables[target];
        *num = table->num;
        return table->table;
}

#define reg_to_match_value(x)                                           \
        ({                                                              \
                unsigned long val;                                      \
                val  = (x)->CRn << 11;                                  \
                val |= (x)->CRm << 7;                                   \
                val |= (x)->Op1 << 4;                                   \
                val |= (x)->Op2 << 1;                                   \
                val |= !(x)->is_64bit;                                  \
                val;                                                    \
        })

static int match_reg(const void *key, const void *elt)
{
        const unsigned long pval = (unsigned long)key;
        const struct coproc_reg *r = elt;

        return pval - reg_to_match_value(r);
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
                                         const struct coproc_reg table[],
                                         unsigned int num)
{
        unsigned long pval = reg_to_match_value(params);

        return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
                        const struct coproc_params *params)
{
        size_t num;
        const struct coproc_reg *table, *r;

        trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
                                   params->CRm, params->Op2, params->is_write);

        table = get_target_table(vcpu->arch.target, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

        if (likely(r)) {
                /* If we don't have an accessor, we should never get here! */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                }
        } else {
                /* If access function fails, it should complain. */
                kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n",
                        *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
                print_cp_instr(params);
                kvm_inject_undefined(vcpu);
        }

        return 1;
}

static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
{
        struct coproc_params params;

        params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = true;

        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
        params.Op2 = 0;
        params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.CRm = 0;

        return params;
}
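
/*
 * Decoding note: for MCRR/MRRC traps the HSR ISS carries Op1, Rt2, Rt
 * and CRm (extracted above with shifts 16, 10, 5 and 1); bit 0 clear
 * means a write. The architectural CRm value lands in params.CRn
 * because the in-kernel tables index 64-bit registers by CRn (see the
 * comment above cp15_regs[] and cp15_to_index()).
 */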

/**
 * kvm_handle_cp15_64 -- handles an mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_64bit_hsr(vcpu);

        return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_64 -- handles an mrrc/mcrr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_64bit_hsr(vcpu);

        /* raz_wi cp14 */
        trap_raz_wi(vcpu, &params, NULL);

        /* handled */
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 1;
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
                              const struct coproc_reg *table, size_t num,
                              unsigned long *bmap)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset) {
                        int reg = table[i].reg;

                        table[i].reset(vcpu, &table[i]);
                        if (reg > 0 && reg < NR_CP15_REGS) {
                                set_bit(reg, bmap);
                                if (table[i].is_64bit)
                                        set_bit(reg + 1, bmap);
                        }
                }
}

static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
{
        struct coproc_params params;

        params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = false;

        params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
        params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
        params.Rt2 = 0;

        return params;
}
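
/*
 * For MCR/MRC traps the ISS carries the full Op2/Op1/CRn/Rt/CRm
 * encoding (shifts 17, 14, 10, 5 and 1 above); Rt2 has no meaning for
 * 32-bit accesses and is cleared.
 */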

/**
 * kvm_handle_cp15_32 -- handles an mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_32bit_hsr(vcpu);
        return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_32 -- handles an mrc/mcr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_32bit_hsr(vcpu);

        /* raz_wi cp14 */
        trap_raz_wi(vcpu, &params, NULL);

        /* handled */
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct coproc_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
                /* Any unused index bits means it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM_32_CRN_MASK
                           | KVM_REG_ARM_CRM_MASK
                           | KVM_REG_ARM_OPC1_MASK
                           | KVM_REG_ARM_32_OPC2_MASK))
                        return false;

                params->is_64bit = false;
                params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
                               >> KVM_REG_ARM_32_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
                               >> KVM_REG_ARM_32_OPC2_SHIFT);
                return true;
        case KVM_REG_SIZE_U64:
                /* Any unused index bits means it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                              | KVM_REG_ARM_COPROC_MASK
                              | KVM_REG_ARM_CRM_MASK
                              | KVM_REG_ARM_OPC1_MASK))
                        return false;
                params->is_64bit = true;
                /* CRm to CRn: see cp15_to_index for details */
                params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = 0;
                params->CRm = 0;
                return true;
        default:
                return false;
        }
}

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
                                                    u64 id)
{
        size_t num;
        const struct coproc_reg *table, *r;
        struct coproc_params params;

        /* We only do cp15 for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

        /* Not saved in the cp15 array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)                        \
        static void get_##name(struct kvm_vcpu *v,                      \
                               const struct coproc_reg *r)              \
        {                                                               \
                u32 val;                                                \
                                                                        \
                asm volatile("mrc p15, " __stringify(op1)               \
                             ", %0, c" __stringify(crn)                 \
                             ", c" __stringify(crm)                     \
                             ", " __stringify(op2) "\n" : "=r" (val));  \
                ((struct coproc_reg *)r)->val = val;                    \
        }
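
/*
 * Example expansion: FUNCTION_FOR32(0, 0, 0, 0, MIDR) defines
 * get_MIDR(), which executes "mrc p15, 0, %0, c0, c0, 0" and caches
 * the host value in the (const-cast) table entry's ->val field.
 */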

FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
        { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

        { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
        { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

        { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

        { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};

/*
 * Reads a register value from a userspace address into a kernel
 * variable. The caller must ensure that KVM_REG_SIZE(id) matches the
 * size of the buffer that @val points to.
 */
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

/*
 * Writes a register value to a userspace address from a kernel variable.
 * The caller must ensure that KVM_REG_SIZE(id) matches the size of the
 * buffer that @val points to.
 */
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;
        int ret;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        ret = -ENOENT;
        if (KVM_REG_SIZE(id) == 4) {
                u32 val = r->val;

                ret = reg_to_user(uaddr, &val, id);
        } else if (KVM_REG_SIZE(id) == 8) {
                ret = reg_to_user(uaddr, &r->val, id);
        }
        return ret;
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;
        int err;
        u64 val;

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        err = -ENOENT;
        if (KVM_REG_SIZE(id) == 4) {
                u32 val32;

                err = reg_from_user(&val32, uaddr, id);
                if (!err)
                        val = val32;
        } else if (KVM_REG_SIZE(id) == 8) {
                err = reg_from_user(&val, uaddr, id);
        }
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}

static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}
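
/*
 * Example: CSSELR value 1 selects the level 1 instruction cache
 * (level 0, InD bit set), so it is valid for ctype 1 (instruction
 * only) or 3 (separate caches), while a unified level (ctype 4) only
 * accepts the data/unified view with bit 0 clear.
 */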

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        /* Put value into CSSELR */
        asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
        isb();
        /* Read result out of CCSIDR */
        asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
        local_irq_enable();

        return ccsidr;
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
                                   KVM_REG_ARM_VFP_FPSCR,
                                   KVM_REG_ARM_VFP_FPINST,
                                   KVM_REG_ARM_VFP_FPINST2,
                                   KVM_REG_ARM_VFP_MVFR0,
                                   KVM_REG_ARM_VFP_MVFR1,
                                   KVM_REG_ARM_VFP_FPSID };

static unsigned int num_fp_regs(void)
{
        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
                return 32;
        else
                return 16;
}

static unsigned int num_vfp_regs(void)
{
        /* Normal FP regs + control regs. */
        return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

static int copy_vfp_regids(u64 __user *uindices)
{
        unsigned int i;
        const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
        const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

        for (i = 0; i < num_fp_regs(); i++) {
                if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
                             uindices))
                        return -EFAULT;
                uindices++;
        }

        for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
                if (put_user(u32reg | vfp_sysregs[i], uindices))
                        return -EFAULT;
                uindices++;
        }

        return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
                                   id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
        case KVM_REG_ARM_VFP_MVFR0:
                val = fmrx(MVFR0);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_MVFR1:
                val = fmrx(MVFR1);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_FPSID:
                val = fmrx(FPSID);
                return reg_to_user(uaddr, &val, id);
        default:
                return -ENOENT;
        }
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
                                     uaddr, id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
        /* These are invariant. */
        case KVM_REG_ARM_VFP_MVFR0:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR0))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_MVFR1:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR1))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_FPSID:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(FPSID))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
        return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
        return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;
        int ret;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_get_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return get_invariant_cp15(reg->id, uaddr);

        ret = -ENOENT;
        if (KVM_REG_SIZE(reg->id) == 8) {
                u64 val;

                val = vcpu_cp15_reg64_get(vcpu, r);
                ret = reg_to_user(uaddr, &val, reg->id);
        } else if (KVM_REG_SIZE(reg->id) == 4) {
                ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
        }

        return ret;
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;
        int ret;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_set_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return set_invariant_cp15(reg->id, uaddr);

        ret = -ENOENT;
        if (KVM_REG_SIZE(reg->id) == 8) {
                u64 val;

                ret = reg_from_user(&val, uaddr, reg->id);
                if (!ret)
                        vcpu_cp15_reg64_set(vcpu, r, val);
        } else if (KVM_REG_SIZE(reg->id) == 4) {
                ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
        }

        return ret;
}

static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
        u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
        if (reg->is_64bit) {
                val |= KVM_REG_SIZE_U64;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                /*
                 * CRn always denotes the primary coproc. reg. nr. for the
                 * in-kernel representation, but the user space API uses the
                 * CRm for the encoding, because it is modelled after the
                 * MRRC/MCRR instructions: see the ARM ARM rev. c page
                 * B3-1445
                 */
                val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
        } else {
                val |= KVM_REG_SIZE_U32;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
                val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
                val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
        }
        return val;
}
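
/*
 * Round-trip example: a 64-bit register indexed here has its CRn
 * placed in the index's CRM field (alongside Op1 and KVM_REG_SIZE_U64);
 * index_to_params() reverses this by copying the CRM field back into
 * params->CRn, so both sides agree on the CRm64() table encoding.
 */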

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(cp15_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct coproc_reg *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, &num);
        end1 = i1 + num;
        i2 = cp15_regs;
        end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}
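
/*
 * Note: walk_cp15() is a merge of two sorted tables. When an entry
 * appears in both (cmp == 0), both iterators advance but only the
 * target-specific entry is emitted, which is how a target table
 * shadows a generic cp15_regs[] entry.
 */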

unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_cp15)
                + num_demux_regs()
                + num_vfp_regs()
                + walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* Then give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
                if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_cp15(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        err = copy_vfp_regids(uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
        unsigned int i;

        /* Make sure tables are unique and in order. */
        BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
        BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
                invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

        /*
         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}
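
/*
 * Example: if CLIDR reports Ctype1 = data-only, Ctype2 = unified and
 * Ctype3 = 0, the loop above stops at i = 2 and the mask keeps only
 * the low 6 bits of cache_levels, so is_valid_cache() rejects any
 * CSSELR selector past the second level.
 */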

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct coproc_reg *table;
        DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };

        /* Generic chip reset first (so target could override). */
        reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);

        table = get_target_table(vcpu->arch.target, &num);
        reset_coproc_regs(vcpu, table, num, bmap);

        for (num = 1; num < NR_CP15_REGS; num++)
                WARN(!test_bit(num, bmap),
                     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
}