/* linux/arch/arm/kvm/coproc.c */
   1/*
   2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   3 * Authors: Rusty Russell <rusty@rustcorp.com.au>
   4 *          Christoffer Dall <c.dall@virtualopensystems.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License, version 2, as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  18 */
  19
  20#include <linux/bsearch.h>
  21#include <linux/mm.h>
  22#include <linux/kvm_host.h>
  23#include <linux/uaccess.h>
  24#include <asm/kvm_arm.h>
  25#include <asm/kvm_host.h>
  26#include <asm/kvm_emulate.h>
  27#include <asm/kvm_coproc.h>
  28#include <asm/kvm_mmu.h>
  29#include <asm/cacheflush.h>
  30#include <asm/cputype.h>
  31#include <trace/events/kvm.h>
  32#include <asm/vfp.h>
  33#include "../vfp/vfpinstr.h"
  34
  35#include "trace.h"
  36#include "coproc.h"
  37
  38
  39/******************************************************************************
  40 * Co-processor emulation
  41 *****************************************************************************/
  42
  43/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
  44static u32 cache_levels;
  45
  46/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
  47#define CSSELR_MAX 12
  48
  49/*
  50 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
  51 * of cp15 registers can be viewed either as couple of two u32 registers
  52 * or one u64 register. Current u64 register encoding is that least
  53 * significant u32 word is followed by most significant u32 word.
  54 */
  55static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
  56                                       const struct coproc_reg *r,
  57                                       u64 val)
  58{
  59        vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
  60        vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
  61}
  62
  63static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
  64                                      const struct coproc_reg *r)
  65{
  66        u64 val;
  67
  68        val = vcpu_cp15(vcpu, r->reg + 1);
  69        val = val << 32;
  70        val = val | vcpu_cp15(vcpu, r->reg);
  71        return val;
  72}
  73
/*
 * Trapped CP10 ID register access: not emulated, so inject an
 * undefined exception into the guest.  Returns 1 (resume guest).
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
  79
/*
 * Trapped access to coprocessors 0-13: inject an undefined exception.
 * Returns 1 (resume guest).
 */
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here, if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}
  89
/*
 * Trapped CP14 (debug) load/store: not emulated, inject an undefined
 * exception.  Returns 1 (resume guest).
 */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
  95
/*
 * Trapped CP14 MCR/MRC access: not emulated, inject an undefined
 * exception.  Returns 1 (resume guest).
 */
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
 101
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. We build a virtual cluster out of the
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 */
	/* Aff0 = vcpu_id % 4, Aff1 = vcpu_id / 4: four vcpus per cluster. */
	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
}
 113
 114/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
 115static bool access_actlr(struct kvm_vcpu *vcpu,
 116                         const struct coproc_params *p,
 117                         const struct coproc_reg *r)
 118{
 119        if (p->is_write)
 120                return ignore_write(vcpu, p);
 121
 122        *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
 123        return true;
 124}
 125
 126/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
 127static bool access_cbar(struct kvm_vcpu *vcpu,
 128                        const struct coproc_params *p,
 129                        const struct coproc_reg *r)
 130{
 131        if (p->is_write)
 132                return write_to_read_only(vcpu, p);
 133        return read_zero(vcpu, p);
 134}
 135
 136/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
 137static bool access_l2ctlr(struct kvm_vcpu *vcpu,
 138                          const struct coproc_params *p,
 139                          const struct coproc_reg *r)
 140{
 141        if (p->is_write)
 142                return ignore_write(vcpu, p);
 143
 144        *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
 145        return true;
 146}
 147
/*
 * Reset the shadow L2CTLR: start from the host's value but rewrite the
 * core-count field (bits [25:24]) from the VM's vcpu configuration.
 */
static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	/* Read the host L2CTLR. */
	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);	/* clear the "number of cores - 1" field */
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	/* How many cores in the current cluster and the next ones */
	ncores -= (vcpu->vcpu_id & ~3);
	/* Cap it to the maximum number of cores in a single cluster */
	ncores = min(ncores, 3U);
	l2ctlr |= (ncores & 3) << 24;

	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
 163
/*
 * Reset the shadow ACTLR from the host value, forcing bit 6 (the SMP
 * bit) to match whether the guest has more than one vcpu.
 */
static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;	/* set SMP bit */
	else
		actlr &= ~(1U << 6);	/* clear SMP bit */

	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}
 178
 179/*
 180 * TRM entries: A7:4.3.50, A15:4.3.49
 181 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 182 */
 183static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 184                           const struct coproc_params *p,
 185                           const struct coproc_reg *r)
 186{
 187        if (p->is_write)
 188                return ignore_write(vcpu, p);
 189
 190        *vcpu_reg(vcpu, p->Rt1) = 0;
 191        return true;
 192}
 193
 194/*
 195 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 196 */
 197static bool access_dcsw(struct kvm_vcpu *vcpu,
 198                        const struct coproc_params *p,
 199                        const struct coproc_reg *r)
 200{
 201        if (!p->is_write)
 202                return read_from_write_only(vcpu, p);
 203
 204        kvm_set_way_flush(vcpu);
 205        return true;
 206}
 207
 208/*
 209 * Generic accessor for VM registers. Only called as long as HCR_TVM
 210 * is set.  If the guest enables the MMU, we stop trapping the VM
 211 * sys_regs and leave it in complete control of the caches.
 212 *
 213 * Used by the cpu-specific code.
 214 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	/* Only writes are trapped here (HCR_TVM); see comment above. */
	BUG_ON(!p->is_write);

	/* Shadow the write; a 64-bit access also fills the high word. */
	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
	if (p->is_64bit)
		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

	/* React to a cache-enable state change caused by this write. */
	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
 230
 231static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 232                           const struct coproc_params *p,
 233                           const struct coproc_reg *r)
 234{
 235        u64 reg;
 236
 237        if (!p->is_write)
 238                return read_from_write_only(vcpu, p);
 239
 240        reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
 241        reg |= *vcpu_reg(vcpu, p->Rt1) ;
 242
 243        vgic_v3_dispatch_sgi(vcpu, reg);
 244
 245        return true;
 246}
 247
 248static bool access_gic_sre(struct kvm_vcpu *vcpu,
 249                           const struct coproc_params *p,
 250                           const struct coproc_reg *r)
 251{
 252        if (p->is_write)
 253                return ignore_write(vcpu, p);
 254
 255        *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
 256
 257        return true;
 258}
 259
 260/*
 261 * We could trap ID_DFR0 and tell the guest we don't support performance
 262 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 263 * NAKed, so it will read the PMCR anyway.
 264 *
 265 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 266 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 267 * all PM registers, which doesn't crash the guest kernel at least.
 268 */
 269static bool pm_fake(struct kvm_vcpu *vcpu,
 270                    const struct coproc_params *p,
 271                    const struct coproc_reg *r)
 272{
 273        if (p->is_write)
 274                return ignore_write(vcpu, p);
 275        else
 276                return read_zero(vcpu, p);
 277}
 278
/* All PMU register accessors share the same RAZ/WI fake implementation. */
#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake
 292
/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 *            (check_reg_table() verifies this; find_reg() relies on it
 *            for its binary search.)
 */
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c2_TTBR1 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
	/* 64-bit TTBR1 has Op1 == 1, so it sorts after the Op1 == 0
	 * entries above (Op1 is compared before Op2). */
	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },


	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_unknown, c6_IFAR },

	/* PAR swapped by interrupt.S */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_NMRR},

	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_AMAIR0},
	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_AMAIR1},

	/* ICC_SGI1R */
	{ CRm64(12), Op1( 0), is64, access_gic_sgi},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* ICC_SRE */
	{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
 420
 421static int check_reg_table(const struct coproc_reg *table, unsigned int n)
 422{
 423        unsigned int i;
 424
 425        for (i = 1; i < n; i++) {
 426                if (cmp_reg(&table[i-1], &table[i]) >= 0) {
 427                        kvm_err("reg table %p out of order (%d)\n", table, i - 1);
 428                        return 1;
 429                }
 430        }
 431
 432        return 0;
 433}
 434
 435/* Target specific emulation tables */
 436static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
 437
/* Register a target-specific table; it must be sorted (boot-time BUG if not). */
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	BUG_ON(check_reg_table(table->table, table->num));
	target_tables[table->target] = table;
}
 443
 444/* Get specific register table for this target. */
 445static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
 446{
 447        struct kvm_coproc_target_table *table;
 448
 449        table = target_tables[target];
 450        *num = table->num;
 451        return table->table;
 452}
 453
/*
 * Pack (CRn, CRm, Op1, Op2, !is_64bit) into a single ordinal used as a
 * search key.  64-bit entries get a smaller value than 32-bit entries
 * with the same CRn/CRm/Op1, matching the table sort rule above.
 */
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->CRn << 11;					\
		val |= (x)->CRm << 7;					\
		val |= (x)->Op1 << 4;					\
		val |= (x)->Op2 << 1;					\
		val |= !(x)->is_64bit;					\
		val;							\
	 })
 464
/*
 * bsearch() comparator.  Note the key is the packed match value itself
 * (cast to a pointer by find_reg()), not a pointer to one.  Packed
 * values fit in 15 bits, so the difference cannot overflow the int
 * return value.
 */
static int match_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct coproc_reg *r = elt;

	return pval - reg_to_match_value(r);
}
 472
/*
 * Binary-search @table (which must be sorted, see check_reg_table) for
 * the entry matching @params.  Returns NULL if not found.
 */
static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	/* Pack the params into the same key format as the table entries. */
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}
 481
/*
 * Look up a trapped CP15 access in the target-specific table, then in
 * the generic cp15_regs table, and invoke its accessor.  On successful
 * emulation the trapped instruction is skipped; on an unknown register
 * or a failing accessor an undefined exception is injected instead.
 * Always returns 1 (handled; resume the guest).
 */
static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08lx\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}
 516
 517/**
 518 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 519 * @vcpu: The VCPU pointer
 520 * @run:  The kvm_run struct
 521 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	/*
	 * Decode the MCRR/MRRC parameters from the HSR.  The 64-bit
	 * access's CRm is stored in params.CRn to match the table
	 * encoding (see the "CRm to CRn" note in index_to_params()).
	 */
	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);	/* bit 0: direction */
	params.is_64bit = true;

	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.CRm = 0;

	return emulate_cp15(vcpu, &params);
}
 538
 539static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 540                              const struct coproc_reg *table, size_t num)
 541{
 542        unsigned long i;
 543
 544        for (i = 0; i < num; i++)
 545                if (table[i].reset)
 546                        table[i].reset(vcpu, &table[i]);
 547}
 548
 549/**
 550 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 551 * @vcpu: The VCPU pointer
 552 * @run:  The kvm_run struct
 553 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	/* Decode the MCR/MRC parameters from the HSR. */
	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);	/* bit 0: direction */
	params.is_64bit = false;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
	params.Rt2 = 0;

	return emulate_cp15(vcpu, &params);
}
 570
 571/******************************************************************************
 572 * Userspace API
 573 *****************************************************************************/
 574
/*
 * Decode a KVM_{GET,SET}_ONE_REG register id into coproc_params.
 * Returns false if the id has an unsupported size or stray bits set.
 */
static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM_CRM_MASK
			      | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		/* CRm to CRn: see cp15_to_index for details */
		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRm = 0;
		return true;
	default:
		/* Only 32- and 64-bit register ids are supported. */
		return false;
	}
}
 618
 619/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct coproc_reg *table, *r;
	struct coproc_params params;

	/* We only do cp15 for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	/* Target-specific table takes precedence over the generic one. */
	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* Not saved in the cp15 array? (->reg == 0 means no backing slot) */
	if (r && !r->reg)
		r = NULL;

	return r;
}
 645
 646/*
 647 * These are the invariant cp15 registers: we let the guest see the host
 648 * versions of these, so they're part of the guest state.
 649 *
 650 * A future CPU may provide a mechanism to present different values to
 651 * the guest, or a future kvm may trap them.
 652 */
 653/* Unfortunately, there's no register-argument for mrc, so generate. */
/*
 * Generate get_<name>(): read the host's cp15 register identified by
 * (crn, crm, op1, op2) with an MRC and cache it in the table entry's
 * ->val field (the const is cast away on purpose).
 */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}
 666
/* Generate a host-value reader for each invariant ID register. */
FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)
 688
/* ->val is filled in by kvm_invariant_coproc_table_init() */
/* Must stay sorted: searched with find_reg()/bsearch(). */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};
 716
 717/*
 718 * Reads a register value from a userspace address to a kernel
 719 * variable. Make sure that register size matches sizeof(*__val).
 720 */
 721static int reg_from_user(void *val, const void __user *uaddr, u64 id)
 722{
 723        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
 724                return -EFAULT;
 725        return 0;
 726}
 727
 728/*
 729 * Writes a register value to a userspace address from a kernel variable.
 730 * Make sure that register size matches sizeof(*__val).
 731 */
 732static int reg_to_user(void __user *uaddr, const void *val, u64 id)
 733{
 734        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
 735                return -EFAULT;
 736        return 0;
 737}
 738
/*
 * Userspace "get" of an invariant register: return the cached host
 * value, sized to match the register id.
 */
static int get_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int ret;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	ret = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		/* Narrow the cached value to the 32-bit register size. */
		u32 val = r->val;

		ret = reg_to_user(uaddr, &val, id);
	} else if (KVM_REG_SIZE(id) == 8) {
		ret = reg_to_user(uaddr, &r->val, id);
	}
	return ret;
}
 762
/*
 * Userspace "set" of an invariant register: only succeeds (as a no-op)
 * when the written value matches the cached host value.
 */
static int set_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int err;
	u64 val;

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	err = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val32;

		err = reg_from_user(&val32, uaddr, id);
		if (!err)
			val = val32;	/* widen to the comparison type */
	} else if (KVM_REG_SIZE(id) == 8) {
		err = reg_from_user(&val, uaddr, id);
	}
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
 795
 796static bool is_valid_cache(u32 val)
 797{
 798        u32 level, ctype;
 799
 800        if (val >= CSSELR_MAX)
 801                return false;
 802
 803        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
 804        level = (val >> 1);
 805        ctype = (cache_levels >> (level * 3)) & 7;
 806
 807        switch (ctype) {
 808        case 0: /* No cache */
 809                return false;
 810        case 1: /* Instruction cache only */
 811                return (val & 1);
 812        case 2: /* Data cache only */
 813        case 4: /* Unified cache */
 814                return !(val & 1);
 815        case 3: /* Separate instruction and data caches */
 816                return true;
 817        default: /* Reserved: we can't know instruction or data. */
 818                return false;
 819        }
 820}
 821
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/*
	 * Make sure no one else changes CSSELR during this!  The
	 * CSSELR write and the CCSIDR read must be an atomic pair on
	 * this CPU, so keep interrupts off in between.
	 */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	isb();	/* ensure the CSSELR write takes effect before the read */
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
 838
 839static int demux_c15_get(u64 id, void __user *uaddr)
 840{
 841        u32 val;
 842        u32 __user *uval = uaddr;
 843
 844        /* Fail if we have unknown bits set. */
 845        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 846                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 847                return -ENOENT;
 848
 849        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
 850        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
 851                if (KVM_REG_SIZE(id) != 4)
 852                        return -ENOENT;
 853                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
 854                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
 855                if (!is_valid_cache(val))
 856                        return -ENOENT;
 857
 858                return put_user(get_ccsidr(val), uval);
 859        default:
 860                return -ENOENT;
 861        }
 862}
 863
 864static int demux_c15_set(u64 id, void __user *uaddr)
 865{
 866        u32 val, newval;
 867        u32 __user *uval = uaddr;
 868
 869        /* Fail if we have unknown bits set. */
 870        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 871                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 872                return -ENOENT;
 873
 874        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
 875        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
 876                if (KVM_REG_SIZE(id) != 4)
 877                        return -ENOENT;
 878                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
 879                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
 880                if (!is_valid_cache(val))
 881                        return -ENOENT;
 882
 883                if (get_user(newval, uval))
 884                        return -EFAULT;
 885
 886                /* This is also invariant: you can't change it. */
 887                if (newval != get_ccsidr(val))
 888                        return -EINVAL;
 889                return 0;
 890        default:
 891                return -ENOENT;
 892        }
 893}
 894
 895#ifdef CONFIG_VFPv3
/*
 * The 32-bit VFP control/status registers exposed to userspace, in the
 * order their ids are written out by copy_vfp_regids().
 */
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
                                   KVM_REG_ARM_VFP_FPSCR,
                                   KVM_REG_ARM_VFP_FPINST,
                                   KVM_REG_ARM_VFP_FPINST2,
                                   KVM_REG_ARM_VFP_MVFR0,
                                   KVM_REG_ARM_VFP_MVFR1,
                                   KVM_REG_ARM_VFP_FPSID };
 903
 904static unsigned int num_fp_regs(void)
 905{
 906        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
 907                return 32;
 908        else
 909                return 16;
 910}
 911
/* Total VFP register ids exposed to userspace: data regs + control regs. */
static unsigned int num_vfp_regs(void)
{
	/* Normal FP regs + control regs. */
	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}
 917
 918static int copy_vfp_regids(u64 __user *uindices)
 919{
 920        unsigned int i;
 921        const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
 922        const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
 923
 924        for (i = 0; i < num_fp_regs(); i++) {
 925                if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
 926                             uindices))
 927                        return -EFAULT;
 928                uindices++;
 929        }
 930
 931        for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
 932                if (put_user(u32reg | vfp_sysregs[i], uindices))
 933                        return -EFAULT;
 934                uindices++;
 935        }
 936
 937        return num_vfp_regs();
 938}
 939
/*
 * Read one VFP register out to userspace: either a 64-bit data
 * register from the vcpu context, a saved 32-bit control register
 * (FPEXC/FPSCR/FPINST/FPINST2), or one of the invariant id registers
 * (MVFR0/MVFR1/FPSID) read live from the host hardware.
 */
static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	/* Low vfpid values are the 64-bit data registers d0..dN. */
	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
	/* Invariant registers: report the host's values. */
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}
 983
 984static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
 985{
 986        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
 987        u32 val;
 988
 989        /* Fail if we have unknown bits set. */
 990        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 991                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 992                return -ENOENT;
 993
 994        if (vfpid < num_fp_regs()) {
 995                if (KVM_REG_SIZE(id) != 8)
 996                        return -ENOENT;
 997                return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
 998                                     uaddr, id);
 999        }
1000
1001        /* FP control registers are all 32 bit. */
1002        if (KVM_REG_SIZE(id) != 4)
1003                return -ENOENT;
1004
1005        switch (vfpid) {
1006        case KVM_REG_ARM_VFP_FPEXC:
1007                return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
1008        case KVM_REG_ARM_VFP_FPSCR:
1009                return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
1010        case KVM_REG_ARM_VFP_FPINST:
1011                return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
1012        case KVM_REG_ARM_VFP_FPINST2:
1013                return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
1014        /* These are invariant. */
1015        case KVM_REG_ARM_VFP_MVFR0:
1016                if (reg_from_user(&val, uaddr, id))
1017                        return -EFAULT;
1018                if (val != fmrx(MVFR0))
1019                        return -EINVAL;
1020                return 0;
1021        case KVM_REG_ARM_VFP_MVFR1:
1022                if (reg_from_user(&val, uaddr, id))
1023                        return -EFAULT;
1024                if (val != fmrx(MVFR1))
1025                        return -EINVAL;
1026                return 0;
1027        case KVM_REG_ARM_VFP_FPSID:
1028                if (reg_from_user(&val, uaddr, id))
1029                        return -EFAULT;
1030                if (val != fmrx(FPSID))
1031                        return -EINVAL;
1032                return 0;
1033        default:
1034                return -ENOENT;
1035        }
1036}
1037#else /* !CONFIG_VFPv3 */
/* No VFPv3 support: no VFP registers are exposed to userspace. */
static unsigned int num_vfp_regs(void)
{
	return 0;
}
1042
/* No VFPv3 support: nothing to copy, zero ids written. */
static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}
1047
/* No VFPv3 support: every VFP register id is unknown. */
static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}
1052
/* No VFPv3 support: every VFP register id is unknown. */
static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
1057#endif /* !CONFIG_VFPv3 */
1058
1059int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1060{
1061        const struct coproc_reg *r;
1062        void __user *uaddr = (void __user *)(long)reg->addr;
1063        int ret;
1064
1065        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1066                return demux_c15_get(reg->id, uaddr);
1067
1068        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
1069                return vfp_get_reg(vcpu, reg->id, uaddr);
1070
1071        r = index_to_coproc_reg(vcpu, reg->id);
1072        if (!r)
1073                return get_invariant_cp15(reg->id, uaddr);
1074
1075        ret = -ENOENT;
1076        if (KVM_REG_SIZE(reg->id) == 8) {
1077                u64 val;
1078
1079                val = vcpu_cp15_reg64_get(vcpu, r);
1080                ret = reg_to_user(uaddr, &val, reg->id);
1081        } else if (KVM_REG_SIZE(reg->id) == 4) {
1082                ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
1083        }
1084
1085        return ret;
1086}
1087
1088int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1089{
1090        const struct coproc_reg *r;
1091        void __user *uaddr = (void __user *)(long)reg->addr;
1092        int ret;
1093
1094        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1095                return demux_c15_set(reg->id, uaddr);
1096
1097        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
1098                return vfp_set_reg(vcpu, reg->id, uaddr);
1099
1100        r = index_to_coproc_reg(vcpu, reg->id);
1101        if (!r)
1102                return set_invariant_cp15(reg->id, uaddr);
1103
1104        ret = -ENOENT;
1105        if (KVM_REG_SIZE(reg->id) == 8) {
1106                u64 val;
1107
1108                ret = reg_from_user(&val, uaddr, reg->id);
1109                if (!ret)
1110                        vcpu_cp15_reg64_set(vcpu, r, val);
1111        } else if (KVM_REG_SIZE(reg->id) == 4) {
1112                ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
1113        }
1114
1115        return ret;
1116}
1117
1118static unsigned int num_demux_regs(void)
1119{
1120        unsigned int i, count = 0;
1121
1122        for (i = 0; i < CSSELR_MAX; i++)
1123                if (is_valid_cache(i))
1124                        count++;
1125
1126        return count;
1127}
1128
1129static int write_demux_regids(u64 __user *uindices)
1130{
1131        u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
1132        unsigned int i;
1133
1134        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
1135        for (i = 0; i < CSSELR_MAX; i++) {
1136                if (!is_valid_cache(i))
1137                        continue;
1138                if (put_user(val | i, uindices))
1139                        return -EFAULT;
1140                uindices++;
1141        }
1142        return 0;
1143}
1144
1145static u64 cp15_to_index(const struct coproc_reg *reg)
1146{
1147        u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
1148        if (reg->is_64bit) {
1149                val |= KVM_REG_SIZE_U64;
1150                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
1151                /*
1152                 * CRn always denotes the primary coproc. reg. nr. for the
1153                 * in-kernel representation, but the user space API uses the
1154                 * CRm for the encoding, because it is modelled after the
1155                 * MRRC/MCRR instructions: see the ARM ARM rev. c page
1156                 * B3-1445
1157                 */
1158                val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
1159        } else {
1160                val |= KVM_REG_SIZE_U32;
1161                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
1162                val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
1163                val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
1164                val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
1165        }
1166        return val;
1167}
1168
1169static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
1170{
1171        if (!*uind)
1172                return true;
1173
1174        if (put_user(cp15_to_index(reg), *uind))
1175                return false;
1176
1177        (*uind)++;
1178        return true;
1179}
1180
/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/*
	 * Merge-walk the two sorted tables, emitting each register id at
	 * most once; if NULL was passed for uind, copy_reg_to_user() only
	 * counts.  Returns the number of ids, or -EFAULT on copy failure.
	 *
	 * Walk carefully, as both tables may refer to the same register.
	 * A cursor is set to NULL once its table is exhausted;
	 * NOTE(review): cmp_reg() (defined elsewhere) is presumed to
	 * treat a NULL argument as "after everything" so the remaining
	 * table drains through the other branch — confirm against its
	 * definition.
	 */
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		/* On a tie (cmp == 0) both cursors advance: emit once. */
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
1223
1224unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
1225{
1226        return ARRAY_SIZE(invariant_cp15)
1227                + num_demux_regs()
1228                + num_vfp_regs()
1229                + walk_cp15(vcpu, (u64 __user *)NULL);
1230}
1231
1232int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1233{
1234        unsigned int i;
1235        int err;
1236
1237        /* Then give them all the invariant registers' indices. */
1238        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
1239                if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
1240                        return -EFAULT;
1241                uindices++;
1242        }
1243
1244        err = walk_cp15(vcpu, uindices);
1245        if (err < 0)
1246                return err;
1247        uindices += err;
1248
1249        err = copy_vfp_regids(uindices);
1250        if (err < 0)
1251                return err;
1252        uindices += err;
1253
1254        return write_demux_regids(uindices);
1255}
1256
/*
 * One-time init: sanity-check the register tables, snapshot the
 * invariant registers' host values, and cache a cleaned-up CLIDR in
 * cache_levels for is_valid_cache().
 */
void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	/* Find the first of the seven 3-bit Ctype fields that is 0b000. */
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
1286
1287/**
1288 * kvm_reset_coprocs - sets cp15 registers to reset value
1289 * @vcpu: The VCPU pointer
1290 *
1291 * This function finds the right table above and sets the registers on the
1292 * virtual CPU struct to their architecturally defined reset values.
1293 */
1294void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
1295{
1296        size_t num;
1297        const struct coproc_reg *table;
1298
1299        /* Catch someone adding a register without putting in reset entry. */
1300        memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
1301
1302        /* Generic chip reset first (so target could override). */
1303        reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
1304
1305        table = get_target_table(vcpu->arch.target, &num);
1306        reset_coproc_regs(vcpu, table, num);
1307
1308        for (num = 1; num < NR_CP15_REGS; num++)
1309                if (vcpu_cp15(vcpu, num) == 0x42424242)
1310                        panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
1311}
1312