linux/arch/arm/kvm/coproc.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /*
         * We can get here if the host was built without VFPv3 support
         * but the guest attempted a floating point operation.
         */
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        /*
         * Compute guest MPIDR. We build a virtual cluster out of the
         * vcpu_id, but we read the 'U' bit from the underlying
         * hardware directly.
         */
        vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
                                     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
                                     (vcpu->vcpu_id & 3));
}
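
/*
 * Illustration of the packing above (worked example, not from the TRM):
 * vcpu_id 5 gives Aff1 = 5 >> 2 = 1 and Aff0 = 5 & 3 = 1, i.e. core 1
 * of virtual cluster 1, with at most four vcpus per virtual cluster.
 */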

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
                         const struct coproc_params *p,
                         const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
        return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p);
        return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
                          const struct coproc_params *p,
                          const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
        return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        u32 l2ctlr, ncores;

        asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
        l2ctlr &= ~(3 << 24);
        ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
        /* How many cores in the current cluster and the next ones */
        ncores -= (vcpu->vcpu_id & ~3);
        /* Cap it to the maximum number of cores in a single cluster */
        ncores = min(ncores, 3U);
        l2ctlr |= (ncores & 3) << 24;

        vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
}
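
/*
 * Worked example of the above with six vcpus (online_vcpus - 1 == 5):
 * vcpu 0 computes 5 - 0 = 5, capped to 3 (a full four-core cluster);
 * vcpu 4, in the second virtual cluster, computes 5 - 4 = 1, i.e. two
 * cores, the field encoding "number of cores minus one".
 */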

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        u32 actlr;

        /* ACTLR contains SMP bit: make sure you create all cpus first! */
        asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
        /* Make the SMP bit consistent with the guest configuration */
        if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
                actlr |= 1U << 6;
        else
                actlr &= ~(1U << 6);

        vcpu->arch.cp15[c1_ACTLR] = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = 0;
        return true;
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        unsigned long val;
        int cpu;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        cpu = get_cpu();

        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

        /* If we were already preempted, take the long way around */
        if (cpu != vcpu->arch.last_pcpu) {
                flush_cache_all();
                goto done;
        }

        val = *vcpu_reg(vcpu, p->Rt1);

        switch (p->CRm) {
        case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
        case 14:                /* DCCISW */
                asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
                break;

        case 10:                /* DCCSW */
                asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
                break;
        }

done:
        put_cpu();

        return true;
}
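
/*
 * Note on the above: set/way operations address the physical cache
 * geometry, which the guest cannot be shown truthfully, so invalidates
 * are upgraded to clean+invalidate and, if the vcpu has moved to a
 * different physical cpu since it last ran, we fall back to
 * flush_cache_all() and flag all other cpus as needing a flush.
 */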

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
                    const struct coproc_params *p,
                    const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}

#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
        /* MPIDR: we use VMPIDR for guest access. */
        { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
                        NULL, reset_mpidr, c0_MPIDR },

        /* CSSELR: swapped by interrupt.S. */
        { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
                        NULL, reset_unknown, c0_CSSELR },

        /* ACTLR: trapped by HCR.TAC bit. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_actlr, reset_actlr, c1_ACTLR },

        /* CPACR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_val, c1_CPACR, 0x00000000 },

        /* TTBR0/TTBR1: swapped by interrupt.S. */
        { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
        { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },

        /* TTBCR: swapped by interrupt.S. */
        { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_val, c2_TTBCR, 0x00000000 },

        /* DACR: swapped by interrupt.S. */
        { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c3_DACR },

        /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
        { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c5_DFSR },
        { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c5_IFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c5_ADFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c5_AIFSR },

        /* DFAR/IFAR: swapped by interrupt.S. */
        { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c6_DFAR },
        { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c6_IFAR },

        /* PAR swapped by interrupt.S */
        { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

        /*
         * DC{C,I,CI}SW operations:
         */
        { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
        /*
         * L2CTLR access (guest wants to know #CPUs).
         */
        { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
                        access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
        { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

        /*
         * Dummy performance monitor implementation.
         */
        { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
        { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
        { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
        { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
        { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
        { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
        { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

        /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
        { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c10_PRRR},
        { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c10_NMRR},

        /* VBAR: swapped by interrupt.S. */
        { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c12_VBAR, 0x00000000 },

        /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
        { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
                        NULL, reset_val, c13_CID, 0x00000000 },
        { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c13_TID_URW },
        { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
                        NULL, reset_unknown, c13_TID_URO },
        { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
                        NULL, reset_unknown, c13_TID_PRIV },

        /* CNTKCTL: swapped by interrupt.S. */
        { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c14_CNTKCTL, 0x00000000 },

        /* The Configuration Base Address Register. */
        { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
        unsigned int i;

        for (i = 1; i < table->num; i++)
                BUG_ON(cmp_reg(&table->table[i-1],
                               &table->table[i]) >= 0);

        target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
        struct kvm_coproc_target_table *table;

        table = target_tables[target];
        *num = table->num;
        return table->table;
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
                                         const struct coproc_reg table[],
                                         unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                const struct coproc_reg *r = &table[i];

                if (params->is_64bit != r->is_64)
                        continue;
                if (params->CRn != r->CRn)
                        continue;
                if (params->CRm != r->CRm)
                        continue;
                if (params->Op1 != r->Op1)
                        continue;
                if (params->Op2 != r->Op2)
                        continue;

                return r;
        }
        return NULL;
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
                        const struct coproc_params *params)
{
        size_t num;
        const struct coproc_reg *table, *r;

        trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
                                   params->CRm, params->Op2, params->is_write);

        table = get_target_table(vcpu->arch.target, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

        if (likely(r)) {
                /* If we don't have an accessor, we should never get here! */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return 1;
                }
                /* If access function fails, it should complain. */
        } else {
                kvm_err("Unsupported guest CP15 access at: %08lx\n",
                        *vcpu_pc(vcpu));
                print_cp_instr(params);
        }
        kvm_inject_undefined(vcpu);
        return 1;
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params;

        params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = true;

        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
        params.Op2 = 0;
        params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.CRn = 0;

        return emulate_cp15(vcpu, &params);
}
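
/*
 * HSR field layout for a 64-bit (mcrr/mrrc) CP15 trap, as decoded
 * above: bit [0] is the direction (0 == write to the coproc, i.e.
 * mcrr), bits [4:1] CRm, bits [9:5] Rt1, bits [13:10] Rt2 and
 * bits [19:16] Op1.
 */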

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
                              const struct coproc_reg *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params;

        params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = false;

        params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
        params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
        params.Rt2 = 0;

        return emulate_cp15(vcpu, &params);
}
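
/*
 * The 32-bit (mcr/mrc) trap encoding differs from the 64-bit one only
 * in the upper fields: bits [13:10] CRn, bits [16:14] Op1 and
 * bits [19:17] Op2; direction, CRm and Rt sit in the same bits as
 * above.
 */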

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct coproc_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
                /* Any set bits in the unused index fields mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM_32_CRN_MASK
                           | KVM_REG_ARM_CRM_MASK
                           | KVM_REG_ARM_OPC1_MASK
                           | KVM_REG_ARM_32_OPC2_MASK))
                        return false;

                params->is_64bit = false;
                params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
                               >> KVM_REG_ARM_32_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
                               >> KVM_REG_ARM_32_OPC2_SHIFT);
                return true;
        case KVM_REG_SIZE_U64:
                /* Any set bits in the unused index fields mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                              | KVM_REG_ARM_COPROC_MASK
                              | KVM_REG_ARM_CRM_MASK
                              | KVM_REG_ARM_OPC1_MASK))
                        return false;
                params->is_64bit = true;
                /* CRm to CRn: see cp15_to_index for details */
                params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = 0;
                params->CRm = 0;
                return true;
        default:
                return false;
        }
}
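
/*
 * Sketch of the inverse direction (hypothetical userspace usage, not
 * part of this file): the id a VMM would hand to KVM_GET_ONE_REG for a
 * 32-bit cp15 register is built from the same fields, e.g.
 *
 *	u64 id = KVM_REG_ARM | KVM_REG_SIZE_U32
 *		| (15 << KVM_REG_ARM_COPROC_SHIFT)
 *		| (CRn << KVM_REG_ARM_32_CRN_SHIFT)
 *		| (CRm << KVM_REG_ARM_CRM_SHIFT)
 *		| (Op1 << KVM_REG_ARM_OPC1_SHIFT)
 *		| (Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
 *
 * cp15_to_index() below is the in-kernel equivalent.
 */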

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
                                                    u64 id)
{
        size_t num;
        const struct coproc_reg *table, *r;
        struct coproc_params params;

        /* We only do cp15 for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

        /* Not saved in the cp15 array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)                        \
        static void get_##name(struct kvm_vcpu *v,                      \
                               const struct coproc_reg *r)              \
        {                                                               \
                u32 val;                                                \
                                                                        \
                asm volatile("mrc p15, " __stringify(op1)               \
                             ", %0, c" __stringify(crn)                 \
                             ", c" __stringify(crm)                     \
                             ", " __stringify(op2) "\n" : "=r" (val));  \
                ((struct coproc_reg *)r)->val = val;                    \
        }

FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)
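
/*
 * For example, FUNCTION_FOR32(0, 0, 0, 0, MIDR) above expands to a
 * get_MIDR() helper that runs "mrc p15, 0, %0, c0, c0, 0" and stashes
 * the host value in the (const-cast) table entry's ->val.
 */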

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
        { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

        { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

        { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },

        { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
        { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
};

static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
        /* This Just Works because we are little endian. */
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
        /* This Just Works because we are little endian. */
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;
        int err;
        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        err = reg_from_user(&val, uaddr, id);
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}

static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}
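
/*
 * Example: val == 3 selects the instruction side (bit 0 set) of level
 * 2 (3 >> 1 == 1), so it is only valid when that level's Ctype is 1
 * (instruction only) or 3 (separate I and D caches).
 */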

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        /* Put value into CSSELR */
        asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
        isb();
        /* Read result out of CCSIDR */
        asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
        local_irq_enable();

        return ccsidr;
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
                                   KVM_REG_ARM_VFP_FPSCR,
                                   KVM_REG_ARM_VFP_FPINST,
                                   KVM_REG_ARM_VFP_FPINST2,
                                   KVM_REG_ARM_VFP_MVFR0,
                                   KVM_REG_ARM_VFP_MVFR1,
                                   KVM_REG_ARM_VFP_FPSID };

static unsigned int num_fp_regs(void)
{
        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
                return 32;
        else
                return 16;
}

static unsigned int num_vfp_regs(void)
{
        /* Normal FP regs + control regs. */
        return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

static int copy_vfp_regids(u64 __user *uindices)
{
        unsigned int i;
        const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
        const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

        for (i = 0; i < num_fp_regs(); i++) {
                if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
                             uindices))
                        return -EFAULT;
                uindices++;
        }

        for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
                if (put_user(u32reg | vfp_sysregs[i], uindices))
                        return -EFAULT;
                uindices++;
        }

        return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
                                   id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
        case KVM_REG_ARM_VFP_MVFR0:
                val = fmrx(MVFR0);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_MVFR1:
                val = fmrx(MVFR1);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_FPSID:
                val = fmrx(FPSID);
                return reg_to_user(uaddr, &val, id);
        default:
                return -ENOENT;
        }
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
                                     uaddr, id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
        /* These are invariant. */
        case KVM_REG_ARM_VFP_MVFR0:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR0))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_MVFR1:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR1))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_FPSID:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(FPSID))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
        return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
        return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_get_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return get_invariant_cp15(reg->id, uaddr);

        /* Note: copies two regs if size is 64 bit. */
        return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_set_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return set_invariant_cp15(reg->id, uaddr);

        /* Note: copies two regs if size is 64 bit. */
        return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
        u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
        if (reg->is_64) {
                val |= KVM_REG_SIZE_U64;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                /*
                 * CRn always denotes the primary coproc. reg. nr. for the
                 * in-kernel representation, but the user space API uses the
                 * CRm for the encoding, because it is modelled after the
                 * MRRC/MCRR instructions: see the ARM ARM rev. c page
                 * B3-1445
                 */
                val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
        } else {
                val |= KVM_REG_SIZE_U32;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
                val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
                val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
        }
        return val;
}

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(cp15_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct coproc_reg *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, &num);
        end1 = i1 + num;
        i2 = cp15_regs;
        end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}
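
/*
 * The loop above is a merge of two sorted tables: on a tie (cmp == 0)
 * both cursors advance but only the target-specific entry is emitted,
 * which is how a target table overrides a generic cp15_regs entry.
 */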

unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_cp15)
                + num_demux_regs()
                + num_vfp_regs()
                + walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* First give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
                if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_cp15(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        err = copy_vfp_regids(uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
        unsigned int i;

        /* Make sure tables are unique and in order. */
        for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
                BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
                invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

        /*
         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}
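
/*
 * For illustration (hypothetical CLIDR value): 0x0a200023 has
 * Ctype1 = 3 (separate L1 I and D), Ctype2 = 4 (unified L2) and
 * Ctype3 = 0, so the loop above stops at i == 2 and the masking
 * leaves cache_levels == 0x23, discarding the LoC/LoU fields.
 */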

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct coproc_reg *table;

        /* Catch someone adding a register without putting in a reset entry. */
        memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

        /* Generic chip reset first (so target could override). */
        reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

        table = get_target_table(vcpu->arch.target, &num);
        reset_coproc_regs(vcpu, table, num);

        for (num = 1; num < NR_CP15_REGS; num++)
                if (vcpu->arch.cp15[num] == 0x42424242)
                        panic("Didn't reset vcpu->arch.cp15[%zi]", num);
}