linux/arch/arm/kvm/coproc.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include "trace.h"
#include "coproc.h"

/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

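/*
 * Fallback handlers for coprocessor traps we do not emulate: inject an
 * undefined instruction exception into the guest and return 1 so the
 * run loop resumes it.
 */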
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /*
         * We can get here if the host was built without VFPv3 support
         * but the guest attempted a floating point operation.
         */
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

/*
 * See note at ARM ARM B1.14.4: set/way cache operations are only
 * meaningful on the physical CPU the guest last ran on, so flag every
 * other CPU as needing a full dcache flush and perform the operation
 * locally (or flush everything if we have already been preempted).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        unsigned long val;
        int cpu;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        cpu = get_cpu();

        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

        /* If we were already preempted, take the long way around */
        if (cpu != vcpu->arch.last_pcpu) {
                flush_cache_all();
                goto done;
        }

        val = *vcpu_reg(vcpu, p->Rt1);

        switch (p->CRm) {
        case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
        case 14:                /* DCCISW */
                asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
                break;

        case 10:                /* DCCSW */
                asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
                break;
        }

done:
        put_cpu();

        return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
                    const struct coproc_params *p,
                    const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}

#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm field
 * of the user space API for 64-bit register access, in line with the
 * terminology used in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
        /* CSSELR: swapped by interrupt.S. */
        { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
                        NULL, reset_unknown, c0_CSSELR },

        /* TTBR0/TTBR1: swapped by interrupt.S. */
        { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
        { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },

        /* TTBCR: swapped by interrupt.S. */
        { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_val, c2_TTBCR, 0x00000000 },

        /* DACR: swapped by interrupt.S. */
        { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c3_DACR },

        /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
        { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c5_DFSR },
        { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c5_IFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c5_ADFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c5_AIFSR },

        /* DFAR/IFAR: swapped by interrupt.S. */
        { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c6_DFAR },
        { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c6_IFAR },

        /* PAR: swapped by interrupt.S. */
        { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

        /*
         * DC{C,I,CI}SW operations:
         */
        { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
        /*
         * Dummy performance monitor implementation.
         */
        { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
        { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
        { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
        { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
        { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
        { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
        { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

        /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
        { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
                        NULL, reset_unknown, c10_PRRR},
        { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
                        NULL, reset_unknown, c10_NMRR},

        /* VBAR: swapped by interrupt.S. */
        { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c12_VBAR, 0x00000000 },

        /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
        { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
                        NULL, reset_val, c13_CID, 0x00000000 },
        { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c13_TID_URW },
        { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
                        NULL, reset_unknown, c13_TID_URO },
        { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
                        NULL, reset_unknown, c13_TID_PRIV },

        /* CNTKCTL: swapped by interrupt.S. */
        { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c14_CNTKCTL, 0x00000000 },
};

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
        target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
        struct kvm_coproc_target_table *table;

        table = target_tables[target];
        *num = table->num;
        return table->table;
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
                                         const struct coproc_reg table[],
                                         unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                const struct coproc_reg *r = &table[i];

                if (params->is_64bit != r->is_64)
                        continue;
                if (params->CRn != r->CRn)
                        continue;
                if (params->CRm != r->CRm)
                        continue;
                if (params->Op1 != r->Op1)
                        continue;
                if (params->Op2 != r->Op2)
                        continue;

                return r;
        }
        return NULL;
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
                        const struct coproc_params *params)
{
        size_t num;
        const struct coproc_reg *table, *r;

        trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
                                   params->CRm, params->Op2, params->is_write);

        table = get_target_table(vcpu->arch.target, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

        if (likely(r)) {
                /* If we don't have an accessor, we should never get here! */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return 1;
                }
                /* If access function fails, it should complain. */
        } else {
                kvm_err("Unsupported guest CP15 access at: %08lx\n",
                        *vcpu_pc(vcpu));
                print_cp_instr(params);
        }
        kvm_inject_undefined(vcpu);
        return 1;
}

/**
 * kvm_handle_cp15_64 -- handles an mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params;

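        /*
         * Decode the 64-bit coprocessor access (MCRR/MRRC) from the HSR:
         * Op1 lives in ISS bits [19:16], Rt2 in [13:10], Rt in [9:5],
         * CRm in [4:1], and bit 0 gives the direction (0 means a write,
         * i.e. MCRR).
         */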
        params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = true;

        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
        params.Op2 = 0;
        params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.CRn = 0;

        return emulate_cp15(vcpu, &params);
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
                              const struct coproc_reg *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_cp15_32 -- handles an mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params;

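        /*
         * Decode the 32-bit coprocessor access (MCR/MRC) from the HSR:
         * Op2 lives in ISS bits [19:17], Op1 in [16:14], CRn in [13:10],
         * Rt in [9:5], CRm in [4:1], and bit 0 gives the direction
         * (0 means a write, i.e. MCR).
         */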
        params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = false;

        params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
        params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
        params.Rt2 = 0;

        return emulate_cp15(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

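/*
 * A userspace register index (as used with KVM_{GET,SET}_ONE_REG) packs
 * the architecture, size and coprocessor number together with CRn, CRm,
 * Op1 and Op2 for 32-bit registers, or CRm and Op1 alone for 64-bit
 * ones.  Any bit outside those fields makes the index invalid.
 */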
static bool index_to_params(u64 id, struct coproc_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
                /* Any unused index bits mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM_32_CRN_MASK
                           | KVM_REG_ARM_CRM_MASK
                           | KVM_REG_ARM_OPC1_MASK
                           | KVM_REG_ARM_32_OPC2_MASK))
                        return false;

                params->is_64bit = false;
                params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
                               >> KVM_REG_ARM_32_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
                               >> KVM_REG_ARM_32_OPC2_SHIFT);
                return true;
        case KVM_REG_SIZE_U64:
                /* Any unused index bits mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                              | KVM_REG_ARM_COPROC_MASK
                              | KVM_REG_ARM_CRM_MASK
                              | KVM_REG_ARM_OPC1_MASK))
                        return false;
                params->is_64bit = true;
                /* CRm to CRn: see cp15_to_index for details */
                params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = 0;
                params->CRm = 0;
                return true;
        default:
                return false;
        }
}

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
                                                    u64 id)
{
        size_t num;
        const struct coproc_reg *table, *r;
        struct coproc_params params;

        /* We only do cp15 for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

        /* Not saved in the cp15 array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* The coprocessor register is encoded in the mrc instruction itself, so
 * we generate one accessor function per register. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)                        \
        static void get_##name(struct kvm_vcpu *v,                      \
                               const struct coproc_reg *r)              \
        {                                                               \
                u32 val;                                                \
                                                                        \
                asm volatile("mrc p15, " __stringify(op1)               \
                             ", %0, c" __stringify(crn)                 \
                             ", c" __stringify(crm)                     \
                             ", " __stringify(op2) "\n" : "=r" (val));  \
                ((struct coproc_reg *)r)->val = val;                    \
        }

FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
        { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

        { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

        { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },

        { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
        { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
};

static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
        /* This Just Works because we are little endian. */
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
        /* This Just Works because we are little endian. */
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;
        int err;
        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        err = reg_from_user(&val, uaddr, id);
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}

static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        /* Returning -ENOENT from a bool function would read as true. */
        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        /* Put value into CSSELR */
        asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
        isb();
        /* Read result out of CCSIDR */
        asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
        local_irq_enable();

        return ccsidr;
}

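/*
 * "Demuxed" registers are several registers multiplexed behind one id in
 * hardware; the only ones handled here are the CCSIDRs, selected by the
 * CSSELR value carried in the DEMUX_VAL field of the register id.
 */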
static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
                                   KVM_REG_ARM_VFP_FPSCR,
                                   KVM_REG_ARM_VFP_FPINST,
                                   KVM_REG_ARM_VFP_FPINST2,
                                   KVM_REG_ARM_VFP_MVFR0,
                                   KVM_REG_ARM_VFP_MVFR1,
                                   KVM_REG_ARM_VFP_FPSID };

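/*
 * MVFR0.A_SIMD reads 2 on implementations with 32 double-precision
 * registers and 1 on those with 16.
 */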
static unsigned int num_fp_regs(void)
{
        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
                return 32;
        else
                return 16;
}

static unsigned int num_vfp_regs(void)
{
        /* Normal FP regs + control regs. */
        return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

static int copy_vfp_regids(u64 __user *uindices)
{
        unsigned int i;
        const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
        const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

        for (i = 0; i < num_fp_regs(); i++) {
                if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
                             uindices))
                        return -EFAULT;
                uindices++;
        }

        for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
                if (put_user(u32reg | vfp_sysregs[i], uindices))
                        return -EFAULT;
                uindices++;
        }

        return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
                                   id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
        case KVM_REG_ARM_VFP_MVFR0:
                val = fmrx(MVFR0);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_MVFR1:
                val = fmrx(MVFR1);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_FPSID:
                val = fmrx(FPSID);
                return reg_to_user(uaddr, &val, id);
        default:
                return -ENOENT;
        }
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
                                     uaddr, id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
        /* These are invariant. */
        case KVM_REG_ARM_VFP_MVFR0:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR0))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_MVFR1:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR1))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_FPSID:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(FPSID))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
        return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
        return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

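/*
 * Userspace GET/SET_ONE_REG dispatch: demuxed (cache geometry) registers
 * first, then VFP, then the target-specific and generic cp15 tables,
 * falling back to the invariant registers.
 */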
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_get_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return get_invariant_cp15(reg->id, uaddr);

        /* Note: copies two regs if size is 64 bit. */
        return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_set_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return set_invariant_cp15(reg->id, uaddr);

        /* Note: copies two regs if size is 64 bit. */
        return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}

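/*
 * Worked example, assuming the standard KVM_REG_ARM_* field definitions:
 * the 32-bit CSSELR entry (CRn 0, CRm 0, Op1 2, Op2 0) encodes as
 * KVM_REG_ARM | KVM_REG_SIZE_U32 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
 * (2 << KVM_REG_ARM_OPC1_SHIFT), since every other field is zero.
 */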
static u64 cp15_to_index(const struct coproc_reg *reg)
{
        u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
        if (reg->is_64) {
                val |= KVM_REG_SIZE_U64;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                /*
                 * CRn always denotes the primary coproc. reg. nr. for the
                 * in-kernel representation, but the user space API uses the
                 * CRm for the encoding, because it is modelled after the
                 * MRRC/MCRR instructions: see the ARM ARM rev. c page
                 * B3-1445
                 */
                val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
        } else {
                val |= KVM_REG_SIZE_U32;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
                val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
                val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
        }
        return val;
}

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
        /* A NULL buffer means the caller only wants the count. */
        if (!*uind)
                return true;

        if (put_user(cp15_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}

/* Assumes ordered tables; see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct coproc_reg *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, &num);
        end1 = i1 + num;
        i2 = cp15_regs;
        end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}

unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
        /* walk_cp15() with a NULL buffer just counts the entries. */
        return ARRAY_SIZE(invariant_cp15)
                + num_demux_regs()
                + num_vfp_regs()
                + walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* First give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
                if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_cp15(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        err = copy_vfp_regids(uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
        unsigned int i;

        /* Make sure tables are unique and in order. */
        for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
                BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
                invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

        /*
         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_coprocs - sets cp15 registers to their reset values
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct coproc_reg *table;

        /* Catch someone adding a register without putting in a reset entry. */
        memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

        /* Generic chip reset first (so target could override). */
        reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

        table = get_target_table(vcpu->arch.target, &num);
        reset_coproc_regs(vcpu, table, num);

        for (num = 1; num < NR_CP15_REGS; num++)
                if (vcpu->arch.cp15[num] == 0x42424242)
                        panic("Didn't reset vcpu->arch.cp15[%zi]", num);
}