qemu/target/arm/cpu64.c
/*
 * QEMU AArch64 CPU
 *
 * Copyright (c) 2013 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/module.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "qapi/visitor.h"

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1ULL << feature;
}

static inline void unset_feature(CPUARMState *env, int feature)
{
    env->features &= ~(1ULL << feature);
}

#ifndef CONFIG_USER_ONLY
static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Number of cores is in [25:24]; otherwise we RAZ */
    return (cpu->core_count - 1) << 24;
}
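
/*
 * For illustration: with cpu->core_count == 4 the read above returns
 * (4 - 1) << 24 == 0x03000000, i.e. "4 cores" encoded in L2CTLR[25:24],
 * with every other bit reading as zero.
 */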
#endif

static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
    { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
      .writefn = arm_cp_write_ignore },
    { .name = "L2CTLR",
      .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
      .writefn = arm_cp_write_ignore },
#endif
    { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2ECTLR",
      .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR",
      .cp = 15, .opc1 = 0, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUECTLR",
      .cp = 15, .opc1 = 1, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUMERRSR",
      .cp = 15, .opc1 = 2, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2MERRSR",
      .cp = 15, .opc1 = 3, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void aarch64_a57_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a57";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
    cpu->midr = 0x411fd070;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->id_pfr0 = 0x00000131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10101105;
    cpu->id_mmfr1 = 0x40000000;
    cpu->id_mmfr2 = 0x01260000;
    cpu->id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}

static void aarch64_a53_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a53";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
    cpu->midr = 0x410fd034;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->reset_sctlr = 0x00c50838;
    cpu->id_pfr0 = 0x00000131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10101105;
    cpu->id_mmfr1 = 0x40000000;
    cpu->id_mmfr2 = 0x01260000;
    cpu->id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
    cpu->dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}

static void aarch64_a72_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a72";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x410fd083;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034080;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->id_pfr0 = 0x00000131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10201105;
    cpu->id_mmfr1 = 0x40000000;
    cpu->id_mmfr2 = 0x01260000;
    cpu->id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}

void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
    /*
     * If any vector lengths are explicitly enabled with sve<N> properties,
     * then all other lengths are implicitly disabled.  If sve-max-vq is
     * specified then it is the same as explicitly enabling all lengths
     * up to and including the specified maximum, which means all larger
     * lengths will be implicitly disabled.  If no sve<N> properties
     * are enabled and sve-max-vq is not specified, then all lengths not
     * explicitly disabled will be enabled.  Additionally, all power-of-two
     * vector lengths less than the maximum enabled length will be
     * automatically enabled and all vector lengths larger than the largest
     * disabled power-of-two vector length will be automatically disabled.
     * Errors are generated if the user provided input that interferes with
     * any of the above.  Finally, if SVE is not disabled, then at least one
     * vector length must be enabled.
     */
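    /*
     * A few worked examples of how the rules above combine (illustrative
     * only, TCG without KVM, assuming ARM_MAX_VQ is at least 4):
     *   sve384=on               -> 128-, 256- and 384-bit lengths enabled
     *                              (the smaller powers of two are implied),
     *                              everything larger stays disabled;
     *   sve-max-vq=4            -> every length up to 512 bits enabled;
     *   sve-max-vq=4,sve512=off -> rejected, since the maximum length
     *                              itself must remain enabled.
     */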
    DECLARE_BITMAP(kvm_supported, ARM_MAX_VQ);
    DECLARE_BITMAP(tmp, ARM_MAX_VQ);
    uint32_t vq, max_vq = 0;

    /* Collect the set of vector lengths supported by KVM. */
    bitmap_zero(kvm_supported, ARM_MAX_VQ);
    if (kvm_enabled() && kvm_arm_sve_supported(CPU(cpu))) {
        kvm_arm_sve_get_vls(CPU(cpu), kvm_supported);
    } else if (kvm_enabled()) {
        assert(!cpu_isar_feature(aa64_sve, cpu));
    }

    /*
     * Process explicit sve<N> properties.
     * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
     * Check first for any sve<N> enabled.
     */
    if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
        max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;

        if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
            error_setg(errp, "cannot enable sve%d", max_vq * 128);
            error_append_hint(errp, "sve%d is larger than the maximum vector "
                              "length, sve-max-vq=%d (%d bits)\n",
                              max_vq * 128, cpu->sve_max_vq,
                              cpu->sve_max_vq * 128);
            return;
        }

        if (kvm_enabled()) {
            /*
             * For KVM we have to automatically enable all supported
             * uninitialized lengths, even when the smaller lengths are not
             * all powers-of-two.
             */
            bitmap_andnot(tmp, kvm_supported, cpu->sve_vq_init, max_vq);
            bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
        } else {
            /* Propagate enabled bits down through required powers-of-two. */
            for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                if (!test_bit(vq - 1, cpu->sve_vq_init)) {
                    set_bit(vq - 1, cpu->sve_vq_map);
                }
            }
        }
    } else if (cpu->sve_max_vq == 0) {
        /*
         * No explicit bits enabled, and no implicit bits from sve-max-vq.
         */
        if (!cpu_isar_feature(aa64_sve, cpu)) {
            /* SVE is disabled and so are all vector lengths.  Good. */
            return;
        }

        if (kvm_enabled()) {
            /* Disabling a supported length disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
                if (test_bit(vq - 1, cpu->sve_vq_init) &&
                    test_bit(vq - 1, kvm_supported)) {
                    break;
                }
            }
            max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
            bitmap_andnot(cpu->sve_vq_map, kvm_supported,
                          cpu->sve_vq_init, max_vq);
            if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "Disabling sve%d results in all "
                                  "vector lengths being disabled.\n",
                                  vq * 128);
                error_append_hint(errp, "With SVE enabled, at least one "
                                  "vector length must be enabled.\n");
                return;
            }
        } else {
            /* Disabling a power-of-two disables all larger lengths. */
            if (test_bit(0, cpu->sve_vq_init)) {
                error_setg(errp, "cannot disable sve128");
                error_append_hint(errp, "Disabling sve128 results in all "
                                  "vector lengths being disabled.\n");
                error_append_hint(errp, "With SVE enabled, at least one "
                                  "vector length must be enabled.\n");
                return;
            }
            for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) {
                if (test_bit(vq - 1, cpu->sve_vq_init)) {
                    break;
                }
            }
            max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
            bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq);
        }

        max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
    }

    /*
     * Process the sve-max-vq property.
     * Note that we know from the above that no bit above
     * sve-max-vq is currently set.
     */
    if (cpu->sve_max_vq != 0) {
        max_vq = cpu->sve_max_vq;

        if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
            test_bit(max_vq - 1, cpu->sve_vq_init)) {
            error_setg(errp, "cannot disable sve%d", max_vq * 128);
            error_append_hint(errp, "The maximum vector length must be "
                              "enabled, sve-max-vq=%d (%d bits)\n",
                              max_vq, max_vq * 128);
            return;
        }

        /* Set all bits not explicitly set within sve-max-vq. */
        bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
        bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
    }

    /*
     * We should know what max-vq is now.  Also, as we're done
     * manipulating sve-vq-map, we ensure any bits above max-vq
     * are clear, just in case anybody looks.
     */
    assert(max_vq != 0);
    bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);

    if (kvm_enabled()) {
        /* Ensure the set of lengths matches what KVM supports. */
        bitmap_xor(tmp, cpu->sve_vq_map, kvm_supported, max_vq);
        if (!bitmap_empty(tmp, max_vq)) {
            vq = find_last_bit(tmp, max_vq) + 1;
            if (test_bit(vq - 1, cpu->sve_vq_map)) {
                if (cpu->sve_max_vq) {
                    error_setg(errp, "cannot set sve-max-vq=%d",
                               cpu->sve_max_vq);
                    error_append_hint(errp, "This KVM host does not support "
                                      "the vector length %d-bits.\n",
                                      vq * 128);
                    error_append_hint(errp, "It may not be possible to use "
                                      "sve-max-vq with this KVM host. Try "
                                      "using only sve<N> properties.\n");
                } else {
                    error_setg(errp, "cannot enable sve%d", vq * 128);
                    error_append_hint(errp, "This KVM host does not support "
                                      "the vector length %d-bits.\n",
                                      vq * 128);
                }
            } else {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "The KVM host requires all "
                                  "supported vector lengths smaller "
                                  "than %d bits to also be enabled.\n",
                                  max_vq * 128);
            }
            return;
        }
    } else {
        /* Ensure all required powers-of-two are enabled. */
        for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
            if (!test_bit(vq - 1, cpu->sve_vq_map)) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "sve%d is required as it "
                                  "is a power-of-two length smaller than "
                                  "the maximum, sve%d\n",
                                  vq * 128, max_vq * 128);
                return;
            }
        }
    }

    /*
     * Now that we validated all our vector lengths, the only question
     * left to answer is if we even want SVE at all.
     */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        error_setg(errp, "cannot enable sve%d", max_vq * 128);
        error_append_hint(errp, "SVE must be enabled to enable vector "
                          "lengths.\n");
        error_append_hint(errp, "Add sve=on to the CPU property list.\n");
        return;
    }

    /* From now on sve_max_vq is the actual maximum supported length. */
    cpu->sve_max_vq = max_vq;
}

static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t value;

    /* All vector lengths are disabled when SVE is off. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        value = 0;
    } else {
        value = cpu->sve_max_vq;
    }
    visit_type_uint32(v, name, &value, errp);
}

static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    Error *err = NULL;
    uint32_t max_vq;

    visit_type_uint32(v, name, &max_vq, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
        error_setg(errp, "cannot set sve-max-vq");
        error_append_hint(errp, "SVE not supported by KVM on this host\n");
        return;
    }

    if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
        error_setg(errp, "unsupported SVE vector length");
        error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
                          ARM_MAX_VQ);
        return;
    }

    cpu->sve_max_vq = max_vq;
}

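/*
 * The per-length properties below are named after the vector length in
 * bits ("sve128", "sve256", ...), so &name[3] points at the digits and
 * atoi(&name[3]) / 128 recovers the length in quadwords (vq); e.g.
 * "sve256" maps to vq == 2, i.e. bit 1 of sve_vq_map/sve_vq_init.
 * sve_vq_map records which lengths are currently enabled, while
 * sve_vq_init records which lengths the user set explicitly (on or off),
 * which is how arm_cpu_sve_finalize() tells defaults from user choices.
 */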
static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t vq = atoi(&name[3]) / 128;
    bool value;

    /* All vector lengths are disabled when SVE is off. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        value = false;
    } else {
        value = test_bit(vq - 1, cpu->sve_vq_map);
    }
    visit_type_bool(v, name, &value, errp);
}

static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t vq = atoi(&name[3]) / 128;
    Error *err = NULL;
    bool value;

    visit_type_bool(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value && kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
        error_setg(errp, "cannot enable %s", name);
        error_append_hint(errp, "SVE not supported by KVM on this host\n");
        return;
    }

    if (value) {
        set_bit(vq - 1, cpu->sve_vq_map);
    } else {
        clear_bit(vq - 1, cpu->sve_vq_map);
    }
    set_bit(vq - 1, cpu->sve_vq_init);
}

static void cpu_arm_get_sve(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    bool value = cpu_isar_feature(aa64_sve, cpu);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    Error *err = NULL;
    bool value;
    uint64_t t;

    visit_type_bool(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value && kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
        error_setg(errp, "'sve' feature not supported by KVM on this host");
        return;
    }

    t = cpu->isar.id_aa64pfr0;
    t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
    cpu->isar.id_aa64pfr0 = t;
}

void aarch64_add_sve_properties(Object *obj)
{
    uint32_t vq;

    object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
                        cpu_arm_set_sve, NULL, NULL, &error_fatal);

    for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
        char name[8];
        sprintf(name, "sve%d", vq * 128);
        object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
                            cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
    }
}
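
/*
 * For illustration: with the properties registered above, a command line
 * along the lines of "-cpu max,sve=on,sve256=on" would enable the 256-bit
 * length plus the implied 128-bit one, while "-cpu max,sve=off" disables
 * the whole SVE feature (exact option spellings per QEMU's SVE CPU
 * property documentation).
 */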

/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
 * otherwise, a CPU with as many features enabled as our emulation supports.
 * The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
 * this only needs to handle 64 bits.
 */
static void aarch64_max_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (kvm_enabled()) {
        kvm_arm_set_cpu_features_from_host(cpu);
    } else {
        uint64_t t;
        uint32_t u;
        aarch64_a57_initfn(obj);

        /*
         * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
         * one and try to apply errata workarounds or use impdef features we
         * don't provide.
         * An IMPLEMENTER field of 0 means "reserved for software use";
         * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
         * to see which features are present";
         * the VARIANT, PARTNUM and REVISION fields are all implementation
         * defined and we choose to define PARTNUM just in case guest
         * code needs to distinguish this QEMU CPU from other software
         * implementations, though this shouldn't be needed.
         */
        t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
        t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
        t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
        t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
        t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
        cpu->midr = t;
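        /*
         * For reference: with the architectural MIDR_EL1 layout
         * (IMPLEMENTER[31:24], VARIANT[23:20], ARCHITECTURE[19:16],
         * PARTNUM[15:4], REVISION[3:0]) the value built above works out
         * to 0x000f0510, since 'Q' is 0x51.
         */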

        t = cpu->isar.id_aa64isar0;
        t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
        t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
        t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
        t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
        cpu->isar.id_aa64isar0 = t;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, APA, 1); /* PAuth, architected only */
        t = FIELD_DP64(t, ID_AA64ISAR1, API, 0);
        t = FIELD_DP64(t, ID_AA64ISAR1, GPA, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, GPI, 0);
        t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
        cpu->isar.id_aa64isar1 = t;

        t = cpu->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
        t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
        t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
        cpu->isar.id_aa64pfr0 = t;

        t = cpu->isar.id_aa64pfr1;
        t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
        cpu->isar.id_aa64pfr1 = t;

        t = cpu->isar.id_aa64mmfr1;
        t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
        t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
        cpu->isar.id_aa64mmfr1 = t;

        /* Replicate the same data to the 32-bit id registers.  */
        u = cpu->isar.id_isar5;
        u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
        u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
        u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
        u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
        u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
        u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
        cpu->isar.id_isar5 = u;

        u = cpu->isar.id_isar6;
        u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1);
        u = FIELD_DP32(u, ID_ISAR6, DP, 1);
        u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
        u = FIELD_DP32(u, ID_ISAR6, SB, 1);
        u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
        cpu->isar.id_isar6 = u;

        /*
         * FIXME: We do not yet support ARMv8.2-fp16 for AArch32, so do not
         * set MVFR1.FPHP.  Strictly speaking this is not legal, but it is
         * also not legal to enable SVE without support for FP16, and
         * enabling SVE in system mode is more useful in the short term.
         */

#ifdef CONFIG_USER_ONLY
        /* For usermode -cpu max we can use a larger and more efficient DCZ
         * blocksize since we don't have to follow what the hardware does.
         */
        cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
        cpu->dcz_blocksize = 7; /*  512 bytes */
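        /*
         * dcz_blocksize is the DCZID_EL0.BS value, which encodes the DC ZVA
         * block size as (4 << BS) bytes: 4 gives the 64-byte blocks used by
         * the real cores above, 7 gives 512 bytes here.
         */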
#endif
    }

    aarch64_add_sve_properties(obj);
    object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
                        cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
}

struct ARMCPUInfo {
    const char *name;
    void (*initfn)(Object *obj);
    void (*class_init)(ObjectClass *oc, void *data);
};

static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a57",         .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53",         .initfn = aarch64_a53_initfn },
    { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
    { .name = "max",                .initfn = aarch64_max_initfn },
    { .name = NULL }
};

static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
}

static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* At this time, this property is only allowed if KVM is enabled.  This
     * restriction allows us to avoid fixing up functionality that assumes a
     * uniform execution state like do_interrupt.
     */
    if (value == false) {
        if (!kvm_enabled() || !kvm_arm_aarch32_supported(CPU(cpu))) {
            error_setg(errp, "'aarch64' feature cannot be disabled "
                             "unless KVM is enabled and 32-bit EL1 "
                             "is supported");
            return;
        }
        unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
    } else {
        set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    }
}

static void aarch64_cpu_initfn(Object *obj)
{
    object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
                             aarch64_cpu_set_aarch64, NULL);
    object_property_set_description(obj, "aarch64",
                                    "Set on/off to enable/disable aarch64 "
                                    "execution state",
                                    NULL);
}

static void aarch64_cpu_finalizefn(Object *obj)
{
}

static gchar *aarch64_gdb_arch_name(CPUState *cs)
{
    return g_strdup("aarch64");
}

static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
    cc->gdb_read_register = aarch64_cpu_gdb_read_register;
    cc->gdb_write_register = aarch64_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 34;
    cc->gdb_core_xml_file = "aarch64-core.xml";
    cc->gdb_arch_name = aarch64_gdb_arch_name;
}

static void aarch64_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}

static void aarch64_cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_AARCH64_CPU,
        .instance_size = sizeof(ARMCPU),
        .instance_init = aarch64_cpu_instance_init,
        .class_size = sizeof(ARMCPUClass),
        .class_init = info->class_init ?: cpu_register_class_init,
        .class_data = (void *)info,
    };

    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}
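
/*
 * For example, the "cortex-a57" entry in aarch64_cpus is registered here
 * as the QOM type "cortex-a57-arm-cpu" (TYPE_ARM_CPU being "arm-cpu").
 */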

static const TypeInfo aarch64_cpu_type_info = {
    .name = TYPE_AARCH64_CPU,
    .parent = TYPE_ARM_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_init = aarch64_cpu_initfn,
    .instance_finalize = aarch64_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(AArch64CPUClass),
    .class_init = aarch64_cpu_class_init,
};

static void aarch64_cpu_register_types(void)
{
    const ARMCPUInfo *info = aarch64_cpus;

    type_register_static(&aarch64_cpu_type_info);

    while (info->name) {
        aarch64_cpu_register(info);
        info++;
    }
}

type_init(aarch64_cpu_register_types)