qemu/target/arm/cpu64.c
/*
 * QEMU AArch64 CPU
 *
 * Copyright (c) 2013 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "qemu/module.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "qapi/visitor.h"
#include "hw/qdev-properties.h"


#ifndef CONFIG_USER_ONLY
static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Number of cores is in [25:24]; otherwise we RAZ */
    return (cpu->core_count - 1) << 24;
}
#endif

static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
    { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
      .writefn = arm_cp_write_ignore },
    { .name = "L2CTLR",
      .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
      .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
      .writefn = arm_cp_write_ignore },
#endif
    { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2ECTLR",
      .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUACTLR",
      .cp = 15, .opc1 = 0, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUECTLR",
      .cp = 15, .opc1 = 1, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPUMERRSR",
      .cp = 15, .opc1 = 2, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2MERRSR",
      .cp = 15, .opc1 = 3, .crm = 15,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void aarch64_a57_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a57";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
    cpu->midr = 0x411fd070;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}

static void aarch64_a53_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a53";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
    cpu->midr = 0x410fd034;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034070;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x84448004; /* L1Ip = VIPT */
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10101105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_isar6 = 0;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}

static void aarch64_a72_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a72";
    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->midr = 0x410fd083;
    cpu->revidr = 0x00000000;
    cpu->reset_fpsid = 0x41034080;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x12111111;
    cpu->isar.mvfr2 = 0x00000043;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50838;
    cpu->isar.id_pfr0 = 0x00000131;
    cpu->isar.id_pfr1 = 0x00011011;
    cpu->isar.id_dfr0 = 0x03010066;
    cpu->id_afr0 = 0x00000000;
    cpu->isar.id_mmfr0 = 0x10201105;
    cpu->isar.id_mmfr1 = 0x40000000;
    cpu->isar.id_mmfr2 = 0x01260000;
    cpu->isar.id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232042;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x00011142;
    cpu->isar.id_isar5 = 0x00011121;
    cpu->isar.id_aa64pfr0 = 0x00002222;
    cpu->isar.id_aa64dfr0 = 0x10305106;
    cpu->isar.id_aa64isar0 = 0x00011120;
    cpu->isar.id_aa64mmfr0 = 0x00001124;
    cpu->isar.dbgdidr = 0x3516d000;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
    cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
    cpu->dcz_blocksize = 4; /* 64 bytes */
    cpu->gic_num_lrs = 4;
    cpu->gic_vpribits = 5;
    cpu->gic_vprebits = 5;
    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
}

void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
    /*
     * If any vector lengths are explicitly enabled with sve<N> properties,
     * then all other lengths are implicitly disabled.  If sve-max-vq is
     * specified then it is the same as explicitly enabling all lengths
     * up to and including the specified maximum, which means all larger
     * lengths will be implicitly disabled.  If no sve<N> properties
     * are enabled and sve-max-vq is not specified, then all lengths not
     * explicitly disabled will be enabled.  Additionally, all power-of-two
     * vector lengths less than the maximum enabled length will be
     * automatically enabled and all vector lengths larger than the largest
     * disabled power-of-two vector length will be automatically disabled.
     * Errors are generated if the user provided input that interferes with
     * any of the above.  Finally, if SVE is not disabled, then at least one
     * vector length must be enabled.
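     * For example, under these rules enabling only sve512 (with TCG) also
     * enables the smaller power-of-two lengths sve128 and sve256, while
     * sve384 and everything above sve512 remain disabled.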
     */
    DECLARE_BITMAP(kvm_supported, ARM_MAX_VQ);
    DECLARE_BITMAP(tmp, ARM_MAX_VQ);
    uint32_t vq, max_vq = 0;

    /* Collect the set of vector lengths supported by KVM. */
    bitmap_zero(kvm_supported, ARM_MAX_VQ);
    if (kvm_enabled() && kvm_arm_sve_supported()) {
        kvm_arm_sve_get_vls(CPU(cpu), kvm_supported);
    } else if (kvm_enabled()) {
        assert(!cpu_isar_feature(aa64_sve, cpu));
    }

    /*
     * Process explicit sve<N> properties.
     * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
     * Check first for any sve<N> enabled.
     */
    if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
        max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;

        if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
            error_setg(errp, "cannot enable sve%d", max_vq * 128);
            error_append_hint(errp, "sve%d is larger than the maximum vector "
                              "length, sve-max-vq=%d (%d bits)\n",
                              max_vq * 128, cpu->sve_max_vq,
                              cpu->sve_max_vq * 128);
            return;
        }

        if (kvm_enabled()) {
            /*
             * For KVM we have to automatically enable all supported
             * uninitialized lengths, even when the smaller lengths are not
             * all powers-of-two.
             */
            bitmap_andnot(tmp, kvm_supported, cpu->sve_vq_init, max_vq);
            bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
        } else {
            /* Propagate enabled bits down through required powers-of-two. */
            for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
                if (!test_bit(vq - 1, cpu->sve_vq_init)) {
                    set_bit(vq - 1, cpu->sve_vq_map);
                }
            }
        }
    } else if (cpu->sve_max_vq == 0) {
        /*
         * No explicit bits enabled, and no implicit bits from sve-max-vq.
         */
        if (!cpu_isar_feature(aa64_sve, cpu)) {
            /* SVE is disabled and so are all vector lengths.  Good. */
            return;
        }

        if (kvm_enabled()) {
            /* Disabling a supported length disables all larger lengths. */
            for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
                if (test_bit(vq - 1, cpu->sve_vq_init) &&
                    test_bit(vq - 1, kvm_supported)) {
                    break;
                }
            }
            max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
            bitmap_andnot(cpu->sve_vq_map, kvm_supported,
                          cpu->sve_vq_init, max_vq);
            if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "Disabling sve%d results in all "
                                  "vector lengths being disabled.\n",
                                  vq * 128);
                error_append_hint(errp, "With SVE enabled, at least one "
                                  "vector length must be enabled.\n");
                return;
            }
        } else {
            /* Disabling a power-of-two disables all larger lengths. */
            if (test_bit(0, cpu->sve_vq_init)) {
                error_setg(errp, "cannot disable sve128");
                error_append_hint(errp, "Disabling sve128 results in all "
                                  "vector lengths being disabled.\n");
                error_append_hint(errp, "With SVE enabled, at least one "
                                  "vector length must be enabled.\n");
                return;
            }
            for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) {
                if (test_bit(vq - 1, cpu->sve_vq_init)) {
                    break;
                }
            }
            max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
            bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq);
        }

        max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
    }

    /*
     * Process the sve-max-vq property.
     * Note that we know from the above that no bit above
     * sve-max-vq is currently set.
     */
    if (cpu->sve_max_vq != 0) {
        max_vq = cpu->sve_max_vq;

        if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
            test_bit(max_vq - 1, cpu->sve_vq_init)) {
            error_setg(errp, "cannot disable sve%d", max_vq * 128);
            error_append_hint(errp, "The maximum vector length must be "
                              "enabled, sve-max-vq=%d (%d bits)\n",
                              max_vq, max_vq * 128);
            return;
        }

        /* Set all bits not explicitly set within sve-max-vq. */
        bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
        bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
    }

    /*
     * We should know what max-vq is now.  Also, as we're done
     * manipulating sve-vq-map, we ensure any bits above max-vq
     * are clear, just in case anybody looks.
     */
    assert(max_vq != 0);
    bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);

    if (kvm_enabled()) {
        /* Ensure the set of lengths matches what KVM supports. */
        bitmap_xor(tmp, cpu->sve_vq_map, kvm_supported, max_vq);
        if (!bitmap_empty(tmp, max_vq)) {
            vq = find_last_bit(tmp, max_vq) + 1;
            if (test_bit(vq - 1, cpu->sve_vq_map)) {
                if (cpu->sve_max_vq) {
                    error_setg(errp, "cannot set sve-max-vq=%d",
                               cpu->sve_max_vq);
                    error_append_hint(errp, "This KVM host does not support "
                                      "the vector length %d-bits.\n",
                                      vq * 128);
                    error_append_hint(errp, "It may not be possible to use "
                                      "sve-max-vq with this KVM host. Try "
                                      "using only sve<N> properties.\n");
                } else {
                    error_setg(errp, "cannot enable sve%d", vq * 128);
                    error_append_hint(errp, "This KVM host does not support "
                                      "the vector length %d-bits.\n",
                                      vq * 128);
                }
            } else {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "The KVM host requires all "
                                  "supported vector lengths smaller "
                                  "than %d bits to also be enabled.\n",
                                  max_vq * 128);
            }
            return;
        }
    } else {
        /* Ensure all required powers-of-two are enabled. */
        for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
            if (!test_bit(vq - 1, cpu->sve_vq_map)) {
                error_setg(errp, "cannot disable sve%d", vq * 128);
                error_append_hint(errp, "sve%d is required as it "
                                  "is a power-of-two length smaller than "
                                  "the maximum, sve%d\n",
                                  vq * 128, max_vq * 128);
                return;
            }
        }
    }

    /*
     * Now that we validated all our vector lengths, the only question
     * left to answer is if we even want SVE at all.
     */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        error_setg(errp, "cannot enable sve%d", max_vq * 128);
        error_append_hint(errp, "SVE must be enabled to enable vector "
                          "lengths.\n");
        error_append_hint(errp, "Add sve=on to the CPU property list.\n");
        return;
    }

    /* From now on sve_max_vq is the actual maximum supported length. */
    cpu->sve_max_vq = max_vq;
}

static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t value;

    /* All vector lengths are disabled when SVE is off. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        value = 0;
    } else {
        value = cpu->sve_max_vq;
    }
    visit_type_uint32(v, name, &value, errp);
}

static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t max_vq;

    if (!visit_type_uint32(v, name, &max_vq, errp)) {
        return;
    }

    if (kvm_enabled() && !kvm_arm_sve_supported()) {
        error_setg(errp, "cannot set sve-max-vq");
        error_append_hint(errp, "SVE not supported by KVM on this host\n");
        return;
    }

    if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
        error_setg(errp, "unsupported SVE vector length");
        error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
                          ARM_MAX_VQ);
        return;
    }

    cpu->sve_max_vq = max_vq;
}

/*
 * Note that cpu_arm_get/set_sve_vq cannot use the simpler
 * object_property_add_bool interface because they make use
 * of the contents of "name" to determine which bit on which
 * to operate.
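 * For example, the property name "sve256" selects vq = 256 / 128 = 2.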
 */
static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t vq = atoi(&name[3]) / 128;
    bool value;

    /* All vector lengths are disabled when SVE is off. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        value = false;
    } else {
        value = test_bit(vq - 1, cpu->sve_vq_map);
    }
    visit_type_bool(v, name, &value, errp);
}

static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint32_t vq = atoi(&name[3]) / 128;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
        error_setg(errp, "cannot enable %s", name);
        error_append_hint(errp, "SVE not supported by KVM on this host\n");
        return;
    }

    if (value) {
        set_bit(vq - 1, cpu->sve_vq_map);
    } else {
        clear_bit(vq - 1, cpu->sve_vq_map);
    }
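    /* Remember that this length was explicitly set by the user. */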
    set_bit(vq - 1, cpu->sve_vq_init);
}

static bool cpu_arm_get_sve(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    return cpu_isar_feature(aa64_sve, cpu);
}

static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    uint64_t t;

    if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
        error_setg(errp, "'sve' feature not supported by KVM on this host");
        return;
    }

    t = cpu->isar.id_aa64pfr0;
    t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
    cpu->isar.id_aa64pfr0 = t;
}

#ifdef CONFIG_USER_ONLY
/* Mirror linux /proc/sys/abi/sve_default_vector_length. */
static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
                                            const char *name, void *opaque,
                                            Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    int32_t default_len, default_vq, remainder;

    if (!visit_type_int32(v, name, &default_len, errp)) {
        return;
    }

    /* Undocumented, but the kernel allows -1 to indicate "maximum". */
    if (default_len == -1) {
        cpu->sve_default_vq = ARM_MAX_VQ;
        return;
    }

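    /* The property value is in bytes; one vector quadword (VQ) is 16 bytes. */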
    default_vq = default_len / 16;
    remainder = default_len % 16;

    /*
     * Note that the 512 max comes from include/uapi/asm/sve_context.h
     * and is the maximum architectural width of ZCR_ELx.LEN.
     */
    if (remainder || default_vq < 1 || default_vq > 512) {
        error_setg(errp, "cannot set sve-default-vector-length");
        if (remainder) {
            error_append_hint(errp, "Vector length not a multiple of 16\n");
        } else if (default_vq < 1) {
            error_append_hint(errp, "Vector length smaller than 16\n");
        } else {
            error_append_hint(errp, "Vector length larger than %d\n",
                              512 * 16);
        }
        return;
    }

    cpu->sve_default_vq = default_vq;
}

static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,
                                            const char *name, void *opaque,
                                            Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);
    int32_t value = cpu->sve_default_vq * 16;

    visit_type_int32(v, name, &value, errp);
}
#endif

void aarch64_add_sve_properties(Object *obj)
{
    uint32_t vq;

    object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);

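    /* Register one bool "sve<N>" property per vector length, N = vq * 128. */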
    for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
        char name[8];
        sprintf(name, "sve%d", vq * 128);
        object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
                            cpu_arm_set_sve_vq, NULL, NULL);
    }

#ifdef CONFIG_USER_ONLY
    /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
    object_property_add(obj, "sve-default-vector-length", "int32",
                        cpu_arm_get_sve_default_vec_len,
                        cpu_arm_set_sve_default_vec_len, NULL, NULL);
#endif
}

void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
    int arch_val = 0, impdef_val = 0;
    uint64_t t;

    /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
    if (cpu->prop_pauth) {
        if (cpu->prop_pauth_impdef) {
            impdef_val = 1;
        } else {
            arch_val = 1;
        }
    } else if (cpu->prop_pauth_impdef) {
        error_setg(errp, "cannot enable pauth-impdef without pauth");
        error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
    }

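    /* APA/GPA advertise the architected algorithm, API/GPI an impdef one. */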
    t = cpu->isar.id_aa64isar1;
    t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
    t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
    cpu->isar.id_aa64isar1 = t;
}

static Property arm_cpu_pauth_property =
    DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
static Property arm_cpu_pauth_impdef_property =
    DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);

/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
 * otherwise, a CPU with as many features enabled as our emulation supports.
 * The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
 * this only needs to handle 64 bits.
 */
static void aarch64_max_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (kvm_enabled()) {
        kvm_arm_set_cpu_features_from_host(cpu);
    } else {
        uint64_t t;
        uint32_t u;
        aarch64_a57_initfn(obj);

        /*
         * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
         * one and try to apply errata workarounds or use impdef features we
         * don't provide.
         * An IMPLEMENTER field of 0 means "reserved for software use";
         * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
         * to see which features are present";
         * the VARIANT, PARTNUM and REVISION fields are all implementation
         * defined and we choose to define PARTNUM just in case guest
         * code needs to distinguish this QEMU CPU from other software
         * implementations, though this shouldn't be needed.
         */
        t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
        t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
        t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
        t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
        t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
        cpu->midr = t;

        t = cpu->isar.id_aa64isar0;
        t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
        t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
        t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
        t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
        t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
        t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
        cpu->isar.id_aa64isar0 = t;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
        t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
        t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
        cpu->isar.id_aa64isar1 = t;

        t = cpu->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
        t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
        t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
        t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);
        t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);
        cpu->isar.id_aa64pfr0 = t;

        t = cpu->isar.id_aa64pfr1;
        t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
        t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);
        /*
         * Begin with full support for MTE. This will be downgraded to MTE=0
         * during realize if the board provides no tag memory, much like
         * we do for EL2 with the virtualization=on property.
         */
        t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
        cpu->isar.id_aa64pfr1 = t;

        t = cpu->isar.id_aa64mmfr0;
        t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 5); /* PARange: 48 bits */
        cpu->isar.id_aa64mmfr0 = t;

        t = cpu->isar.id_aa64mmfr1;
        t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
        t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
        t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
        t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
        t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
        t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
        cpu->isar.id_aa64mmfr1 = t;

        t = cpu->isar.id_aa64mmfr2;
        t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);
        t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */
        t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */
        cpu->isar.id_aa64mmfr2 = t;

        t = cpu->isar.id_aa64zfr0;
        t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
        t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);  /* PMULL */
        t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
        t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);
        t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
        t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
        t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
        t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);
        t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
        cpu->isar.id_aa64zfr0 = t;

        /* Replicate the same data to the 32-bit id registers.  */
        u = cpu->isar.id_isar5;
        u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
        u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
        u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
        u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
        u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
        u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
        cpu->isar.id_isar5 = u;

        u = cpu->isar.id_isar6;
        u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1);
        u = FIELD_DP32(u, ID_ISAR6, DP, 1);
        u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
        u = FIELD_DP32(u, ID_ISAR6, SB, 1);
        u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
        u = FIELD_DP32(u, ID_ISAR6, BF16, 1);
        u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
        cpu->isar.id_isar6 = u;

        u = cpu->isar.id_pfr0;
        u = FIELD_DP32(u, ID_PFR0, DIT, 1);
        cpu->isar.id_pfr0 = u;

        u = cpu->isar.id_pfr2;
        u = FIELD_DP32(u, ID_PFR2, SSBS, 1);
        cpu->isar.id_pfr2 = u;

        u = cpu->isar.id_mmfr3;
        u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
        cpu->isar.id_mmfr3 = u;

        u = cpu->isar.id_mmfr4;
        u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
        u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
        u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
        u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
        cpu->isar.id_mmfr4 = u;

        t = cpu->isar.id_aa64dfr0;
        t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
        cpu->isar.id_aa64dfr0 = t;

        u = cpu->isar.id_dfr0;
        u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
        cpu->isar.id_dfr0 = u;

        u = cpu->isar.mvfr1;
        u = FIELD_DP32(u, MVFR1, FPHP, 3);      /* v8.2-FP16 */
        u = FIELD_DP32(u, MVFR1, SIMDHP, 2);    /* v8.2-FP16 */
        cpu->isar.mvfr1 = u;

#ifdef CONFIG_USER_ONLY
        /* For usermode -cpu max we can use a larger and more efficient DCZ
         * blocksize since we don't have to follow what the hardware does.
         */
        cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
        cpu->dcz_blocksize = 7; /*  512 bytes */
#endif

        /* Default to PAUTH on, with the architected algorithm. */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
        qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
    }

    aarch64_add_sve_properties(obj);
    object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
                        cpu_max_set_sve_max_vq, NULL, NULL);
}

static const ARMCPUInfo aarch64_cpus[] = {
    { .name = "cortex-a57",         .initfn = aarch64_a57_initfn },
    { .name = "cortex-a53",         .initfn = aarch64_a53_initfn },
    { .name = "cortex-a72",         .initfn = aarch64_a72_initfn },
    { .name = "max",                .initfn = aarch64_max_initfn },
};

static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
}

static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* At this time, this property is only allowed if KVM is enabled.  This
     * restriction allows us to avoid fixing up functionality that assumes a
     * uniform execution state like do_interrupt.
     */
    if (value == false) {
        if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
            error_setg(errp, "'aarch64' feature cannot be disabled "
                             "unless KVM is enabled and 32-bit EL1 "
                             "is supported");
            return;
        }
        unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
    } else {
        set_feature(&cpu->env, ARM_FEATURE_AARCH64);
    }
}

static void aarch64_cpu_finalizefn(Object *obj)
{
}

static gchar *aarch64_gdb_arch_name(CPUState *cs)
{
    return g_strdup("aarch64");
}

static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->gdb_read_register = aarch64_cpu_gdb_read_register;
    cc->gdb_write_register = aarch64_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 34;
    cc->gdb_core_xml_file = "aarch64-core.xml";
    cc->gdb_arch_name = aarch64_gdb_arch_name;

    object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
                                   aarch64_cpu_set_aarch64);
    object_class_property_set_description(oc, "aarch64",
                                          "Set on/off to enable/disable aarch64 "
                                          "execution state");
}

static void aarch64_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}

void aarch64_cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_AARCH64_CPU,
        .instance_size = sizeof(ARMCPU),
        .instance_init = aarch64_cpu_instance_init,
        .class_size = sizeof(ARMCPUClass),
        .class_init = info->class_init ?: cpu_register_class_init,
        .class_data = (void *)info,
    };

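    /* Construct the type name, e.g. "cortex-a57" -> "cortex-a57-arm-cpu". */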
    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}

static const TypeInfo aarch64_cpu_type_info = {
    .name = TYPE_AARCH64_CPU,
    .parent = TYPE_ARM_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_finalize = aarch64_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(AArch64CPUClass),
    .class_init = aarch64_cpu_class_init,
};

static void aarch64_cpu_register_types(void)
{
    size_t i;

    type_register_static(&aarch64_cpu_type_info);

    for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
        aarch64_cpu_register(&aarch64_cpus[i]);
    }
}

type_init(aarch64_cpu_register_types)