qemu/target/arm/cpu.c
<<
>>
Prefs
   1/*
   2 * QEMU ARM CPU
   3 *
   4 * Copyright (c) 2012 SUSE LINUX Products GmbH
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version 2
   9 * of the License, or (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, see
  18 * <http://www.gnu.org/licenses/gpl-2.0.html>
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "qemu/error-report.h"
  23#include "qapi/error.h"
  24#include "cpu.h"
  25#include "internals.h"
  26#include "qemu-common.h"
  27#include "exec/exec-all.h"
  28#include "hw/qdev-properties.h"
  29#if !defined(CONFIG_USER_ONLY)
  30#include "hw/loader.h"
  31#endif
  32#include "hw/arm/arm.h"
  33#include "sysemu/sysemu.h"
  34#include "sysemu/hw_accel.h"
  35#include "kvm_arm.h"
  36#include "disas/capstone.h"
  37
  38#include "hw/core/cpu-exec-gpio.h"
  39#include "hw/fdt_generic_util.h"
  40
  41#if !defined(CONFIG_USER_ONLY)
  42static void arm_cpu_set_irq(void *opaque, int irq, int level);
  43#endif
  44
  45static void arm_cpu_set_pc(CPUState *cs, vaddr value)
  46{
  47    ARMCPU *cpu = ARM_CPU(cs);
  48
  49    cpu->env.regs[15] = value;
  50}
  51
  52static vaddr arm_cpu_get_pc(CPUState *cs)
  53{
  54    ARMCPU *cpu = ARM_CPU(cs);
  55
  56    return cpu->env.regs[15];
  57}
  58
  59enum {
  60    ARM_DEBUG_CURRENT_EL,
  61    ARM_DEBUG_PHYS
  62};
  63
  64static const char *arm_debug_ctx[] = {
  65    [ARM_DEBUG_CURRENT_EL] = "current-el",
  66    [ARM_DEBUG_PHYS] = "phys",
  67    NULL
  68};
  69
  70static bool arm_cpu_has_work(CPUState *cs)
  71{
  72    ARMCPU *cpu = ARM_CPU(cs);
  73
  74    return (cpu->power_state != PSCI_OFF)
  75        && cs->interrupt_request &
  76        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
  77         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
  78         | CPU_INTERRUPT_EXITTB);
  79}
  80
  81void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHook *hook,
  82                                 void *opaque)
  83{
  84    /* We currently only support registering a single hook function */
  85    assert(!cpu->el_change_hook);
  86    cpu->el_change_hook = hook;
  87    cpu->el_change_hook_opaque = opaque;
  88}
  89
  90static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
  91{
  92    /* Reset a single ARMCPRegInfo register */
  93    ARMCPRegInfo *ri = value;
  94    ARMCPU *cpu = opaque;
  95
  96    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
  97        return;
  98    }
  99
 100    if (ri->resetfn) {
 101        ri->resetfn(&cpu->env, ri);
 102        return;
 103    }
 104
 105    /* A zero offset is never possible as it would be regs[0]
 106     * so we use it to indicate that reset is being handled elsewhere.
 107     * This is basically only used for fields in non-core coprocessors
 108     * (like the pxa2xx ones).
 109     */
 110    if (!ri->fieldoffset) {
 111        return;
 112    }
 113
 114    if (cpreg_field_is_64bit(ri)) {
 115        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
 116    } else {
 117        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
 118    }
 119}
 120
 121static void cp_reg_check_reset(gpointer key, gpointer value,  gpointer opaque)
 122{
 123    /* Purely an assertion check: we've already done reset once,
 124     * so now check that running the reset for the cpreg doesn't
 125     * change its value. This traps bugs where two different cpregs
 126     * both try to reset the same state field but to different values.
 127     */
 128    ARMCPRegInfo *ri = value;
 129    ARMCPU *cpu = opaque;
 130    uint64_t oldvalue, newvalue;
 131
 132    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
 133        return;
 134    }
 135
 136    oldvalue = read_raw_cp_reg(&cpu->env, ri);
 137    cp_reg_reset(key, value, opaque);
 138    newvalue = read_raw_cp_reg(&cpu->env, ri);
 139    assert(oldvalue == newvalue);
 140}
 141
 142#ifndef CONFIG_USER_ONLY
 143static void arm_gt_compute_scale(ARMCPU *s)
 144{
 145    Int128 ref_clk = 1000 * 1000 * 1000;
 146    Int128 ref_clk_scaled = int128_lshift(ref_clk,
 147                                       GTIMER_SCALE_SHIFT);
 148
 149    s->gt_scale = (uint64_t)int128_div(ref_clk_scaled,
 150                                        (Int128)s->gt_freq);
 151}
 152#endif
 153
 154/* CPUClass::reset() */
 155static void arm_cpu_reset(CPUState *s)
 156{
 157    ARMCPU *cpu = ARM_CPU(s);
 158    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
 159    CPUARMState *env = &cpu->env;
 160#ifndef CONFIG_USER_ONLY
 161    CPUClass *cc = CPU_GET_CLASS(s);
 162    vaddr old_pc = cc->get_pc(s);
 163    int i;
 164#endif
 165
 166    acc->parent_reset(s);
 167
 168    memset(env, 0, offsetof(CPUARMState, end_reset_fields));
 169
 170    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
 171    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
 172
 173    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
 174    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
 175    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
 176    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
 177
 178    cpu->power_state = cpu->start_powered_off || s->arch_halt_pin ?
 179                           PSCI_OFF : PSCI_ON;
 180    s->halted = cpu->start_powered_off || s->halt_pin || s->arch_halt_pin;
 181
 182    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
 183        cpu->env.cp15.c14_cntfrq = cpu->gt_freq;
 184    }
 185    /* Reset value of SCTLR_V is controlled by input signal VINITHI.  */
 186    env->cp15.sctlr_ns &= ~SCTLR_V;
 187    env->cp15.sctlr_s &= ~SCTLR_V;
 188    env->cp15.sctlr_ns |= env->vinithi ? SCTLR_V : 0;
 189    env->cp15.sctlr_s |= env->vinithi ? SCTLR_V : 0;
 190
 191    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
 192        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
 193    }
 194
 195    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
 196        /* 64 bit CPUs always start in 64 bit mode */
 197        env->aarch64 = 1;
 198#if defined(CONFIG_USER_ONLY)
 199        env->pstate = PSTATE_MODE_EL0t;
 200        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
 201        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
 202        /* and to the FP/Neon instructions */
 203        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
 204#else
 205        /* Reset into the highest available EL */
 206        if (arm_feature(env, ARM_FEATURE_EL3)) {
 207            env->pstate = PSTATE_MODE_EL3h;
 208        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
 209            env->pstate = PSTATE_MODE_EL2h;
 210        } else {
 211            env->pstate = PSTATE_MODE_EL1h;
 212        }
 213        env->pc = cpu->rvbar;
 214#endif
 215    } else {
 216#if defined(CONFIG_USER_ONLY)
 217        /* Userspace expects access to cp10 and cp11 for FP/Neon */
 218        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
 219#endif
 220    }
 221
 222#if defined(CONFIG_USER_ONLY)
 223    env->uncached_cpsr = ARM_CPU_MODE_USR;
 224    /* For user mode we must enable access to coprocessors */
 225    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
 226    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
 227        env->cp15.c15_cpar = 3;
 228    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
 229        env->cp15.c15_cpar = 1;
 230    }
 231#else
 232    /* SVC mode with interrupts disabled.  */
 233    env->uncached_cpsr = ARM_CPU_MODE_SVC;
 234    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
 235
 236    if (arm_feature(env, ARM_FEATURE_M)) {
 237        uint32_t initial_msp; /* Loaded from 0x0 */
 238        uint32_t initial_pc; /* Loaded from 0x4 */
 239        uint8_t *rom;
 240
 241        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
 242            env->v7m.secure = true;
 243        } else {
 244            /* This bit resets to 0 if security is supported, but 1 if
 245             * it is not. The bit is not present in v7M, but we set it
 246             * here so we can avoid having to make checks on it conditional
 247             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
 248             */
 249            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
 250        }
 251
 252        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
 253         * that it resets to 1, so QEMU always does that rather than making
 254         * it dependent on CPU model. In v8M it is RES1.
 255         */
 256        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
 257        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
 258        if (arm_feature(env, ARM_FEATURE_V8)) {
 259            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
 260            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
 261            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
 262        }
 263
 264        /* Unlike A/R profile, M profile defines the reset LR value */
 265        env->regs[14] = 0xffffffff;
 266
 267        /* Load the initial SP and PC from the vector table at address 0 */
 268        rom = rom_ptr(0);
 269        if (rom) {
 270            /* Address zero is covered by ROM which hasn't yet been
 271             * copied into physical memory.
 272             */
 273            initial_msp = ldl_p(rom);
 274            initial_pc = ldl_p(rom + 4);
 275        } else {
 276            /* Address zero not covered by a ROM blob, or the ROM blob
 277             * is in non-modifiable memory and this is a second reset after
 278             * it got copied into memory. In the latter case, rom_ptr
 279             * will return a NULL pointer and we should use ldl_phys instead.
 280             */
 281            initial_msp = ldl_phys(s->as, 0);
 282            initial_pc = ldl_phys(s->as, 4);
 283        }
 284
 285        env->regs[13] = initial_msp & 0xFFFFFFFC;
 286        env->regs[15] = initial_pc & ~1;
 287        env->thumb = initial_pc & 1;
 288    }
 289
 290    /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
 291     * executing as AArch32 then check if highvecs are enabled and
 292     * adjust the PC accordingly.
 293     */
 294    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
 295        env->regs[15] = 0xFFFF0000;
 296    }
 297
 298    /* M profile requires that reset clears the exclusive monitor;
 299     * A profile does not, but clearing it makes more sense than having it
 300     * set with an exclusive access on address zero.
 301     */
 302    arm_clear_exclusive(env);
 303
 304    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
 305#endif
 306
 307    if (arm_feature(env, ARM_FEATURE_PMSA)) {
 308        if (cpu->pmsav7_dregion > 0) {
 309            if (arm_feature(env, ARM_FEATURE_V8)) {
 310                memset(env->pmsav8.rbar[M_REG_NS], 0,
 311                       sizeof(*env->pmsav8.rbar[M_REG_NS])
 312                       * cpu->pmsav7_dregion);
 313                memset(env->pmsav8.rlar[M_REG_NS], 0,
 314                       sizeof(*env->pmsav8.rlar[M_REG_NS])
 315                       * cpu->pmsav7_dregion);
 316                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
 317                    memset(env->pmsav8.rbar[M_REG_S], 0,
 318                           sizeof(*env->pmsav8.rbar[M_REG_S])
 319                           * cpu->pmsav7_dregion);
 320                    memset(env->pmsav8.rlar[M_REG_S], 0,
 321                           sizeof(*env->pmsav8.rlar[M_REG_S])
 322                           * cpu->pmsav7_dregion);
 323                }
 324            } else if (arm_feature(env, ARM_FEATURE_V7)) {
 325                memset(env->pmsav7.drbar, 0,
 326                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
 327                memset(env->pmsav7.drsr, 0,
 328                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
 329                memset(env->pmsav7.dracr, 0,
 330                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
 331            }
 332        }
 333        env->pmsav7.rnr[M_REG_NS] = 0;
 334        env->pmsav7.rnr[M_REG_S] = 0;
 335        env->pmsav8.mair0[M_REG_NS] = 0;
 336        env->pmsav8.mair0[M_REG_S] = 0;
 337        env->pmsav8.mair1[M_REG_NS] = 0;
 338        env->pmsav8.mair1[M_REG_S] = 0;
 339    }
 340
 341    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
 342        if (cpu->sau_sregion > 0) {
 343            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
 344            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
 345        }
 346        env->sau.rnr = 0;
 347        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
 348         * the Cortex-M33 does.
 349         */
 350        env->sau.ctrl = 0;
 351    }
 352
 353    set_flush_to_zero(1, &env->vfp.standard_fp_status);
 354    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
 355    set_default_nan_mode(1, &env->vfp.standard_fp_status);
 356    set_float_detect_tininess(float_tininess_before_rounding,
 357                              &env->vfp.fp_status);
 358    set_float_detect_tininess(float_tininess_before_rounding,
 359                              &env->vfp.standard_fp_status);
 360#ifndef CONFIG_USER_ONLY
 361    if (kvm_enabled()) {
 362        kvm_arm_reset_vcpu(cpu);
 363    }
 364
 365    if (!runstate_is_running()) {
 366        cc->set_pc(s, old_pc);
 367    }
 368#endif
 369
 370    cpu->is_in_wfi = false;
 371    qemu_set_irq(cpu->wfi, cpu->is_in_wfi);
 372
 373    hw_breakpoint_update_all(cpu);
 374    hw_watchpoint_update_all(cpu);
 375
 376#ifndef CONFIG_USER_ONLY
 377    if (cpu->env.memattr_ns) {
 378        env->memattr[MEM_ATTR_NS].attrs = *cpu->env.memattr_ns;
 379    }
 380
 381    if (cpu->env.memattr_s) {
 382        env->memattr[MEM_ATTR_SEC].attrs = *cpu->env.memattr_s;
 383    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
 384            /* Only set secure mode if the CPU support EL3 */
 385            env->memattr[MEM_ATTR_SEC].attrs.secure = true;
 386    }
 387
 388    for (i = 0; i < ARRAY_SIZE(cpu->env.irq_wires); i++) {
 389        if (!arm_feature(env, ARM_FEATURE_EL2) && i >= ARM_CPU_VIRQ) {
 390            break;
 391        }
 392        arm_cpu_set_irq(cpu, i, cpu->env.irq_wires[i]);
 393    }
 394#endif
 395}
 396
 397bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 398{
 399    CPUClass *cc = CPU_GET_CLASS(cs);
 400    ARMCPU *cpu = ARM_CPU(cs);
 401    CPUARMState *env = cs->env_ptr;
 402    uint32_t cur_el = arm_current_el(env);
 403    bool secure = arm_is_secure(env);
 404    uint32_t target_el;
 405    uint32_t excp_idx;
 406    bool ret = false;
 407    bool exit_wfi = false;
 408
 409    if (interrupt_request & CPU_INTERRUPT_FIQ) {
 410        excp_idx = EXCP_FIQ;
 411        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
 412        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
 413            cs->exception_index = excp_idx;
 414            env->exception.target_el = target_el;
 415            cc->do_interrupt(cs);
 416            ret = true;
 417            exit_wfi = true;
 418        }
 419    }
 420    if (interrupt_request & CPU_INTERRUPT_HARD) {
 421        excp_idx = EXCP_IRQ;
 422        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
 423        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
 424            cs->exception_index = excp_idx;
 425            env->exception.target_el = target_el;
 426            cc->do_interrupt(cs);
 427            ret = true;
 428            exit_wfi = true;
 429        }
 430    }
 431    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
 432        excp_idx = EXCP_VIRQ;
 433        target_el = 1;
 434        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
 435            cs->exception_index = excp_idx;
 436            env->exception.target_el = target_el;
 437            cc->do_interrupt(cs);
 438            ret = true;
 439        }
 440    }
 441    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
 442        excp_idx = EXCP_VFIQ;
 443        target_el = 1;
 444        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
 445            cs->exception_index = excp_idx;
 446            env->exception.target_el = target_el;
 447            cc->do_interrupt(cs);
 448            ret = true;
 449        }
 450    }
 451
 452    /* Xilinx: If we get here we want to make sure that we update the WFI
 453     * status to make sure that the PMU knows we are running again.
 454     */
 455    if (exit_wfi == true && cpu->is_in_wfi) {
 456        cpu->is_in_wfi = false;
 457        qemu_set_irq(cpu->wfi, 0);
 458    }
 459
 460    return ret;
 461}
 462
 463#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
 464static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 465{
 466    CPUClass *cc = CPU_GET_CLASS(cs);
 467    ARMCPU *cpu = ARM_CPU(cs);
 468    CPUARMState *env = &cpu->env;
 469    bool ret = false;
 470
 471    /* ARMv7-M interrupt masking works differently than -A or -R.
 472     * There is no FIQ/IRQ distinction. Instead of I and F bits
 473     * masking FIQ and IRQ interrupts, an exception is taken only
 474     * if it is higher priority than the current execution priority
 475     * (which depends on state like BASEPRI, FAULTMASK and the
 476     * currently active exception).
 477     */
 478    if (interrupt_request & CPU_INTERRUPT_HARD
 479        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
 480        cs->exception_index = EXCP_IRQ;
 481        cc->do_interrupt(cs);
 482        ret = true;
 483    }
 484    return ret;
 485}
 486#endif
 487
 488#ifndef CONFIG_USER_ONLY
 489static void arm_cpu_set_irq(void *opaque, int irq, int level)
 490{
 491    ARMCPU *cpu = opaque;
 492    CPUARMState *env = &cpu->env;
 493    CPUState *cs = CPU(cpu);
 494    static const int mask[] = {
 495        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
 496        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
 497        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
 498        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
 499    };
 500
 501    env->irq_wires[irq] = level;
 502
 503    switch (irq) {
 504    case ARM_CPU_VIRQ:
 505    case ARM_CPU_VFIQ:
 506        assert(arm_feature(env, ARM_FEATURE_EL2));
 507        /* fall through */
 508    case ARM_CPU_IRQ:
 509    case ARM_CPU_FIQ:
 510        if (level) {
 511            cpu_interrupt(cs, mask[irq]);
 512        } else {
 513            cpu_reset_interrupt(cs, mask[irq]);
 514        }
 515        break;
 516    default:
 517        g_assert_not_reached();
 518    }
 519}
 520
 521static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
 522{
 523#ifdef CONFIG_KVM
 524    ARMCPU *cpu = opaque;
 525    CPUState *cs = CPU(cpu);
 526    int kvm_irq = KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT;
 527
 528    switch (irq) {
 529    case ARM_CPU_IRQ:
 530        kvm_irq |= KVM_ARM_IRQ_CPU_IRQ;
 531        break;
 532    case ARM_CPU_FIQ:
 533        kvm_irq |= KVM_ARM_IRQ_CPU_FIQ;
 534        break;
 535    default:
 536        g_assert_not_reached();
 537    }
 538    kvm_irq |= cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT;
 539    kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
 540#endif
 541}
 542
 543static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
 544{
 545    ARMCPU *cpu = ARM_CPU(cs);
 546    CPUARMState *env = &cpu->env;
 547
 548    cpu_synchronize_state(cs);
 549    return arm_cpu_data_is_big_endian(env);
 550}
 551
 552#endif
 553
 554#ifndef CONFIG_USER_ONLY
 555static void arm_cpu_set_ncpuhalt(void *opaque, int irq, int level)
 556{
 557    CPUState *cs = opaque;
 558    ARMCPU *cpu = ARM_CPU(cs);
 559    int old_value = cs->arch_halt_pin;
 560
 561    /* FIXME: This code should be active in order to implement the semantic
 562     * where an already running CPU cannot be halted. This doesn't work though,
 563     * as QEMU can not make any guarantees on initial ordering of setting the
 564     * halt/reset GPIOs on machine init. So just make nCPUHALT a regular halt
 565     * for the moment.
 566     */
 567#if 0
 568    if (!cs->reset_pin) {
 569        return;
 570    }
 571#endif
 572    cs->arch_halt_pin = level;
 573    /* As we set the powered_off status on CPU reset we need to make sure that
 574     * we unset it as well.
 575     */
 576    cpu->power_state = level ? PSCI_OFF : PSCI_ON;
 577    cpu_halt_update(cs);
 578
 579    if (cs->arch_halt_pin != old_value && !cs->arch_halt_pin) {
 580        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
 581    }
 582}
 583
 584static void arm_cpu_set_vinithi(void *opaque, int irq, int level)
 585{
 586    CPUState *cs = opaque;
 587    ARMCPU *cpu = ARM_CPU(cs);
 588
 589    cpu->env.vinithi = level;
 590}
 591
 592static void arm_cpu_set_mr_secure(Object *obj, Visitor *v, const char *name,
 593                                  void *opaque, Error **errp)
 594{
 595    ARMCPU *ac = ARM_CPU(obj);
 596    AddressSpace *as;
 597    Error *local_err = NULL;
 598    char *path = NULL;
 599
 600    qemu_log("set mr_secure\n");
 601    visit_type_str(v, name, &path, &local_err);
 602
 603    if (!local_err && strcmp(path, "") != 0) {
 604        ac->mr_secure = MEMORY_REGION(object_resolve_link(obj, name, path,
 605                                      &local_err));
 606    }
 607
 608    if (local_err) {
 609        error_propagate(errp, local_err);
 610        return;
 611    }
 612
 613    object_ref(OBJECT(ac->mr_secure));
 614    as = g_malloc0(sizeof *as);
 615    address_space_init(as, ac->mr_secure, NULL);
 616    ac->as_secure = as;
 617}
 618#endif
 619
 620static inline void set_feature(CPUARMState *env, int feature)
 621{
 622    env->features |= 1ULL << feature;
 623}
 624
 625static inline void unset_feature(CPUARMState *env, int feature)
 626{
 627    env->features &= ~(1ULL << feature);
 628}
 629
 630static int
 631print_insn_thumb1(bfd_vma pc, disassemble_info *info)
 632{
 633  return print_insn_arm(pc | 1, info);
 634}
 635
 636static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
 637{
 638    ARMCPU *ac = ARM_CPU(cpu);
 639    CPUARMState *env = &ac->env;
 640    bool sctlr_b;
 641
 642    if (is_a64(env)) {
 643        /* We might not be compiled with the A64 disassembler
 644         * because it needs a C++ compiler. Leave print_insn
 645         * unset in this case to use the caller default behaviour.
 646         */
 647#if defined(CONFIG_ARM_A64_DIS)
 648        info->print_insn = print_insn_arm_a64;
 649#endif
 650        info->cap_arch = CS_ARCH_ARM64;
 651        info->cap_insn_unit = 4;
 652        info->cap_insn_split = 4;
 653    } else {
 654        int cap_mode;
 655        if (env->thumb) {
 656            info->print_insn = print_insn_thumb1;
 657            info->cap_insn_unit = 2;
 658            info->cap_insn_split = 4;
 659            cap_mode = CS_MODE_THUMB;
 660        } else {
 661            info->print_insn = print_insn_arm;
 662            info->cap_insn_unit = 4;
 663            info->cap_insn_split = 4;
 664            cap_mode = CS_MODE_ARM;
 665        }
 666        if (arm_feature(env, ARM_FEATURE_V8)) {
 667            cap_mode |= CS_MODE_V8;
 668        }
 669        if (arm_feature(env, ARM_FEATURE_M)) {
 670            cap_mode |= CS_MODE_MCLASS;
 671        }
 672        info->cap_arch = CS_ARCH_ARM;
 673        info->cap_mode = cap_mode;
 674    }
 675
 676    sctlr_b = arm_sctlr_b(env);
 677    if (bswap_code(sctlr_b)) {
 678#ifdef TARGET_WORDS_BIGENDIAN
 679        info->endian = BFD_ENDIAN_LITTLE;
 680#else
 681        info->endian = BFD_ENDIAN_BIG;
 682#endif
 683    }
 684    info->flags &= ~INSN_ARM_BE32;
 685#ifndef CONFIG_USER_ONLY
 686    if (sctlr_b) {
 687        info->flags |= INSN_ARM_BE32;
 688    }
 689#endif
 690}
 691
 692uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
 693{
 694    uint32_t Aff1 = idx / clustersz;
 695    uint32_t Aff0 = idx % clustersz;
 696    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
 697}
 698
 699static void arm_cpu_initfn(Object *obj)
 700{
 701    CPUState *cs = CPU(obj);
 702    ARMCPU *cpu = ARM_CPU(obj);
 703
 704    cs->env_ptr = &cpu->env;
 705    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
 706                                         g_free, g_free);
 707
 708#ifndef CONFIG_USER_ONLY
 709    /* Our inbound IRQ and FIQ lines */
 710    if (kvm_enabled()) {
 711        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
 712         * the same interface as non-KVM CPUs.
 713         */
 714        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
 715    } else {
 716        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
 717    }
 718
 719    qdev_init_gpio_in_named(DEVICE(cpu), arm_cpu_set_ncpuhalt, "ncpuhalt", 1);
 720    qdev_init_gpio_in_named(DEVICE(cpu), arm_cpu_set_vinithi, "vinithi", 1);
 721
 722    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
 723                       ARRAY_SIZE(cpu->gt_timer_outputs));
 724
 725    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->wfi, "wfi", 1);
 726
 727    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
 728                             "gicv3-maintenance-interrupt", 1);
 729    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
 730                             "pmu-interrupt", 1);
 731#endif
 732
 733    /* DTB consumers generally don't in fact care what the 'compatible'
 734     * string is, so always provide some string and trust that a hypothetical
 735     * picky DTB consumer will also provide a helpful error message.
 736     */
 737    cpu->dtb_compatible = "qemu,unknown";
 738    cpu->psci_version = 1; /* By default assume PSCI v0.1 */
 739    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
 740
 741    if (tcg_enabled()) {
 742        cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
 743    }
 744
 745#ifndef CONFIG_USER_ONLY
 746    object_property_add(obj, "mr-secure", "link<" TYPE_MEMORY_REGION ">",
 747                        NULL, /* FIXME: Implement the getter */
 748                        arm_cpu_set_mr_secure,
 749                        NULL, /* FIXME: Implement the cleanup */
 750                        NULL, &error_abort);
 751
 752    object_property_add_link(obj, "memattr_ns", TYPE_MEMORY_TRANSACTION_ATTR,
 753                             (Object **)&cpu->env.memattr_ns,
 754                             qdev_prop_allow_set_link_before_realize,
 755                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
 756                             &error_abort);
 757
 758    object_property_add_link(obj, "memattr_s", TYPE_MEMORY_TRANSACTION_ATTR,
 759                             (Object **)&cpu->env.memattr_s,
 760                             qdev_prop_allow_set_link_before_realize,
 761                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
 762                             &error_abort);
 763#endif
 764}
 765
 766static Property arm_cpu_reset_cbar_property =
 767            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
 768
 769static Property arm_cpu_reset_hivecs_property =
 770            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
 771
 772static void arm_cpu_get_rvbar(Object *obj, Visitor *v,
 773                              const char *name, void *opaque,
 774                              Error **errp)
 775{
 776    ARMCPU *cpu = ARM_CPU(obj);
 777    Error *local_err = NULL;
 778
 779    visit_type_uint64(v, name, &cpu->rvbar, &local_err);
 780    if (local_err) {
 781        error_propagate(errp, local_err);
 782    }
 783}
 784
 785static void arm_cpu_set_rvbar(Object *obj, Visitor *v,
 786                              const char *name, void *opaque,
 787                              Error **errp)
 788{
 789    ARMCPU *cpu = ARM_CPU(obj);
 790    Error *local_err = NULL;
 791
 792    visit_type_uint64(v, name, &cpu->rvbar, &local_err);
 793    if (local_err) {
 794        error_propagate(errp, local_err);
 795    }
 796}
 797
 798#ifndef CONFIG_USER_ONLY
 799static void arm_cpu_set_memattr_secure(Object *obj, Visitor *v,
 800                                          const char *name, void *opaque,
 801                                          Error **errp)
 802{
 803    ARMCPU *cpu = ARM_CPU(obj);
 804    bool secure;
 805    visit_type_bool(v, name, &secure,
 806                    errp);
 807    cpu->env.memattr[MEM_ATTR_NS].attrs.secure = secure;
 808}
 809#endif
 810
 811static Property arm_cpu_has_el2_property =
 812            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
 813
 814static Property arm_cpu_has_el3_property =
 815            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
 816
 817static Property arm_cpu_cfgend_property =
 818            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
 819
 820/* use property name "pmu" to match other archs and virt tools */
 821static Property arm_cpu_has_pmu_property =
 822            DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
 823
 824static Property arm_cpu_has_mpu_property =
 825            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
 826
 827/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 828 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 829 * the right value for that particular CPU type, and we don't want
 830 * to override that with an incorrect constant value.
 831 */
 832static Property arm_cpu_pmsav7_dregion_property =
 833            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
 834                                           pmsav7_dregion,
 835                                           qdev_prop_uint32, uint32_t);
 836
 837static void arm_cpu_post_init(Object *obj)
 838{
 839    ARMCPU *cpu = ARM_CPU(obj);
 840
 841    /* M profile implies PMSA. We have to do this here rather than
 842     * in realize with the other feature-implication checks because
 843     * we look at the PMSA bit to see if we should add some properties.
 844     */
 845    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
 846        set_feature(&cpu->env, ARM_FEATURE_PMSA);
 847    }
 848
 849    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
 850        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
 851        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
 852                                 &error_abort);
 853    }
 854
 855    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
 856        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
 857                                 &error_abort);
 858    }
 859
 860    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
 861        object_property_add(obj, "rvbar", "uint64",
 862                            arm_cpu_get_rvbar,
 863                            arm_cpu_set_rvbar,
 864                            NULL, NULL, &error_abort);
 865    }
 866
 867#ifndef CONFIG_USER_ONLY
 868    if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
 869        object_property_add(obj, "memattr-secure", "bool",
 870                            NULL, arm_cpu_set_memattr_secure,
 871                            NULL, NULL, &error_abort);
 872    }
 873#endif
 874
 875    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
 876        /* Add the has_el3 state CPU property only if EL3 is allowed.  This will
 877         * prevent "has_el3" from existing on CPUs which cannot support EL3.
 878         */
 879        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
 880                                 &error_abort);
 881
 882#ifndef CONFIG_USER_ONLY
 883        object_property_add_link(obj, "secure-memory",
 884                                 TYPE_MEMORY_REGION,
 885                                 (Object **)&cpu->secure_memory,
 886                                 qdev_prop_allow_set_link_before_realize,
 887                                 OBJ_PROP_LINK_UNREF_ON_RELEASE,
 888                                 &error_abort);
 889#endif
 890    }
 891
 892    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
 893        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property,
 894                                 &error_abort);
 895    }
 896
 897    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
 898        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
 899                                 &error_abort);
 900    }
 901
 902    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
 903        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
 904                                 &error_abort);
 905        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
 906            qdev_property_add_static(DEVICE(obj),
 907                                     &arm_cpu_pmsav7_dregion_property,
 908                                     &error_abort);
 909        }
 910    }
 911
 912    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property,
 913                             &error_abort);
 914}
 915
 916static void arm_cpu_finalizefn(Object *obj)
 917{
 918    ARMCPU *cpu = ARM_CPU(obj);
 919    g_hash_table_destroy(cpu->cp_regs);
 920}
 921
 922static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
 923{
 924    CPUState *cs = CPU(dev);
 925    ARMCPU *cpu = ARM_CPU(dev);
 926    ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
 927    CPUARMState *env = &cpu->env;
 928    int pagebits;
 929    Error *local_err = NULL;
 930#ifndef CONFIG_USER_ONLY
 931    AddressSpace *as;
 932#endif
 933
 934    cpu_exec_realizefn(cs, &local_err);
 935    if (local_err != NULL) {
 936        error_propagate(errp, local_err);
 937        return;
 938    }
 939
 940    /* Use the default AS as the NS one.  */
 941    cpu->as_ns = cs->as;
 942    if (!cpu->as_secure) {
 943        cpu->as_secure = cs->as;
 944    }
 945
 946    /* Some features automatically imply others: */
 947    if (arm_feature(env, ARM_FEATURE_V8)) {
 948        set_feature(env, ARM_FEATURE_V7);
 949        set_feature(env, ARM_FEATURE_ARM_DIV);
 950        set_feature(env, ARM_FEATURE_LPAE);
 951    }
 952    if (arm_feature(env, ARM_FEATURE_V7)) {
 953        set_feature(env, ARM_FEATURE_VAPA);
 954        set_feature(env, ARM_FEATURE_THUMB2);
 955        set_feature(env, ARM_FEATURE_MPIDR);
 956        if (!arm_feature(env, ARM_FEATURE_M)) {
 957            set_feature(env, ARM_FEATURE_V6K);
 958        } else {
 959            set_feature(env, ARM_FEATURE_V6);
 960        }
 961
 962        /* Always define VBAR for V7 CPUs even if it doesn't exist in
 963         * non-EL3 configs. This is needed by some legacy boards.
 964         */
 965        set_feature(env, ARM_FEATURE_VBAR);
 966    }
 967    if (arm_feature(env, ARM_FEATURE_V6K)) {
 968        set_feature(env, ARM_FEATURE_V6);
 969        set_feature(env, ARM_FEATURE_MVFR);
 970    }
 971    if (arm_feature(env, ARM_FEATURE_V6)) {
 972        set_feature(env, ARM_FEATURE_V5);
 973        set_feature(env, ARM_FEATURE_JAZELLE);
 974        if (!arm_feature(env, ARM_FEATURE_M)) {
 975            set_feature(env, ARM_FEATURE_AUXCR);
 976        }
 977    }
 978    if (arm_feature(env, ARM_FEATURE_V5)) {
 979        set_feature(env, ARM_FEATURE_V4T);
 980    }
 981    if (arm_feature(env, ARM_FEATURE_M)) {
 982        set_feature(env, ARM_FEATURE_THUMB_DIV);
 983    }
 984    if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
 985        set_feature(env, ARM_FEATURE_THUMB_DIV);
 986    }
 987    if (arm_feature(env, ARM_FEATURE_VFP4)) {
 988        set_feature(env, ARM_FEATURE_VFP3);
 989        set_feature(env, ARM_FEATURE_VFP_FP16);
 990    }
 991    if (arm_feature(env, ARM_FEATURE_VFP3)) {
 992        set_feature(env, ARM_FEATURE_VFP);
 993    }
 994    if (arm_feature(env, ARM_FEATURE_LPAE)) {
 995        set_feature(env, ARM_FEATURE_V7MP);
 996        set_feature(env, ARM_FEATURE_PXN);
 997    }
 998    if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
 999        set_feature(env, ARM_FEATURE_CBAR);
1000    }
1001    if (arm_feature(env, ARM_FEATURE_THUMB2) &&
1002        !arm_feature(env, ARM_FEATURE_M)) {
1003        set_feature(env, ARM_FEATURE_THUMB_DSP);
1004    }
1005
1006    if (arm_feature(env, ARM_FEATURE_V7) &&
1007        !arm_feature(env, ARM_FEATURE_M) &&
1008        !arm_feature(env, ARM_FEATURE_PMSA)) {
1009        /* v7VMSA drops support for the old ARMv5 tiny pages, so we
1010         * can use 4K pages.
1011         */
1012        pagebits = 12;
1013    } else {
1014        /* For CPUs which might have tiny 1K pages, or which have an
1015         * MPU and might have small region sizes, stick with 1K pages.
1016         */
1017        pagebits = 10;
1018    }
1019    if (!set_preferred_target_page_bits(pagebits)) {
1020        /* This can only ever happen for hotplugging a CPU, or if
1021         * the board code incorrectly creates a CPU which it has
1022         * promised via minimum_page_size that it will not.
1023         */
1024        error_setg(errp, "This CPU requires a smaller page size than the "
1025                   "system is using");
1026        return;
1027    }
1028
1029    /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
1030     * We don't support setting cluster ID ([16..23]) (known as Aff2
1031     * in later ARM ARM versions), or any of the higher affinity level fields,
1032     * so these bits always RAZ.
1033     */
1034    if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
1035        cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
1036                                               ARM_DEFAULT_CPUS_PER_CLUSTER);
1037    }
1038
1039    if (cpu->reset_hivecs) {
1040            cpu->reset_sctlr |= (1 << 13);
1041    }
1042
1043    if (cpu->cfgend) {
1044        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1045            cpu->reset_sctlr |= SCTLR_EE;
1046        } else {
1047            cpu->reset_sctlr |= SCTLR_B;
1048        }
1049    }
1050
1051    if (!cpu->has_el3) {
1052        /* If the has_el3 CPU property is disabled then we need to disable the
1053         * feature.
1054         */
1055        unset_feature(env, ARM_FEATURE_EL3);
1056
1057        /* Disable the security extension feature bits in the processor feature
1058         * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
1059         */
1060        cpu->id_pfr1 &= ~0xf0;
1061        cpu->id_aa64pfr0 &= ~0xf000;
1062    }
1063
1064    if (!cpu->has_el2) {
1065        unset_feature(env, ARM_FEATURE_EL2);
1066    }
1067
1068    if (!cpu->has_pmu) {
1069        unset_feature(env, ARM_FEATURE_PMU);
1070        cpu->id_aa64dfr0 &= ~0xf00;
1071    }
1072
1073    if (!arm_feature(env, ARM_FEATURE_EL2)) {
1074        /* Disable the hypervisor feature bits in the processor feature
1075         * registers if we don't have EL2. These are id_pfr1[15:12] and
1076         * id_aa64pfr0_el1[11:8].
1077         */
1078        cpu->id_aa64pfr0 &= ~0xf00;
1079        cpu->id_pfr1 &= ~0xf000;
1080    }
1081
1082    /* MPU can be configured out of a PMSA CPU either by setting has-mpu
1083     * to false or by setting pmsav7-dregion to 0.
1084     */
1085    if (!cpu->has_mpu) {
1086        cpu->pmsav7_dregion = 0;
1087    }
1088    if (cpu->pmsav7_dregion == 0) {
1089        cpu->has_mpu = false;
1090    }
1091
1092    if (arm_feature(env, ARM_FEATURE_PMSA) &&
1093        arm_feature(env, ARM_FEATURE_V7)) {
1094        uint32_t nr = cpu->pmsav7_dregion;
1095
1096        if (nr > 0xff) {
1097            error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
1098            return;
1099        }
1100
1101        if (nr) {
1102            if (arm_feature(env, ARM_FEATURE_V8)) {
1103                /* PMSAv8 */
1104                env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
1105                env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
1106                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1107                    env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
1108                    env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
1109                }
1110            } else {
1111                env->pmsav7.drbar = g_new0(uint32_t, nr);
1112                env->pmsav7.drsr = g_new0(uint32_t, nr);
1113                env->pmsav7.dracr = g_new0(uint32_t, nr);
1114            }
1115        }
1116    }
1117
1118    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1119        uint32_t nr = cpu->sau_sregion;
1120
1121        if (nr > 0xff) {
1122            error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
1123            return;
1124        }
1125
1126        if (nr) {
1127            env->sau.rbar = g_new0(uint32_t, nr);
1128            env->sau.rlar = g_new0(uint32_t, nr);
1129        }
1130    }
1131
1132    if (arm_feature(env, ARM_FEATURE_EL3)) {
1133        set_feature(env, ARM_FEATURE_VBAR);
1134    }
1135
1136#ifndef CONFIG_USER_ONLY
1137    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
1138        if (!cpu->gt_freq) {
1139            error_setg(errp, "gtimer frequency 0 is invalid");
1140            return;
1141        }
1142        arm_gt_compute_scale(cpu);
1143        if (!cpu->gt_scale) {
1144            error_setg(errp, "gtimer frequency cannot be greater"
1145                               " than QEMU_CLOCK_VIRTUAL");
1146            return;
1147        }
1148        cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL,
1149                                               1, arm_gt_ptimer_cb, cpu);
1150        cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL,
1151                                               1, arm_gt_vtimer_cb, cpu);
1152        cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL,
1153                                               1, arm_gt_htimer_cb, cpu);
1154        cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL,
1155                                               1, arm_gt_stimer_cb, cpu);
1156    }
1157#endif
1158    register_cp_regs_for_features(cpu);
1159    arm_cpu_register_gdb_regs_for_features(cpu);
1160
1161    init_cpreg_list(cpu);
1162
1163#ifndef CONFIG_USER_ONLY
1164/* Xilinx: We always want to ensure that two address spaces are created
1165 *         because we allow the secure bit to be overwritten from the outside
1166 *         and in future this could be run time configurable.
1167 */
1168#define CPU_NO_EL3_SEC_ENABLE 1
1169    if (cpu->has_el3 || CPU_NO_EL3_SEC_ENABLE) {
1170        cs->num_ases = 2;
1171    } else {
1172        cs->num_ases = 1;
1173    }
1174
1175    if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY) ||
1176        CPU_NO_EL3_SEC_ENABLE) {
1177        as = g_new0(AddressSpace, 1);
1178
1179        cs->num_ases = 2;
1180
1181        if (!cpu->secure_memory) {
1182            cpu->secure_memory = cs->memory;
1183        }
1184        address_space_init(as, cpu->secure_memory, "cpu-secure-memory");
1185        cpu_address_space_init(cs, as, ARMASIdx_S);
1186    } else {
1187        cs->num_ases = 1;
1188    }
1189    as = g_new0(AddressSpace, 1);
1190    address_space_init(as, cs->memory, "cpu-memory");
1191    cpu_address_space_init(cs, as, ARMASIdx_NS);
1192
1193    /* No core_count specified, default to smp_cpus. */
1194    if (cpu->core_count == -1) {
1195        cpu->core_count = smp_cpus;
1196    }
1197#endif
1198
1199    qemu_init_vcpu(cs);
1200    cpu_reset(cs);
1201
1202    acc->parent_realize(dev, errp);
1203}
1204
1205static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
1206{
1207    ObjectClass *oc;
1208    char *typename;
1209    char **cpuname;
1210
1211    cpuname = g_strsplit(cpu_model, ",", 1);
1212    typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpuname[0]);
1213    oc = object_class_by_name(typename);
1214    g_strfreev(cpuname);
1215    g_free(typename);
1216    if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
1217        object_class_is_abstract(oc)) {
1218        return NULL;
1219    }
1220    return oc;
1221}
1222
1223/* CPU models. These are not needed for the AArch64 linux-user build. */
1224#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
1225
1226static void arm926_initfn(Object *obj)
1227{
1228    ARMCPU *cpu = ARM_CPU(obj);
1229
1230    cpu->dtb_compatible = "arm,arm926";
1231    set_feature(&cpu->env, ARM_FEATURE_V5);
1232    set_feature(&cpu->env, ARM_FEATURE_VFP);
1233    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1234    set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
1235    set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
1236    cpu->midr = 0x41069265;
1237    cpu->reset_fpsid = 0x41011090;
1238    cpu->ctr = 0x1dd20d2;
1239    cpu->reset_sctlr = 0x00090078;
1240}
1241
1242static void arm946_initfn(Object *obj)
1243{
1244    ARMCPU *cpu = ARM_CPU(obj);
1245
1246    cpu->dtb_compatible = "arm,arm946";
1247    set_feature(&cpu->env, ARM_FEATURE_V5);
1248    set_feature(&cpu->env, ARM_FEATURE_PMSA);
1249    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1250    cpu->midr = 0x41059461;
1251    cpu->ctr = 0x0f004006;
1252    cpu->reset_sctlr = 0x00000078;
1253}
1254
1255static void arm1026_initfn(Object *obj)
1256{
1257    ARMCPU *cpu = ARM_CPU(obj);
1258
1259    cpu->dtb_compatible = "arm,arm1026";
1260    set_feature(&cpu->env, ARM_FEATURE_V5);
1261    set_feature(&cpu->env, ARM_FEATURE_VFP);
1262    set_feature(&cpu->env, ARM_FEATURE_AUXCR);
1263    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1264    set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
1265    set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
1266    cpu->midr = 0x4106a262;
1267    cpu->reset_fpsid = 0x410110a0;
1268    cpu->ctr = 0x1dd20d2;
1269    cpu->reset_sctlr = 0x00090078;
1270    cpu->reset_auxcr = 1;
1271    {
1272        /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
1273        ARMCPRegInfo ifar = {
1274            .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1275            .access = PL1_RW,
1276            .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
1277            .resetvalue = 0
1278        };
1279        define_one_arm_cp_reg(cpu, &ifar);
1280    }
1281}
1282
1283static void arm1136_r2_initfn(Object *obj)
1284{
1285    ARMCPU *cpu = ARM_CPU(obj);
1286    /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
1287     * older core than plain "arm1136". In particular this does not
1288     * have the v6K features.
1289     * These ID register values are correct for 1136 but may be wrong
1290     * for 1136_r2 (in particular r0p2 does not actually implement most
1291     * of the ID registers).
1292     */
1293
1294    cpu->dtb_compatible = "arm,arm1136";
1295    set_feature(&cpu->env, ARM_FEATURE_V6);
1296    set_feature(&cpu->env, ARM_FEATURE_VFP);
1297    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1298    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1299    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1300    cpu->midr = 0x4107b362;
1301    cpu->reset_fpsid = 0x410120b4;
1302    cpu->mvfr0 = 0x11111111;
1303    cpu->mvfr1 = 0x00000000;
1304    cpu->ctr = 0x1dd20d2;
1305    cpu->reset_sctlr = 0x00050078;
1306    cpu->id_pfr0 = 0x111;
1307    cpu->id_pfr1 = 0x1;
1308    cpu->id_dfr0 = 0x2;
1309    cpu->id_afr0 = 0x3;
1310    cpu->id_mmfr0 = 0x01130003;
1311    cpu->id_mmfr1 = 0x10030302;
1312    cpu->id_mmfr2 = 0x01222110;
1313    cpu->id_isar0 = 0x00140011;
1314    cpu->id_isar1 = 0x12002111;
1315    cpu->id_isar2 = 0x11231111;
1316    cpu->id_isar3 = 0x01102131;
1317    cpu->id_isar4 = 0x141;
1318    cpu->reset_auxcr = 7;
1319}
1320
1321static void arm1136_initfn(Object *obj)
1322{
1323    ARMCPU *cpu = ARM_CPU(obj);
1324
1325    cpu->dtb_compatible = "arm,arm1136";
1326    set_feature(&cpu->env, ARM_FEATURE_V6K);
1327    set_feature(&cpu->env, ARM_FEATURE_V6);
1328    set_feature(&cpu->env, ARM_FEATURE_VFP);
1329    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1330    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1331    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1332    cpu->midr = 0x4117b363;
1333    cpu->reset_fpsid = 0x410120b4;
1334    cpu->mvfr0 = 0x11111111;
1335    cpu->mvfr1 = 0x00000000;
1336    cpu->ctr = 0x1dd20d2;
1337    cpu->reset_sctlr = 0x00050078;
1338    cpu->id_pfr0 = 0x111;
1339    cpu->id_pfr1 = 0x1;
1340    cpu->id_dfr0 = 0x2;
1341    cpu->id_afr0 = 0x3;
1342    cpu->id_mmfr0 = 0x01130003;
1343    cpu->id_mmfr1 = 0x10030302;
1344    cpu->id_mmfr2 = 0x01222110;
1345    cpu->id_isar0 = 0x00140011;
1346    cpu->id_isar1 = 0x12002111;
1347    cpu->id_isar2 = 0x11231111;
1348    cpu->id_isar3 = 0x01102131;
1349    cpu->id_isar4 = 0x141;
1350    cpu->reset_auxcr = 7;
1351}
1352
1353static void arm1176_initfn(Object *obj)
1354{
1355    ARMCPU *cpu = ARM_CPU(obj);
1356
1357    cpu->dtb_compatible = "arm,arm1176";
1358    set_feature(&cpu->env, ARM_FEATURE_V6K);
1359    set_feature(&cpu->env, ARM_FEATURE_VFP);
1360    set_feature(&cpu->env, ARM_FEATURE_VAPA);
1361    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1362    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
1363    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
1364    set_feature(&cpu->env, ARM_FEATURE_EL3);
1365    cpu->midr = 0x410fb767;
1366    cpu->reset_fpsid = 0x410120b5;
1367    cpu->mvfr0 = 0x11111111;
1368    cpu->mvfr1 = 0x00000000;
1369    cpu->ctr = 0x1dd20d2;
1370    cpu->reset_sctlr = 0x00050078;
1371    cpu->id_pfr0 = 0x111;
1372    cpu->id_pfr1 = 0x11;
1373    cpu->id_dfr0 = 0x33;
1374    cpu->id_afr0 = 0;
1375    cpu->id_mmfr0 = 0x01130003;
1376    cpu->id_mmfr1 = 0x10030302;
1377    cpu->id_mmfr2 = 0x01222100;
1378    cpu->id_isar0 = 0x0140011;
1379    cpu->id_isar1 = 0x12002111;
1380    cpu->id_isar2 = 0x11231121;
1381    cpu->id_isar3 = 0x01102131;
1382    cpu->id_isar4 = 0x01141;
1383    cpu->reset_auxcr = 7;
1384}
1385
1386static void arm11mpcore_initfn(Object *obj)
1387{
1388    ARMCPU *cpu = ARM_CPU(obj);
1389
1390    cpu->dtb_compatible = "arm,arm11mpcore";
1391    set_feature(&cpu->env, ARM_FEATURE_V6K);
1392    set_feature(&cpu->env, ARM_FEATURE_VFP);
1393    set_feature(&cpu->env, ARM_FEATURE_VAPA);
1394    set_feature(&cpu->env, ARM_FEATURE_MPIDR);
1395    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1396    cpu->midr = 0x410fb022;
1397    cpu->reset_fpsid = 0x410120b4;
1398    cpu->mvfr0 = 0x11111111;
1399    cpu->mvfr1 = 0x00000000;
1400    cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
1401    cpu->id_pfr0 = 0x111;
1402    cpu->id_pfr1 = 0x1;
1403    cpu->id_dfr0 = 0;
1404    cpu->id_afr0 = 0x2;
1405    cpu->id_mmfr0 = 0x01100103;
1406    cpu->id_mmfr1 = 0x10020302;
1407    cpu->id_mmfr2 = 0x01222000;
1408    cpu->id_isar0 = 0x00100011;
1409    cpu->id_isar1 = 0x12002111;
1410    cpu->id_isar2 = 0x11221011;
1411    cpu->id_isar3 = 0x01102131;
1412    cpu->id_isar4 = 0x141;
1413    cpu->reset_auxcr = 1;
1414}
1415
1416static void cortex_m3_initfn(Object *obj)
1417{
1418    ARMCPU *cpu = ARM_CPU(obj);
1419    set_feature(&cpu->env, ARM_FEATURE_V7);
1420    set_feature(&cpu->env, ARM_FEATURE_M);
1421    cpu->midr = 0x410fc231;
1422    cpu->pmsav7_dregion = 8;
1423}
1424
1425static void cortex_m4_initfn(Object *obj)
1426{
1427    ARMCPU *cpu = ARM_CPU(obj);
1428
1429    set_feature(&cpu->env, ARM_FEATURE_V7);
1430    set_feature(&cpu->env, ARM_FEATURE_M);
1431    set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
1432    cpu->midr = 0x410fc240; /* r0p0 */
1433    cpu->pmsav7_dregion = 8;
1434}
1435
1436static void arm_v7m_class_init(ObjectClass *oc, void *data)
1437{
1438    CPUClass *cc = CPU_CLASS(oc);
1439
1440#ifndef CONFIG_USER_ONLY
1441    cc->do_interrupt = arm_v7m_cpu_do_interrupt;
1442#endif
1443
1444    cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
1445}
1446
1447static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
1448    /* Dummy the TCM region regs for the moment */
1449    { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
1450      .access = PL1_RW, .type = ARM_CP_CONST },
1451    { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
1452      .access = PL1_RW, .type = ARM_CP_CONST },
1453    { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
1454      .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
1455    REGINFO_SENTINEL
1456};
1457
1458static void cortex_r4_initfn(Object *obj)
1459{
1460    ARMCPU *cpu = ARM_CPU(obj);
1461    set_feature(&cpu->env, ARM_FEATURE_V7);
1462    set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
1463    set_feature(&cpu->env, ARM_FEATURE_PMSA);
1464    cpu->midr = 0x411FC144; /* r1p4 */
1465    cpu->id_pfr0 = 0x0131;
1466    cpu->id_pfr1 = 0x001;
1467    cpu->id_dfr0 = 0x010400;
1468    cpu->id_afr0 = 0x0;
1469    cpu->id_mmfr0 = 0x0210030;
1470    cpu->id_mmfr1 = 0x00000000;
1471    cpu->id_mmfr2 = 0x01200000;
1472    cpu->id_mmfr3 = 0x0211;
1473    cpu->id_isar0 = 0x1101111;
1474    cpu->id_isar1 = 0x13112111;
1475    cpu->id_isar2 = 0x21232131;
1476    cpu->id_isar3 = 0x01112131;
1477    cpu->id_isar4 = 0x0010142;
1478    cpu->id_isar5 = 0x0;
1479    cpu->mp_is_up = true;
1480}
1481
1482static void cortex_r5_initfn(Object *obj)
1483{
1484    ARMCPU *cpu = ARM_CPU(obj);
1485
1486    set_feature(&cpu->env, ARM_FEATURE_V7);
1487    set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
1488    set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
1489    set_feature(&cpu->env, ARM_FEATURE_V7MP);
1490    set_feature(&cpu->env, ARM_FEATURE_PMSA);
1491    cpu->midr = 0x411fc153; /* r1p3 */
1492    cpu->id_pfr0 = 0x0131;
1493    cpu->id_pfr1 = 0x001;
1494    cpu->id_dfr0 = 0x010400;
1495    cpu->id_afr0 = 0x0;
1496    cpu->id_mmfr0 = 0x0210030;
1497    cpu->id_mmfr1 = 0x00000000;
1498    cpu->id_mmfr2 = 0x01200000;
1499    cpu->id_mmfr3 = 0x0211;
1500    cpu->id_isar0 = 0x2101111;
1501    cpu->id_isar1 = 0x13112111;
1502    cpu->id_isar2 = 0x21232141;
1503    cpu->id_isar3 = 0x01112131;
1504    cpu->id_isar4 = 0x0010142;
1505    cpu->id_isar5 = 0x0;
1506    cpu->mp_is_up = true;
1507    cpu->pmsav7_dregion = 16;
1508    define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
1509}
1510
1511static void cortex_r5f_initfn(Object *obj)
1512{
1513    ARMCPU *cpu = ARM_CPU(obj);
1514
1515    cortex_r5_initfn(obj);
1516    set_feature(&cpu->env, ARM_FEATURE_VFP3);
1517}
1518
1519static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
1520    { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
1521      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1522    { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
1523      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1524    REGINFO_SENTINEL
1525};
1526
1527static void cortex_a8_initfn(Object *obj)
1528{
1529    ARMCPU *cpu = ARM_CPU(obj);
1530
1531    cpu->dtb_compatible = "arm,cortex-a8";
1532    set_feature(&cpu->env, ARM_FEATURE_V7);
1533    set_feature(&cpu->env, ARM_FEATURE_VFP3);
1534    set_feature(&cpu->env, ARM_FEATURE_NEON);
1535    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1536    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1537    set_feature(&cpu->env, ARM_FEATURE_EL3);
1538    cpu->midr = 0x410fc080;
1539    cpu->reset_fpsid = 0x410330c0;
1540    cpu->mvfr0 = 0x11110222;
1541    cpu->mvfr1 = 0x00011100;
1542    cpu->ctr = 0x82048004;
1543    cpu->reset_sctlr = 0x00c50078;
1544    cpu->id_pfr0 = 0x1031;
1545    cpu->id_pfr1 = 0x11;
1546    cpu->id_dfr0 = 0x400;
1547    cpu->id_afr0 = 0;
1548    cpu->id_mmfr0 = 0x31100003;
1549    cpu->id_mmfr1 = 0x20000000;
1550    cpu->id_mmfr2 = 0x01202000;
1551    cpu->id_mmfr3 = 0x11;
1552    cpu->id_isar0 = 0x00101111;
1553    cpu->id_isar1 = 0x12112111;
1554    cpu->id_isar2 = 0x21232031;
1555    cpu->id_isar3 = 0x11112131;
1556    cpu->id_isar4 = 0x00111142;
1557    cpu->dbgdidr = 0x15141000;
1558    cpu->clidr = (1 << 27) | (2 << 24) | 3;
1559    cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
1560    cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
1561    cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
1562    cpu->reset_auxcr = 2;
1563    define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
1564}
1565
1566static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
1567    /* power_control should be set to maximum latency. Again,
1568     * default to 0 and set by private hook
1569     */
1570    { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
1571      .access = PL1_RW, .resetvalue = 0,
1572      .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
1573    { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
1574      .access = PL1_RW, .resetvalue = 0,
1575      .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
1576    { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
1577      .access = PL1_RW, .resetvalue = 0,
1578      .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
1579    { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
1580      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1581    /* TLB lockdown control */
1582    { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
1583      .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
1584    { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
1585      .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
1586    { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
1587      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1588    { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
1589      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1590    { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
1591      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
1592    REGINFO_SENTINEL
1593};
1594
1595static void cortex_a9_initfn(Object *obj)
1596{
1597    ARMCPU *cpu = ARM_CPU(obj);
1598
1599    cpu->dtb_compatible = "arm,cortex-a9";
1600    set_feature(&cpu->env, ARM_FEATURE_V7);
1601    set_feature(&cpu->env, ARM_FEATURE_VFP3);
1602    set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
1603    set_feature(&cpu->env, ARM_FEATURE_NEON);
1604    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1605    set_feature(&cpu->env, ARM_FEATURE_EL3);
1606    /* Note that A9 supports the MP extensions even for
1607     * A9UP and single-core A9MP (which are both different
1608     * and valid configurations; we don't model A9UP).
1609     */
1610    set_feature(&cpu->env, ARM_FEATURE_V7MP);
1611    set_feature(&cpu->env, ARM_FEATURE_CBAR);
1612    cpu->midr = 0x410fc090;
1613    cpu->reset_fpsid = 0x41033090;
1614    cpu->mvfr0 = 0x11110222;
1615    cpu->mvfr1 = 0x01111111;
1616    cpu->ctr = 0x80038003;
1617    cpu->reset_sctlr = 0x00c50078;
1618    cpu->id_pfr0 = 0x1031;
1619    cpu->id_pfr1 = 0x11;
1620    cpu->id_dfr0 = 0x000;
1621    cpu->id_afr0 = 0;
1622    cpu->id_mmfr0 = 0x00100103;
1623    cpu->id_mmfr1 = 0x20000000;
1624    cpu->id_mmfr2 = 0x01230000;
1625    cpu->id_mmfr3 = 0x00002111;
1626    cpu->id_isar0 = 0x00101111;
1627    cpu->id_isar1 = 0x13112111;
1628    cpu->id_isar2 = 0x21232041;
1629    cpu->id_isar3 = 0x11112131;
1630    cpu->id_isar4 = 0x00111142;
1631    cpu->dbgdidr = 0x35141000;
1632    cpu->clidr = (1 << 27) | (1 << 24) | 3;
1633    cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
1634    cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
1635    define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
1636}
1637
1638#ifndef CONFIG_USER_ONLY
1639static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1640{
1641    /* Linux wants the number of processors from here.
1642     * Might as well set the interrupt-controller bit too.
1643     */
1644    return ((smp_cpus - 1) << 24) | (1 << 23);
1645}
1646#endif
1647
1648static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
1649#ifndef CONFIG_USER_ONLY
1650    { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
1651      .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
1652      .writefn = arm_cp_write_ignore, },
1653#endif
1654    { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
1655      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1656    REGINFO_SENTINEL
1657};
1658
1659static void cortex_a7_initfn(Object *obj)
1660{
1661    ARMCPU *cpu = ARM_CPU(obj);
1662
1663    cpu->dtb_compatible = "arm,cortex-a7";
1664    set_feature(&cpu->env, ARM_FEATURE_V7);
1665    set_feature(&cpu->env, ARM_FEATURE_VFP4);
1666    set_feature(&cpu->env, ARM_FEATURE_NEON);
1667    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1668    set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
1669    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
1670    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1671    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
1672    set_feature(&cpu->env, ARM_FEATURE_LPAE);
1673    set_feature(&cpu->env, ARM_FEATURE_EL3);
1674    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
1675    cpu->midr = 0x410fc075;
1676    cpu->reset_fpsid = 0x41023075;
1677    cpu->mvfr0 = 0x10110222;
1678    cpu->mvfr1 = 0x11111111;
1679    cpu->ctr = 0x84448003;
1680    cpu->reset_sctlr = 0x00c50078;
1681    cpu->id_pfr0 = 0x00001131;
1682    cpu->id_pfr1 = 0x00011011;
1683    cpu->id_dfr0 = 0x02010555;
1684    cpu->pmceid0 = 0x00000000;
1685    cpu->pmceid1 = 0x00000000;
1686    cpu->id_afr0 = 0x00000000;
1687    cpu->id_mmfr0 = 0x10101105;
1688    cpu->id_mmfr1 = 0x40000000;
1689    cpu->id_mmfr2 = 0x01240000;
1690    cpu->id_mmfr3 = 0x02102211;
1691    cpu->id_isar0 = 0x01101110;
1692    cpu->id_isar1 = 0x13112111;
1693    cpu->id_isar2 = 0x21232041;
1694    cpu->id_isar3 = 0x11112131;
1695    cpu->id_isar4 = 0x10011142;
1696    cpu->dbgdidr = 0x3515f005;
1697    cpu->clidr = 0x0a200023;
1698    cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
1699    cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
1700    cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
1701    define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
1702}
1703
1704static void cortex_a15_initfn(Object *obj)
1705{
1706    ARMCPU *cpu = ARM_CPU(obj);
1707
1708    cpu->dtb_compatible = "arm,cortex-a15";
1709    set_feature(&cpu->env, ARM_FEATURE_V7);
1710    set_feature(&cpu->env, ARM_FEATURE_VFP4);
1711    set_feature(&cpu->env, ARM_FEATURE_NEON);
1712    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1713    set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
1714    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
1715    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1716    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
1717    set_feature(&cpu->env, ARM_FEATURE_LPAE);
1718    set_feature(&cpu->env, ARM_FEATURE_EL3);
1719    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
1720    cpu->midr = 0x412fc0f1;
1721    cpu->reset_fpsid = 0x410430f0;
1722    cpu->mvfr0 = 0x10110222;
1723    cpu->mvfr1 = 0x11111111;
1724    cpu->ctr = 0x8444c004;
1725    cpu->reset_sctlr = 0x00c50078;
1726    cpu->id_pfr0 = 0x00001131;
1727    cpu->id_pfr1 = 0x00011011;
1728    cpu->id_dfr0 = 0x02010555;
1729    cpu->pmceid0 = 0x00000000;
1730    cpu->pmceid1 = 0x00000000;
1731    cpu->id_afr0 = 0x00000000;
1732    cpu->id_mmfr0 = 0x10201105;
1733    cpu->id_mmfr1 = 0x20000000;
1734    cpu->id_mmfr2 = 0x01240000;
1735    cpu->id_mmfr3 = 0x02102211;
1736    cpu->id_isar0 = 0x02101110;
1737    cpu->id_isar1 = 0x13112111;
1738    cpu->id_isar2 = 0x21232041;
1739    cpu->id_isar3 = 0x11112131;
1740    cpu->id_isar4 = 0x10011142;
1741    cpu->dbgdidr = 0x3515f021;
1742    cpu->clidr = 0x0a200023;
1743    cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
1744    cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
1745    cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
1746    define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
1747}
1748
1749static void ti925t_initfn(Object *obj)
1750{
1751    ARMCPU *cpu = ARM_CPU(obj);
1752    set_feature(&cpu->env, ARM_FEATURE_V4T);
1753    set_feature(&cpu->env, ARM_FEATURE_OMAPCP);
1754    cpu->midr = ARM_CPUID_TI925T;
1755    cpu->ctr = 0x5109149;
1756    cpu->reset_sctlr = 0x00000070;
1757}
1758
1759static void sa1100_initfn(Object *obj)
1760{
1761    ARMCPU *cpu = ARM_CPU(obj);
1762
1763    cpu->dtb_compatible = "intel,sa1100";
1764    set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
1765    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1766    cpu->midr = 0x4401A11B;
1767    cpu->reset_sctlr = 0x00000070;
1768}
1769
1770static void sa1110_initfn(Object *obj)
1771{
1772    ARMCPU *cpu = ARM_CPU(obj);
1773    set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
1774    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
1775    cpu->midr = 0x6901B119;
1776    cpu->reset_sctlr = 0x00000070;
1777}
1778
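/* Marvell/Intel XScale (PXA25x/26x/27x) cores. Only the PXA270 steppings
 * below additionally enable the iwMMXt extension.
 */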
1779static void pxa250_initfn(Object *obj)
1780{
1781    ARMCPU *cpu = ARM_CPU(obj);
1782
1783    cpu->dtb_compatible = "marvell,xscale";
1784    set_feature(&cpu->env, ARM_FEATURE_V5);
1785    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1786    cpu->midr = 0x69052100;
1787    cpu->ctr = 0xd172172;
1788    cpu->reset_sctlr = 0x00000078;
1789}
1790
1791static void pxa255_initfn(Object *obj)
1792{
1793    ARMCPU *cpu = ARM_CPU(obj);
1794
1795    cpu->dtb_compatible = "marvell,xscale";
1796    set_feature(&cpu->env, ARM_FEATURE_V5);
1797    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1798    cpu->midr = 0x69052d00;
1799    cpu->ctr = 0xd172172;
1800    cpu->reset_sctlr = 0x00000078;
1801}
1802
1803static void pxa260_initfn(Object *obj)
1804{
1805    ARMCPU *cpu = ARM_CPU(obj);
1806
1807    cpu->dtb_compatible = "marvell,xscale";
1808    set_feature(&cpu->env, ARM_FEATURE_V5);
1809    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1810    cpu->midr = 0x69052903;
1811    cpu->ctr = 0xd172172;
1812    cpu->reset_sctlr = 0x00000078;
1813}
1814
1815static void pxa261_initfn(Object *obj)
1816{
1817    ARMCPU *cpu = ARM_CPU(obj);
1818
1819    cpu->dtb_compatible = "marvell,xscale";
1820    set_feature(&cpu->env, ARM_FEATURE_V5);
1821    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1822    cpu->midr = 0x69052d05;
1823    cpu->ctr = 0xd172172;
1824    cpu->reset_sctlr = 0x00000078;
1825}
1826
1827static void pxa262_initfn(Object *obj)
1828{
1829    ARMCPU *cpu = ARM_CPU(obj);
1830
1831    cpu->dtb_compatible = "marvell,xscale";
1832    set_feature(&cpu->env, ARM_FEATURE_V5);
1833    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1834    cpu->midr = 0x69052d06;
1835    cpu->ctr = 0xd172172;
1836    cpu->reset_sctlr = 0x00000078;
1837}
1838
1839static void pxa270a0_initfn(Object *obj)
1840{
1841    ARMCPU *cpu = ARM_CPU(obj);
1842
1843    cpu->dtb_compatible = "marvell,xscale";
1844    set_feature(&cpu->env, ARM_FEATURE_V5);
1845    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1846    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1847    cpu->midr = 0x69054110;
1848    cpu->ctr = 0xd172172;
1849    cpu->reset_sctlr = 0x00000078;
1850}
1851
1852static void pxa270a1_initfn(Object *obj)
1853{
1854    ARMCPU *cpu = ARM_CPU(obj);
1855
1856    cpu->dtb_compatible = "marvell,xscale";
1857    set_feature(&cpu->env, ARM_FEATURE_V5);
1858    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1859    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1860    cpu->midr = 0x69054111;
1861    cpu->ctr = 0xd172172;
1862    cpu->reset_sctlr = 0x00000078;
1863}
1864
1865static void pxa270b0_initfn(Object *obj)
1866{
1867    ARMCPU *cpu = ARM_CPU(obj);
1868
1869    cpu->dtb_compatible = "marvell,xscale";
1870    set_feature(&cpu->env, ARM_FEATURE_V5);
1871    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1872    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1873    cpu->midr = 0x69054112;
1874    cpu->ctr = 0xd172172;
1875    cpu->reset_sctlr = 0x00000078;
1876}
1877
1878static void pxa270b1_initfn(Object *obj)
1879{
1880    ARMCPU *cpu = ARM_CPU(obj);
1881
1882    cpu->dtb_compatible = "marvell,xscale";
1883    set_feature(&cpu->env, ARM_FEATURE_V5);
1884    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1885    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1886    cpu->midr = 0x69054113;
1887    cpu->ctr = 0xd172172;
1888    cpu->reset_sctlr = 0x00000078;
1889}
1890
1891static void pxa270c0_initfn(Object *obj)
1892{
1893    ARMCPU *cpu = ARM_CPU(obj);
1894
1895    cpu->dtb_compatible = "marvell,xscale";
1896    set_feature(&cpu->env, ARM_FEATURE_V5);
1897    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1898    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1899    cpu->midr = 0x69054114;
1900    cpu->ctr = 0xd172172;
1901    cpu->reset_sctlr = 0x00000078;
1902}
1903
1904static void pxa270c5_initfn(Object *obj)
1905{
1906    ARMCPU *cpu = ARM_CPU(obj);
1907
1908    cpu->dtb_compatible = "marvell,xscale";
1909    set_feature(&cpu->env, ARM_FEATURE_V5);
1910    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
1911    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
1912    cpu->midr = 0x69054117;
1913    cpu->ctr = 0xd172172;
1914    cpu->reset_sctlr = 0x00000078;
1915}
1916
1917#ifdef CONFIG_USER_ONLY
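/* The "any" CPU is only offered to user-mode (linux-user) emulation: it
 * enables a broad superset of features (v8, VFPv4, Neon, the v8 crypto
 * extensions and CRC).
 */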
1918static void arm_any_initfn(Object *obj)
1919{
1920    ARMCPU *cpu = ARM_CPU(obj);
1921    set_feature(&cpu->env, ARM_FEATURE_V8);
1922    set_feature(&cpu->env, ARM_FEATURE_VFP4);
1923    set_feature(&cpu->env, ARM_FEATURE_NEON);
1924    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
1925    set_feature(&cpu->env, ARM_FEATURE_V8_AES);
1926    set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
1927    set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
1928    set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
1929    set_feature(&cpu->env, ARM_FEATURE_CRC);
1930    cpu->midr = 0xffffffff;
1931}
1932#endif
1933
1934#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */
1935
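/* Descriptor for one CPU model: its -cpu name, the instance init function
 * that sets up its features and ID registers, and an optional class init
 * hook. The arm_cpus[] table below is walked at type-registration time.
 */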
1936typedef struct ARMCPUInfo {
1937    const char *name;
1938    void (*initfn)(Object *obj);
1939    void (*class_init)(ObjectClass *oc, void *data);
1940} ARMCPUInfo;
1941
1942static const ARMCPUInfo arm_cpus[] = {
1943#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
1944    { .name = "arm926",      .initfn = arm926_initfn },
1945    { .name = "arm946",      .initfn = arm946_initfn },
1946    { .name = "arm1026",     .initfn = arm1026_initfn },
1947    /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
1948     * older core than plain "arm1136". In particular this does not
1949     * have the v6K features.
1950     */
1951    { .name = "arm1136-r2",  .initfn = arm1136_r2_initfn },
1952    { .name = "arm1136",     .initfn = arm1136_initfn },
1953    { .name = "arm1176",     .initfn = arm1176_initfn },
1954    { .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
1955    { .name = "cortex-m3",   .initfn = cortex_m3_initfn,
1956                             .class_init = arm_v7m_class_init },
1957    { .name = "cortex-m4",   .initfn = cortex_m4_initfn,
1958                             .class_init = arm_v7m_class_init },
1959    { .name = "cortex-r4",   .initfn = cortex_r4_initfn },
1960    { .name = "cortex-r5",   .initfn = cortex_r5_initfn },
1961    { .name = "cortex-r5f",  .initfn = cortex_r5f_initfn },
1962    { .name = "cortex-a7",   .initfn = cortex_a7_initfn },
1963    { .name = "cortex-a8",   .initfn = cortex_a8_initfn },
1964    { .name = "cortex-a9",   .initfn = cortex_a9_initfn },
1965    { .name = "cortex-a15",  .initfn = cortex_a15_initfn },
1966    { .name = "ti925t",      .initfn = ti925t_initfn },
1967    { .name = "sa1100",      .initfn = sa1100_initfn },
1968    { .name = "sa1110",      .initfn = sa1110_initfn },
1969    { .name = "pxa250",      .initfn = pxa250_initfn },
1970    { .name = "pxa255",      .initfn = pxa255_initfn },
1971    { .name = "pxa260",      .initfn = pxa260_initfn },
1972    { .name = "pxa261",      .initfn = pxa261_initfn },
1973    { .name = "pxa262",      .initfn = pxa262_initfn },
1974    /* "pxa270" is an alias for "pxa270-a0" */
1975    { .name = "pxa270",      .initfn = pxa270a0_initfn },
1976    { .name = "pxa270-a0",   .initfn = pxa270a0_initfn },
1977    { .name = "pxa270-a1",   .initfn = pxa270a1_initfn },
1978    { .name = "pxa270-b0",   .initfn = pxa270b0_initfn },
1979    { .name = "pxa270-b1",   .initfn = pxa270b1_initfn },
1980    { .name = "pxa270-c0",   .initfn = pxa270c0_initfn },
1981    { .name = "pxa270-c5",   .initfn = pxa270c5_initfn },
1982#ifdef CONFIG_USER_ONLY
1983    { .name = "any",         .initfn = arm_any_initfn },
1984#endif
1985#endif
1986    { .name = NULL }
1987};
1988
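/* QOM properties exposed on the CPU object. Note that the generic timer
 * frequency defaults to 62500000 Hz (62.5 MHz).
 */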
1989static Property arm_cpu_properties[] = {
1990    DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
1991    DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
1992    DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
1993    DEFINE_PROP_UINT32("ctr", ARMCPU, ctr, 0),
1994    DEFINE_PROP_UINT32("clidr", ARMCPU, clidr, 0),
1995    DEFINE_PROP_UINT32("id_pfr0", ARMCPU, id_pfr0, 0),
1996    DEFINE_PROP_UINT32("id_pfr1", ARMCPU, id_pfr1, 0),
1997    DEFINE_PROP_UINT32("ccsidr0", ARMCPU, ccsidr[0], 0),
1998    DEFINE_PROP_UINT32("ccsidr1", ARMCPU, ccsidr[1], 0),
1999    DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
2000                        mp_affinity, ARM64_AFFINITY_INVALID),
2001    DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
2002    DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
2003    DEFINE_PROP_UINT64("generic-timer-frequency", ARMCPU, gt_freq, 62500000),
2004    DEFINE_PROP_END_OF_LIST()
2005};
2006
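/* Map the selected debug-context name onto the CPU's debug_ctx setting:
 * "current-el" makes debugger accesses use the current exception level's
 * address translation, while "phys" treats debugger addresses as physical.
 */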
2007static void set_debug_context(CPUState *cs, unsigned int ctx)
2008{
2009    ARMCPU *cpu = ARM_CPU(cs);
2010    switch (ctx) {
2011    case ARM_DEBUG_CURRENT_EL:
2012        cpu->env.debug_ctx = DEBUG_CURRENT_EL;
2013        break;
2014
2015    case ARM_DEBUG_PHYS:
2016        cpu->env.debug_ctx = DEBUG_PHYS;
2017        break;
2018    }
2019}
2020
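/* Power-control GPIO handler: a nonzero level powers the core on (PSCI_ON),
 * zero powers it off, and the request is then forwarded to the parent class
 * handler.
 */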
2021static void arm_cpu_pwr_cntrl(void *opaque, int n, int level)
2022{
2023    DeviceClass *dc_parent = DEVICE_CLASS(ARM_CPU_PARENT_CLASS);
2024    ARMCPU *cpu = ARM_CPU(opaque);
2025
2026    cpu->power_state = level ? PSCI_ON : PSCI_OFF;
2027    dc_parent->pwr_cntrl(opaque, n, level);
2028}
2029
2030#ifdef CONFIG_USER_ONLY
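/* User-mode emulation performs no address translation, so any fault is
 * reported back as a guest exception: a prefetch abort for instruction
 * fetches (rw == 2) or a data abort otherwise.
 */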
2031static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
2032                                    int mmu_idx)
2033{
2034    ARMCPU *cpu = ARM_CPU(cs);
2035    CPUARMState *env = &cpu->env;
2036
2037    env->exception.vaddress = address;
2038    if (rw == 2) {
2039        cs->exception_index = EXCP_PREFETCH_ABORT;
2040    } else {
2041        cs->exception_index = EXCP_DATA_ABORT;
2042    }
2043    return 1;
2044}
2045#endif
2046
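/* Architecture name advertised to GDB; iwMMXt cores report "iwmmxt" so the
 * debugger selects the matching register set.
 */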
2047static gchar *arm_gdb_arch_name(CPUState *cs)
2048{
2049    ARMCPU *cpu = ARM_CPU(cs);
2050    CPUARMState *env = &cpu->env;
2051
2052    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2053        return g_strdup("iwmmxt");
2054    }
2055    return g_strdup("arm");
2056}
2057
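/* Hook up the ARM-specific implementations of the DeviceClass and CPUClass
 * methods: realize, reset, power control, GDB access, and interrupt/fault
 * handling.
 */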
2058static void arm_cpu_class_init(ObjectClass *oc, void *data)
2059{
2060    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
2061    CPUClass *cc = CPU_CLASS(acc);
2062    DeviceClass *dc = DEVICE_CLASS(oc);
2063
2064    acc->parent_realize = dc->realize;
2065    dc->realize = arm_cpu_realizefn;
2066    dc->props = arm_cpu_properties;
2067    dc->pwr_cntrl = arm_cpu_pwr_cntrl;
2068
2069    acc->parent_reset = cc->reset;
2070    cc->reset = arm_cpu_reset;
2071
2072    cc->class_by_name = arm_cpu_class_by_name;
2073    cc->has_work = arm_cpu_has_work;
2074    cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
2075    cc->dump_state = arm_cpu_dump_state;
2076    cc->set_pc = arm_cpu_set_pc;
2077    cc->get_pc = arm_cpu_get_pc;
2078    cc->debug_contexts = arm_debug_ctx;
2079    cc->set_debug_context = set_debug_context;
2080    cc->gdb_read_register = arm_cpu_gdb_read_register;
2081    cc->gdb_write_register = arm_cpu_gdb_write_register;
2082#ifdef CONFIG_USER_ONLY
2083    cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
2084#else
2085    dc->rst_cntrl = cpu_reset_gpio;
2086    cc->do_interrupt = arm_cpu_do_interrupt;
2087    cc->do_unaligned_access = arm_cpu_do_unaligned_access;
2088    cc->do_transaction_failed = arm_cpu_do_transaction_failed;
2089    cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
2090    cc->asidx_from_attrs = arm_asidx_from_attrs;
2091    cc->vmsd = &vmstate_arm_cpu;
2092    cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
2093    cc->write_elf64_note = arm_cpu_write_elf64_note;
2094    cc->write_elf32_note = arm_cpu_write_elf32_note;
2095#endif
2096    cc->gdb_num_core_regs = 32;
2097    cc->gdb_core_xml_file = "arm-core.xml";
2098    cc->gdb_arch_name = arm_gdb_arch_name;
2099    cc->gdb_stop_before_watchpoint = true;
2100    cc->debug_excp_handler = arm_debug_excp_handler;
2101    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
2102#if !defined(CONFIG_USER_ONLY)
2103    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
2104#endif
2105
2106    cc->disas_set_info = arm_disas_set_info;
2107#ifdef CONFIG_TCG
2108    cc->tcg_initialize = arm_translate_init;
2109#endif
2110}
2111
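/* Register one concrete CPU model as a QOM subtype of TYPE_ARM_CPU. The
 * generated type name is "<model>-" TYPE_ARM_CPU, e.g. "cortex-a9-arm-cpu"
 * for the "cortex-a9" entry.
 */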
2112static void cpu_register(const ARMCPUInfo *info)
2113{
2114    TypeInfo type_info = {
2115        .parent = TYPE_ARM_CPU,
2116        .instance_size = sizeof(ARMCPU),
2117        .instance_init = info->initfn,
2118        .class_size = sizeof(ARMCPUClass),
2119        .class_init = info->class_init,
2120    };
2121
2122    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
2123    type_register(&type_info);
2124    g_free((void *)type_info.name);
2125}
2126
2127static const TypeInfo arm_cpu_type_info = {
2128    .name = TYPE_ARM_CPU,
2129    .parent = TYPE_CPU,
2130    .instance_size = sizeof(ARMCPU),
2131    .instance_init = arm_cpu_initfn,
2132    .instance_post_init = arm_cpu_post_init,
2133    .instance_finalize = arm_cpu_finalizefn,
2134    .abstract = true,
2135    .class_size = sizeof(ARMCPUClass),
2136    .class_init = arm_cpu_class_init,
2137};
2138
2139static void arm_cpu_register_types(void)
2140{
2141    const ARMCPUInfo *info = arm_cpus;
2142
2143    type_register_static(&arm_cpu_type_info);
2144
2145    while (info->name) {
2146        cpu_register(info);
2147        info++;
2148    }
2149}
2150
2151type_init(arm_cpu_register_types)
2152
2153#ifndef CONFIG_USER_ONLY
2154
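/* FDT hook for "arm,armv8-timer" nodes: fetch the four per-CPU timer
 * interrupts from the node (index 0 secure, 1 non-secure, 2 virtual,
 * 3 hypervisor) and wire them to each generic-timer-capable CPU's GPIO
 * outputs (0: non-secure physical, 1: virtual, 2: hypervisor, 3: secure
 * physical).
 */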
2155static int armv8_timer_fdt_init(char *node_path, FDTMachineInfo *fdti,
2156                                void *priv)
2157{
2158    CPUState *cpu;
2159    bool map_mode = false;
2160    qemu_irq *sec_irqs = fdt_get_irq(fdti, node_path, 0, &map_mode);
2161    qemu_irq *ns_irqs = fdt_get_irq(fdti, node_path, 1, &map_mode);
2162    qemu_irq *v_irqs = fdt_get_irq(fdti, node_path, 2, &map_mode);
2163    qemu_irq *h_irqs = fdt_get_irq(fdti, node_path, 3, &map_mode);
2164
2165    assert(!map_mode); /* not supported for PPI */
2166
2167    for (cpu = first_cpu; cpu; cpu = CPU_NEXT(cpu)) {
2168        ARMCPU *acpu = ARM_CPU(cpu);
2169
2170        if (!arm_feature(&acpu->env, ARM_FEATURE_GENERIC_TIMER)) {
2171            continue;
2172        }
2173        assert(*sec_irqs);
2174        assert(*ns_irqs);
2175        assert(*v_irqs);
2176        assert(*h_irqs);
2177        qdev_connect_gpio_out(DEVICE(acpu), 0, *ns_irqs++);
2178        qdev_connect_gpio_out(DEVICE(acpu), 1, *v_irqs++);
2179        qdev_connect_gpio_out(DEVICE(acpu), 2, *h_irqs++);
2180        qdev_connect_gpio_out(DEVICE(acpu), 3, *sec_irqs++);
2181    }
2182
2183    return 0;
2184}
2185
2186fdt_register_compatibility_n(armv8_timer_fdt_init,
2187                             "compatible:arm,armv8-timer", 13);
2188
2189#endif
2190
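/* QOM alias types: "arm.cortex-a9" is registered as a trivial child of the
 * regular cortex-a9 CPU type so the FDT generic machinery can look the CPU
 * up by a device-tree style name.
 */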
2191static const TypeInfo fdt_qom_aliases[] = {
2192    {   .name = "arm.cortex-a9",            .parent = "cortex-a9-arm-cpu"  },
2193};
2194
2195static void fdt_generic_register_types(void)
2196{
2197    int i;
2198
2199    for (i = 0; i < ARRAY_SIZE(fdt_qom_aliases); ++i) {
2200        type_register_static(&fdt_qom_aliases[i]);
2201    }
2202}
2203
2204type_init(fdt_generic_register_types)
2205