/* qemu/hw/intc/armv7m_nvic.c */
   1/*
   2 * ARM Nested Vectored Interrupt Controller
   3 *
   4 * Copyright (c) 2006-2007 CodeSourcery.
   5 * Written by Paul Brook
   6 *
   7 * This code is licensed under the GPL.
   8 *
   9 * The ARMv7M System controller is fairly tightly tied in with the
  10 * NVIC.  Much of that is also implemented here.
  11 */
  12
  13#include "qemu/osdep.h"
  14#include "qapi/error.h"
  15#include "qemu-common.h"
  16#include "cpu.h"
  17#include "hw/sysbus.h"
  18#include "qemu/timer.h"
  19#include "hw/arm/arm.h"
  20#include "hw/intc/armv7m_nvic.h"
  21#include "target/arm/cpu.h"
  22#include "exec/exec-all.h"
  23#include "qemu/log.h"
  24#include "trace.h"
  25
  26/* IRQ number counting:
  27 *
  28 * the num-irq property counts the number of external IRQ lines
  29 *
  30 * NVICState::num_irq counts the total number of exceptions
  31 * (external IRQs, the 15 internal exceptions including reset,
  32 * and one for the unused exception number 0).
  33 *
  34 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
  35 *
  36 * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
  37 *
  38 * Iterating through all exceptions should typically be done with
  39 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
  40 *
  41 * The external qemu_irq lines are the NVIC's external IRQ lines,
  42 * so line 0 is exception 16.
  43 *
  44 * In the terminology of the architecture manual, "interrupts" are
  45 * a subcategory of exception referring to the external interrupts
  46 * (which are exception numbers NVIC_FIRST_IRQ and upward).
  47 * For historical reasons QEMU tends to use "interrupt" and
  48 * "exception" more or less interchangeably.
  49 */
  50#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
  51#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
  52
  53/* Effective running priority of the CPU when no exception is active
  54 * (higher than the highest possible priority value)
  55 */
  56#define NVIC_NOEXC_PRIO 0x100
  57/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
  58#define NVIC_NS_PRIO_LIMIT 0x80
  59
/* Byte values returned for the NVIC's memory-mapped identification
 * registers. NOTE(review): these look like CoreSight-style peripheral
 * ID bytes matching the Cortex-M3; the register read path that consumes
 * this table is not visible in this chunk — confirm against nvic_readl().
 */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
  63
  64static int nvic_pending_prio(NVICState *s)
  65{
  66    /* return the group priority of the current pending interrupt,
  67     * or NVIC_NOEXC_PRIO if no interrupt is pending
  68     */
  69    return s->vectpending_prio;
  70}
  71
  72/* Return the value of the ISCR RETTOBASE bit:
  73 * 1 if there is exactly one active exception
  74 * 0 if there is more than one active exception
  75 * UNKNOWN if there are no active exceptions (we choose 1,
  76 * which matches the choice Cortex-M3 is documented as making).
  77 *
  78 * NB: some versions of the documentation talk about this
  79 * counting "active exceptions other than the one shown by IPSR";
  80 * this is only different in the obscure corner case where guest
  81 * code has manually deactivated an exception and is about
  82 * to fail an exception-return integrity check. The definition
  83 * above is the one from the v8M ARM ARM and is also in line
  84 * with the behaviour documented for the Cortex-M3.
  85 */
  86static bool nvic_rettobase(NVICState *s)
  87{
  88    int irq, nhand = 0;
  89    bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
  90
  91    for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
  92        if (s->vectors[irq].active ||
  93            (check_sec && irq < NVIC_INTERNAL_VECTORS &&
  94             s->sec_vectors[irq].active)) {
  95            nhand++;
  96            if (nhand == 2) {
  97                return 0;
  98            }
  99        }
 100    }
 101
 102    return 1;
 103}
 104
 105/* Return the value of the ISCR ISRPENDING bit:
 106 * 1 if an external interrupt is pending
 107 * 0 if no external interrupt is pending
 108 */
 109static bool nvic_isrpending(NVICState *s)
 110{
 111    int irq;
 112
 113    /* We can shortcut if the highest priority pending interrupt
 114     * happens to be external or if there is nothing pending.
 115     */
 116    if (s->vectpending > NVIC_FIRST_IRQ) {
 117        return true;
 118    }
 119    if (s->vectpending == 0) {
 120        return false;
 121    }
 122
 123    for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
 124        if (s->vectors[irq].pending) {
 125            return true;
 126        }
 127    }
 128    return false;
 129}
 130
 131static bool exc_is_banked(int exc)
 132{
 133    /* Return true if this is one of the limited set of exceptions which
 134     * are banked (and thus have state in sec_vectors[])
 135     */
 136    return exc == ARMV7M_EXCP_HARD ||
 137        exc == ARMV7M_EXCP_MEM ||
 138        exc == ARMV7M_EXCP_USAGE ||
 139        exc == ARMV7M_EXCP_SVC ||
 140        exc == ARMV7M_EXCP_PENDSV ||
 141        exc == ARMV7M_EXCP_SYSTICK;
 142}
 143
 144/* Return a mask word which clears the subpriority bits from
 145 * a priority value for an M-profile exception, leaving only
 146 * the group priority.
 147 */
static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
{
    /* prigroup[] is banked by security state; a PRIGROUP value of n
     * means bits [n:0] of a priority are the subpriority, so the
     * group priority mask clears the low n+1 bits.
     */
    return ~0U << (s->prigroup[secure] + 1);
}
 152
static bool exc_targets_secure(NVICState *s, int exc)
{
    /* Return true if this non-banked exception targets Secure state. */
    if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        return false;
    }

    /* External interrupts target the state selected by their NVIC_ITNS bit */
    if (exc >= NVIC_FIRST_IRQ) {
        return !s->itns[exc];
    }

    /* Function shouldn't be called for banked exceptions. */
    assert(!exc_is_banked(exc));

    switch (exc) {
    case ARMV7M_EXCP_NMI:
    case ARMV7M_EXCP_BUS:
        /* NMI and BusFault target Secure unless AIRCR.BFHFNMINS is set */
        return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    case ARMV7M_EXCP_SECURE:
        return true;
    case ARMV7M_EXCP_DEBUG:
        /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
        return false;
    default:
        /* reset, and reserved (unused) low exception numbers.
         * We'll get called by code that loops through all the exception
         * numbers, but it doesn't matter what we return here as these
         * non-existent exceptions will never be pended or active.
         */
        return true;
    }
}
 185
 186static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
 187{
 188    /* Return the group priority for this exception, given its raw
 189     * (group-and-subgroup) priority value and whether it is targeting
 190     * secure state or not.
 191     */
 192    if (rawprio < 0) {
 193        return rawprio;
 194    }
 195    rawprio &= nvic_gprio_mask(s, targets_secure);
 196    /* AIRCR.PRIS causes us to squash all NS priorities into the
 197     * lower half of the total range
 198     */
 199    if (!targets_secure &&
 200        (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
 201        rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
 202    }
 203    return rawprio;
 204}
 205
 206/* Recompute vectpending and exception_prio for a CPU which implements
 207 * the Security extension
 208 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;

    /* R_CQRV: precedence is by:
     *  - lowest group priority; if both the same then
     *  - lowest subpriority; if both the same then
     *  - lowest exception number; if both the same (ie banked) then
     *  - secure exception takes precedence
     * Compare pseudocode RawExecutionPriority.
     * Annoyingly, now we have two prigroup values (for S and NS)
     * we can't do the loop comparison on raw priority values.
     */
    for (i = 1; i < s->num_irq; i++) {
        /* Visit the S bank before the NS bank: with the strict '<'
         * comparisons below, the first-seen vector wins a tie, which
         * implements the "secure takes precedence" rule above.
         */
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    /* Non-banked exceptions have state only in vectors[] */
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            /* Compare group priorities, not raw values, because the two
             * security states may have different PRIGROUP settings.
             */
            prio = exc_group_prio(s, vec->prio, targets_secure);
            if (vec->enabled && vec->pending && prio < pend_prio) {
                pend_prio = prio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}
 265
 266/* Recompute vectpending and exception_prio */
 267static void nvic_recompute_state(NVICState *s)
 268{
 269    int i;
 270    int pend_prio = NVIC_NOEXC_PRIO;
 271    int active_prio = NVIC_NOEXC_PRIO;
 272    int pend_irq = 0;
 273
 274    /* In theory we could write one function that handled both
 275     * the "security extension present" and "not present"; however
 276     * the security related changes significantly complicate the
 277     * recomputation just by themselves and mixing both cases together
 278     * would be even worse, so we retain a separate non-secure-only
 279     * version for CPUs which don't implement the security extension.
 280     */
 281    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
 282        nvic_recompute_state_secure(s);
 283        return;
 284    }
 285
 286    for (i = 1; i < s->num_irq; i++) {
 287        VecInfo *vec = &s->vectors[i];
 288
 289        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
 290            pend_prio = vec->prio;
 291            pend_irq = i;
 292        }
 293        if (vec->active && vec->prio < active_prio) {
 294            active_prio = vec->prio;
 295        }
 296    }
 297
 298    if (active_prio > 0) {
 299        active_prio &= nvic_gprio_mask(s, false);
 300    }
 301
 302    if (pend_prio > 0) {
 303        pend_prio &= nvic_gprio_mask(s, false);
 304    }
 305
 306    s->vectpending = pend_irq;
 307    s->vectpending_prio = pend_prio;
 308    s->exception_prio = active_prio;
 309
 310    trace_nvic_recompute_state(s->vectpending,
 311                               s->vectpending_prio,
 312                               s->exception_prio);
 313}
 314
 315/* Return the current execution priority of the CPU
 316 * (equivalent to the pseudocode ExecutionPriority function).
 317 * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO.
 318 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    /* Start from BASEPRI: take the more restrictive (numerically lower)
     * of the NS and S values, each mapped to a group priority for its
     * own security state.
     */
    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    /* PRIMASK_NS raises execution priority to 0, except that AIRCR.PRIS
     * caps the boost the non-secure world can obtain at NVIC_NS_PRIO_LIMIT.
     */
    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    /* PRIMASK_S raises execution priority to 0 unconditionally */
    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    /* FAULTMASK_NS: -1 when BFHFNMINS lets the NS world go negative;
     * otherwise it behaves like PRIMASK_NS (including the PRIS cap).
     */
    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    /* FAULTMASK_S: -3 when BFHFNMINS is set, else -1 */
    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}
 370
 371bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
 372{
 373    /* Return true if the requested execution priority is negative
 374     * for the specified security state, ie that security state
 375     * has an active NMI or HardFault or has set its FAULTMASK.
 376     * Note that this is not the same as whether the execution
 377     * priority is actually negative (for instance AIRCR.PRIS may
 378     * mean we don't allow FAULTMASK_NS to actually make the execution
 379     * priority negative). Compare pseudocode IsReqExcPriNeg().
 380     */
 381    NVICState *s = opaque;
 382
 383    if (s->cpu->env.v7m.faultmask[secure]) {
 384        return true;
 385    }
 386
 387    if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
 388        s->vectors[ARMV7M_EXCP_HARD].active) {
 389        return true;
 390    }
 391
 392    if (s->vectors[ARMV7M_EXCP_NMI].active &&
 393        exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
 394        return true;
 395    }
 396
 397    return false;
 398}
 399
 400bool armv7m_nvic_can_take_pending_exception(void *opaque)
 401{
 402    NVICState *s = opaque;
 403
 404    return nvic_exec_prio(s) > nvic_pending_prio(s);
 405}
 406
 407int armv7m_nvic_raw_execution_priority(void *opaque)
 408{
 409    NVICState *s = opaque;
 410
 411    return s->exception_prio;
 412}
 413
 414/* caller must call nvic_irq_update() after this.
 415 * secure indicates the bank to use for banked exceptions (we assert if
 416 * we are passed secure=true for a non-banked exception).
 417 */
 418static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
 419{
 420    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
 421    assert(irq < s->num_irq);
 422
 423    if (secure) {
 424        assert(exc_is_banked(irq));
 425        s->sec_vectors[irq].prio = prio;
 426    } else {
 427        s->vectors[irq].prio = prio;
 428    }
 429
 430    trace_nvic_set_prio(irq, secure, prio);
 431}
 432
 433/* Return the current raw priority register value.
 434 * secure indicates the bank to use for banked exceptions (we assert if
 435 * we are passed secure=true for a non-banked exception).
 436 */
 437static int get_prio(NVICState *s, unsigned irq, bool secure)
 438{
 439    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
 440    assert(irq < s->num_irq);
 441
 442    if (secure) {
 443        assert(exc_is_banked(irq));
 444        return s->sec_vectors[irq].prio;
 445    } else {
 446        return s->vectors[irq].prio;
 447    }
 448}
 449
 450/* Recompute state and assert irq line accordingly.
 451 * Must be called after changes to:
 452 *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
 453 *  prigroup
 454 */
 455static void nvic_irq_update(NVICState *s)
 456{
 457    int lvl;
 458    int pend_prio;
 459
 460    nvic_recompute_state(s);
 461    pend_prio = nvic_pending_prio(s);
 462
 463    /* Raise NVIC output if this IRQ would be taken, except that we
 464     * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
 465     * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
 466     * to those CPU registers don't cause us to recalculate the NVIC
 467     * pending info.
 468     */
 469    lvl = (pend_prio < s->exception_prio);
 470    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
 471    qemu_set_irq(s->excpout, lvl);
 472}
 473
 474/**
 475 * armv7m_nvic_clear_pending: mark the specified exception as not pending
 476 * @opaque: the NVIC
 477 * @irq: the exception number to mark as not pending
 478 * @secure: false for non-banked exceptions or for the nonsecure
 479 * version of a banked exception, true for the secure version of a banked
 480 * exception.
 481 *
 482 * Marks the specified exception as not pending. Note that we will assert()
 483 * if @secure is true and @irq does not specify one of the fixed set
 484 * of architecturally banked exceptions.
 485 */
 486static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
 487{
 488    NVICState *s = (NVICState *)opaque;
 489    VecInfo *vec;
 490
 491    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
 492
 493    if (secure) {
 494        assert(exc_is_banked(irq));
 495        vec = &s->sec_vectors[irq];
 496    } else {
 497        vec = &s->vectors[irq];
 498    }
 499    trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
 500    if (vec->pending) {
 501        vec->pending = 0;
 502        nvic_irq_update(s);
 503    }
 504}
 505
static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    /* Pend an exception, including possibly escalating it to HardFault.
     *
     * This function handles both "normal" pending of interrupts and
     * exceptions, and also derived exceptions (ones which occur as
     * a result of trying to take some other exception).
     *
     * If derived == true, the caller guarantees that we are part way through
     * trying to take an exception (but have not yet called
     * armv7m_nvic_acknowledge_irq() to make it active), and so:
     *  - s->vectpending is the "original exception" we were trying to take
     *  - irq is the "derived exception"
     *  - nvic_exec_prio(s) gives the priority before exception entry
     * Here we handle the prioritization logic which the pseudocode puts
     * in the DerivedLateArrival() function.
     */

    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    /* For banked exceptions the target state is the requested bank;
     * otherwise it depends on ITNS/BFHFNMINS etc.
     */
    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /* DebugMonitorFault, but its priority is lower than the
             * preempted exception priority: just ignore it.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /* If this is a terminal exception (one which means we cannot
             * take the original exception, like a failure to read its
             * vector table entry), then we must take the derived exception.
             * If the derived exception can't take priority over the
             * original exception, then we go into Lockup.
             *
             * For QEMU, we rely on the fact that a derived exception is
             * terminal if and only if it's reported to us as HardFault,
             * which saves having to have an extra argument is_terminal
             * that we'd only use in one place.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }
        /* We now continue with the same code as for a normal pending
         * exception, which will cause us to pend the derived exception.
         * We'll then take either the original or the derived exception
         * based on which is higher priority by the usual mechanism
         * for selecting the highest priority pending interrupt.
         */
    }

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {

            /* We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HF for
             * the target security state of the original exception; otherwise
             * we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /* We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        /* Pending state changed, so the IRQ output may change too */
        nvic_irq_update(s);
    }
}
 645
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    /* Pend a "normal" (non-derived) exception; this may escalate the
     * exception to HardFault inside do_armv7m_nvic_set_pending().
     */
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}
 650
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    /* Pend a derived exception (one raised while trying to take some
     * other exception); see do_armv7m_nvic_set_pending() for the
     * caller's obligations in the derived case.
     */
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}
 655
 656/* Make pending IRQ active.  */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    /* Pick the bank the recompute pass decided the pending exception is in */
    if (s->vectpending_is_s_banked) {
        vec = &s->sec_vectors[pending];
    } else {
        vec = &s->vectors[pending];
    }

    assert(vec->enabled);
    assert(vec->pending);

    /* The caller must only acknowledge an exception which can actually
     * preempt the current execution priority.
     */
    assert(s->vectpending_prio < running);

    trace_nvic_acknowledge_irq(pending, s->vectpending_prio);

    vec->active = 1;
    vec->pending = 0;

    /* Record the new current exception number in the CPU state */
    write_v7m_exception(env, s->vectpending);

    nvic_irq_update(s);
}
 687
 688void armv7m_nvic_get_pending_irq_info(void *opaque,
 689                                      int *pirq, bool *ptargets_secure)
 690{
 691    NVICState *s = (NVICState *)opaque;
 692    const int pending = s->vectpending;
 693    bool targets_secure;
 694
 695    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
 696
 697    if (s->vectpending_is_s_banked) {
 698        targets_secure = true;
 699    } else {
 700        targets_secure = !exc_is_banked(pending) &&
 701            exc_targets_secure(s, pending);
 702    }
 703
 704    trace_nvic_get_pending_irq_info(pending, targets_secure);
 705
 706    *ptargets_secure = targets_secure;
 707    *pirq = pending;
 708}
 709
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
{
    /* Deactivate an exception on handler return.
     * Returns -1 if the vector was not active (an illegal exception
     * return); otherwise the RETTOBASE value sampled before the
     * deactivation (see nvic_rettobase()).
     */
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;
    int ret;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    if (secure && exc_is_banked(irq)) {
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }

    trace_nvic_complete_irq(irq, secure);

    if (!vec->active) {
        /* Tell the caller this was an illegal exception return */
        return -1;
    }

    ret = nvic_rettobase(s);

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}
 746
 747/* callback when external interrupt line is changed */
 748static void set_irq_level(void *opaque, int n, int level)
 749{
 750    NVICState *s = opaque;
 751    VecInfo *vec;
 752
 753    n += NVIC_FIRST_IRQ;
 754
 755    assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
 756
 757    trace_nvic_set_irq_level(n, level);
 758
 759    /* The pending status of an external interrupt is
 760     * latched on rising edge and exception handler return.
 761     *
 762     * Pulsing the IRQ will always run the handler
 763     * once, and the handler will re-run until the
 764     * level is low when the handler completes.
 765     */
 766    vec = &s->vectors[n];
 767    if (level != vec->level) {
 768        vec->level = level;
 769        if (level) {
 770            armv7m_nvic_set_pending(s, n, false);
 771        }
 772    }
 773}
 774
 775static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
 776{
 777    ARMCPU *cpu = s->cpu;
 778    uint32_t val;
 779
 780    switch (offset) {
 781    case 4: /* Interrupt Control Type.  */
 782        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
 783    case 0xc: /* CPPWR */
 784        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 785            goto bad_offset;
 786        }
 787        /* We make the IMPDEF choice that nothing can ever go into a
 788         * non-retentive power state, which allows us to RAZ/WI this.
 789         */
 790        return 0;
 791    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
 792    {
 793        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
 794        int i;
 795
 796        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 797            goto bad_offset;
 798        }
 799        if (!attrs.secure) {
 800            return 0;
 801        }
 802        val = 0;
 803        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
 804            if (s->itns[startvec + i]) {
 805                val |= (1 << i);
 806            }
 807        }
 808        return val;
 809    }
 810    case 0xd00: /* CPUID Base.  */
 811        return cpu->midr;
 812    case 0xd04: /* Interrupt Control State (ICSR) */
 813        /* VECTACTIVE */
 814        val = cpu->env.v7m.exception;
 815        /* VECTPENDING */
 816        val |= (s->vectpending & 0xff) << 12;
 817        /* ISRPENDING - set if any external IRQ is pending */
 818        if (nvic_isrpending(s)) {
 819            val |= (1 << 22);
 820        }
 821        /* RETTOBASE - set if only one handler is active */
 822        if (nvic_rettobase(s)) {
 823            val |= (1 << 11);
 824        }
 825        if (attrs.secure) {
 826            /* PENDSTSET */
 827            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
 828                val |= (1 << 26);
 829            }
 830            /* PENDSVSET */
 831            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
 832                val |= (1 << 28);
 833            }
 834        } else {
 835            /* PENDSTSET */
 836            if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
 837                val |= (1 << 26);
 838            }
 839            /* PENDSVSET */
 840            if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
 841                val |= (1 << 28);
 842            }
 843        }
 844        /* NMIPENDSET */
 845        if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
 846            && s->vectors[ARMV7M_EXCP_NMI].pending) {
 847            val |= (1 << 31);
 848        }
 849        /* ISRPREEMPT: RES0 when halting debug not implemented */
 850        /* STTNS: RES0 for the Main Extension */
 851        return val;
 852    case 0xd08: /* Vector Table Offset.  */
 853        return cpu->env.v7m.vecbase[attrs.secure];
 854    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
 855        val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
 856        if (attrs.secure) {
 857            /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
 858            val |= cpu->env.v7m.aircr;
 859        } else {
 860            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 861                /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
 862                 * security isn't supported then BFHFNMINS is RAO (and
 863                 * the bit in env.v7m.aircr is always set).
 864                 */
 865                val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
 866            }
 867        }
 868        return val;
 869    case 0xd10: /* System Control.  */
 870        return cpu->env.v7m.scr[attrs.secure];
 871    case 0xd14: /* Configuration Control.  */
 872        /* The BFHFNMIGN bit is the only non-banked bit; we
 873         * keep it in the non-secure copy of the register.
 874         */
 875        val = cpu->env.v7m.ccr[attrs.secure];
 876        val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
 877        return val;
 878    case 0xd24: /* System Handler Control and State (SHCSR) */
 879        val = 0;
 880        if (attrs.secure) {
 881            if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
 882                val |= (1 << 0);
 883            }
 884            if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
 885                val |= (1 << 2);
 886            }
 887            if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
 888                val |= (1 << 3);
 889            }
 890            if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
 891                val |= (1 << 7);
 892            }
 893            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
 894                val |= (1 << 10);
 895            }
 896            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
 897                val |= (1 << 11);
 898            }
 899            if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
 900                val |= (1 << 12);
 901            }
 902            if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
 903                val |= (1 << 13);
 904            }
 905            if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
 906                val |= (1 << 15);
 907            }
 908            if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
 909                val |= (1 << 16);
 910            }
 911            if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
 912                val |= (1 << 18);
 913            }
 914            if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
 915                val |= (1 << 21);
 916            }
 917            /* SecureFault is not banked but is always RAZ/WI to NS */
 918            if (s->vectors[ARMV7M_EXCP_SECURE].active) {
 919                val |= (1 << 4);
 920            }
 921            if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
 922                val |= (1 << 19);
 923            }
 924            if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
 925                val |= (1 << 20);
 926            }
 927        } else {
 928            if (s->vectors[ARMV7M_EXCP_MEM].active) {
 929                val |= (1 << 0);
 930            }
 931            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 932                /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
 933                if (s->vectors[ARMV7M_EXCP_HARD].active) {
 934                    val |= (1 << 2);
 935                }
 936                if (s->vectors[ARMV7M_EXCP_HARD].pending) {
 937                    val |= (1 << 21);
 938                }
 939            }
 940            if (s->vectors[ARMV7M_EXCP_USAGE].active) {
 941                val |= (1 << 3);
 942            }
 943            if (s->vectors[ARMV7M_EXCP_SVC].active) {
 944                val |= (1 << 7);
 945            }
 946            if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
 947                val |= (1 << 10);
 948            }
 949            if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
 950                val |= (1 << 11);
 951            }
 952            if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
 953                val |= (1 << 12);
 954            }
 955            if (s->vectors[ARMV7M_EXCP_MEM].pending) {
 956                val |= (1 << 13);
 957            }
 958            if (s->vectors[ARMV7M_EXCP_SVC].pending) {
 959                val |= (1 << 15);
 960            }
 961            if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
 962                val |= (1 << 16);
 963            }
 964            if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
 965                val |= (1 << 18);
 966            }
 967        }
 968        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
 969            if (s->vectors[ARMV7M_EXCP_BUS].active) {
 970                val |= (1 << 1);
 971            }
 972            if (s->vectors[ARMV7M_EXCP_BUS].pending) {
 973                val |= (1 << 14);
 974            }
 975            if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
 976                val |= (1 << 17);
 977            }
 978            if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
 979                s->vectors[ARMV7M_EXCP_NMI].active) {
 980                /* NMIACT is not present in v7M */
 981                val |= (1 << 5);
 982            }
 983        }
 984
 985        /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
 986        if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
 987            val |= (1 << 8);
 988        }
 989        return val;
 990    case 0xd2c: /* Hard Fault Status.  */
 991        return cpu->env.v7m.hfsr;
 992    case 0xd30: /* Debug Fault Status.  */
 993        return cpu->env.v7m.dfsr;
 994    case 0xd34: /* MMFAR MemManage Fault Address */
 995        return cpu->env.v7m.mmfar[attrs.secure];
 996    case 0xd38: /* Bus Fault Address.  */
 997        return cpu->env.v7m.bfar;
 998    case 0xd3c: /* Aux Fault Status.  */
 999        /* TODO: Implement fault status registers.  */
1000        qemu_log_mask(LOG_UNIMP,
1001                      "Aux Fault status registers unimplemented\n");
1002        return 0;
1003    case 0xd40: /* PFR0.  */
1004        return cpu->id_pfr0;
1005    case 0xd44: /* PFR1.  */
1006        return cpu->id_pfr1;
1007    case 0xd48: /* DFR0.  */
1008        return cpu->id_dfr0;
1009    case 0xd4c: /* AFR0.  */
1010        return cpu->id_afr0;
1011    case 0xd50: /* MMFR0.  */
1012        return cpu->id_mmfr0;
1013    case 0xd54: /* MMFR1.  */
1014        return cpu->id_mmfr1;
1015    case 0xd58: /* MMFR2.  */
1016        return cpu->id_mmfr2;
1017    case 0xd5c: /* MMFR3.  */
1018        return cpu->id_mmfr3;
1019    case 0xd60: /* ISAR0.  */
1020        return cpu->id_isar0;
1021    case 0xd64: /* ISAR1.  */
1022        return cpu->id_isar1;
1023    case 0xd68: /* ISAR2.  */
1024        return cpu->id_isar2;
1025    case 0xd6c: /* ISAR3.  */
1026        return cpu->id_isar3;
1027    case 0xd70: /* ISAR4.  */
1028        return cpu->id_isar4;
1029    case 0xd74: /* ISAR5.  */
1030        return cpu->id_isar5;
1031    case 0xd78: /* CLIDR */
1032        return cpu->clidr;
1033    case 0xd7c: /* CTR */
1034        return cpu->ctr;
1035    case 0xd80: /* CSSIDR */
1036    {
1037        int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
1038        return cpu->ccsidr[idx];
1039    }
1040    case 0xd84: /* CSSELR */
1041        return cpu->env.v7m.csselr[attrs.secure];
1042    /* TODO: Implement debug registers.  */
1043    case 0xd90: /* MPU_TYPE */
1044        /* Unified MPU; if the MPU is not present this value is zero */
1045        return cpu->pmsav7_dregion << 8;
1046        break;
1047    case 0xd94: /* MPU_CTRL */
1048        return cpu->env.v7m.mpu_ctrl[attrs.secure];
1049    case 0xd98: /* MPU_RNR */
1050        return cpu->env.pmsav7.rnr[attrs.secure];
1051    case 0xd9c: /* MPU_RBAR */
1052    case 0xda4: /* MPU_RBAR_A1 */
1053    case 0xdac: /* MPU_RBAR_A2 */
1054    case 0xdb4: /* MPU_RBAR_A3 */
1055    {
1056        int region = cpu->env.pmsav7.rnr[attrs.secure];
1057
1058        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1059            /* PMSAv8M handling of the aliases is different from v7M:
1060             * aliases A1, A2, A3 override the low two bits of the region
1061             * number in MPU_RNR, and there is no 'region' field in the
1062             * RBAR register.
1063             */
1064            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1065            if (aliasno) {
1066                region = deposit32(region, 0, 2, aliasno);
1067            }
1068            if (region >= cpu->pmsav7_dregion) {
1069                return 0;
1070            }
1071            return cpu->env.pmsav8.rbar[attrs.secure][region];
1072        }
1073
1074        if (region >= cpu->pmsav7_dregion) {
1075            return 0;
1076        }
1077        return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
1078    }
1079    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1080    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1081    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1082    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1083    {
1084        int region = cpu->env.pmsav7.rnr[attrs.secure];
1085
1086        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1087            /* PMSAv8M handling of the aliases is different from v7M:
1088             * aliases A1, A2, A3 override the low two bits of the region
1089             * number in MPU_RNR.
1090             */
1091            int aliasno = (offset - 0xda0) / 8; /* 0..3 */
1092            if (aliasno) {
1093                region = deposit32(region, 0, 2, aliasno);
1094            }
1095            if (region >= cpu->pmsav7_dregion) {
1096                return 0;
1097            }
1098            return cpu->env.pmsav8.rlar[attrs.secure][region];
1099        }
1100
1101        if (region >= cpu->pmsav7_dregion) {
1102            return 0;
1103        }
1104        return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
1105            (cpu->env.pmsav7.drsr[region] & 0xffff);
1106    }
1107    case 0xdc0: /* MPU_MAIR0 */
1108        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1109            goto bad_offset;
1110        }
1111        return cpu->env.pmsav8.mair0[attrs.secure];
1112    case 0xdc4: /* MPU_MAIR1 */
1113        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1114            goto bad_offset;
1115        }
1116        return cpu->env.pmsav8.mair1[attrs.secure];
1117    case 0xdd0: /* SAU_CTRL */
1118        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1119            goto bad_offset;
1120        }
1121        if (!attrs.secure) {
1122            return 0;
1123        }
1124        return cpu->env.sau.ctrl;
1125    case 0xdd4: /* SAU_TYPE */
1126        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1127            goto bad_offset;
1128        }
1129        if (!attrs.secure) {
1130            return 0;
1131        }
1132        return cpu->sau_sregion;
1133    case 0xdd8: /* SAU_RNR */
1134        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1135            goto bad_offset;
1136        }
1137        if (!attrs.secure) {
1138            return 0;
1139        }
1140        return cpu->env.sau.rnr;
1141    case 0xddc: /* SAU_RBAR */
1142    {
1143        int region = cpu->env.sau.rnr;
1144
1145        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1146            goto bad_offset;
1147        }
1148        if (!attrs.secure) {
1149            return 0;
1150        }
1151        if (region >= cpu->sau_sregion) {
1152            return 0;
1153        }
1154        return cpu->env.sau.rbar[region];
1155    }
1156    case 0xde0: /* SAU_RLAR */
1157    {
1158        int region = cpu->env.sau.rnr;
1159
1160        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1161            goto bad_offset;
1162        }
1163        if (!attrs.secure) {
1164            return 0;
1165        }
1166        if (region >= cpu->sau_sregion) {
1167            return 0;
1168        }
1169        return cpu->env.sau.rlar[region];
1170    }
1171    case 0xde4: /* SFSR */
1172        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1173            goto bad_offset;
1174        }
1175        if (!attrs.secure) {
1176            return 0;
1177        }
1178        return cpu->env.v7m.sfsr;
1179    case 0xde8: /* SFAR */
1180        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1181            goto bad_offset;
1182        }
1183        if (!attrs.secure) {
1184            return 0;
1185        }
1186        return cpu->env.v7m.sfar;
1187    default:
1188    bad_offset:
1189        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
1190        return 0;
1191    }
1192}
1193
1194static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
1195                        MemTxAttrs attrs)
1196{
1197    ARMCPU *cpu = s->cpu;
1198
1199    switch (offset) {
1200    case 0xc: /* CPPWR */
1201        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1202            goto bad_offset;
1203        }
1204        /* Make the IMPDEF choice to RAZ/WI this. */
1205        break;
1206    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
1207    {
1208        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1209        int i;
1210
1211        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1212            goto bad_offset;
1213        }
1214        if (!attrs.secure) {
1215            break;
1216        }
1217        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1218            s->itns[startvec + i] = (value >> i) & 1;
1219        }
1220        nvic_irq_update(s);
1221        break;
1222    }
1223    case 0xd04: /* Interrupt Control State (ICSR) */
1224        if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1225            if (value & (1 << 31)) {
1226                armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
1227            } else if (value & (1 << 30) &&
1228                       arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1229                /* PENDNMICLR didn't exist in v7M */
1230                armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
1231            }
1232        }
1233        if (value & (1 << 28)) {
1234            armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1235        } else if (value & (1 << 27)) {
1236            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1237        }
1238        if (value & (1 << 26)) {
1239            armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1240        } else if (value & (1 << 25)) {
1241            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1242        }
1243        break;
1244    case 0xd08: /* Vector Table Offset.  */
1245        cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
1246        break;
1247    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1248        if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
1249            if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
1250                if (attrs.secure ||
1251                    !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
1252                    qemu_irq_pulse(s->sysresetreq);
1253                }
1254            }
1255            if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
1256                qemu_log_mask(LOG_GUEST_ERROR,
1257                              "Setting VECTCLRACTIVE when not in DEBUG mode "
1258                              "is UNPREDICTABLE\n");
1259            }
1260            if (value & R_V7M_AIRCR_VECTRESET_MASK) {
1261                /* NB: this bit is RES0 in v8M */
1262                qemu_log_mask(LOG_GUEST_ERROR,
1263                              "Setting VECTRESET when not in DEBUG mode "
1264                              "is UNPREDICTABLE\n");
1265            }
1266            s->prigroup[attrs.secure] = extract32(value,
1267                                                  R_V7M_AIRCR_PRIGROUP_SHIFT,
1268                                                  R_V7M_AIRCR_PRIGROUP_LENGTH);
1269            if (attrs.secure) {
1270                /* These bits are only writable by secure */
1271                cpu->env.v7m.aircr = value &
1272                    (R_V7M_AIRCR_SYSRESETREQS_MASK |
1273                     R_V7M_AIRCR_BFHFNMINS_MASK |
1274                     R_V7M_AIRCR_PRIS_MASK);
1275                /* BFHFNMINS changes the priority of Secure HardFault, and
1276                 * allows a pending Non-secure HardFault to preempt (which
1277                 * we implement by marking it enabled).
1278                 */
1279                if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1280                    s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
1281                    s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
1282                } else {
1283                    s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
1284                    s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1285                }
1286            }
1287            nvic_irq_update(s);
1288        }
1289        break;
1290    case 0xd10: /* System Control.  */
1291        /* We don't implement deep-sleep so these bits are RAZ/WI.
1292         * The other bits in the register are banked.
1293         * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
1294         * is architecturally permitted.
1295         */
1296        value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
1297        cpu->env.v7m.scr[attrs.secure] = value;
1298        break;
1299    case 0xd14: /* Configuration Control.  */
1300        /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
1301        value &= (R_V7M_CCR_STKALIGN_MASK |
1302                  R_V7M_CCR_BFHFNMIGN_MASK |
1303                  R_V7M_CCR_DIV_0_TRP_MASK |
1304                  R_V7M_CCR_UNALIGN_TRP_MASK |
1305                  R_V7M_CCR_USERSETMPEND_MASK |
1306                  R_V7M_CCR_NONBASETHRDENA_MASK);
1307
1308        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1309            /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
1310            value |= R_V7M_CCR_NONBASETHRDENA_MASK
1311                | R_V7M_CCR_STKALIGN_MASK;
1312        }
1313        if (attrs.secure) {
1314            /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
1315            cpu->env.v7m.ccr[M_REG_NS] =
1316                (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
1317                | (value & R_V7M_CCR_BFHFNMIGN_MASK);
1318            value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1319        }
1320
1321        cpu->env.v7m.ccr[attrs.secure] = value;
1322        break;
1323    case 0xd24: /* System Handler Control and State (SHCSR) */
1324        if (attrs.secure) {
1325            s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1326            /* Secure HardFault active bit cannot be written */
1327            s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1328            s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1329            s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
1330                (value & (1 << 10)) != 0;
1331            s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
1332                (value & (1 << 11)) != 0;
1333            s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
1334                (value & (1 << 12)) != 0;
1335            s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1336            s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1337            s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1338            s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1339            s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
1340                (value & (1 << 18)) != 0;
1341            s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1342            /* SecureFault not banked, but RAZ/WI to NS */
1343            s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
1344            s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
1345            s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
1346        } else {
1347            s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1348            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1349                /* HARDFAULTPENDED is not present in v7M */
1350                s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1351            }
1352            s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1353            s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1354            s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
1355            s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
1356            s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
1357            s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1358            s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1359            s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1360            s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
1361        }
1362        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1363            s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
1364            s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
1365            s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1366        }
1367        /* NMIACT can only be written if the write is of a zero, with
1368         * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
1369         */
1370        if (!attrs.secure && cpu->env.v7m.secure &&
1371            (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1372            (value & (1 << 5)) == 0) {
1373            s->vectors[ARMV7M_EXCP_NMI].active = 0;
1374        }
1375        /* HARDFAULTACT can only be written if the write is of a zero
1376         * to the non-secure HardFault state by the CPU in secure state.
1377         * The only case where we can be targeting the non-secure HF state
1378         * when in secure state is if this is a write via the NS alias
1379         * and BFHFNMINS is 1.
1380         */
1381        if (!attrs.secure && cpu->env.v7m.secure &&
1382            (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1383            (value & (1 << 2)) == 0) {
1384            s->vectors[ARMV7M_EXCP_HARD].active = 0;
1385        }
1386
1387        /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1388        s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
1389        nvic_irq_update(s);
1390        break;
1391    case 0xd2c: /* Hard Fault Status.  */
1392        cpu->env.v7m.hfsr &= ~value; /* W1C */
1393        break;
1394    case 0xd30: /* Debug Fault Status.  */
1395        cpu->env.v7m.dfsr &= ~value; /* W1C */
1396        break;
1397    case 0xd34: /* Mem Manage Address.  */
1398        cpu->env.v7m.mmfar[attrs.secure] = value;
1399        return;
1400    case 0xd38: /* Bus Fault Address.  */
1401        cpu->env.v7m.bfar = value;
1402        return;
1403    case 0xd3c: /* Aux Fault Status.  */
1404        qemu_log_mask(LOG_UNIMP,
1405                      "NVIC: Aux fault status registers unimplemented\n");
1406        break;
1407    case 0xd84: /* CSSELR */
1408        if (!arm_v7m_csselr_razwi(cpu)) {
1409            cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
1410        }
1411        break;
1412    case 0xd90: /* MPU_TYPE */
1413        return; /* RO */
1414    case 0xd94: /* MPU_CTRL */
1415        if ((value &
1416             (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
1417            == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
1418            qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
1419                          "UNPREDICTABLE\n");
1420        }
1421        cpu->env.v7m.mpu_ctrl[attrs.secure]
1422            = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
1423                       R_V7M_MPU_CTRL_HFNMIENA_MASK |
1424                       R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
1425        tlb_flush(CPU(cpu));
1426        break;
1427    case 0xd98: /* MPU_RNR */
1428        if (value >= cpu->pmsav7_dregion) {
1429            qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
1430                          PRIu32 "/%" PRIu32 "\n",
1431                          value, cpu->pmsav7_dregion);
1432        } else {
1433            cpu->env.pmsav7.rnr[attrs.secure] = value;
1434        }
1435        break;
1436    case 0xd9c: /* MPU_RBAR */
1437    case 0xda4: /* MPU_RBAR_A1 */
1438    case 0xdac: /* MPU_RBAR_A2 */
1439    case 0xdb4: /* MPU_RBAR_A3 */
1440    {
1441        int region;
1442
1443        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1444            /* PMSAv8M handling of the aliases is different from v7M:
1445             * aliases A1, A2, A3 override the low two bits of the region
1446             * number in MPU_RNR, and there is no 'region' field in the
1447             * RBAR register.
1448             */
1449            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1450
1451            region = cpu->env.pmsav7.rnr[attrs.secure];
1452            if (aliasno) {
1453                region = deposit32(region, 0, 2, aliasno);
1454            }
1455            if (region >= cpu->pmsav7_dregion) {
1456                return;
1457            }
1458            cpu->env.pmsav8.rbar[attrs.secure][region] = value;
1459            tlb_flush(CPU(cpu));
1460            return;
1461        }
1462
1463        if (value & (1 << 4)) {
1464            /* VALID bit means use the region number specified in this
1465             * value and also update MPU_RNR.REGION with that value.
1466             */
1467            region = extract32(value, 0, 4);
1468            if (region >= cpu->pmsav7_dregion) {
1469                qemu_log_mask(LOG_GUEST_ERROR,
1470                              "MPU region out of range %u/%" PRIu32 "\n",
1471                              region, cpu->pmsav7_dregion);
1472                return;
1473            }
1474            cpu->env.pmsav7.rnr[attrs.secure] = region;
1475        } else {
1476            region = cpu->env.pmsav7.rnr[attrs.secure];
1477        }
1478
1479        if (region >= cpu->pmsav7_dregion) {
1480            return;
1481        }
1482
1483        cpu->env.pmsav7.drbar[region] = value & ~0x1f;
1484        tlb_flush(CPU(cpu));
1485        break;
1486    }
1487    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1488    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1489    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1490    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1491    {
1492        int region = cpu->env.pmsav7.rnr[attrs.secure];
1493
1494        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1495            /* PMSAv8M handling of the aliases is different from v7M:
1496             * aliases A1, A2, A3 override the low two bits of the region
1497             * number in MPU_RNR.
1498             */
1499            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1500
1501            region = cpu->env.pmsav7.rnr[attrs.secure];
1502            if (aliasno) {
1503                region = deposit32(region, 0, 2, aliasno);
1504            }
1505            if (region >= cpu->pmsav7_dregion) {
1506                return;
1507            }
1508            cpu->env.pmsav8.rlar[attrs.secure][region] = value;
1509            tlb_flush(CPU(cpu));
1510            return;
1511        }
1512
1513        if (region >= cpu->pmsav7_dregion) {
1514            return;
1515        }
1516
1517        cpu->env.pmsav7.drsr[region] = value & 0xff3f;
1518        cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
1519        tlb_flush(CPU(cpu));
1520        break;
1521    }
1522    case 0xdc0: /* MPU_MAIR0 */
1523        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1524            goto bad_offset;
1525        }
1526        if (cpu->pmsav7_dregion) {
1527            /* Register is RES0 if no MPU regions are implemented */
1528            cpu->env.pmsav8.mair0[attrs.secure] = value;
1529        }
1530        /* We don't need to do anything else because memory attributes
1531         * only affect cacheability, and we don't implement caching.
1532         */
1533        break;
1534    case 0xdc4: /* MPU_MAIR1 */
1535        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1536            goto bad_offset;
1537        }
1538        if (cpu->pmsav7_dregion) {
1539            /* Register is RES0 if no MPU regions are implemented */
1540            cpu->env.pmsav8.mair1[attrs.secure] = value;
1541        }
1542        /* We don't need to do anything else because memory attributes
1543         * only affect cacheability, and we don't implement caching.
1544         */
1545        break;
1546    case 0xdd0: /* SAU_CTRL */
1547        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1548            goto bad_offset;
1549        }
1550        if (!attrs.secure) {
1551            return;
1552        }
1553        cpu->env.sau.ctrl = value & 3;
1554        break;
1555    case 0xdd4: /* SAU_TYPE */
1556        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1557            goto bad_offset;
1558        }
1559        break;
1560    case 0xdd8: /* SAU_RNR */
1561        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1562            goto bad_offset;
1563        }
1564        if (!attrs.secure) {
1565            return;
1566        }
1567        if (value >= cpu->sau_sregion) {
1568            qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
1569                          PRIu32 "/%" PRIu32 "\n",
1570                          value, cpu->sau_sregion);
1571        } else {
1572            cpu->env.sau.rnr = value;
1573        }
1574        break;
1575    case 0xddc: /* SAU_RBAR */
1576    {
1577        int region = cpu->env.sau.rnr;
1578
1579        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1580            goto bad_offset;
1581        }
1582        if (!attrs.secure) {
1583            return;
1584        }
1585        if (region >= cpu->sau_sregion) {
1586            return;
1587        }
1588        cpu->env.sau.rbar[region] = value & ~0x1f;
1589        tlb_flush(CPU(cpu));
1590        break;
1591    }
1592    case 0xde0: /* SAU_RLAR */
1593    {
1594        int region = cpu->env.sau.rnr;
1595
1596        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1597            goto bad_offset;
1598        }
1599        if (!attrs.secure) {
1600            return;
1601        }
1602        if (region >= cpu->sau_sregion) {
1603            return;
1604        }
1605        cpu->env.sau.rlar[region] = value & ~0x1c;
1606        tlb_flush(CPU(cpu));
1607        break;
1608    }
1609    case 0xde4: /* SFSR */
1610        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1611            goto bad_offset;
1612        }
1613        if (!attrs.secure) {
1614            return;
1615        }
1616        cpu->env.v7m.sfsr &= ~value; /* W1C */
1617        break;
1618    case 0xde8: /* SFAR */
1619        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1620            goto bad_offset;
1621        }
1622        if (!attrs.secure) {
1623            return;
1624        }
1625        cpu->env.v7m.sfsr = value;
1626        break;
1627    case 0xf00: /* Software Triggered Interrupt Register */
1628    {
1629        int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
1630        if (excnum < s->num_irq) {
1631            armv7m_nvic_set_pending(s, excnum, false);
1632        }
1633        break;
1634    }
1635    case 0xf50: /* ICIALLU */
1636    case 0xf58: /* ICIMVAU */
1637    case 0xf5c: /* DCIMVAC */
1638    case 0xf60: /* DCISW */
1639    case 0xf64: /* DCCMVAU */
1640    case 0xf68: /* DCCMVAC */
1641    case 0xf6c: /* DCCSW */
1642    case 0xf70: /* DCCIMVAC */
1643    case 0xf74: /* DCCISW */
1644    case 0xf78: /* BPIALL */
1645        /* Cache and branch predictor maintenance: for QEMU these always NOP */
1646        break;
1647    default:
1648    bad_offset:
1649        qemu_log_mask(LOG_GUEST_ERROR,
1650                      "NVIC: Bad write offset 0x%x\n", offset);
1651    }
1652}
1653
1654static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
1655{
1656    /* Return true if unprivileged access to this register is permitted. */
1657    switch (offset) {
1658    case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
1659        /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
1660         * controls access even though the CPU is in Secure state (I_QDKX).
1661         */
1662        return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
1663    default:
1664        /* All other user accesses cause a BusFault unconditionally */
1665        return false;
1666    }
1667}
1668
1669static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
1670{
1671    /* Behaviour for the SHPR register field for this exception:
1672     * return M_REG_NS to use the nonsecure vector (including for
1673     * non-banked exceptions), M_REG_S for the secure version of
1674     * a banked exception, and -1 if this field should RAZ/WI.
1675     */
1676    switch (exc) {
1677    case ARMV7M_EXCP_MEM:
1678    case ARMV7M_EXCP_USAGE:
1679    case ARMV7M_EXCP_SVC:
1680    case ARMV7M_EXCP_PENDSV:
1681    case ARMV7M_EXCP_SYSTICK:
1682        /* Banked exceptions */
1683        return attrs.secure;
1684    case ARMV7M_EXCP_BUS:
1685        /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
1686        if (!attrs.secure &&
1687            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1688            return -1;
1689        }
1690        return M_REG_NS;
1691    case ARMV7M_EXCP_SECURE:
1692        /* Not banked, RAZ/WI from nonsecure */
1693        if (!attrs.secure) {
1694            return -1;
1695        }
1696        return M_REG_NS;
1697    case ARMV7M_EXCP_DEBUG:
1698        /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
1699        return M_REG_NS;
1700    case 8 ... 10:
1701    case 13:
1702        /* RES0 */
1703        return -1;
1704    default:
1705        /* Not reachable due to decode of SHPR register addresses */
1706        g_assert_not_reached();
1707    }
1708}
1709
1710static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
1711                                    uint64_t *data, unsigned size,
1712                                    MemTxAttrs attrs)
1713{
1714    NVICState *s = (NVICState *)opaque;
1715    uint32_t offset = addr;
1716    unsigned i, startvec, end;
1717    uint32_t val;
1718
1719    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
1720        /* Generate BusFault for unprivileged accesses */
1721        return MEMTX_ERROR;
1722    }
1723
1724    switch (offset) {
1725    /* reads of set and clear both return the status */
1726    case 0x100 ... 0x13f: /* NVIC Set enable */
1727        offset += 0x80;
1728        /* fall through */
1729    case 0x180 ... 0x1bf: /* NVIC Clear enable */
1730        val = 0;
1731        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */
1732
1733        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1734            if (s->vectors[startvec + i].enabled &&
1735                (attrs.secure || s->itns[startvec + i])) {
1736                val |= (1 << i);
1737            }
1738        }
1739        break;
1740    case 0x200 ... 0x23f: /* NVIC Set pend */
1741        offset += 0x80;
1742        /* fall through */
1743    case 0x280 ... 0x2bf: /* NVIC Clear pend */
1744        val = 0;
1745        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
1746        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1747            if (s->vectors[startvec + i].pending &&
1748                (attrs.secure || s->itns[startvec + i])) {
1749                val |= (1 << i);
1750            }
1751        }
1752        break;
1753    case 0x300 ... 0x33f: /* NVIC Active */
1754        val = 0;
1755        startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
1756
1757        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1758            if (s->vectors[startvec + i].active &&
1759                (attrs.secure || s->itns[startvec + i])) {
1760                val |= (1 << i);
1761            }
1762        }
1763        break;
1764    case 0x400 ... 0x5ef: /* NVIC Priority */
1765        val = 0;
1766        startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
1767
1768        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
1769            if (attrs.secure || s->itns[startvec + i]) {
1770                val |= s->vectors[startvec + i].prio << (8 * i);
1771            }
1772        }
1773        break;
1774    case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
1775        val = 0;
1776        for (i = 0; i < size; i++) {
1777            unsigned hdlidx = (offset - 0xd14) + i;
1778            int sbank = shpr_bank(s, hdlidx, attrs);
1779
1780            if (sbank < 0) {
1781                continue;
1782            }
1783            val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
1784        }
1785        break;
1786    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
1787        /* The BFSR bits [15:8] are shared between security states
1788         * and we store them in the NS copy
1789         */
1790        val = s->cpu->env.v7m.cfsr[attrs.secure];
1791        val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
1792        val = extract32(val, (offset - 0xd28) * 8, size * 8);
1793        break;
1794    case 0xfe0 ... 0xfff: /* ID.  */
1795        if (offset & 3) {
1796            val = 0;
1797        } else {
1798            val = nvic_id[(offset - 0xfe0) >> 2];
1799        }
1800        break;
1801    default:
1802        if (size == 4) {
1803            val = nvic_readl(s, offset, attrs);
1804        } else {
1805            qemu_log_mask(LOG_GUEST_ERROR,
1806                          "NVIC: Bad read of size %d at offset 0x%x\n",
1807                          size, offset);
1808            val = 0;
1809        }
1810    }
1811
1812    trace_nvic_sysreg_read(addr, val, size);
1813    *data = val;
1814    return MEMTX_OK;
1815}
1816
1817static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
1818                                     uint64_t value, unsigned size,
1819                                     MemTxAttrs attrs)
1820{
1821    NVICState *s = (NVICState *)opaque;
1822    uint32_t offset = addr;
1823    unsigned i, startvec, end;
1824    unsigned setval = 0;
1825
1826    trace_nvic_sysreg_write(addr, value, size);
1827
1828    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
1829        /* Generate BusFault for unprivileged accesses */
1830        return MEMTX_ERROR;
1831    }
1832
1833    switch (offset) {
1834    case 0x100 ... 0x13f: /* NVIC Set enable */
1835        offset += 0x80;
1836        setval = 1;
1837        /* fall through */
1838    case 0x180 ... 0x1bf: /* NVIC Clear enable */
1839        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
1840
1841        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1842            if (value & (1 << i) &&
1843                (attrs.secure || s->itns[startvec + i])) {
1844                s->vectors[startvec + i].enabled = setval;
1845            }
1846        }
1847        nvic_irq_update(s);
1848        return MEMTX_OK;
1849    case 0x200 ... 0x23f: /* NVIC Set pend */
1850        /* the special logic in armv7m_nvic_set_pending()
1851         * is not needed since IRQs are never escalated
1852         */
1853        offset += 0x80;
1854        setval = 1;
1855        /* fall through */
1856    case 0x280 ... 0x2bf: /* NVIC Clear pend */
1857        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
1858
1859        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1860            if (value & (1 << i) &&
1861                (attrs.secure || s->itns[startvec + i])) {
1862                s->vectors[startvec + i].pending = setval;
1863            }
1864        }
1865        nvic_irq_update(s);
1866        return MEMTX_OK;
1867    case 0x300 ... 0x33f: /* NVIC Active */
1868        return MEMTX_OK; /* R/O */
1869    case 0x400 ... 0x5ef: /* NVIC Priority */
1870        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
1871
1872        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
1873            if (attrs.secure || s->itns[startvec + i]) {
1874                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
1875            }
1876        }
1877        nvic_irq_update(s);
1878        return MEMTX_OK;
1879    case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
1880        for (i = 0; i < size; i++) {
1881            unsigned hdlidx = (offset - 0xd14) + i;
1882            int newprio = extract32(value, i * 8, 8);
1883            int sbank = shpr_bank(s, hdlidx, attrs);
1884
1885            if (sbank < 0) {
1886                continue;
1887            }
1888            set_prio(s, hdlidx, sbank, newprio);
1889        }
1890        nvic_irq_update(s);
1891        return MEMTX_OK;
1892    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
1893        /* All bits are W1C, so construct 32 bit value with 0s in
1894         * the parts not written by the access size
1895         */
1896        value <<= ((offset - 0xd28) * 8);
1897
1898        s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
1899        if (attrs.secure) {
1900            /* The BFSR bits [15:8] are shared between security states
1901             * and we store them in the NS copy.
1902             */
1903            s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
1904        }
1905        return MEMTX_OK;
1906    }
1907    if (size == 4) {
1908        nvic_writel(s, offset, value, attrs);
1909        return MEMTX_OK;
1910    }
1911    qemu_log_mask(LOG_GUEST_ERROR,
1912                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
1913    /* This is UNPREDICTABLE; treat as RAZ/WI */
1914    return MEMTX_OK;
1915}
1916
/* Ops for the main SCS/NVIC register region; the *_with_attrs callbacks
 * let the handlers see the transaction's security/privilege attributes.
 */
static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1922
1923static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
1924                                        uint64_t value, unsigned size,
1925                                        MemTxAttrs attrs)
1926{
1927    MemoryRegion *mr = opaque;
1928
1929    if (attrs.secure) {
1930        /* S accesses to the alias act like NS accesses to the real region */
1931        attrs.secure = 0;
1932        return memory_region_dispatch_write(mr, addr, value, size, attrs);
1933    } else {
1934        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
1935        if (attrs.user) {
1936            return MEMTX_ERROR;
1937        }
1938        return MEMTX_OK;
1939    }
1940}
1941
1942static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
1943                                       uint64_t *data, unsigned size,
1944                                       MemTxAttrs attrs)
1945{
1946    MemoryRegion *mr = opaque;
1947
1948    if (attrs.secure) {
1949        /* S accesses to the alias act like NS accesses to the real region */
1950        attrs.secure = 0;
1951        return memory_region_dispatch_read(mr, addr, data, size, attrs);
1952    } else {
1953        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
1954        if (attrs.user) {
1955            return MEMTX_ERROR;
1956        }
1957        *data = 0;
1958        return MEMTX_OK;
1959    }
1960}
1961
/* Ops for the v8M NonSecure alias regions; the opaque pointer passed to
 * the callbacks is the MemoryRegion of the real register bank to forward to.
 */
static const MemoryRegionOps nvic_sysreg_ns_ops = {
    .read_with_attrs = nvic_sysreg_ns_read,
    .write_with_attrs = nvic_sysreg_ns_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1967
1968static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
1969                                      uint64_t value, unsigned size,
1970                                      MemTxAttrs attrs)
1971{
1972    NVICState *s = opaque;
1973    MemoryRegion *mr;
1974
1975    /* Direct the access to the correct systick */
1976    mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
1977    return memory_region_dispatch_write(mr, addr, value, size, attrs);
1978}
1979
1980static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
1981                                     uint64_t *data, unsigned size,
1982                                     MemTxAttrs attrs)
1983{
1984    NVICState *s = opaque;
1985    MemoryRegion *mr;
1986
1987    /* Direct the access to the correct systick */
1988    mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
1989    return memory_region_dispatch_read(mr, addr, data, size, attrs);
1990}
1991
/* Ops for the systick window in the SCS: dispatches to the NS or S
 * banked systick device based on the access's security attribute.
 */
static const MemoryRegionOps nvic_systick_ops = {
    .read_with_attrs = nvic_systick_read,
    .write_with_attrs = nvic_systick_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1997
1998static int nvic_post_load(void *opaque, int version_id)
1999{
2000    NVICState *s = opaque;
2001    unsigned i;
2002    int resetprio;
2003
2004    /* Check for out of range priority settings */
2005    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2006
2007    if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
2008        s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
2009        s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
2010        return 1;
2011    }
2012    for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
2013        if (s->vectors[i].prio & ~0xff) {
2014            return 1;
2015        }
2016    }
2017
2018    nvic_recompute_state(s);
2019
2020    return 0;
2021}
2022
/* Migration state for one exception vector's bookkeeping (VecInfo) */
static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};
2036
2037static bool nvic_security_needed(void *opaque)
2038{
2039    NVICState *s = opaque;
2040
2041    return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
2042}
2043
2044static int nvic_security_post_load(void *opaque, int version_id)
2045{
2046    NVICState *s = opaque;
2047    int i;
2048
2049    /* Check for out of range priority settings */
2050    if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
2051        && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
2052        /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
2053         * if the CPU state has been migrated yet; a mismatch won't
2054         * cause the emulation to blow up, though.
2055         */
2056        return 1;
2057    }
2058    for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
2059        if (s->sec_vectors[i].prio & ~0xff) {
2060            return 1;
2061        }
2062    }
2063    return 0;
2064}
2065
/* Migration subsection present only when the Security Extension exists:
 * the secure banked internal vectors, the secure PRIGROUP, and the
 * per-interrupt target-nonsecure (ITNS) bits.
 */
static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};
2080
/* Top-level NVIC migration state: the (nonsecure) vectors and PRIGROUP,
 * with the security-extension state carried in an optional subsection.
 */
static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_nvic_security,
        NULL
    }
};
2097
/* qdev properties: only the external IRQ line count is configurable */
static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};
2103
/* Device reset: restore all vectors, priorities and cached state to the
 * architectural reset values. Note this clears both banks of state and
 * then re-enables/re-prioritizes only the architecturally fixed entries.
 */
static void armv7m_nvic_reset(DeviceState *dev)
{
    int resetprio;
    NVICState *s = NVIC(dev);

    /* Start from all-zero vector state in both security banks */
    memset(s->vectors, 0, sizeof(s->vectors));
    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
    s->prigroup[M_REG_NS] = 0;
    s->prigroup[M_REG_S] = 0;

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    /* MEM, BUS, and USAGE are enabled through
     * the System Handler Control register
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    /* Fixed (negative) priorities; Reset is -4 on v8M, -3 earlier */
    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
    s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

        /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
        s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
        /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
        s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
    } else {
        s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    }

    /* Strictly speaking the reset handler should be enabled.
     * However, we don't simulate soft resets through the NVIC,
     * and the reset vector should never be pended.
     * So we leave it disabled to catch logic errors.
     */

    /* Reset the cached "what's pending/running" summary state */
    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
    s->vectpending_is_s_banked = false;
    s->vectpending_prio = NVIC_NOEXC_PRIO;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        memset(s->itns, 0, sizeof(s->itns));
    } else {
        /* This state is constant and not guest accessible in a non-security
         * NVIC; we set the bits to true to avoid having to do a feature
         * bit check in the NVIC enable/pend/etc register accessors.
         */
        int i;

        for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
            s->itns[i] = true;
        }
    }
}
2167
2168static void nvic_systick_trigger(void *opaque, int n, int level)
2169{
2170    NVICState *s = opaque;
2171
2172    if (level) {
2173        /* SysTick just asked us to pend its exception.
2174         * (This is different from an external interrupt line's
2175         * behaviour.)
2176         * n == 0 : NonSecure systick
2177         * n == 1 : Secure systick
2178         */
2179        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
2180    }
2181}
2182
/* Realize: validate configuration, realize and wire the banked systick
 * devices, and build the SCS memory region hierarchy (including the v8M
 * NonSecure alias region when the CPU has the relevant features).
 */
static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);
    Error *err = NULL;
    int regionlen;

    /* NOTE(review): assumes CPU 0 is the M-profile core this NVIC
     * serves; there is no property to select a different CPU.
     */
    s->cpu = ARM_CPU(qemu_get_cpu(0));

    if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
        error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
        return;
    }

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    /* One GPIO input line per external IRQ */
    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    /* Realize the NS systick and connect its IRQ to our trigger input */
    object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true,
                             "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
                       qdev_get_gpio_in_named(dev, "systick-trigger",
                                              M_REG_NS));

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        /* We couldn't init the secure systick device in instance_init
         * as we didn't know then if the CPU had the security extensions;
         * so we have to do it here.
         */
        object_initialize(&s->systick[M_REG_S], sizeof(s->systick[M_REG_S]),
                          TYPE_SYSTICK);
        qdev_set_parent_bus(DEVICE(&s->systick[M_REG_S]), sysbus_get_default());

        object_property_set_bool(OBJECT(&s->systick[M_REG_S]), true,
                                 "realized", &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
        sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
                           qdev_get_gpio_in_named(dev, "systick-trigger",
                                                  M_REG_S));
    }

    /* The NVIC and System Control Space (SCS) starts at 0xe000e000
     * and looks like this:
     *  0x004 - ICTR
     *  0x010 - 0xff - systick
     *  0x100..0x7ec - NVIC
     *  0x7f0..0xcff - Reserved
     *  0xd00..0xd3c - SCS registers
     *  0xd40..0xeff - Reserved or Not implemented
     *  0xf00 - STIR
     *
     * Some registers within this space are banked between security states.
     * In v8M there is a second range 0xe002e000..0xe002efff which is the
     * NonSecure alias SCS; secure accesses to this behave like NS accesses
     * to the main SCS range, and non-secure accesses (including when
     * the security extension is not implemented) are RAZ/WI.
     * Note that both the main SCS range and the alias range are defined
     * to be exempt from memory attribution (R_BLJT) and so the memory
     * transaction attribute always matches the current CPU security
     * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops
     * wrappers we change attrs.secure to indicate the NS access; so
     * generally code determining which banked register to use should
     * use attrs.secure; code determining actual behaviour of the system
     * should use env->v7m.secure.
     */
    regionlen = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? 0x21000 : 0x1000;
    memory_region_init(&s->container, OBJECT(s), "nvic", regionlen);
    /* The system register region goes at the bottom of the priority
     * stack as it covers the whole page.
     */
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    memory_region_add_subregion(&s->container, 0, &s->sysregmem);

    /* The systick window overlays (priority 1) the sysreg region */
    memory_region_init_io(&s->systickmem, OBJECT(s),
                          &nvic_systick_ops, s,
                          "nvic_systick", 0xe0);

    memory_region_add_subregion_overlap(&s->container, 0x10,
                                        &s->systickmem, 1);

    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        /* v8M: NS alias regions at +0x20000, forwarding to the real ones */
        memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->sysregmem,
                              "nvic_sysregs_ns", 0x1000);
        memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
        memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->systickmem,
                              "nvic_systick_ns", 0xe0);
        memory_region_add_subregion_overlap(&s->container, 0x20010,
                                            &s->systick_ns_mem, 1);
    }

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
}
2290
static void armv7m_nvic_instance_init(Object *obj)
{
    /* We have a different default value for the num-irq property
     * than our superclass. This function runs after qdev init
     * has set the defaults from the Property array and before
     * any user-specified property setting, so just modify the
     * value in the NVICState struct.
     */
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    /* The NonSecure systick always exists, so create it now */
    sysbus_init_child_obj(obj, "systick-reg-ns", &nvic->systick[M_REG_NS],
                          sizeof(nvic->systick[M_REG_NS]), TYPE_SYSTICK);
    /* We can't initialize the secure systick here, as we don't know
     * yet if we need it.
     */

    /* Outputs: exception-active line, SYSRESETREQ; inputs: one
     * systick trigger per security bank.
     */
    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
                            M_REG_NUM_BANKS);
}
2314
2315static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
2316{
2317    DeviceClass *dc = DEVICE_CLASS(klass);
2318
2319    dc->vmsd  = &vmstate_nvic;
2320    dc->props = props_nvic;
2321    dc->reset = armv7m_nvic_reset;
2322    dc->realize = armv7m_nvic_realize;
2323}
2324
/* QOM type registration info for the NVIC sysbus device */
static const TypeInfo armv7m_nvic_info = {
    .name          = TYPE_NVIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init    = armv7m_nvic_class_init,
    .class_size    = sizeof(SysBusDeviceClass),
};
2333
/* Register the NVIC type with QOM at module-init time */
static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)
2340