qemu/hw/intc/armv7m_nvic.c
   1/*
   2 * ARM Nested Vectored Interrupt Controller
   3 *
   4 * Copyright (c) 2006-2007 CodeSourcery.
   5 * Written by Paul Brook
   6 *
   7 * This code is licensed under the GPL.
   8 *
   9 * The ARMv7M System controller is fairly tightly tied in with the
  10 * NVIC.  Much of that is also implemented here.
  11 */
  12
  13#include "qemu/osdep.h"
  14#include "qapi/error.h"
  15#include "qemu-common.h"
  16#include "cpu.h"
  17#include "hw/sysbus.h"
  18#include "qemu/timer.h"
  19#include "hw/arm/arm.h"
  20#include "hw/intc/armv7m_nvic.h"
  21#include "target/arm/cpu.h"
  22#include "exec/exec-all.h"
  23#include "qemu/log.h"
  24#include "trace.h"
  25
  26/* IRQ number counting:
  27 *
  28 * the num-irq property counts the number of external IRQ lines
  29 *
  30 * NVICState::num_irq counts the total number of exceptions
  31 * (external IRQs, the 15 internal exceptions including reset,
  32 * and one for the unused exception number 0).
  33 *
  34 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
  35 *
  36 * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
  37 *
  38 * Iterating through all exceptions should typically be done with
  39 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
  40 *
  41 * The external qemu_irq lines are the NVIC's external IRQ lines,
  42 * so line 0 is exception 16.
  43 *
  44 * In the terminology of the architecture manual, "interrupts" are
  45 * a subcategory of exception referring to the external interrupts
  46 * (which are exception numbers NVIC_FIRST_IRQ and upward).
  47 * For historical reasons QEMU tends to use "interrupt" and
  48 * "exception" more or less interchangeably.
  49 */
  50#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
  51#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
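/* Worked example of the numbering above: a board that sets the num-irq
 * property to 64 gets NVICState::num_irq == 64 + NVIC_FIRST_IRQ == 80
 * exceptions in total, and external qemu_irq line 5 corresponds to
 * exception number 5 + NVIC_FIRST_IRQ == 21.
 */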
  52
  53/* Effective running priority of the CPU when no exception is active
  54 * (higher than the highest possible priority value)
  55 */
  56#define NVIC_NOEXC_PRIO 0x100
  57/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
  58#define NVIC_NS_PRIO_LIMIT 0x80
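/* Taken together, the priority values used in this file are small signed
 * integers: -3 is the Secure HardFault/FAULTMASK_S priority when
 * AIRCR.BFHFNMINS is set, -2 is NMI, -1 is HardFault, configurable
 * exceptions use 0..0xff, and NVIC_NOEXC_PRIO (0x100) is strictly greater
 * than any priority a real exception can have.
 */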
  59
  60static const uint8_t nvic_id[] = {
  61    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
  62};
  63
  64static int nvic_pending_prio(NVICState *s)
  65{
  66    /* return the group priority of the current pending interrupt,
  67     * or NVIC_NOEXC_PRIO if no interrupt is pending
  68     */
  69    return s->vectpending_prio;
  70}
  71
   72/* Return the value of the ICSR RETTOBASE bit:
  73 * 1 if there is exactly one active exception
  74 * 0 if there is more than one active exception
  75 * UNKNOWN if there are no active exceptions (we choose 1,
  76 * which matches the choice Cortex-M3 is documented as making).
  77 *
  78 * NB: some versions of the documentation talk about this
  79 * counting "active exceptions other than the one shown by IPSR";
  80 * this is only different in the obscure corner case where guest
  81 * code has manually deactivated an exception and is about
  82 * to fail an exception-return integrity check. The definition
  83 * above is the one from the v8M ARM ARM and is also in line
  84 * with the behaviour documented for the Cortex-M3.
  85 */
  86static bool nvic_rettobase(NVICState *s)
  87{
  88    int irq, nhand = 0;
  89    bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
  90
  91    for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
  92        if (s->vectors[irq].active ||
  93            (check_sec && irq < NVIC_INTERNAL_VECTORS &&
  94             s->sec_vectors[irq].active)) {
  95            nhand++;
  96            if (nhand == 2) {
  97                return 0;
  98            }
  99        }
 100    }
 101
 102    return 1;
 103}
 104
  105/* Return the value of the ICSR ISRPENDING bit:
 106 * 1 if an external interrupt is pending
 107 * 0 if no external interrupt is pending
 108 */
 109static bool nvic_isrpending(NVICState *s)
 110{
 111    int irq;
 112
 113    /* We can shortcut if the highest priority pending interrupt
 114     * happens to be external or if there is nothing pending.
 115     */
 116    if (s->vectpending > NVIC_FIRST_IRQ) {
 117        return true;
 118    }
 119    if (s->vectpending == 0) {
 120        return false;
 121    }
 122
 123    for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
 124        if (s->vectors[irq].pending) {
 125            return true;
 126        }
 127    }
 128    return false;
 129}
 130
 131static bool exc_is_banked(int exc)
 132{
 133    /* Return true if this is one of the limited set of exceptions which
 134     * are banked (and thus have state in sec_vectors[])
 135     */
 136    return exc == ARMV7M_EXCP_HARD ||
 137        exc == ARMV7M_EXCP_MEM ||
 138        exc == ARMV7M_EXCP_USAGE ||
 139        exc == ARMV7M_EXCP_SVC ||
 140        exc == ARMV7M_EXCP_PENDSV ||
 141        exc == ARMV7M_EXCP_SYSTICK;
 142}
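/* Note that NMI and BusFault are intentionally absent from this list:
 * they are not banked, and whether they target Secure or Non-secure
 * state is controlled by AIRCR.BFHFNMINS instead (see
 * exc_targets_secure() below).
 */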
 143
 144/* Return a mask word which clears the subpriority bits from
 145 * a priority value for an M-profile exception, leaving only
 146 * the group priority.
 147 */
 148static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
 149{
 150    return ~0U << (s->prigroup[secure] + 1);
 151}
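/* For example, with PRIGROUP == 1 this returns ~0U << 2 == 0xfffffffc,
 * i.e. bits [7:2] of an 8-bit priority value are the group priority and
 * bits [1:0] are the subpriority, matching the architected PRIGROUP
 * encoding.
 */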
 152
 153static bool exc_targets_secure(NVICState *s, int exc)
 154{
 155    /* Return true if this non-banked exception targets Secure state. */
 156    if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
 157        return false;
 158    }
 159
 160    if (exc >= NVIC_FIRST_IRQ) {
 161        return !s->itns[exc];
 162    }
 163
 164    /* Function shouldn't be called for banked exceptions. */
 165    assert(!exc_is_banked(exc));
 166
 167    switch (exc) {
 168    case ARMV7M_EXCP_NMI:
 169    case ARMV7M_EXCP_BUS:
 170        return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
 171    case ARMV7M_EXCP_SECURE:
 172        return true;
 173    case ARMV7M_EXCP_DEBUG:
 174        /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
 175        return false;
 176    default:
 177        /* reset, and reserved (unused) low exception numbers.
 178         * We'll get called by code that loops through all the exception
 179         * numbers, but it doesn't matter what we return here as these
 180         * non-existent exceptions will never be pended or active.
 181         */
 182        return true;
 183    }
 184}
 185
 186static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
 187{
 188    /* Return the group priority for this exception, given its raw
 189     * (group-and-subgroup) priority value and whether it is targeting
 190     * secure state or not.
 191     */
 192    if (rawprio < 0) {
 193        return rawprio;
 194    }
 195    rawprio &= nvic_gprio_mask(s, targets_secure);
 196    /* AIRCR.PRIS causes us to squash all NS priorities into the
 197     * lower half of the total range
 198     */
 199    if (!targets_secure &&
 200        (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
 201        rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
 202    }
 203    return rawprio;
 204}
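/* Worked example of the PRIS squashing: a Non-secure exception whose
 * group priority works out as 0x40 becomes (0x40 >> 1) + 0x80 == 0xa0
 * when AIRCR.PRIS is set, so every squashed Non-secure priority lands in
 * the 0x80..0xff half of the range and can never beat a Secure exception
 * configured with a group priority below 0x80.
 */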
 205
 206/* Recompute vectpending and exception_prio for a CPU which implements
 207 * the Security extension
 208 */
 209static void nvic_recompute_state_secure(NVICState *s)
 210{
 211    int i, bank;
 212    int pend_prio = NVIC_NOEXC_PRIO;
 213    int active_prio = NVIC_NOEXC_PRIO;
 214    int pend_irq = 0;
 215    bool pending_is_s_banked = false;
 216
 217    /* R_CQRV: precedence is by:
 218     *  - lowest group priority; if both the same then
 219     *  - lowest subpriority; if both the same then
 220     *  - lowest exception number; if both the same (ie banked) then
 221     *  - secure exception takes precedence
 222     * Compare pseudocode RawExecutionPriority.
 223     * Annoyingly, now we have two prigroup values (for S and NS)
 224     * we can't do the loop comparison on raw priority values.
 225     */
 226    for (i = 1; i < s->num_irq; i++) {
 227        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
 228            VecInfo *vec;
 229            int prio;
 230            bool targets_secure;
 231
 232            if (bank == M_REG_S) {
 233                if (!exc_is_banked(i)) {
 234                    continue;
 235                }
 236                vec = &s->sec_vectors[i];
 237                targets_secure = true;
 238            } else {
 239                vec = &s->vectors[i];
 240                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
 241            }
 242
 243            prio = exc_group_prio(s, vec->prio, targets_secure);
 244            if (vec->enabled && vec->pending && prio < pend_prio) {
 245                pend_prio = prio;
 246                pend_irq = i;
 247                pending_is_s_banked = (bank == M_REG_S);
 248            }
 249            if (vec->active && prio < active_prio) {
 250                active_prio = prio;
 251            }
 252        }
 253    }
 254
 255    s->vectpending_is_s_banked = pending_is_s_banked;
 256    s->vectpending = pend_irq;
 257    s->vectpending_prio = pend_prio;
 258    s->exception_prio = active_prio;
 259
 260    trace_nvic_recompute_state_secure(s->vectpending,
 261                                      s->vectpending_is_s_banked,
 262                                      s->vectpending_prio,
 263                                      s->exception_prio);
 264}
 265
 266/* Recompute vectpending and exception_prio */
 267static void nvic_recompute_state(NVICState *s)
 268{
 269    int i;
 270    int pend_prio = NVIC_NOEXC_PRIO;
 271    int active_prio = NVIC_NOEXC_PRIO;
 272    int pend_irq = 0;
 273
 274    /* In theory we could write one function that handled both
  275     * the "security extension present" and "not present" cases; however
 276     * the security related changes significantly complicate the
 277     * recomputation just by themselves and mixing both cases together
 278     * would be even worse, so we retain a separate non-secure-only
 279     * version for CPUs which don't implement the security extension.
 280     */
 281    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
 282        nvic_recompute_state_secure(s);
 283        return;
 284    }
 285
 286    for (i = 1; i < s->num_irq; i++) {
 287        VecInfo *vec = &s->vectors[i];
 288
 289        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
 290            pend_prio = vec->prio;
 291            pend_irq = i;
 292        }
 293        if (vec->active && vec->prio < active_prio) {
 294            active_prio = vec->prio;
 295        }
 296    }
 297
 298    if (active_prio > 0) {
 299        active_prio &= nvic_gprio_mask(s, false);
 300    }
 301
 302    if (pend_prio > 0) {
 303        pend_prio &= nvic_gprio_mask(s, false);
 304    }
 305
 306    s->vectpending = pend_irq;
 307    s->vectpending_prio = pend_prio;
 308    s->exception_prio = active_prio;
 309
 310    trace_nvic_recompute_state(s->vectpending,
 311                               s->vectpending_prio,
 312                               s->exception_prio);
 313}
 314
 315/* Return the current execution priority of the CPU
 316 * (equivalent to the pseudocode ExecutionPriority function).
  317 * This is a value between -3 (Secure FAULTMASK with AIRCR.BFHFNMINS set) and NVIC_NOEXC_PRIO.
 318 */
 319static inline int nvic_exec_prio(NVICState *s)
 320{
 321    CPUARMState *env = &s->cpu->env;
 322    int running = NVIC_NOEXC_PRIO;
 323
 324    if (env->v7m.basepri[M_REG_NS] > 0) {
 325        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
 326    }
 327
 328    if (env->v7m.basepri[M_REG_S] > 0) {
 329        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
 330        if (running > basepri) {
 331            running = basepri;
 332        }
 333    }
 334
 335    if (env->v7m.primask[M_REG_NS]) {
 336        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
 337            if (running > NVIC_NS_PRIO_LIMIT) {
 338                running = NVIC_NS_PRIO_LIMIT;
 339            }
 340        } else {
 341            running = 0;
 342        }
 343    }
 344
 345    if (env->v7m.primask[M_REG_S]) {
 346        running = 0;
 347    }
 348
 349    if (env->v7m.faultmask[M_REG_NS]) {
 350        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
 351            running = -1;
 352        } else {
 353            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
 354                if (running > NVIC_NS_PRIO_LIMIT) {
 355                    running = NVIC_NS_PRIO_LIMIT;
 356                }
 357            } else {
 358                running = 0;
 359            }
 360        }
 361    }
 362
 363    if (env->v7m.faultmask[M_REG_S]) {
 364        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
 365    }
 366
 367    /* consider priority of active handler */
 368    return MIN(running, s->exception_prio);
 369}
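/* Rough worked example of how the pieces above combine (assuming the NS
 * PRIGROUP setting leaves all of BASEPRI as group priority): with
 * AIRCR.PRIS set, BASEPRI_NS == 0x40 contributes a limit of
 * (0x40 >> 1) + 0x80 == 0xa0, PRIMASK_NS only lowers the limit to
 * NVIC_NS_PRIO_LIMIT (0x80) rather than to 0, and if a Secure handler
 * with group priority 0x20 is also active the final execution priority
 * is MIN(0x80, 0x20) == 0x20.
 */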
 370
 371bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
 372{
 373    /* Return true if the requested execution priority is negative
 374     * for the specified security state, ie that security state
 375     * has an active NMI or HardFault or has set its FAULTMASK.
 376     * Note that this is not the same as whether the execution
 377     * priority is actually negative (for instance AIRCR.PRIS may
 378     * mean we don't allow FAULTMASK_NS to actually make the execution
 379     * priority negative). Compare pseudocode IsReqExcPriNeg().
 380     */
 381    NVICState *s = opaque;
 382
 383    if (s->cpu->env.v7m.faultmask[secure]) {
 384        return true;
 385    }
 386
 387    if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
 388        s->vectors[ARMV7M_EXCP_HARD].active) {
 389        return true;
 390    }
 391
 392    if (s->vectors[ARMV7M_EXCP_NMI].active &&
 393        exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
 394        return true;
 395    }
 396
 397    return false;
 398}
 399
 400bool armv7m_nvic_can_take_pending_exception(void *opaque)
 401{
 402    NVICState *s = opaque;
 403
 404    return nvic_exec_prio(s) > nvic_pending_prio(s);
 405}
 406
 407int armv7m_nvic_raw_execution_priority(void *opaque)
 408{
 409    NVICState *s = opaque;
 410
 411    return s->exception_prio;
 412}
 413
 414/* caller must call nvic_irq_update() after this.
 415 * secure indicates the bank to use for banked exceptions (we assert if
 416 * we are passed secure=true for a non-banked exception).
 417 */
 418static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
 419{
 420    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
 421    assert(irq < s->num_irq);
 422
 423    if (secure) {
 424        assert(exc_is_banked(irq));
 425        s->sec_vectors[irq].prio = prio;
 426    } else {
 427        s->vectors[irq].prio = prio;
 428    }
 429
 430    trace_nvic_set_prio(irq, secure, prio);
 431}
 432
 433/* Return the current raw priority register value.
 434 * secure indicates the bank to use for banked exceptions (we assert if
 435 * we are passed secure=true for a non-banked exception).
 436 */
 437static int get_prio(NVICState *s, unsigned irq, bool secure)
 438{
 439    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
 440    assert(irq < s->num_irq);
 441
 442    if (secure) {
 443        assert(exc_is_banked(irq));
 444        return s->sec_vectors[irq].prio;
 445    } else {
 446        return s->vectors[irq].prio;
 447    }
 448}
 449
 450/* Recompute state and assert irq line accordingly.
 451 * Must be called after changes to:
 452 *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
 453 *  prigroup
 454 */
 455static void nvic_irq_update(NVICState *s)
 456{
 457    int lvl;
 458    int pend_prio;
 459
 460    nvic_recompute_state(s);
 461    pend_prio = nvic_pending_prio(s);
 462
 463    /* Raise NVIC output if this IRQ would be taken, except that we
 464     * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
 465     * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
 466     * to those CPU registers don't cause us to recalculate the NVIC
 467     * pending info.
 468     */
 469    lvl = (pend_prio < s->exception_prio);
 470    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
 471    qemu_set_irq(s->excpout, lvl);
 472}
 473
 474/**
 475 * armv7m_nvic_clear_pending: mark the specified exception as not pending
 476 * @opaque: the NVIC
 477 * @irq: the exception number to mark as not pending
 478 * @secure: false for non-banked exceptions or for the nonsecure
 479 * version of a banked exception, true for the secure version of a banked
 480 * exception.
 481 *
 482 * Marks the specified exception as not pending. Note that we will assert()
 483 * if @secure is true and @irq does not specify one of the fixed set
 484 * of architecturally banked exceptions.
 485 */
 486static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
 487{
 488    NVICState *s = (NVICState *)opaque;
 489    VecInfo *vec;
 490
 491    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
 492
 493    if (secure) {
 494        assert(exc_is_banked(irq));
 495        vec = &s->sec_vectors[irq];
 496    } else {
 497        vec = &s->vectors[irq];
 498    }
 499    trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
 500    if (vec->pending) {
 501        vec->pending = 0;
 502        nvic_irq_update(s);
 503    }
 504}
 505
 506void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
 507{
 508    NVICState *s = (NVICState *)opaque;
 509    bool banked = exc_is_banked(irq);
 510    VecInfo *vec;
 511
 512    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
 513    assert(!secure || banked);
 514
 515    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
 516
 517    trace_nvic_set_pending(irq, secure, vec->enabled, vec->prio);
 518
 519    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
 520        /* If a synchronous exception is pending then it may be
 521         * escalated to HardFault if:
 522         *  * it is equal or lower priority to current execution
 523         *  * it is disabled
 524         * (ie we need to take it immediately but we can't do so).
 525         * Asynchronous exceptions (and interrupts) simply remain pending.
 526         *
 527         * For QEMU, we don't have any imprecise (asynchronous) faults,
 528         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
 529         * synchronous.
 530         * Debug exceptions are awkward because only Debug exceptions
 531         * resulting from the BKPT instruction should be escalated,
 532         * but we don't currently implement any Debug exceptions other
 533         * than those that result from BKPT, so we treat all debug exceptions
 534         * as needing escalation.
 535         *
 536         * This all means we can identify whether to escalate based only on
 537         * the exception number and don't (yet) need the caller to explicitly
 538         * tell us whether this exception is synchronous or not.
 539         */
 540        int running = nvic_exec_prio(s);
 541        bool escalate = false;
 542
 543        if (exc_group_prio(s, vec->prio, secure) >= running) {
 544            trace_nvic_escalate_prio(irq, vec->prio, running);
 545            escalate = true;
 546        } else if (!vec->enabled) {
 547            trace_nvic_escalate_disabled(irq);
 548            escalate = true;
 549        }
 550
 551        if (escalate) {
 552
 553            /* We need to escalate this exception to a synchronous HardFault.
 554             * If BFHFNMINS is set then we escalate to the banked HF for
 555             * the target security state of the original exception; otherwise
 556             * we take a Secure HardFault.
 557             */
 558            irq = ARMV7M_EXCP_HARD;
 559            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
 560                (secure ||
 561                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
 562                vec = &s->sec_vectors[irq];
 563            } else {
 564                vec = &s->vectors[irq];
 565            }
 566            if (running <= vec->prio) {
 567                /* We want to escalate to HardFault but we can't take the
 568                 * synchronous HardFault at this point either. This is a
 569                 * Lockup condition due to a guest bug. We don't model
 570                 * Lockup, so report via cpu_abort() instead.
 571                 */
 572                cpu_abort(&s->cpu->parent_obj,
 573                          "Lockup: can't escalate %d to HardFault "
 574                          "(current priority %d)\n", irq, running);
 575            }
 576
 577            /* HF may be banked but there is only one shared HFSR */
 578            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
 579        }
 580    }
 581
 582    if (!vec->pending) {
 583        vec->pending = 1;
 584        nvic_irq_update(s);
 585    }
 586}
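/* Concrete example of the escalation above: an SVC executed while
 * SVCall's group priority is not good enough to preempt the current
 * execution priority cannot be taken immediately, so it is escalated to
 * HardFault (the Secure HardFault unless BFHFNMINS directs it to the NS
 * bank) and HFSR.FORCED is set; if even that HardFault could not preempt
 * we report the architectural Lockup state via cpu_abort(), since Lockup
 * itself is not modelled.
 */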
 587
 588/* Make pending IRQ active.  */
 589bool armv7m_nvic_acknowledge_irq(void *opaque)
 590{
 591    NVICState *s = (NVICState *)opaque;
 592    CPUARMState *env = &s->cpu->env;
 593    const int pending = s->vectpending;
 594    const int running = nvic_exec_prio(s);
 595    VecInfo *vec;
 596    bool targets_secure;
 597
 598    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
 599
 600    if (s->vectpending_is_s_banked) {
 601        vec = &s->sec_vectors[pending];
 602        targets_secure = true;
 603    } else {
 604        vec = &s->vectors[pending];
 605        targets_secure = !exc_is_banked(s->vectpending) &&
 606            exc_targets_secure(s, s->vectpending);
 607    }
 608
 609    assert(vec->enabled);
 610    assert(vec->pending);
 611
 612    assert(s->vectpending_prio < running);
 613
 614    trace_nvic_acknowledge_irq(pending, s->vectpending_prio, targets_secure);
 615
 616    vec->active = 1;
 617    vec->pending = 0;
 618
 619    write_v7m_exception(env, s->vectpending);
 620
 621    nvic_irq_update(s);
 622
 623    return targets_secure;
 624}
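/* Usage sketch (an assumption about the caller, which lives outside this
 * file): the v7M exception entry code in target/arm is expected to do
 * roughly
 *
 *     bool targets_secure = armv7m_nvic_acknowledge_irq(env->nvic);
 *     ... stack the exception frame, then fetch the vector for
 *         env->v7m.exception from the selected vector table ...
 *
 * i.e. it acknowledges first, so that vectpending and the active/pending
 * flags are already up to date before the vector table lookup.
 */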
 625
 626int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
 627{
 628    NVICState *s = (NVICState *)opaque;
 629    VecInfo *vec;
 630    int ret;
 631
 632    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
 633
 634    if (secure && exc_is_banked(irq)) {
 635        vec = &s->sec_vectors[irq];
 636    } else {
 637        vec = &s->vectors[irq];
 638    }
 639
 640    trace_nvic_complete_irq(irq, secure);
 641
 642    if (!vec->active) {
 643        /* Tell the caller this was an illegal exception return */
 644        return -1;
 645    }
 646
 647    ret = nvic_rettobase(s);
 648
 649    vec->active = 0;
 650    if (vec->level) {
 651        /* Re-pend the exception if it's still held high; only
  652         * happens for external IRQs
 653         */
 654        assert(irq >= NVIC_FIRST_IRQ);
 655        vec->pending = 1;
 656    }
 657
 658    nvic_irq_update(s);
 659
 660    return ret;
 661}
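/* The return value gives the caller doing the exception return three
 * cases: -1 means the vector was not active (an illegal exception
 * return), 1 means this was the only active exception (RETTOBASE
 * semantics, so returning to Thread mode is consistent), and 0 means at
 * least one other exception is still active.
 */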
 662
 663/* callback when external interrupt line is changed */
 664static void set_irq_level(void *opaque, int n, int level)
 665{
 666    NVICState *s = opaque;
 667    VecInfo *vec;
 668
 669    n += NVIC_FIRST_IRQ;
 670
 671    assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
 672
 673    trace_nvic_set_irq_level(n, level);
 674
 675    /* The pending status of an external interrupt is
 676     * latched on rising edge and exception handler return.
 677     *
  678     * Pulsing the IRQ will always run the handler
  679     * once; if the line is still high when the handler
  680     * completes, the exception is re-pended and the handler runs again.
 681     */
 682    vec = &s->vectors[n];
 683    if (level != vec->level) {
 684        vec->level = level;
 685        if (level) {
 686            armv7m_nvic_set_pending(s, n, false);
 687        }
 688    }
 689}
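/* For example, a device that merely pulses its line (high then low again
 * before the handler finishes) still gets exactly one handler invocation,
 * because dropping the level does not clear the pending latch set here;
 * a device that holds its line high is re-pended from
 * armv7m_nvic_complete_irq() each time the handler completes, until the
 * line goes low.
 */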
 690
 691static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
 692{
 693    ARMCPU *cpu = s->cpu;
 694    uint32_t val;
 695
 696    switch (offset) {
 697    case 4: /* Interrupt Control Type.  */
 698        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
 699    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
 700    {
 701        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
 702        int i;
 703
 704        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 705            goto bad_offset;
 706        }
 707        if (!attrs.secure) {
 708            return 0;
 709        }
 710        val = 0;
 711        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
 712            if (s->itns[startvec + i]) {
 713                val |= (1 << i);
 714            }
 715        }
 716        return val;
 717    }
 718    case 0xd00: /* CPUID Base.  */
 719        return cpu->midr;
 720    case 0xd04: /* Interrupt Control State (ICSR) */
 721        /* VECTACTIVE */
 722        val = cpu->env.v7m.exception;
 723        /* VECTPENDING */
 724        val |= (s->vectpending & 0xff) << 12;
 725        /* ISRPENDING - set if any external IRQ is pending */
 726        if (nvic_isrpending(s)) {
 727            val |= (1 << 22);
 728        }
 729        /* RETTOBASE - set if only one handler is active */
 730        if (nvic_rettobase(s)) {
 731            val |= (1 << 11);
 732        }
 733        if (attrs.secure) {
 734            /* PENDSTSET */
 735            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
 736                val |= (1 << 26);
 737            }
 738            /* PENDSVSET */
 739            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
 740                val |= (1 << 28);
 741            }
 742        } else {
 743            /* PENDSTSET */
 744            if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
 745                val |= (1 << 26);
 746            }
 747            /* PENDSVSET */
 748            if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
 749                val |= (1 << 28);
 750            }
 751        }
 752        /* NMIPENDSET */
 753        if ((cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
 754            s->vectors[ARMV7M_EXCP_NMI].pending) {
 755            val |= (1 << 31);
 756        }
 757        /* ISRPREEMPT: RES0 when halting debug not implemented */
 758        /* STTNS: RES0 for the Main Extension */
 759        return val;
 760    case 0xd08: /* Vector Table Offset.  */
 761        return cpu->env.v7m.vecbase[attrs.secure];
 762    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
 763        val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
 764        if (attrs.secure) {
 765            /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
 766            val |= cpu->env.v7m.aircr;
 767        } else {
 768            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 769                /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
 770                 * security isn't supported then BFHFNMINS is RAO (and
 771                 * the bit in env.v7m.aircr is always set).
 772                 */
 773                val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
 774            }
 775        }
 776        return val;
 777    case 0xd10: /* System Control.  */
 778        /* TODO: Implement SLEEPONEXIT.  */
 779        return 0;
 780    case 0xd14: /* Configuration Control.  */
 781        /* The BFHFNMIGN bit is the only non-banked bit; we
 782         * keep it in the non-secure copy of the register.
 783         */
 784        val = cpu->env.v7m.ccr[attrs.secure];
 785        val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
 786        return val;
 787    case 0xd24: /* System Handler Control and State (SHCSR) */
 788        val = 0;
 789        if (attrs.secure) {
 790            if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
 791                val |= (1 << 0);
 792            }
 793            if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
 794                val |= (1 << 2);
 795            }
 796            if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
 797                val |= (1 << 3);
 798            }
 799            if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
 800                val |= (1 << 7);
 801            }
 802            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
 803                val |= (1 << 10);
 804            }
 805            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
 806                val |= (1 << 11);
 807            }
 808            if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
 809                val |= (1 << 12);
 810            }
 811            if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
 812                val |= (1 << 13);
 813            }
 814            if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
 815                val |= (1 << 15);
 816            }
 817            if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
 818                val |= (1 << 16);
 819            }
 820            if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
 821                val |= (1 << 18);
 822            }
 823            if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
 824                val |= (1 << 21);
 825            }
 826            /* SecureFault is not banked but is always RAZ/WI to NS */
 827            if (s->vectors[ARMV7M_EXCP_SECURE].active) {
 828                val |= (1 << 4);
 829            }
 830            if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
 831                val |= (1 << 19);
 832            }
 833            if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
 834                val |= (1 << 20);
 835            }
 836        } else {
 837            if (s->vectors[ARMV7M_EXCP_MEM].active) {
 838                val |= (1 << 0);
 839            }
 840            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 841                /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
 842                if (s->vectors[ARMV7M_EXCP_HARD].active) {
 843                    val |= (1 << 2);
 844                }
 845                if (s->vectors[ARMV7M_EXCP_HARD].pending) {
 846                    val |= (1 << 21);
 847                }
 848            }
 849            if (s->vectors[ARMV7M_EXCP_USAGE].active) {
 850                val |= (1 << 3);
 851            }
 852            if (s->vectors[ARMV7M_EXCP_SVC].active) {
 853                val |= (1 << 7);
 854            }
 855            if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
 856                val |= (1 << 10);
 857            }
 858            if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
 859                val |= (1 << 11);
 860            }
 861            if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
 862                val |= (1 << 12);
 863            }
 864            if (s->vectors[ARMV7M_EXCP_MEM].pending) {
 865                val |= (1 << 13);
 866            }
 867            if (s->vectors[ARMV7M_EXCP_SVC].pending) {
 868                val |= (1 << 15);
 869            }
 870            if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
 871                val |= (1 << 16);
 872            }
 873            if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
 874                val |= (1 << 18);
 875            }
 876        }
 877        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
 878            if (s->vectors[ARMV7M_EXCP_BUS].active) {
 879                val |= (1 << 1);
 880            }
 881            if (s->vectors[ARMV7M_EXCP_BUS].pending) {
 882                val |= (1 << 14);
 883            }
 884            if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
 885                val |= (1 << 17);
 886            }
 887            if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
 888                s->vectors[ARMV7M_EXCP_NMI].active) {
 889                /* NMIACT is not present in v7M */
 890                val |= (1 << 5);
 891            }
 892        }
 893
 894        /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
 895        if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
 896            val |= (1 << 8);
 897        }
 898        return val;
 899    case 0xd28: /* Configurable Fault Status.  */
 900        /* The BFSR bits [15:8] are shared between security states
 901         * and we store them in the NS copy
 902         */
 903        val = cpu->env.v7m.cfsr[attrs.secure];
 904        val |= cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
 905        return val;
 906    case 0xd2c: /* Hard Fault Status.  */
 907        return cpu->env.v7m.hfsr;
 908    case 0xd30: /* Debug Fault Status.  */
 909        return cpu->env.v7m.dfsr;
 910    case 0xd34: /* MMFAR MemManage Fault Address */
 911        return cpu->env.v7m.mmfar[attrs.secure];
 912    case 0xd38: /* Bus Fault Address.  */
 913        return cpu->env.v7m.bfar;
 914    case 0xd3c: /* Aux Fault Status.  */
 915        /* TODO: Implement fault status registers.  */
 916        qemu_log_mask(LOG_UNIMP,
 917                      "Aux Fault status registers unimplemented\n");
 918        return 0;
 919    case 0xd40: /* PFR0.  */
 920        return 0x00000030;
  921    case 0xd44: /* PFR1.  */
 922        return 0x00000200;
 923    case 0xd48: /* DFR0.  */
 924        return 0x00100000;
 925    case 0xd4c: /* AFR0.  */
 926        return 0x00000000;
 927    case 0xd50: /* MMFR0.  */
 928        return 0x00000030;
 929    case 0xd54: /* MMFR1.  */
 930        return 0x00000000;
 931    case 0xd58: /* MMFR2.  */
 932        return 0x00000000;
 933    case 0xd5c: /* MMFR3.  */
 934        return 0x00000000;
 935    case 0xd60: /* ISAR0.  */
 936        return 0x01141110;
 937    case 0xd64: /* ISAR1.  */
 938        return 0x02111000;
 939    case 0xd68: /* ISAR2.  */
 940        return 0x21112231;
 941    case 0xd6c: /* ISAR3.  */
 942        return 0x01111110;
 943    case 0xd70: /* ISAR4.  */
 944        return 0x01310102;
 945    /* TODO: Implement debug registers.  */
 946    case 0xd90: /* MPU_TYPE */
 947        /* Unified MPU; if the MPU is not present this value is zero */
 948        return cpu->pmsav7_dregion << 8;
 949        break;
 950    case 0xd94: /* MPU_CTRL */
 951        return cpu->env.v7m.mpu_ctrl[attrs.secure];
 952    case 0xd98: /* MPU_RNR */
 953        return cpu->env.pmsav7.rnr[attrs.secure];
 954    case 0xd9c: /* MPU_RBAR */
 955    case 0xda4: /* MPU_RBAR_A1 */
 956    case 0xdac: /* MPU_RBAR_A2 */
 957    case 0xdb4: /* MPU_RBAR_A3 */
 958    {
 959        int region = cpu->env.pmsav7.rnr[attrs.secure];
 960
 961        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 962            /* PMSAv8M handling of the aliases is different from v7M:
 963             * aliases A1, A2, A3 override the low two bits of the region
 964             * number in MPU_RNR, and there is no 'region' field in the
 965             * RBAR register.
 966             */
 967            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
 968            if (aliasno) {
 969                region = deposit32(region, 0, 2, aliasno);
 970            }
 971            if (region >= cpu->pmsav7_dregion) {
 972                return 0;
 973            }
 974            return cpu->env.pmsav8.rbar[attrs.secure][region];
 975        }
 976
 977        if (region >= cpu->pmsav7_dregion) {
 978            return 0;
 979        }
 980        return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
 981    }
 982    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
 983    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
 984    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
 985    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
 986    {
 987        int region = cpu->env.pmsav7.rnr[attrs.secure];
 988
 989        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
 990            /* PMSAv8M handling of the aliases is different from v7M:
 991             * aliases A1, A2, A3 override the low two bits of the region
 992             * number in MPU_RNR.
 993             */
 994            int aliasno = (offset - 0xda0) / 8; /* 0..3 */
 995            if (aliasno) {
 996                region = deposit32(region, 0, 2, aliasno);
 997            }
 998            if (region >= cpu->pmsav7_dregion) {
 999                return 0;
1000            }
1001            return cpu->env.pmsav8.rlar[attrs.secure][region];
1002        }
1003
1004        if (region >= cpu->pmsav7_dregion) {
1005            return 0;
1006        }
1007        return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
1008            (cpu->env.pmsav7.drsr[region] & 0xffff);
1009    }
1010    case 0xdc0: /* MPU_MAIR0 */
1011        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1012            goto bad_offset;
1013        }
1014        return cpu->env.pmsav8.mair0[attrs.secure];
1015    case 0xdc4: /* MPU_MAIR1 */
1016        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1017            goto bad_offset;
1018        }
1019        return cpu->env.pmsav8.mair1[attrs.secure];
1020    case 0xdd0: /* SAU_CTRL */
1021        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1022            goto bad_offset;
1023        }
1024        if (!attrs.secure) {
1025            return 0;
1026        }
1027        return cpu->env.sau.ctrl;
1028    case 0xdd4: /* SAU_TYPE */
1029        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1030            goto bad_offset;
1031        }
1032        if (!attrs.secure) {
1033            return 0;
1034        }
1035        return cpu->sau_sregion;
1036    case 0xdd8: /* SAU_RNR */
1037        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1038            goto bad_offset;
1039        }
1040        if (!attrs.secure) {
1041            return 0;
1042        }
1043        return cpu->env.sau.rnr;
1044    case 0xddc: /* SAU_RBAR */
1045    {
1046        int region = cpu->env.sau.rnr;
1047
1048        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1049            goto bad_offset;
1050        }
1051        if (!attrs.secure) {
1052            return 0;
1053        }
1054        if (region >= cpu->sau_sregion) {
1055            return 0;
1056        }
1057        return cpu->env.sau.rbar[region];
1058    }
1059    case 0xde0: /* SAU_RLAR */
1060    {
1061        int region = cpu->env.sau.rnr;
1062
1063        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1064            goto bad_offset;
1065        }
1066        if (!attrs.secure) {
1067            return 0;
1068        }
1069        if (region >= cpu->sau_sregion) {
1070            return 0;
1071        }
1072        return cpu->env.sau.rlar[region];
1073    }
1074    case 0xde4: /* SFSR */
1075        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1076            goto bad_offset;
1077        }
1078        if (!attrs.secure) {
1079            return 0;
1080        }
1081        return cpu->env.v7m.sfsr;
1082    case 0xde8: /* SFAR */
1083        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1084            goto bad_offset;
1085        }
1086        if (!attrs.secure) {
1087            return 0;
1088        }
1089        return cpu->env.v7m.sfar;
1090    default:
1091    bad_offset:
1092        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
1093        return 0;
1094    }
1095}
1096
1097static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
1098                        MemTxAttrs attrs)
1099{
1100    ARMCPU *cpu = s->cpu;
1101
1102    switch (offset) {
1103    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
1104    {
1105        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1106        int i;
1107
1108        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1109            goto bad_offset;
1110        }
1111        if (!attrs.secure) {
1112            break;
1113        }
1114        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1115            s->itns[startvec + i] = (value >> i) & 1;
1116        }
1117        nvic_irq_update(s);
1118        break;
1119    }
1120    case 0xd04: /* Interrupt Control State (ICSR) */
1121        if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1122            if (value & (1 << 31)) {
1123                armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
1124            } else if (value & (1 << 30) &&
1125                       arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1126                /* PENDNMICLR didn't exist in v7M */
1127                armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
1128            }
1129        }
1130        if (value & (1 << 28)) {
1131            armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1132        } else if (value & (1 << 27)) {
1133            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1134        }
1135        if (value & (1 << 26)) {
1136            armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1137        } else if (value & (1 << 25)) {
1138            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1139        }
1140        break;
1141    case 0xd08: /* Vector Table Offset.  */
1142        cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
1143        break;
1144    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1145        if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
1146            if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
1147                if (attrs.secure ||
1148                    !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
1149                    qemu_irq_pulse(s->sysresetreq);
1150                }
1151            }
1152            if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
1153                qemu_log_mask(LOG_GUEST_ERROR,
1154                              "Setting VECTCLRACTIVE when not in DEBUG mode "
1155                              "is UNPREDICTABLE\n");
1156            }
1157            if (value & R_V7M_AIRCR_VECTRESET_MASK) {
1158                /* NB: this bit is RES0 in v8M */
1159                qemu_log_mask(LOG_GUEST_ERROR,
1160                              "Setting VECTRESET when not in DEBUG mode "
1161                              "is UNPREDICTABLE\n");
1162            }
1163            s->prigroup[attrs.secure] = extract32(value,
1164                                                  R_V7M_AIRCR_PRIGROUP_SHIFT,
1165                                                  R_V7M_AIRCR_PRIGROUP_LENGTH);
1166            if (attrs.secure) {
1167                /* These bits are only writable by secure */
1168                cpu->env.v7m.aircr = value &
1169                    (R_V7M_AIRCR_SYSRESETREQS_MASK |
1170                     R_V7M_AIRCR_BFHFNMINS_MASK |
1171                     R_V7M_AIRCR_PRIS_MASK);
1172                /* BFHFNMINS changes the priority of Secure HardFault, and
1173                 * allows a pending Non-secure HardFault to preempt (which
1174                 * we implement by marking it enabled).
1175                 */
1176                if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1177                    s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
1178                    s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
1179                } else {
1180                    s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
1181                    s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1182                }
1183            }
1184            nvic_irq_update(s);
1185        }
1186        break;
1187    case 0xd10: /* System Control.  */
1188        /* TODO: Implement control registers.  */
1189        qemu_log_mask(LOG_UNIMP, "NVIC: SCR unimplemented\n");
1190        break;
1191    case 0xd14: /* Configuration Control.  */
1192        /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
1193        value &= (R_V7M_CCR_STKALIGN_MASK |
1194                  R_V7M_CCR_BFHFNMIGN_MASK |
1195                  R_V7M_CCR_DIV_0_TRP_MASK |
1196                  R_V7M_CCR_UNALIGN_TRP_MASK |
1197                  R_V7M_CCR_USERSETMPEND_MASK |
1198                  R_V7M_CCR_NONBASETHRDENA_MASK);
1199
1200        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1201            /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
1202            value |= R_V7M_CCR_NONBASETHRDENA_MASK
1203                | R_V7M_CCR_STKALIGN_MASK;
1204        }
1205        if (attrs.secure) {
1206            /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
1207            cpu->env.v7m.ccr[M_REG_NS] =
1208                (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
1209                | (value & R_V7M_CCR_BFHFNMIGN_MASK);
1210            value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1211        }
1212
1213        cpu->env.v7m.ccr[attrs.secure] = value;
1214        break;
1215    case 0xd24: /* System Handler Control and State (SHCSR) */
1216        if (attrs.secure) {
1217            s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1218            /* Secure HardFault active bit cannot be written */
1219            s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1220            s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1221            s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
1222                (value & (1 << 10)) != 0;
1223            s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
1224                (value & (1 << 11)) != 0;
1225            s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
1226                (value & (1 << 12)) != 0;
1227            s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1228            s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1229            s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1230            s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1231            s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
1232                (value & (1 << 18)) != 0;
1233            s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1234            /* SecureFault not banked, but RAZ/WI to NS */
1235            s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
1236            s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
1237            s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
1238        } else {
1239            s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1240            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1241                /* HARDFAULTPENDED is not present in v7M */
1242                s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1243            }
1244            s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1245            s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1246            s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
1247            s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
1248            s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
1249            s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1250            s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1251            s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1252            s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
1253        }
1254        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1255            s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
1256            s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
1257            s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1258        }
1259        /* NMIACT can only be written if the write is of a zero, with
1260         * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
1261         */
1262        if (!attrs.secure && cpu->env.v7m.secure &&
1263            (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1264            (value & (1 << 5)) == 0) {
1265            s->vectors[ARMV7M_EXCP_NMI].active = 0;
1266        }
1267        /* HARDFAULTACT can only be written if the write is of a zero
1268         * to the non-secure HardFault state by the CPU in secure state.
1269         * The only case where we can be targeting the non-secure HF state
1270         * when in secure state is if this is a write via the NS alias
1271         * and BFHFNMINS is 1.
1272         */
1273        if (!attrs.secure && cpu->env.v7m.secure &&
1274            (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1275            (value & (1 << 2)) == 0) {
1276            s->vectors[ARMV7M_EXCP_HARD].active = 0;
1277        }
1278
1279        /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1280        s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
1281        nvic_irq_update(s);
1282        break;
1283    case 0xd28: /* Configurable Fault Status.  */
1284        cpu->env.v7m.cfsr[attrs.secure] &= ~value; /* W1C */
1285        if (attrs.secure) {
1286            /* The BFSR bits [15:8] are shared between security states
1287             * and we store them in the NS copy.
1288             */
1289            cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
1290        }
1291        break;
1292    case 0xd2c: /* Hard Fault Status.  */
1293        cpu->env.v7m.hfsr &= ~value; /* W1C */
1294        break;
1295    case 0xd30: /* Debug Fault Status.  */
1296        cpu->env.v7m.dfsr &= ~value; /* W1C */
1297        break;
1298    case 0xd34: /* Mem Manage Address.  */
1299        cpu->env.v7m.mmfar[attrs.secure] = value;
1300        return;
1301    case 0xd38: /* Bus Fault Address.  */
1302        cpu->env.v7m.bfar = value;
1303        return;
1304    case 0xd3c: /* Aux Fault Status.  */
1305        qemu_log_mask(LOG_UNIMP,
1306                      "NVIC: Aux fault status registers unimplemented\n");
1307        break;
1308    case 0xd90: /* MPU_TYPE */
1309        return; /* RO */
1310    case 0xd94: /* MPU_CTRL */
1311        if ((value &
1312             (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
1313            == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
1314            qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
1315                          "UNPREDICTABLE\n");
1316        }
1317        cpu->env.v7m.mpu_ctrl[attrs.secure]
1318            = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
1319                       R_V7M_MPU_CTRL_HFNMIENA_MASK |
1320                       R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
1321        tlb_flush(CPU(cpu));
1322        break;
1323    case 0xd98: /* MPU_RNR */
1324        if (value >= cpu->pmsav7_dregion) {
1325            qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
1326                          PRIu32 "/%" PRIu32 "\n",
1327                          value, cpu->pmsav7_dregion);
1328        } else {
1329            cpu->env.pmsav7.rnr[attrs.secure] = value;
1330        }
1331        break;
1332    case 0xd9c: /* MPU_RBAR */
1333    case 0xda4: /* MPU_RBAR_A1 */
1334    case 0xdac: /* MPU_RBAR_A2 */
1335    case 0xdb4: /* MPU_RBAR_A3 */
1336    {
1337        int region;
1338
1339        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1340            /* PMSAv8M handling of the aliases is different from v7M:
1341             * aliases A1, A2, A3 override the low two bits of the region
1342             * number in MPU_RNR, and there is no 'region' field in the
1343             * RBAR register.
1344             */
1345            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1346
1347            region = cpu->env.pmsav7.rnr[attrs.secure];
1348            if (aliasno) {
1349                region = deposit32(region, 0, 2, aliasno);
1350            }
1351            if (region >= cpu->pmsav7_dregion) {
1352                return;
1353            }
1354            cpu->env.pmsav8.rbar[attrs.secure][region] = value;
1355            tlb_flush(CPU(cpu));
1356            return;
1357        }
1358
1359        if (value & (1 << 4)) {
1360            /* VALID bit means use the region number specified in this
1361             * value and also update MPU_RNR.REGION with that value.
1362             */
1363            region = extract32(value, 0, 4);
1364            if (region >= cpu->pmsav7_dregion) {
1365                qemu_log_mask(LOG_GUEST_ERROR,
1366                              "MPU region out of range %u/%" PRIu32 "\n",
1367                              region, cpu->pmsav7_dregion);
1368                return;
1369            }
1370            cpu->env.pmsav7.rnr[attrs.secure] = region;
1371        } else {
1372            region = cpu->env.pmsav7.rnr[attrs.secure];
1373        }
1374
1375        if (region >= cpu->pmsav7_dregion) {
1376            return;
1377        }
1378
1379        cpu->env.pmsav7.drbar[region] = value & ~0x1f;
1380        tlb_flush(CPU(cpu));
1381        break;
1382    }
1383    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1384    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1385    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1386    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1387    {
1388        int region = cpu->env.pmsav7.rnr[attrs.secure];
1389
1390        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1391            /* PMSAv8M handling of the aliases is different from v7M:
1392             * aliases A1, A2, A3 override the low two bits of the region
1393             * number in MPU_RNR.
1394             */
 1395            int aliasno = (offset - 0xda0) / 8; /* 0..3 */
1396
1397            region = cpu->env.pmsav7.rnr[attrs.secure];
1398            if (aliasno) {
1399                region = deposit32(region, 0, 2, aliasno);
1400            }
1401            if (region >= cpu->pmsav7_dregion) {
1402                return;
1403            }
1404            cpu->env.pmsav8.rlar[attrs.secure][region] = value;
1405            tlb_flush(CPU(cpu));
1406            return;
1407        }
1408
1409        if (region >= cpu->pmsav7_dregion) {
1410            return;
1411        }
1412
1413        cpu->env.pmsav7.drsr[region] = value & 0xff3f;
1414        cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
1415        tlb_flush(CPU(cpu));
1416        break;
1417    }
1418    case 0xdc0: /* MPU_MAIR0 */
1419        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1420            goto bad_offset;
1421        }
1422        if (cpu->pmsav7_dregion) {
1423            /* Register is RES0 if no MPU regions are implemented */
1424            cpu->env.pmsav8.mair0[attrs.secure] = value;
1425        }
1426        /* We don't need to do anything else because memory attributes
1427         * only affect cacheability, and we don't implement caching.
1428         */
1429        break;
1430    case 0xdc4: /* MPU_MAIR1 */
1431        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1432            goto bad_offset;
1433        }
1434        if (cpu->pmsav7_dregion) {
1435            /* Register is RES0 if no MPU regions are implemented */
1436            cpu->env.pmsav8.mair1[attrs.secure] = value;
1437        }
1438        /* We don't need to do anything else because memory attributes
1439         * only affect cacheability, and we don't implement caching.
1440         */
1441        break;
1442    case 0xdd0: /* SAU_CTRL */
1443        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1444            goto bad_offset;
1445        }
1446        if (!attrs.secure) {
1447            return;
1448        }
1449        cpu->env.sau.ctrl = value & 3;
1450        break;
1451    case 0xdd4: /* SAU_TYPE */
1452        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1453            goto bad_offset;
1454        }
1455        break;
1456    case 0xdd8: /* SAU_RNR */
1457        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1458            goto bad_offset;
1459        }
1460        if (!attrs.secure) {
1461            return;
1462        }
1463        if (value >= cpu->sau_sregion) {
1464            qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
1465                          PRIu32 "/%" PRIu32 "\n",
1466                          value, cpu->sau_sregion);
1467        } else {
1468            cpu->env.sau.rnr = value;
1469        }
1470        break;
1471    case 0xddc: /* SAU_RBAR */
1472    {
1473        int region = cpu->env.sau.rnr;
1474
1475        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1476            goto bad_offset;
1477        }
1478        if (!attrs.secure) {
1479            return;
1480        }
1481        if (region >= cpu->sau_sregion) {
1482            return;
1483        }
1484        cpu->env.sau.rbar[region] = value & ~0x1f;
1485        tlb_flush(CPU(cpu));
1486        break;
1487    }
1488    case 0xde0: /* SAU_RLAR */
1489    {
1490        int region = cpu->env.sau.rnr;
1491
1492        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1493            goto bad_offset;
1494        }
1495        if (!attrs.secure) {
1496            return;
1497        }
1498        if (region >= cpu->sau_sregion) {
1499            return;
1500        }
1501        cpu->env.sau.rlar[region] = value & ~0x1c;
1502        tlb_flush(CPU(cpu));
1503        break;
1504    }
1505    case 0xde4: /* SFSR */
1506        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1507            goto bad_offset;
1508        }
1509        if (!attrs.secure) {
1510            return;
1511        }
1512        cpu->env.v7m.sfsr &= ~value; /* W1C */
1513        break;
1514    case 0xde8: /* SFAR */
1515        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1516            goto bad_offset;
1517        }
1518        if (!attrs.secure) {
1519            return;
1520        }
1521        cpu->env.v7m.sfar = value;
1522        break;
1523    case 0xf00: /* Software Triggered Interrupt Register */
1524    {
1525        int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
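            /* For example, a write of 5 pends external IRQ line 5, i.e.
             * exception number 5 + NVIC_FIRST_IRQ == 21.
             */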
1526        if (excnum < s->num_irq) {
1527            armv7m_nvic_set_pending(s, excnum, false);
1528        }
1529        break;
1530    }
1531    default:
1532    bad_offset:
1533        qemu_log_mask(LOG_GUEST_ERROR,
1534                      "NVIC: Bad write offset 0x%x\n", offset);
1535    }
1536}
1537
1538static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
1539{
1540    /* Return true if unprivileged access to this register is permitted. */
1541    switch (offset) {
1542    case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
1543        /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
1544         * controls access even though the CPU is in Secure state (I_QDKX).
1545         */
1546        return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
1547    default:
1548        /* All other user accesses cause a BusFault unconditionally */
1549        return false;
1550    }
1551}
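    /* Guest-side illustration (a sketch, not part of this device model):
     * unprivileged code may only use STIR after privileged code has set
     * CCR.USERSETMPEND. Assuming the architectural SCS addresses:
     *
     *   volatile uint32_t *ccr  = (volatile uint32_t *)0xe000ed14;
     *   volatile uint32_t *stir = (volatile uint32_t *)0xe000ef00;
     *   *ccr |= (1 << 1);   // privileged: set CCR.USERSETMPEND
     *   *stir = 5;          // later, unprivileged: pend external IRQ 5
     *
     * Without USERSETMPEND set, nvic_user_access_ok() returns false and the
     * unprivileged access is reported back as a BusFault (MEMTX_ERROR).
     */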
1552
1553static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
1554{
1555    /* Behaviour for the SHPR register field for this exception:
1556     * return M_REG_NS to use the nonsecure vector (including for
1557     * non-banked exceptions), M_REG_S for the secure version of
1558     * a banked exception, and -1 if this field should RAZ/WI.
1559     */
1560    switch (exc) {
1561    case ARMV7M_EXCP_MEM:
1562    case ARMV7M_EXCP_USAGE:
1563    case ARMV7M_EXCP_SVC:
1564    case ARMV7M_EXCP_PENDSV:
1565    case ARMV7M_EXCP_SYSTICK:
1566        /* Banked exceptions */
1567        return attrs.secure;
1568    case ARMV7M_EXCP_BUS:
1569        /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
1570        if (!attrs.secure &&
1571            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1572            return -1;
1573        }
1574        return M_REG_NS;
1575    case ARMV7M_EXCP_SECURE:
1576        /* Not banked, RAZ/WI from nonsecure */
1577        if (!attrs.secure) {
1578            return -1;
1579        }
1580        return M_REG_NS;
1581    case ARMV7M_EXCP_DEBUG:
1582        /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
1583        return M_REG_NS;
1584    case 8 ... 10:
1585    case 13:
1586        /* RES0 */
1587        return -1;
1588    default:
1589        /* Not reachable due to decode of SHPR register addresses */
1590        g_assert_not_reached();
1591    }
1592}
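    /* For example: with the Security Extension, a non-secure access to the
     * BusFault priority field while AIRCR.BFHFNMINS is 0 gets -1 here, so
     * that field reads as zero and ignores writes; a non-secure access to
     * the banked SysTick field gets M_REG_NS and uses the NS priority.
     */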
1593
1594static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
1595                                    uint64_t *data, unsigned size,
1596                                    MemTxAttrs attrs)
1597{
1598    NVICState *s = (NVICState *)opaque;
1599    uint32_t offset = addr;
1600    unsigned i, startvec, end;
1601    uint32_t val;
1602
1603    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
1604        /* Generate BusFault for unprivileged accesses */
1605        return MEMTX_ERROR;
1606    }
1607
1608    switch (offset) {
1609    /* reads of set and clear both return the status */
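        /* The enable/pend/active registers below are bitmapped: each 32-bit
         * word covers 32 interrupts, so a byte offset into the bank
         * corresponds to 8 interrupt numbers, hence the 8 * (offset - base)
         * conversions. The priority registers at 0x400..0x5ef hold one byte
         * per interrupt, so there the byte offset maps directly to a vector.
         */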
1610    case 0x100 ... 0x13f: /* NVIC Set enable */
1611        offset += 0x80;
1612        /* fall through */
1613    case 0x180 ... 0x1bf: /* NVIC Clear enable */
1614        val = 0;
1615        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */
1616
1617        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1618            if (s->vectors[startvec + i].enabled &&
1619                (attrs.secure || s->itns[startvec + i])) {
1620                val |= (1 << i);
1621            }
1622        }
1623        break;
1624    case 0x200 ... 0x23f: /* NVIC Set pend */
1625        offset += 0x80;
1626        /* fall through */
1627    case 0x280 ... 0x2bf: /* NVIC Clear pend */
1628        val = 0;
1629        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
1630        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1631            if (s->vectors[startvec + i].pending &&
1632                (attrs.secure || s->itns[startvec + i])) {
1633                val |= (1 << i);
1634            }
1635        }
1636        break;
1637    case 0x300 ... 0x33f: /* NVIC Active */
1638        val = 0;
1639        startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
1640
1641        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1642            if (s->vectors[startvec + i].active &&
1643                (attrs.secure || s->itns[startvec + i])) {
1644                val |= (1 << i);
1645            }
1646        }
1647        break;
1648    case 0x400 ... 0x5ef: /* NVIC Priority */
1649        val = 0;
1650        startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
1651
1652        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
1653            if (attrs.secure || s->itns[startvec + i]) {
1654                val |= s->vectors[startvec + i].prio << (8 * i);
1655            }
1656        }
1657        break;
1658    case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
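            /* The byte offset maps directly onto the exception number:
             * SHPR1 (0xd18) covers exceptions 4..7 (MemManage, BusFault,
             * UsageFault, SecureFault), SHPR2 (0xd1c) covers 8..11 (SVCall
             * at 11) and SHPR3 (0xd20) covers 12..15 (DebugMonitor, PendSV,
             * SysTick), hence hdlidx == (offset - 0xd14) + i. The write
             * path below uses the same mapping.
             */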
1659        val = 0;
1660        for (i = 0; i < size; i++) {
1661            unsigned hdlidx = (offset - 0xd14) + i;
1662            int sbank = shpr_bank(s, hdlidx, attrs);
1663
1664            if (sbank < 0) {
1665                continue;
1666            }
1667            val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
1668        }
1669        break;
1670    case 0xfe0 ... 0xfff: /* ID.  */
1671        if (offset & 3) {
1672            val = 0;
1673        } else {
1674            val = nvic_id[(offset - 0xfe0) >> 2];
1675        }
1676        break;
1677    default:
1678        if (size == 4) {
1679            val = nvic_readl(s, offset, attrs);
1680        } else {
1681            qemu_log_mask(LOG_GUEST_ERROR,
1682                          "NVIC: Bad read of size %d at offset 0x%x\n",
1683                          size, offset);
1684            val = 0;
1685        }
1686    }
1687
1688    trace_nvic_sysreg_read(addr, val, size);
1689    *data = val;
1690    return MEMTX_OK;
1691}
1692
1693static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
1694                                     uint64_t value, unsigned size,
1695                                     MemTxAttrs attrs)
1696{
1697    NVICState *s = (NVICState *)opaque;
1698    uint32_t offset = addr;
1699    unsigned i, startvec, end;
1700    unsigned setval = 0;
1701
1702    trace_nvic_sysreg_write(addr, value, size);
1703
1704    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
1705        /* Generate BusFault for unprivileged accesses */
1706        return MEMTX_ERROR;
1707    }
1708
1709    switch (offset) {
1710    case 0x100 ... 0x13f: /* NVIC Set enable */
1711        offset += 0x80;
1712        setval = 1;
1713        /* fall through */
1714    case 0x180 ... 0x1bf: /* NVIC Clear enable */
1715        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
1716
1717        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1718            if (value & (1 << i) &&
1719                (attrs.secure || s->itns[startvec + i])) {
1720                s->vectors[startvec + i].enabled = setval;
1721            }
1722        }
1723        nvic_irq_update(s);
1724        return MEMTX_OK;
1725    case 0x200 ... 0x23f: /* NVIC Set pend */
1726        /* the special logic in armv7m_nvic_set_pending()
1727         * is not needed since IRQs are never escalated
1728         */
1729        offset += 0x80;
1730        setval = 1;
1731        /* fall through */
1732    case 0x280 ... 0x2bf: /* NVIC Clear pend */
1733        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
1734
1735        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
1736            if (value & (1 << i) &&
1737                (attrs.secure || s->itns[startvec + i])) {
1738                s->vectors[startvec + i].pending = setval;
1739            }
1740        }
1741        nvic_irq_update(s);
1742        return MEMTX_OK;
1743    case 0x300 ... 0x33f: /* NVIC Active */
1744        return MEMTX_OK; /* R/O */
1745    case 0x400 ... 0x5ef: /* NVIC Priority */
1746        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
1747
1748        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
1749            if (attrs.secure || s->itns[startvec + i]) {
1750                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
1751            }
1752        }
1753        nvic_irq_update(s);
1754        return MEMTX_OK;
1755    case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
1756        for (i = 0; i < size; i++) {
1757            unsigned hdlidx = (offset - 0xd14) + i;
1758            int newprio = extract32(value, i * 8, 8);
1759            int sbank = shpr_bank(s, hdlidx, attrs);
1760
1761            if (sbank < 0) {
1762                continue;
1763            }
1764            set_prio(s, hdlidx, sbank, newprio);
1765        }
1766        nvic_irq_update(s);
1767        return MEMTX_OK;
1768    }
1769    if (size == 4) {
1770        nvic_writel(s, offset, value, attrs);
1771        return MEMTX_OK;
1772    }
1773    qemu_log_mask(LOG_GUEST_ERROR,
1774                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
1775    /* This is UNPREDICTABLE; treat as RAZ/WI */
1776    return MEMTX_OK;
1777}
1778
1779static const MemoryRegionOps nvic_sysreg_ops = {
1780    .read_with_attrs = nvic_sysreg_read,
1781    .write_with_attrs = nvic_sysreg_write,
1782    .endianness = DEVICE_NATIVE_ENDIAN,
1783};
1784
1785static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
1786                                        uint64_t value, unsigned size,
1787                                        MemTxAttrs attrs)
1788{
1789    if (attrs.secure) {
1790        /* S accesses to the alias act like NS accesses to the real region */
1791        attrs.secure = 0;
1792        return nvic_sysreg_write(opaque, addr, value, size, attrs);
1793    } else {
1794        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
1795        if (attrs.user) {
1796            return MEMTX_ERROR;
1797        }
1798        return MEMTX_OK;
1799    }
1800}
1801
1802static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
1803                                       uint64_t *data, unsigned size,
1804                                       MemTxAttrs attrs)
1805{
1806    if (attrs.secure) {
1807        /* S accesses to the alias act like NS accesses to the real region */
1808        attrs.secure = 0;
1809        return nvic_sysreg_read(opaque, addr, data, size, attrs);
1810    } else {
1811        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
1812        if (attrs.user) {
1813            return MEMTX_ERROR;
1814        }
1815        *data = 0;
1816        return MEMTX_OK;
1817    }
1818}
1819
1820static const MemoryRegionOps nvic_sysreg_ns_ops = {
1821    .read_with_attrs = nvic_sysreg_ns_read,
1822    .write_with_attrs = nvic_sysreg_ns_write,
1823    .endianness = DEVICE_NATIVE_ENDIAN,
1824};
1825
1826static int nvic_post_load(void *opaque, int version_id)
1827{
1828    NVICState *s = opaque;
1829    unsigned i;
1830    int resetprio;
1831
1832    /* Check for out of range priority settings */
1833    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
1834
1835    if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
1836        s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
1837        s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
1838        return 1;
1839    }
1840    for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
1841        if (s->vectors[i].prio & ~0xff) {
1842            return 1;
1843        }
1844    }
1845
1846    nvic_recompute_state(s);
1847
1848    return 0;
1849}
1850
1851static const VMStateDescription vmstate_VecInfo = {
1852    .name = "armv7m_nvic_info",
1853    .version_id = 1,
1854    .minimum_version_id = 1,
1855    .fields = (VMStateField[]) {
1856        VMSTATE_INT16(prio, VecInfo),
1857        VMSTATE_UINT8(enabled, VecInfo),
1858        VMSTATE_UINT8(pending, VecInfo),
1859        VMSTATE_UINT8(active, VecInfo),
1860        VMSTATE_UINT8(level, VecInfo),
1861        VMSTATE_END_OF_LIST()
1862    }
1863};
1864
1865static bool nvic_security_needed(void *opaque)
1866{
1867    NVICState *s = opaque;
1868
1869    return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
1870}
1871
1872static int nvic_security_post_load(void *opaque, int version_id)
1873{
1874    NVICState *s = opaque;
1875    int i;
1876
1877    /* Check for out of range priority settings */
1878    if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
1879        && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
1880        /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
1881         * if the CPU state has been migrated yet; a mismatch won't
1882         * cause the emulation to blow up, though.
1883         */
1884        return 1;
1885    }
1886    for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
1887        if (s->sec_vectors[i].prio & ~0xff) {
1888            return 1;
1889        }
1890    }
1891    return 0;
1892}
1893
1894static const VMStateDescription vmstate_nvic_security = {
1895    .name = "nvic/m-security",
1896    .version_id = 1,
1897    .minimum_version_id = 1,
1898    .needed = nvic_security_needed,
1899    .post_load = &nvic_security_post_load,
1900    .fields = (VMStateField[]) {
1901        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
1902                             vmstate_VecInfo, VecInfo),
1903        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
1904        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
1905        VMSTATE_END_OF_LIST()
1906    }
1907};
1908
1909static const VMStateDescription vmstate_nvic = {
1910    .name = "armv7m_nvic",
1911    .version_id = 4,
1912    .minimum_version_id = 4,
1913    .post_load = &nvic_post_load,
1914    .fields = (VMStateField[]) {
1915        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
1916                             vmstate_VecInfo, VecInfo),
1917        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
1918        VMSTATE_END_OF_LIST()
1919    },
1920    .subsections = (const VMStateDescription*[]) {
1921        &vmstate_nvic_security,
1922        NULL
1923    }
1924};
1925
1926static Property props_nvic[] = {
1927    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
1928    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
1929    DEFINE_PROP_END_OF_LIST()
1930};
1931
1932static void armv7m_nvic_reset(DeviceState *dev)
1933{
1934    int resetprio;
1935    NVICState *s = NVIC(dev);
1936
1937    memset(s->vectors, 0, sizeof(s->vectors));
1938    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
1939    s->prigroup[M_REG_NS] = 0;
1940    s->prigroup[M_REG_S] = 0;
1941
1942    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
1943    /* MEM, BUS, and USAGE are enabled through
1944     * the System Handler Control register
1945     */
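        /* (that is SHCSR, offset 0xd24, which is handled in nvic_writel()) */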
1946    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
1947    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 1;
1948    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
1949    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
1950
1951    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
1952    s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
1953    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
1954    s->vectors[ARMV7M_EXCP_HARD].prio = -1;
1955
1956    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
1957        s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
1958        s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
1959        s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
1960        s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
1961
1962        /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
1963        s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
1964        /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
1965        s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1966    } else {
1967        s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
1968    }
1969
1970    /* Strictly speaking the reset handler should be enabled.
1971     * However, we don't simulate soft resets through the NVIC,
1972     * and the reset vector should never be pended.
1973     * So we leave it disabled to catch logic errors.
1974     */
1975
1976    s->exception_prio = NVIC_NOEXC_PRIO;
1977    s->vectpending = 0;
1978    s->vectpending_is_s_banked = false;
1979    s->vectpending_prio = NVIC_NOEXC_PRIO;
1980
1981    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
1982        memset(s->itns, 0, sizeof(s->itns));
1983    } else {
1984        /* This state is constant and not guest accessible in a non-security
1985         * NVIC; we set the bits to true to avoid having to do a feature
1986         * bit check in the NVIC enable/pend/etc register accessors.
1987         */
1988        int i;
1989
1990        for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
1991            s->itns[i] = true;
1992        }
1993    }
1994}
1995
1996static void nvic_systick_trigger(void *opaque, int n, int level)
1997{
1998    NVICState *s = opaque;
1999
2000    if (level) {
2001        /* SysTick just asked us to pend its exception.
2002         * (This is different from an external interrupt line's
2003         * behaviour.)
2004         * TODO: when we implement the banked systicks we must make
2005         * this pend the correct banked exception.
2006         */
2007        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, false);
2008    }
2009}
2010
2011static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
2012{
2013    NVICState *s = NVIC(dev);
2014    SysBusDevice *systick_sbd;
2015    Error *err = NULL;
2016    int regionlen;
2017
2018    s->cpu = ARM_CPU(qemu_get_cpu(0));
2019    assert(s->cpu);
2020
2021    if (s->num_irq > NVIC_MAX_IRQ) {
2022        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
2023        return;
2024    }
2025
2026    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);
2027
2028    /* include space for internal exception vectors */
2029    s->num_irq += NVIC_FIRST_IRQ;
2030
2031    object_property_set_bool(OBJECT(&s->systick), true, "realized", &err);
2032    if (err != NULL) {
2033        error_propagate(errp, err);
2034        return;
2035    }
2036    systick_sbd = SYS_BUS_DEVICE(&s->systick);
2037    sysbus_connect_irq(systick_sbd, 0,
2038                       qdev_get_gpio_in_named(dev, "systick-trigger", 0));
2039
2040    /* The NVIC and System Control Space (SCS) starts at 0xe000e000
2041     * and looks like this:
2042     *  0x004 - ICTR
2043     *  0x010 - 0xff - systick
2044     *  0x100..0x7ec - NVIC
2045     *  0x7f0..0xcff - Reserved
2046     *  0xd00..0xd3c - SCS registers
2047     *  0xd40..0xeff - Reserved or Not implemented
2048     *  0xf00 - STIR
2049     *
2050     * Some registers within this space are banked between security states.
2051     * In v8M there is a second range 0xe002e000..0xe002efff which is the
2052     * NonSecure alias SCS; secure accesses to this behave like NS accesses
2053     * to the main SCS range, and non-secure accesses (including when
2054     * the security extension is not implemented) are RAZ/WI.
2055     * Note that both the main SCS range and the alias range are defined
2056     * to be exempt from memory attribution (R_BLJT) and so the memory
2057     * transaction attribute always matches the current CPU security
2058     * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops
2059     * wrappers we change attrs.secure to indicate the NS access; so
2060     * generally code determining which banked register to use should
2061     * use attrs.secure; code determining actual behaviour of the system
2062     * should use env->v7m.secure.
2063     */
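        /* For example, a Secure access to 0xe002ed0c (AIRCR through the NS
         * alias) is routed to nvic_sysreg_ns_read/write(), which clears
         * attrs.secure and forwards to the main handlers, so it behaves
         * like a Non-secure access to 0xe000ed0c; a Non-secure access to
         * the alias is RAZ/WI when privileged and a BusFault when not.
         */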
2064    regionlen = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? 0x21000 : 0x1000;
2065    memory_region_init(&s->container, OBJECT(s), "nvic", regionlen);
2066    /* The system register region goes at the bottom of the priority
2067     * stack as it covers the whole page.
2068     */
2069    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
2070                          "nvic_sysregs", 0x1000);
2071    memory_region_add_subregion(&s->container, 0, &s->sysregmem);
2072    memory_region_add_subregion_overlap(&s->container, 0x10,
2073                                        sysbus_mmio_get_region(systick_sbd, 0),
2074                                        1);
2075
2076    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
2077        memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
2078                              &nvic_sysreg_ns_ops, s,
2079                              "nvic_sysregs_ns", 0x1000);
2080        memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
2081    }
2082
2083    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
2084}
2085
2086static void armv7m_nvic_instance_init(Object *obj)
2087{
2088    /* The num-irq property default is set from the Property array by
2089     * qdev; nothing about it needs adjusting here. This function only
2090     * creates the embedded SysTick device and sets up our excpout IRQ,
2091     * SYSRESETREQ output and systick-trigger input; the MMIO regions and
2092     * everything that depends on num-irq are set up at realize time.
2093     */
2094    DeviceState *dev = DEVICE(obj);
2095    NVICState *nvic = NVIC(obj);
2096    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
2097
2098    object_initialize(&nvic->systick, sizeof(nvic->systick), TYPE_SYSTICK);
2099    qdev_set_parent_bus(DEVICE(&nvic->systick), sysbus_get_default());
2100
2101    sysbus_init_irq(sbd, &nvic->excpout);
2102    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
2103    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", 1);
2104}
2105
2106static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
2107{
2108    DeviceClass *dc = DEVICE_CLASS(klass);
2109
2110    dc->vmsd  = &vmstate_nvic;
2111    dc->props = props_nvic;
2112    dc->reset = armv7m_nvic_reset;
2113    dc->realize = armv7m_nvic_realize;
2114}
2115
2116static const TypeInfo armv7m_nvic_info = {
2117    .name          = TYPE_NVIC,
2118    .parent        = TYPE_SYS_BUS_DEVICE,
2119    .instance_init = armv7m_nvic_instance_init,
2120    .instance_size = sizeof(NVICState),
2121    .class_init    = armv7m_nvic_class_init,
2122    .class_size    = sizeof(SysBusDeviceClass),
2123};
2124
2125static void armv7m_nvic_register_types(void)
2126{
2127    type_register_static(&armv7m_nvic_info);
2128}
2129
2130type_init(armv7m_nvic_register_types)
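    /* Illustrative usage sketch (assumptions, not taken from this file):
     * boards normally get their NVIC via the "armv7m" container object in
     * hw/arm/armv7m.c rather than instantiating TYPE_NVIC directly, but a
     * bare qdev instantiation would look roughly like:
     *
     *   DeviceState *nvic = qdev_create(NULL, TYPE_NVIC);
     *   qdev_prop_set_uint32(nvic, "num-irq", 96);
     *   qdev_init_nofail(nvic);
     *   sysbus_mmio_map(SYS_BUS_DEVICE(nvic), 0, 0xe000e000);
     *   qemu_irq line5 = qdev_get_gpio_in(nvic, 5);  // external IRQ 5
     *
     * (realize looks up qemu_get_cpu(0), so the CPU must already exist.)
     */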
2131